# create plot as in Figure 4d to study joint occupancy at neighboring CTCF sites
# ## <NAME>
# ## 06.30.21
import sys
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pysam
import multiprocessing
from joblib import Parallel, delayed
from pybedtools import BedTool
from matplotlib import colors
from sklearn.cluster import KMeans
from mpl_toolkits.axes_grid1 import make_axes_locatable
class Region(object):
def __init__(self, region):
self.chromosome = region[1][0]
        # now begin and end correspond to motif begin and end
self.begin = region[1][1]
self.end = region[1][2]
self.size = self.end - self.begin
self.string = f"{self.chromosome}_{self.begin}_{self.end}"
# additional info to store about CTCF regions
self.strand = region[1][3]
self.peak_strength = region[1][4]
class Methylation(object):
def __init__(self, table, data_type, name, called_sites):
self.table = table
self.data_type = data_type
self.name = name
self.called_sites = called_sites
def make_windows(bed):
reg = []
for row in bed.iterrows():
reg.append(Region(row))
return reg
def extract_peak_pairs(bed, min_dist, max_dist, out):
b = BedTool(bed)
    b.sort().saveas(out + '/tmp.sorted.bed')
bed = pd.read_csv(out + '/tmp.sorted.bed', sep='\t', header=None)
# find middle
left_keep = []
right_keep = []
bed['middle'] = (bed[1] + bed[2]) / 2
for i in range(0, len(bed) - 1):
if (bed.iloc[i+1].middle - bed.iloc[i].middle >= min_dist) & (bed.iloc[i+1].middle - bed.iloc[i].middle <= max_dist):
left_keep.append(i)
right_keep.append(i+1)
return bed.iloc[left_keep], bed.iloc[right_keep]
def get_data(bam, peak_left, peak_right, num_cores):
meth_data = Parallel(n_jobs=num_cores)(delayed(parse_ont_bam)(bam, w1, w2) for w1, w2 in zip(peak_left, peak_right))
return meth_data
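# Hypothetical usage sketch (file names and distance bounds are placeholders):
# pair adjacent CTCF peaks, wrap each side as Region objects, then extract
# spanning-read methylation in parallel.
#   left, right = extract_peak_pairs('peaks.bed', min_dist=2000, max_dist=10000, out='.')
#   peak_left, peak_right = make_windows(left), make_windows(right)
#   meth = get_data('mod_mappings.sorted.bam', peak_left, peak_right, num_cores=4)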
def parse_ont_bam(filename, w1, w2):
bam = pysam.AlignmentFile(filename, "rb")
data = []
    reads1 = bam.fetch(reference=w1.chromosome, start=w1.begin, end=w1.end)
    # fetch() returns a one-shot iterator, so collect the names of reads over the
    # second site into a set first; 'read in reads2' would exhaust the iterator
    # and compare read objects rather than alignments
    names2 = {r.query_name for r in bam.fetch(reference=w2.chromosome, start=w2.begin, end=w2.end)}
    for read in reads1:
        # only add reads that span both sites
        # include unique identifier that is w1-w2-read_name
        if read.query_name in names2:
[(mod, positionsL, positionsR, quals), (mod2, positions2L, positions2R, quals2)] = get_modified_reference_positions(read, w1, w2)
for posL, posR, qual in zip(positionsL, positionsR, quals):
if posL is not None:
if abs(posL) <= 1000:
data.append((read.query_name,
'-' if read.is_reverse else '+',
posL,
qual,
mod,
w1.peak_strength,
w1.string + '-' + w2.string + '-' + read.query_name))
if abs(posR) <= 1000:
posR_adj = posR + 3000
data.append((read.query_name,
'-' if read.is_reverse else '+',
posR_adj,
qual,
mod,
w2.peak_strength,
w1.string + '-' + w2.string + '-' + read.query_name))
for posL, posR, qual in zip(positions2L, positions2R, quals2):
if posL is not None:
if abs(posL) <= 1000:
data.append((read.query_name,
'-' if read.is_reverse else '+',
posL,
qual,
mod2,
w1.peak_strength,
w1.string + '-' + w2.string + '-' + read.query_name))
if abs(posR) <= 1000:
posR_adj = posR + 3000
data.append((read.query_name,
'-' if read.is_reverse else '+',
posR_adj,
qual,
mod2,
w2.peak_strength,
w1.string + '-' + w2.string + '-' + read.query_name))
data_return = Methylation(
table=pd.DataFrame(data, columns=['read_name', 'strand', 'pos', 'quality', 'mod', 'peak_strength', 'id'])
            .astype(dtype={'mod': 'category', 'pos': 'int16', 'quality': 'int16'})
.sort_values(['read_name', 'pos']),
data_type="ont-bam",
name='double peaks',
called_sites=len(data))
return data_return
def get_modified_reference_positions(read, w1, w2):
if (read.has_tag('Mm')) & (';' in read.get_tag('Mm')):
mod1 = read.get_tag('Mm').split(';')[0].split(',', 1)[0]
mod2 = read.get_tag('Mm').split(';')[1].split(',', 1)[0]
mod1_list = read.get_tag('Mm').split(';')[0].split(',', 1)
mod2_list = read.get_tag('Mm').split(';')[1].split(',', 1)
if len(mod1_list) > 1:
mod1_return = get_pos_prob(read, mod1, 0, w1, w2)
else:
mod1_return = (None, [None], [None], [None])
if len(mod2_list) > 1:
mod2_return = get_pos_prob(read, mod2, 1, w1, w2)
return (mod1_return, mod2_return)
else:
return (mod1_return, (None, [None], [None], [None]))
else:
return ((None, [None], [None], [None]), (None, [None], [None], [None]))
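# For reference, the Mm/Ml tags follow the SAM modified-base convention, e.g.
# Mm:Z:A+a,5,12; means: skip 5 A's and call m6A on the 6th, then skip 12 more
# A's and call the next one; the matching per-call 0-255 probabilities live in Ml.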
def get_pos_prob(read, basemod, index, w1, w2):
if '-' in basemod:
sys.exit("ERROR: modifications on negative strand currently unsupported.")
if 'A' not in basemod:
if 'C' not in basemod:
            return (None, [None], [None], [None])  # match the 4-tuple returned on success
base, mod = basemod.split('+')
deltas = [int(i) for i in read.get_tag('Mm').split(';')[index].split(',')[1:]]
num_base = len(read.get_tag('Mm').split(';')[index].split(','))-1
    Ml = read.get_tag('Ml')
    if index == 0:
        probabilities = np.array(Ml[0:num_base], dtype=int)
    if index == 1:
        probabilities = np.array(Ml[-num_base:], dtype=int)
base_index = np.array([i for i, letter in enumerate(read.get_forward_sequence()) if letter == base])
# determine locations of the modified bases, where index_adj is the adjustment of the base_index
# based on the cumulative sum of the deltas
locations = np.cumsum(deltas)
    # loop through locations and increment index_adj by the difference between the next location and the current one, plus 1;
    # if the difference is zero, the adjustment is incremented by just one because no base is skipped
index_adj = []
index_adj.append(locations[0])
for i in range(len(locations) - 1):
diff = locations[i+1] - locations[i]
index_adj.append(index_adj[i] + diff + 1)
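    # worked example: deltas [2, 0, 3] -> locations [2, 2, 5] -> index_adj [2, 3, 7],
    # i.e. the 3rd, 4th, and 8th occurrences of the base (0-indexed) carry calls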
# get the indices of the modified bases
modified_bases = base_index[index_adj]
refpos = np.array(read.get_reference_positions(full_length=True))
if read.is_reverse:
refpos = np.flipud(refpos)
# probabilities = probabilities[::-1]
# extract CpG sites only rather than all mC
keep = []
prob_keep = []
i = 0
seq = read.get_forward_sequence()
# deal with None for refpos from soft clipped / unaligned bases
    if 'C' in basemod:
for m in modified_bases:
if m < len(seq) - 1: # if modified C is not the last base in the read
if (refpos[m] is not None) & (refpos[m+1] is not None):
if seq[m + 1] == 'G':
if abs(refpos[m+1] - refpos[m]) == 1: # ensure there isn't a gap
keep.append(m)
prob_keep.append(i)
i=i+1
# for m6A no need to look at neighboring base; do need to remove refpos that are None
else:
for m in modified_bases:
if refpos[m] is not None:
keep.append(m)
prob_keep.append(i)
i=i+1
# adjust position and return both left anchored and right anchored positions
# account for strand of motif and flip pos if on - strand
if (w1.strand == '+'):
w1_pos = np.array(refpos[keep]) - round(((w1.end-w1.begin)/2 + w1.begin))
if (w1.strand == '-'):
w1_pos = -1*(np.array(refpos[keep]) - round(((w1.end-w1.begin)/2 + w1.begin)))
if (w2.strand == '+'):
w2_pos = np.array(refpos[keep]) - round(((w2.end-w2.begin)/2 + w2.begin))
if (w2.strand == '-'):
w2_pos = -1*(np.array(refpos[keep]) - round(((w2.end-w2.begin)/2 + w2.begin)))
return (basemod, w1_pos, w2_pos, probabilities[prob_keep])
# note: positions are returned relative to each motif center
def bin_qualities(all_data, mod):
    '''
    bin the data using a sliding window of A's across the read and
    calculate the probability that at least one base in the window is methylated
    '''
    all_data_mod = all_data[all_data['mod'].str.contains(mod)].copy()  # copy so the .loc assignments below modify this frame, not a view
    all_data_mod.loc[:, 'quality'] = all_data_mod['quality'] / 255  # scale 0-255 Ml values to probabilities
read_ids = all_data_mod['id'].unique()
for r in read_ids:
subset = all_data_mod[all_data_mod['id'] == r]
qualities = subset['quality']
        binned_qualities = qualities.rolling(window=20, center=True).apply(prob_bin)
all_data_mod.loc[all_data_mod['id'] == r,'quality'] = binned_qualities
return all_data_mod
def prob_bin(window):
    # probability a base in the window is methylated, computed by
    # calculating the probability that no base in the window is methylated and taking the complement
    # treat p=1 as 254/255 to prevent log(0)
    probs = [np.log(1-p) for p in window if ((p < 1) and (p >= 0.5))] # only consider probabilities >= 0.5; handle p == 1 below
    probs1 = [np.log(1-254/255) for p in window if p == 1]
probsAll = probs + probs1
prob = 1 - np.exp(sum(probsAll))
return prob
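# Worked example with hypothetical values: a window with probabilities
# [0.6, 0.9] gives prob_bin = 1 - (1 - 0.6) * (1 - 0.9) = 0.96, the chance
# that at least one base in the window is methylated.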
def make_cluster_plot(all_data, all_data_C, thresh, out, name):
print(name + ' A: ' + str(all_data.shape[0]))
all_data_t = all_data[all_data['quality'] > thresh]
    # require a quality > thresh call within 100 bp of either peak center
    peak = all_data_t[abs(all_data_t['pos']) <= 100]  # abs() must wrap the column, not the boolean comparison
    peak2 = all_data_t[(all_data_t['pos'] > 2900) & (all_data_t['pos'] < 3100)]  # right peak center sits at 3000 after the +3000 shift
peak_ids = peak['id'].unique()
peak_ids2 = peak2['id'].unique()
boolean_keep_series = (all_data_t.id.isin(peak_ids) | all_data_t.id.isin(peak_ids2)) #reads_keep
all_data_t_p = all_data_t[boolean_keep_series]
print(name + ' mA: ' + str(all_data_t.shape[0]))
print(name + ' mA with peak: ' + str(all_data_t_p.shape[0]))
    all_data_pivoted = pd.pivot_table(all_data_t_p, values='quality', columns='pos', index='id')
r = range(-1000, 4000+1, 1)
for bp in r:
if bp not in all_data_pivoted.columns:
all_data_pivoted[bp] = np.nan
all_data_pivoted = all_data_pivoted.sort_index(axis=1)
all_data_pivoted_0 = all_data_pivoted.fillna(0)
cmapA = colors.LinearSegmentedColormap.from_list('custom A', ['white', '#053C5E'], N=255)
    # smooth each read's row with a rolling mean; build with pd.concat since DataFrame.append is deprecated
    rolled_rows = [all_data_pivoted_0.iloc[i, :].rolling(window=5).mean() for i in range(all_data_pivoted_0.shape[0])]
    all_data_pivoted_mod_0_rolling = pd.concat(rolled_rows, axis=1).T
all_data_pivoted_mod_0_rolling_0 = all_data_pivoted_mod_0_rolling.fillna(0)
k = KMeans(n_clusters=4, random_state=1) # try different numbers of clusters #2
k.fit(all_data_pivoted_mod_0_rolling_0)
# sort by left peak signal strength after labels
all_data_pivoted_0['left_sum'] = 0
subset_left_peak = all_data_pivoted_0.iloc[:,900:1100]
for idx, row in subset_left_peak.iterrows():
left_sum = row.sum()
all_data_pivoted_0.loc[idx, 'left_sum'] = left_sum
all_data_pivoted_0['labels'] = k.labels_
all_data_pivoted_0 = all_data_pivoted_0.sort_values(by=['labels', 'left_sum'], axis=0, ascending=False)
to_plot = all_data_pivoted_0
to_plot_2 = to_plot.loc[:, (to_plot.columns != 'labels') & (to_plot.columns != 'left_sum')]
fig=plt.figure()
g = sns.heatmap(to_plot_2, cmap=cmapA, xticklabels=False, yticklabels=False,
cbar_kws = dict(use_gridspec=False,location="top"))
plt.show()
fig.savefig(out + '/' + name + '_cluster_double_peak.4.rolling.05.thresh90.w20.peak.noSmooth.png', dpi=500)
# also plot 1D heatmap with the peak signal strength in order of reads shown
# updated to purple
cmapYellow = colors.LinearSegmentedColormap.from_list('custom yellow', ['white', '#610345'], N=200)
ordered_read_ids = to_plot.index.values
print('all clusters: ' + str(len(all_data_pivoted_mod_0_rolling_0.index.values)))
w1_signal_strength = []
w2_signal_strength = []
for r in ordered_read_ids:
read_data = all_data[all_data['id'] == r]
w1_peak = read_data[read_data['pos'] <= 1000]['peak_strength'].unique()
w2_peak = read_data[read_data['pos'] >= 2000]['peak_strength'].unique()
w1_signal_strength.append(w1_peak[0])
w2_signal_strength.append(w2_peak[0])
fig = plt.figure()
x = np.linspace(0, len(ordered_read_ids), num=len(ordered_read_ids))
y = pd.Series(w1_signal_strength)
fig, (ax,ax2) = plt.subplots(nrows=2, sharex=True)
extent = [x[0]-(x[1]-x[0])/2., x[-1]+(x[1]-x[0])/2.,0,1]
im = ax.imshow(y[np.newaxis,:], cmap=cmapYellow, aspect="auto", extent=extent)
divider = make_axes_locatable(ax)
cax = divider.append_axes('top', size='5%', pad=0.25)
fig.colorbar(im, cax=cax, orientation='horizontal')
ax.set_yticks([])
ax.set_xlim(extent[0], extent[1])
ax2.plot(x,y,'o',ms=0.5,color='#2D1E2F')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.get_xaxis().set_ticks([])
plt.tight_layout()
plt.show()
fig.savefig(out + '/' + name + '_peak_signal_w1_t90.png', dpi=300)
# window 2
# from positions 2000-4000
fig = plt.figure()
x = np.linspace(0, len(ordered_read_ids), num=len(ordered_read_ids))
y = pd.Series(w2_signal_strength)
fig, (ax,ax2) = plt.subplots(nrows=2, sharex=True)
extent = [x[0]-(x[1]-x[0])/2., x[-1]+(x[1]-x[0])/2.,0,1]
im = ax.imshow(y[np.newaxis,:], cmap=cmapYellow, aspect="auto", extent=extent)
divider = make_axes_locatable(ax)
cax = divider.append_axes('top', size='5%', pad=0.25)
fig.colorbar(im, cax=cax, orientation='horizontal')
ax.set_yticks([])
ax.set_xlim(extent[0], extent[1])
ax2.plot(x,y,'o',ms=0.5,color='#2D1E2F')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.get_xaxis().set_ticks([])
plt.tight_layout()
plt.show()
fig.savefig(out + '/' + name + '_peak_signal_w2_t90.png', dpi=300)
C_data_plot = pd.DataFrame()
all_data_C_t = all_data_C[all_data_C['quality'] > thresh]
id_list = []
quality_list = []
pos_list = []
for r in ordered_read_ids:
        sub = all_data_C_t[all_data_C_t['id'] == r]
        if sub.empty:  # a boolean filter never yields None; check for an empty frame instead
            continue
        else:
id_list.append(sub['id'].values.tolist())
quality_list.append(sub['quality'].values.tolist())
pos_list.append(sub['pos'].values.tolist())
id_flat_list = [item for sublist in id_list for item in sublist]
quality_flat_list = [item for sublist in quality_list for item in sublist]
pos_flat_list = [item for sublist in pos_list for item in sublist]
C_data_plot = pd.DataFrame({'id': id_flat_list, 'quality': quality_flat_list, 'pos': pos_flat_list})
C_data_plot_pivoted = pd.pivot_table(C_data_plot, values = 'quality', columns = 'pos', index='id')
r = range(-1000, 4000+1, 1)
for bp in r:
if bp not in C_data_plot_pivoted.columns:
C_data_plot_pivoted[bp] = np.nan
C_data_plot_pivoted = C_data_plot_pivoted.sort_index(axis=1)
C_data_plot_pivoted_0 = C_data_plot_pivoted.fillna(0)
    cmapC = colors.LinearSegmentedColormap.from_list('custom C', ['white', '#BB4430'], N=255)
fig=plt.figure()
g = sns.heatmap(C_data_plot_pivoted_0, cmap=cmapC, xticklabels=False, yticklabels=False,
cbar_kws = dict(use_gridspec=False,location="top"))
plt.show()
fig.savefig(out + '/' + name + '_cluster_double_peak.4.rolling.05.thresh90.w20.peak.noSmooth.C.png', dpi=500)
def main():
bams = ["prod_ctcf_mod_mappings_merge.sorted.bam",
"prod_free_Hia5_mod_mappings.sorted.bam",
"prod_IgG_mod_mappings.sorted.bam",
"prod_untreated_mod_mappings.sorted.bam"]
names = ['CTCF', 'Hia5', 'IgG', 'untreated']
bed = "intersection.motifs.chip.formatted.chm13.bed"
top_bed_path = "top_ctcf_peaks_motif.chm13.bed"
    out
import os
import sys
import json
import torch
import random
import inspect
import requests
import argparse
import importlib
import numpy as np
from pathlib import Path
from itertools import product
from tqdm import tqdm
from PyInquirer import prompt, Separator, Validator, ValidationError
DATA_FOLDER = 'data/'
class FastaValidator(Validator):
def validate(self, document):
if(type(document) == str):
text = document
else:
text = document.text
data_folder = os.path.abspath(DATA_FOLDER)
my_file = Path(text)
if not my_file.is_file():
raise ValidationError(
message='Please enter a valid file',
cursor_position=len(text))
elif my_file.suffix not in ['.fa', '.fna', '.fsa', '.fasta']:
raise ValidationError(
message='Please enter a FASTA file. Extensions: .fa .fna .fsa .fasta',
cursor_position=len(text))
elif not str(os.path.dirname(my_file.absolute())) == data_folder:
raise ValidationError(
message='Make sure your data is in the correct folder',
cursor_position=len(text)
)
class AnnotationValidator(Validator):
def validate(self, document):
if(type(document) == str):
text = document
else:
text = document.text
data_folder = os.path.abspath(DATA_FOLDER)
my_file = Path(text)
if not my_file.is_file():
raise ValidationError(
message='Please enter a valid file',
cursor_position=len(text))
elif my_file.suffix not in ['.sga', '.bed']:
raise ValidationError(
message='Please enter an annotation file. Extensions: .sga and .bed',
cursor_position=len(text))
elif not str(os.path.dirname(my_file.absolute())) == data_folder:
raise ValidationError(
message='Make sure your data is in the correct folder',
cursor_position=len(text)
)
class FolderValidator(Validator):
def validate(self, document):
if(type(document) == str):
text = document
else:
text = document.text
my_folder = Path(text)
if my_folder.exists():
if not my_folder.is_dir():
raise ValidationError(
message='Please enter a valid folder',
cursor_position=len(text))
else:
os.makedirs(my_folder)
class IsFileValidator(Validator):
def validate(self, document):
if(type(document) == str):
text = document
else:
text = document.text
my_file = Path(text)
if not my_file.exists():
raise ValidationError(
message='Please enter a file that exists',
cursor_position=len(text))
class FileExistsValidator(Validator):
def validate(self, document):
if(type(document) == str):
text = document
else:
text = document.text
my_file = Path(text)
if my_file.exists():
raise ValidationError(
message='Please enter a file that does not exist',
cursor_position=len(text))
class ConfigValidator(Validator):
def validate(self, document):
if(type(document) == str):
text = document
else:
text = document.text
config = Path(text)
if config.is_file():
if not config.suffix == '.json':
raise ValidationError(
message='Please enter a configuration file. Extension: .json',
cursor_position=len(text))
else:
raise ValidationError(
message='Please enter a valid file',
cursor_position=len(text))
class PositiveValidator(Validator):
def validate(self, document):
if(type(document) == int):
text = str(document)
elif(type(document) == float):
text = str(document)
elif(type(document) == str):
text = document
else:
text = document.text
        try:
            value = float(text)
        except ValueError:
            # catch only ValueError; a bare except would also swallow the ValidationError below
            raise ValidationError(
                message='Input %s was not a valid number' % text,
                cursor_position=len(text))
        if not value > 0:
            raise ValidationError(
                message='Please enter a positive number',
                cursor_position=len(text))
class SeedValidator(Validator):
def validate(self, document):
if(type(document) == int):
text = str(document)
elif(type(document) == str):
text = document
else:
text = document.text
        try:
            value = int(text)
        except ValueError:
            raise ValidationError(
                message='Seed was not a valid number',
                cursor_position=len(text))
        if not value >= 0:
            raise ValidationError(
                message='Please enter a positive number or use 0 to generate a random seed',
                cursor_position=len(text))
class FunctionValidator(Validator):
def validate(self, document):
if(type(document) == str):
text = document
else:
text = document.text
        if text not in Helper._NN_OUTPUT_FUNCTIONS:
            raise ValidationError(
                message=f'Please enter a valid output function. Functions: {Helper._NN_OUTPUT_FUNCTIONS}',
                cursor_position=len(text))
class ModuleValidator(Validator):
def validate(self, document):
if(type(document) == str):
text = document
else:
text = document.text
        if text not in Helper._NN_MODULES:
            raise ValidationError(
                message=f'Please enter a valid module. Modules: {Helper._NN_MODULES}',
                cursor_position=len(text))
class OptimizerValidator(Validator):
def validate(self, document):
if(type(document) == str):
text = document
else:
text = document.text
        if text not in Helper._NN_OPTIMIZERS:
            raise ValidationError(
                message=f'Please enter a valid optimizer. Optimizers: {Helper._NN_OPTIMIZERS}',
                cursor_position=len(text))
class Helper:
DNA_DICT = {}
_k = 1
CONF_DICT: dict
_FILE_TYPES = ['sld', 'fasta', 'kmer']
_EXPERIMENT_CONFIGURATION_FILENAME = 'ExperimentConfiguration.json'
_EXPERIMENT_MODULE_ARGS_FILENAME = 'module.args'
_EXPERIMENT_OPTIMIZER_ARGS_FILENAME = 'optimizer.args'
_NN_OUTPUT_FUNCTIONS = ['sigmoid', 'softmax']
_NN_MODULES: dict
_NN_MODULE_ARGS = {}
_NN_OPTIMIZER_ARGS = {}
_NN_OPTIMIZERS = ['Adam', 'Adadelta', 'Adagrad', 'AdamW', 'SparseAdam', 'Adamax', 'ASGD', 'LBFGS', 'NAdam', 'RAdam', 'RMSprop', 'Rprop', 'SGD'] # Algorithms: https://pytorch.org/docs/stable/optim.html
_F5_SPECIES = ['human', 'mouse', 'rat', 'dog', 'chicken', 'rhesus']
_EPD_DATABASES = ['human', 'human_nc', 'M_mulatta', 'mouse', 'mouse_nc', 'R_norvegicus', 'C_familiaris', 'G_gallus', 'drosophila', 'A_mellifera', 'zebrafish', 'worm', 'arabidopsis', 'Z_mays', 'S_cerevisiae', 'S_pombe', 'P_falciparum']
_EPD_TATA_FILTERS = ['all', 'with', 'without']
_DATA_FOLDER = 'data/'
_DICTKEY_NN_OUTPUT_FUNCTION = 'function'
_DICTKEY_NN_MODULE = 'module'
_DICTKEY_NN_TRAIN_ARGS = 'training_arguments'
_DICTKEY_NN_MAX_EPOCHS = 'max_epochs'
_DICTKEY_NN_PATIENCE = 'patience'
_DICTKEY_NN_LEARNING_RATE = 'learning_rate'
_DICTKEY_NN_BATCH_SIZE = 'batch_size'
_DICTKEY_NN_OPTIMIZER = 'optimizer'
_DICTKEY_INPUT_FILE = 'input'
_DICTKEY_OUTPUT_FILE = 'output'
_DICTKEY_ANNOTATIONS = 'annotations'
_DICTKEY_CONFIGURATION = 'dataset_configuration'
_DICTKEY_DATASET_TYPE = 'dataset'
_DICTKEY_EXPERIMENT_FOLDER = 'experiment'
_DICTKEY_SEQ_UPSTREAM_LEN = 'sequence_upstream_length'
_DICTKEY_SEQ_DOWNSTREAM_LEN = 'sequence_downstream_length'
_DICTKEY_STRIDE = 'stride'
_DICTKEY_ERROR_TYPE = 'error_type'
_DICTKEY_ERROR_MARGIN = 'error_margin'
_DICTKEY_SEED = 'seed'
_DICTKEY_F5_SPECIES = 'species'
_DICTKEY_EPD_DATABASE = 'database'
_DICTKEY_EPD_TATA_FILTER = 'tata_filter'
_DICTKEY_INPUT_TYPE = 'input_type'
_DICTKEY_OUTPUT_TYPE = 'output_type'
_overwrite = False
_maxepochs_default = 100
_batchsize_default = 16
_learningrate_default = 0.001
_patience_default = 10
_upstream_default = 1000 # promoter upstream length for the annotations
_downstream_default = 400 # promoter downstream length for the annotations
_stride_default = 50 # step size for the number of nucleotides to skip while moving the window
_error_type_default = 'tss-proximity' # error types: proximity to TSS, sequence overlap
_IO_ERROR_TYPES = ['sequence-overlap', 'tss-proximity', 'seq-proximity']
_error_margin_default = [100] # list, if 1 value then its TSS proximity, if 2 values then its sequence overlap (from upstream, from downstream)
_DATASET_CHOICES = ['IO', 'BIO', 'BME', 'CBG', 'CBPS', 'LITERATURE'] # "CBG" cluster by gene, "CBPS" cluster by promoter similarity
def __init__(self):
self.create_kmers()
self._NN_MODULES = self.get_modules()
dataset_configuration = {self._DICTKEY_SEQ_UPSTREAM_LEN: self._upstream_default,
self._DICTKEY_SEQ_DOWNSTREAM_LEN: self._downstream_default,
self._DICTKEY_STRIDE: self._stride_default,
self._DICTKEY_SEED: 0}
self.CONF_DICT = {self._DICTKEY_CONFIGURATION: dataset_configuration}
def create_kmers(self):
bases=['A','T','G','C']
for i, p in enumerate(product(bases, repeat=self._k)):
self.DNA_DICT[''.join(p)] = i
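        # With the default _k = 1 this yields {'A': 0, 'T': 1, 'G': 2, 'C': 3};
        # a larger _k enumerates all k-mers in the same product() order.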
def load_experiment(self):
experiment_folder = self.CONF_DICT[self._DICTKEY_EXPERIMENT_FOLDER]
config_file = os.path.join(experiment_folder, self._EXPERIMENT_CONFIGURATION_FILENAME)
with open(config_file) as f:
self.CONF_DICT = json.load(f)
module_args_file = os.path.join(experiment_folder, self._EXPERIMENT_MODULE_ARGS_FILENAME)
if os.path.isfile(module_args_file):
with open(module_args_file) as f:
self._NN_MODULE_ARGS = json.load(f)
self._NN_MODULE_ARGS = {f'module__{k}': v for k, v in self._NN_MODULE_ARGS.items()}
optimizer_args_file = os.path.join(experiment_folder, self._EXPERIMENT_OPTIMIZER_ARGS_FILENAME)
if os.path.isfile(optimizer_args_file):
with open(optimizer_args_file) as f:
self._NN_OPTIMIZER_ARGS = json.load(f)
self._NN_OPTIMIZER_ARGS = {f'optimizer__{k}': v for k, v in self._NN_OPTIMIZER_ARGS.items()}
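            # e.g. a module.args file of {"hidden_size": 128} becomes
            # {'module__hidden_size': 128}; the double-underscore prefix is the
            # convention skorch-style wrappers use to route arguments to the
            # module/optimizer ("hidden_size" here is just an illustrative key)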
#TODO: validate loaded json
def read_create_arguments(self):
parser = argparse.ArgumentParser(description='Create a dataset')
parser.add_argument('-i', f'--{self._DICTKEY_INPUT_FILE}', metavar='input file', required=True,
type=str, help='path to the input genome')
parser.add_argument('-a', f'--{self._DICTKEY_ANNOTATIONS}', metavar='annotations file', required=True,
type=str, help='path to the TSS annotations for the genome')
parser.add_argument('-t', f'--{self._DICTKEY_DATASET_TYPE}', metavar='dataset type', required=True,
type=str, help='Type of dataset to be created', choices=self._DATASET_CHOICES)
parser.add_argument('-c', f'--{self._DICTKEY_CONFIGURATION}', metavar='configuration file', required=False,
type=str, help='path to the configuration file. If not supplied, defaults will be used')
parser.add_argument('-e', f'--{self._DICTKEY_EXPERIMENT_FOLDER}', metavar='experiment folder', required=True,
type=str, help='path to the experiment folder')
args = parser.parse_args(sys.argv[3:])
input_file = args.__dict__[self._DICTKEY_INPUT_FILE]
FastaValidator().validate(input_file)
self.CONF_DICT[self._DICTKEY_INPUT_FILE] = os.path.relpath(input_file)
annotations_file = args.__dict__[self._DICTKEY_ANNOTATIONS]
AnnotationValidator().validate(annotations_file)
self.CONF_DICT[self._DICTKEY_ANNOTATIONS] = os.path.relpath(annotations_file)
self.validate_config(args.__dict__[self._DICTKEY_DATASET_TYPE], args.__dict__[self._DICTKEY_CONFIGURATION])
experiment_folder = args.__dict__[self._DICTKEY_EXPERIMENT_FOLDER]
FolderValidator().validate(experiment_folder)
self.CONF_DICT[self._DICTKEY_EXPERIMENT_FOLDER] = os.path.abspath(experiment_folder)
def read_train_arguments(self):
parser = argparse.ArgumentParser(description='Train an experiment')
parser.add_argument('-e', f'--{self._DICTKEY_EXPERIMENT_FOLDER}', metavar='experiment folder', required=True,
type=str, help='Path to the experiment folder')
parser.add_argument('-x', f'--{self._DICTKEY_NN_MAX_EPOCHS}', metavar='max epochs', required=False, default=self._maxepochs_default,
type=int, help='Maximum number of epochs')
parser.add_argument('-p', f'--{self._DICTKEY_NN_PATIENCE}', metavar='patience', required=False, default=self._patience_default,
type=int, help='Stop training after this many epochs without progress')
        parser.add_argument('-l', f'--{self._DICTKEY_NN_LEARNING_RATE}', metavar='learning rate', required=False, default=self._learningrate_default,
                            type=float, help='Value for the learning rate parameter')  # float, not int: the default is 0.001
parser.add_argument('-b', f'--{self._DICTKEY_NN_BATCH_SIZE}', metavar='batch size', required=False, default=self._batchsize_default,
type=int, help='Value for the batch size parameter')
parser.add_argument('-t', f'--{self._DICTKEY_NN_OPTIMIZER}', metavar='optimizer', required=False, default=self._NN_OPTIMIZERS[0],
type=str, help='Optimizer for training the neural network', choices=self._NN_OPTIMIZERS)
parser.add_argument('-f', f'--{self._DICTKEY_NN_OUTPUT_FUNCTION}', metavar='output function', required=False, default=self._NN_OUTPUT_FUNCTIONS[0],
type=str, help='Function of the last layer of the neural network', choices=self._NN_OUTPUT_FUNCTIONS)
parser.add_argument('-m', f'--{self._DICTKEY_NN_MODULE}', metavar='pytorch neural network module', required=False, default='dprom',
type=str, help='Pytorch neural network architecture module to train', choices=self._NN_MODULES)
args = parser.parse_args(sys.argv[2:])
experiment_folder = args.__dict__[self._DICTKEY_EXPERIMENT_FOLDER]
FolderValidator().validate(experiment_folder)
self.CONF_DICT[self._DICTKEY_EXPERIMENT_FOLDER] = experiment_folder
for k in [self._DICTKEY_NN_MAX_EPOCHS, self._DICTKEY_NN_PATIENCE,
self._DICTKEY_NN_LEARNING_RATE, self._DICTKEY_NN_BATCH_SIZE]:
PositiveValidator().validate(args.__dict__[k])
args.__dict__.pop(self._DICTKEY_EXPERIMENT_FOLDER)
return args.__dict__
def read_convert_arguments(self):
parser = argparse.ArgumentParser(description='File conversion system')
parser.add_argument('-i', f'--{self._DICTKEY_INPUT_FILE}', metavar='input file', required=True,
type=str, help='Path to the input file')
parser.add_argument('-o', f'--{self._DICTKEY_OUTPUT_FILE}', metavar='output file', required=True,
type=str, help='Path to save the converted file')
parser.add_argument('-t', f'--{self._DICTKEY_INPUT_TYPE}', metavar='input type', required=True, default=self._FILE_TYPES[0],
type=str, help='Type of the input file', choices=self._FILE_TYPES)
parser.add_argument('-e', f'--{self._DICTKEY_OUTPUT_TYPE}', metavar='output type', required=True, default=self._FILE_TYPES[1],
type=str, help='Type of the output file', choices=self._FILE_TYPES)
args = parser.parse_args(sys.argv[2:])
input_file = args.__dict__[self._DICTKEY_INPUT_FILE]
if input_file:
IsFileValidator().validate(input_file)
output_file = args.__dict__[self._DICTKEY_OUTPUT_FILE]
if output_file:
FileExistsValidator().validate(output_file)
input_type = args.__dict__[self._DICTKEY_INPUT_TYPE]
output_type = args.__dict__[self._DICTKEY_OUTPUT_TYPE]
if input_type == output_type:
            raise RuntimeError("No conversion necessary. Input type = Output type")  # RuntimeError takes the message positionally
self.CONF_DICT = args.__dict__
def read_epd_download_arguments(self):
parser = argparse.ArgumentParser(description='Download annotations from Eukaryotic Promoter Database (EPDnew)')
parser.add_argument('-o', f'--{self._DICTKEY_OUTPUT_FILE}', metavar='output file', required=False,
type=str, help='Path where the downloaded file will be saved')
parser.add_argument('-d', f'--{self._DICTKEY_EPD_DATABASE}', metavar='database', required=True, default=self._EPD_DATABASES[0],
type=str, help='Database (species) to query', choices=self._EPD_DATABASES)
parser.add_argument('-t', f'--{self._DICTKEY_EPD_TATA_FILTER}', metavar='TATA motif filter', required=True, default=self._EPD_TATA_FILTERS[0],
type=str, help='Filter promoters by TATA motif', choices=self._EPD_TATA_FILTERS)
args = parser.parse_args(sys.argv[3:])
output_file = args.__dict__[self._DICTKEY_OUTPUT_FILE]
if output_file:
FolderValidator().validate(os.path.dirname(output_file))
return args
def read_f5_download_arguments(self):
parser = argparse.ArgumentParser(description='Download annotations from Riken Fantom (5) project')
parser.add_argument('-o', f'--{self._DICTKEY_OUTPUT_FILE}', metavar='output file', required=False,
type=str, help='Path where the downloaded file will be saved')
        parser.add_argument('-s',
    def search_locations(self, search_token, query="name"):
"""Function to search for location data from the database based on a given search token (or from the dictionary if in local mode)
Searches based on either location name or by author name or to just retrieve all location data from the DB
By default it searches based on location name unless specified for author name or all.
Args:
search_token (str): Name of what is being searched for
query (str): Query method, i.e. search by location name or by author name (Default: location name)
Returns:
list or bool: Returns a list of location data in a dictionary/map format and returns False if it's an illegal query
"""
# Handle case of unsupported query data
if query not in self.query_types:
print("Unsupported query type {}, supported query types are {}".format(query, ",".join(self.query_types)))
return False
# List to hold all found results
found_locations = []
# Local dictionary, dev mode
if self.local:
# Traverse all location entries to search for the proper location
for entry in self.locations:
                # Case for search by location name
if query == "name":
# Soft search, check if search key appears in location name
if search_token in entry["name"]:
found_locations.append(entry)
# Case for search by users name
elif query == "author":
# Strict search, search key must be identical to current author name
if search_token == entry["author"]["name"]:
found_locations.append(entry)
# SQL DATABASE METHOD
else:
try:
# Connect to the database
con = psycopg2.connect(self.config.DATABASE_URL, sslmode='require')
# Create cursor to perform commands
cur = con.cursor()
                # Search for location based on location name (the search token is passed as a
                # bound parameter to avoid SQL injection; the table name cannot be parameterized)
                if query == "name":
                    cur.execute("SELECT name, author, x_coord, y_coord, z_coord, description FROM {} WHERE UPPER(name) LIKE UPPER(%s)".format(self.location_table_name), ('%' + search_token + '%',))
                # Search for location based on author name
                elif query == "author":
                    cur.execute("SELECT name, author, x_coord, y_coord, z_coord, description FROM {} WHERE UPPER(author) LIKE UPPER(%s)".format(self.location_table_name), ('%' + search_token + '%',))
                # Retrieve all locations
                elif query == "all":
                    cur.execute("SELECT name, author, x_coord, y_coord, z_coord, description FROM {}".format(self.location_table_name))
# Fetch all results
rows = cur.fetchall()
# Close DB connection
con.close()
except Exception as e:
print("ERROR: Unable to search for locations with key '{}' in mode '{}' due to: {}".format(search_token, query, e))
return False
# Iterate over each search result, create dict and add to list
for row in rows:
# Create new dict for the retrieved location
searched_location_data = {
"name" : self.quote_escape(row[0], escape=False),
"author" : {"name" : self.quote_escape(row[1], escape=False)},
"coords" : { "x" : row[2], "y" : row[3], "z" : row[4] },
"desc" : self.quote_escape(row[5], escape=False)
}
# Append to list
found_locations.append(searched_location_data)
return found_locations
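    # Hypothetical usage: search_locations("base") returns every location whose
    # name contains "base", e.g. [{'name': 'Home Base', 'author': {...},
    # 'coords': {...}, 'desc': ...}]; search_locations("", query="all") lists everything.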
# Function to create string of location data in a nice format
def location_str(self, location):
"""Function that creates a string representation for the given location data
Args:
location (dict): Location data that is to be stringified
Returns:
str: Returns the given location in a neat string format
"""
str = "Name: {}\n".format(location["name"])
str += "Author: {}\n".format(location["author"]["name"])
str += "Coordinates: ({}, {})\n".format(location["coords"]["x"], location["coords"]["y"])
# If z coord is specified, print it out
if location["coords"]["z"] is not None:
str += "Altitude: {}\n".format(location["coords"]["z"])
if location["desc"] is not None:
str += "Description: {}\n".format(location["desc"])
return str
# Function to create embed object for the location's information
def location_embed(self, location):
"""Function to create a discord embed object for displaying data for a single location
Args:
location (dict): Location data in dict format
Returns:
embed: Returns a discord embed object of the location's information, nicely formatted to be sent to the text channel
"""
embed = discord.Embed(
title = location["name"],
color = discord.Color.green()
)
embed.add_field(name="Author", value=location["author"]["name"], inline=False)
embed.add_field(name="Coordinates (x, y, z)", value="{}, {}, {}".format(location["coords"]["x"], location["coords"]["z"], location["coords"]["y"]), inline=False)
if location["desc"] is not None:
embed.add_field(name="Description", value=location["desc"], inline=False)
return embed
# Function to print out location data in a nice format
def short_location_str(self, location):
"""Function to create a short version of a string representation for a single location data
Args:
location (dict): Location data in dict format
Returns:
str: Returns the short string to represent the location data
"""
# Format follows as so
# <name_of_location> <author> (x, y)
str = "{:<15} {:<15} ({}, {})".format(location["name"], location["author"]["name"], location["coords"]["x"], location["coords"]["y"])
return str
# Function to return a list of location data that is currently being stored
def location_list(self):
"""Function to print the entire list of locations currently being stored in this object's list of locations
Meant to be used while in dev mode...
Returns:
str: Returns string that represents the list of all locations in the list
"""
str = "List of Registered Locations...\n\n"
for entry in self.locations:
str += self.short_location_str(entry) + "\n"
return str
# Function to return a list of locations stored within an embed
def location_list_embed(self, collection=None, search_token=None, query=None):
"""Function to create a discord embed object for displaying a list of location data that is in the database
Will create a list for all entered location data if no list, search_token and query were not provided.
If a collection of locations was provided and search token and query were provided, it creates a list of locations based on the search result
Args:
collection (list): List of location data that is pre provided (Default: None)
search_token (str): Token to base the search for locations (Default: None)
query (str): How to search for the locations, if searching (Default: None)
Returns:
embed: Returns a discord embed object of the location's information, nicely formatted to be sent to the text channel
"""
# If no location list was provided, use the entire location list
if collection is None:
if self.local:
collection = self.locations
else:
collection = self.search_locations('', query="all")
desc = "List of all registered locations..."
else:
desc = "Search results for {}: '{}'".format(query, search_token)
desc += "\n*(x, y, z) - author*"
embed = discord.Embed(
title = "Locations",
description = desc,
color = discord.Color.green()
)
# In case search for locations on DB side failed
        if collection is False:
embed.add_field(name="Error retrieving locations...", value="...", inline=False)
return embed
# Traverse each location entry and create new embed field to add into list
for entry in collection:
embed.add_field(name=entry["name"], value="({}, {}, {}) - {}".format(entry["coords"]["x"], entry["coords"]["z"], entry["coords"]["y"], entry["author"]["name"]), inline=False)
if len(collection) == 0:
embed.add_field(name="No locations found...", value="...", inline=False)
return embed
# Function to calculate the distance between two given location points
def distance(self, name1, name2):
"""Function to calculate the distance between two locations with given names
Args:
name1 (str): Name of the first location to consider in the calculation
name2 (str): Name of the second location to consider in the calculation
Returns:
bool, float or str: Returns whether the calculation was successful and the calculated distance or a message as to why the calculation failed
"""
# Retrieve both locations from database
loc1 = self.get_location_data(name1)
loc2 = self.get_location_data(name2)
# Ensure location 1 was found properly
if loc1 is None:
return False, "First location could not be found"
# Ensure location 2 was found properly
if loc2 is None:
return False, "Second location could not be found"
# Grabbing the x and y of both locations and putting them in tuples for ease of use
p1 = (loc1["coords"]["x"], loc1["coords"]["y"])
p2 = (loc2["coords"]["x"], loc2["coords"]["y"])
print("p1 - {}".format(p1))
print("p2 - {}".format(p2))
# Calculate the distance between the two points
distance = math.sqrt( ((p1[0]-p2[0])**2)+((p1[1]-p2[1])**2) )
print("Calculated distance: {}".format(distance))
return True, distance
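    # Worked example: two locations at (0, 0) and (3, 4) give
    # sqrt(3**2 + 4**2) = 5.0 units of horizontal distance.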
    def navigation(self, loc1, loc2):
        """Function to calculate the directions between two given locations
        Args:
            loc1 (dict): First location to consider in the calculation
            loc2 (dict): Second location to consider in the calculation
Returns:
bool, float or str: Returns whether the calculation was successful and the calculated angle/navigation or a message as to why the calculation failed
"""
# Grabbing the x and y of both locations and putting them in tuples for ease of use
p1 = (loc1["coords"]["x"], loc1["coords"]["y"])
p2 = (loc2["coords"]["x"], loc2["coords"]["y"])
print("p1 - {}".format(p1))
print("p2 - {}".format(p2))
# Calculate deltas between x and y coords
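        # A minimal completion sketch, assuming an atan2-based bearing in
        # degrees is what the caller expects (the exact convention is an assumption):
        #   dx, dy = p2[0] - p1[0], p2[1] - p1[1]
        #   angle = math.degrees(math.atan2(dy, dx))
        #   return True, angle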
any command line parameters.'}),
('pid', {'ptype': 'int', 'doc': 'The process ID.'}),
('time', {'ptype': 'time', 'doc': 'The start time for the process.'}),
('user', {'ptype': 'inet:user', 'doc': 'The user name of the process owner.'}),
('path', {'ptype': 'file:path', 'doc': 'The path to the executable of the process.'}),
('src:exe', {'ptype': 'file:path', 'doc': 'The executable which created the process.'}),
('src:proc', {'ptype': 'it:exec:proc', 'doc': 'The process which created the process.'}),
)),
('it:exec:pipe', {'ptype': 'guid'}, (
('proc', {'ptype': 'it:exec:proc', 'doc': 'The main process executing code that created the named pipe.'}),
('host', {'ptype': 'it:host',
'doc': 'The host running the process that created the named pipe. Typically the same host referenced in :proc, if present.'}),
('exe', {'ptype': 'file:bytes',
'doc': 'The specific file containing code that created the named pipe. May or may not be the same :exe specified in :proc, if present.'}),
('time', {'ptype': 'time', 'doc': 'The time the named pipe was created.'}),
('name', {'ptype': 'it:dev:pipe', 'doc': 'The named pipe string.'}),
)),
('it:exec:mutex', {'ptype': 'guid'}, (
('proc', {'ptype': 'it:exec:proc', 'doc': 'The main process executing code that created the mutex.'}),
('host', {'ptype': 'it:host',
'doc': 'The host running the process that created the mutex. Typically the same host referenced in :proc, if present.'}),
('exe', {'ptype': 'file:bytes',
'doc': 'The specific file containing code that created the mutex. May or may not be the same :exe specified in :proc, if present.'}),
('time', {'ptype': 'time', 'doc': 'The time the mutex was created.'}),
('name', {'ptype': 'it:dev:mutex', 'doc': 'The mutex string.'}),
)),
('it:exec:url', {'ptype': 'guid'}, (
('proc', {'ptype': 'it:exec:proc', 'doc': 'The main process executing code that requested the URL.'}),
('host', {'ptype': 'it:host',
'doc': 'The host running the process that requested the URL. Typically the same host referenced in :proc, if present.'}),
('exe', {'ptype': 'file:bytes',
'doc': 'The specific file containing code that requested the URL. May or may not be the same :exe specified in :proc, if present.'}),
('time', {'ptype': 'time', 'doc': 'The time the URL was requested.'}),
('url', {'ptype': 'inet:url', 'doc': 'The URL that was requested.'}),
('ipv4', {'ptype': 'inet:ipv4', 'doc': 'The IPv4 address of the host during URL retrieval.'}),
('ipv6', {'ptype': 'inet:ipv6', 'doc': 'The IPv6 address of the host during URL retrieval.'}),
)),
('it:exec:bind:tcp', {'ptype': 'guid'}, (
('proc', {'ptype': 'it:exec:proc', 'doc': 'The main process executing code that bound the listening TCP port.'}),
('host', {'ptype': 'it:host',
'doc': 'The host running the process that bound the port. Typically the same host referenced in :proc, if present.'}),
('exe', {'ptype': 'file:bytes',
'doc': 'The specific file containing code that bound the port. May or may not be the same :exe specified in :proc, if present.'}),
('time', {'ptype': 'time', 'doc': 'The time the port was bound.'}),
('port', {'ptype': 'inet:port', 'doc': 'The bound (listening) TCP port.'}),
('ipv4', {'ptype': 'inet:ipv4', 'doc': 'The IPv4 address specified to bind().'}),
        ('ipv6', {'ptype': 'inet:ipv6', 'doc': 'The IPv6 address specified to bind().'}),
)),
('it:exec:bind:udp', {'ptype': 'guid'}, (
('proc', {'ptype': 'it:exec:proc', 'doc': 'The main process executing code that bound the listening UDP port.'}),
('host', {'ptype': 'it:host',
'doc': 'The host running the process that bound the port. Typically the same host referenced in :proc, if present.'}),
('exe', {'ptype': 'file:bytes',
'doc': 'The specific file containing code that bound the port. May or may not be the same :exe specified in :proc, if present.'}),
('time', {'ptype': 'time', 'doc': 'The time the port was bound.'}),
('port', {'ptype': 'inet:port', 'doc': 'The bound (listening) UDP port.'}),
        ('ipv4', {'ptype': 'inet:ipv4', 'doc': 'The IPv4 address specified to bind().'}),
        ('ipv6', {'ptype': 'inet:ipv6', 'doc': 'The IPv6 address specified to bind().'}),
)),
('it:fs:file', {'ptype': 'guid'}, (
('host', {'ptype': 'it:host', 'doc': 'The host containing the file.'}),
('path', {'ptype': 'file:path', 'doc': 'The path for the file.'}),
('path:dir', {'ptype': 'file:path', 'doc': 'The parent directory of the file path (parsed from :path).', 'ro': 1}),
('path:ext', {'ptype': 'str:lwr', 'doc': 'The file extension of the file name (parsed from :path).', 'ro': 1}),
('path:base', {'ptype': 'file:base', 'doc': 'The final component of the file path (parsed from :path).', 'ro': 1}),
('file', {'ptype': 'file:bytes', 'doc': 'The file on the host.'}),
('ctime', {'ptype': 'time', 'doc': 'The file creation time.'}),
('mtime', {'ptype': 'time', 'doc': 'The file modification time.'}),
('atime', {'ptype': 'time', 'doc': 'The file access time.'}),
('user', {'ptype': 'inet:user', 'doc': 'The owner of the file.'}),
('group', {'ptype': 'inet:user', 'doc': 'The group owner of the file.'}),
)),
# FIXME seed for hex file bytes
('it:exec:file:add', {'ptype': 'guid'}, (
('proc', {'ptype': 'it:exec:proc', 'doc': 'The main process executing code that created the new file.'}),
('host', {'ptype': 'it:host',
'doc': 'The host running the process that created the new file. Typically the same host referenced in :proc, if present.'}),
('exe', {'ptype': 'file:bytes',
'doc': 'The specific file containing code that created the new file. May or may not be the same :exe specified in :proc, if present.'}),
('time', {'ptype': 'time', 'doc': 'The time the file was created.'}),
('path', {'ptype': 'file:path', 'doc': 'The path where the file was created.'}),
('path:dir', {'ptype': 'file:path', 'doc': 'The parent directory of the file path (parsed from :path).', 'ro': 1}),
('path:ext', {'ptype': 'str:lwr', 'doc': 'The file extension of the file name (parsed from :path).', 'ro': 1}),
('path:base', {'ptype': 'file:base', 'doc': 'The final component of the file path (parsed from :path).', 'ro': 1}),
('file', {'ptype': 'file:bytes', 'doc': 'The file that was created.'}),
)),
('it:exec:file:del', {'ptype': 'guid'}, (
('proc', {'ptype': 'it:exec:proc', 'doc': 'The main process executing code that deleted the file.'}),
('host', {'ptype': 'it:host',
'doc': 'The host running the process that deleted the file. Typically the same host referenced in :proc, if present.'}),
('exe', {'ptype': 'file:bytes',
         'doc': 'The specific file containing code that deleted the file. May or may not be the same :exe specified in :proc, if present.'}),
('time', {'ptype': 'time', 'doc': 'The time the file was deleted.'}),
('path', {'ptype': 'file:path', 'doc': 'The path where the file was deleted.'}),
('path:dir', {'ptype': 'file:path', 'doc': 'The parent directory of the file path (parsed from :path).', 'ro': 1}),
('path:ext', {'ptype': 'str:lwr', 'doc': 'The file extension of the file name (parsed from :path).', 'ro': 1}),
('path:base', {'ptype': 'file:base', 'doc': 'The final component of the file path (parsed from :path).', 'ro': 1}),
('file', {'ptype': 'file:bytes', 'doc': 'The file that was deleted.'}),
)),
('it:exec:file:read', {'ptype': 'guid'}, (
('proc', {'ptype': 'it:exec:proc', 'doc': 'The main process executing code that read the file.'}),
('host', {'ptype': 'it:host',
'doc': 'The host running the process that read the file. Typically the same host referenced in :proc, if present.'}),
('exe', {'ptype': 'file:bytes',
'doc': 'The specific file containing code that read the file. May or may not be the same :exe specified in :proc, if present.'}),
('time', {'ptype': 'time', 'doc': 'The time the file was read.'}),
('path', {'ptype': 'file:path', 'doc': 'The path where the file was read.'}),
('path:dir', {'ptype': 'file:path', 'doc': 'The parent directory of the file path (parsed from :path).', 'ro': 1}),
('path:ext', {'ptype': 'str:lwr', 'doc': 'The file extension of the file name (parsed from :path).', 'ro': 1}),
('path:base', {'ptype': 'file:base', 'doc': 'The final component of the file path (parsed from :path).', 'ro': 1}),
('file', {'ptype': 'file:bytes', 'doc': 'The file that was read.'}),
)),
('it:exec:file:write', {'ptype': 'guid'}, (
('proc', {'ptype': 'it:exec:proc', 'doc': 'The main process executing code that wrote to / modified the existing file.'}),
('host', {'ptype': 'it:host',
'doc': 'The host running the process that wrote to the file. Typically the same host referenced in :proc, if present.'}),
('exe', {'ptype': 'file:bytes',
'doc': 'The specific file containing code that wrote to the file. May or may not be the same :exe referenced in :proc, if present.'}),
('time', {'ptype': 'time', 'doc': 'The time the file was written to / modified.'}),
('path', {'ptype': 'file:path', 'doc': 'The path where the file was modified.'}),
('path:dir', {'ptype': 'file:path', 'doc': 'The parent directory of the file path (parsed from :path).', 'ro': 1}),
('path:ext', {'ptype': 'str:lwr', 'doc': 'The file extension of the file name (parsed from :path).', 'ro': 1}),
('path:base', {'ptype': 'file:base', 'doc': 'The final component of the file path (parsed from :path).', 'ro': 1}),
('file', {'ptype': 'file:bytes', 'doc': 'The file that was modified.'}),
)),
('it:exec:reg:get', {'ptype': 'guid'}, (
import itertools
import json
import operator
import pathlib
import pickle
import random
import shutil
from zplib import datafile
class _DataclassBase:
"""Basic methods for "data classes" that have a defined set of fields with
which to compare and hash class instances."""
_FIELDS = () # subclasses should provide a tuple of field names
def _cmpkey(self):
return tuple(getattr(self, field) for field in self._FIELDS)
def _compare(self, other, method):
try:
return method(self._cmpkey(), other._cmpkey())
except (AttributeError, TypeError):
# _cmpkey not implemented, or returns different type, so can't compare with "other".
return NotImplemented
def __lt__(self, other):
return self._compare(other, operator.lt)
def __le__(self, other):
return self._compare(other, operator.le)
def __eq__(self, other):
return self._compare(other, operator.eq)
def __ge__(self, other):
return self._compare(other, operator.ge)
def __gt__(self, other):
return self._compare(other, operator.gt)
def __ne__(self, other):
return self._compare(other, operator.ne)
def __hash__(self):
return hash(self._cmpkey())
def __repr__(self):
return self.__class__.__qualname__ + '(' + ', '.join([f"{getattr(self, f)!r}" for f in self._FIELDS]) + ')'
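# Example: any two subclass instances compare (and hash) solely on their
# _FIELDS tuples, so two Position(experiment, '003') objects are equal and
# interchangeable as dict keys even when they are distinct objects.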
class Experiment(_DataclassBase):
"""Class that represents an experiment.
Experiment instances provide an iterable API and a dict-like API to
access positions by either lexicographical order or name, respectively:
experiment = Experiment('/path/to/experiment/root')
for position in experiment:
# do something
num_positions = len(experiment)
pos_3 = experiment.positions['003']
Experiments also support lazy-loading of the metadata file as needed via
the metadata attribute.
"""
_FIELDS = ('path',)
def __init__(self, path, annotation_dir='annotations'):
"""Create an Experiment instance for data at a given path.
Parameters:
path: path to experiment root directory (containing
experiment_metadata.json)
annotation_dir: subdirectory from which to load position and
timepoint annotations.
"""
super().__init__()
self.path = pathlib.Path(path).resolve()
self.annotation_dir = self.path / annotation_dir
self.metadata_file = self.path / 'experiment_metadata.json'
self.name = self.path.name
self._positions = None
self._metadata = None
def __repr__(self):
return self.__class__.__qualname__ + f'({self.name!r})'
def __iter__(self):
return iter(self.positions.values())
def __len__(self):
return len(self.positions)
def __contains__(self, position):
# Below will be False if dict.get returns None, i.e. don't have a position of that name
# otherwise, will compare positions for equality, which chains up to comparing experiments too...
return position is not None and self.positions.get(getattr(position, 'name', None)) == position
@property
def metadata(self):
"""Metadata dict for the experiment, read from backing file if not
presently cached."""
if self._metadata is None:
self._metadata = json.loads(self.metadata_file.read_text())
return self._metadata
def write_metadata(self):
"""Write the current metadata dictionary back to the metadata file."""
datafile.json_encode_atomic_legible_to_file(self.metadata, self.metadata_file)
@property
def positions(self):
""""Dict of Position objects associated with the Experiment, read from
backing file if not presently cached."""
if self._positions is None:
self._positions = {}
for name in sorted(self.metadata['positions'].keys()):
position = Position(self, name)
if position.metadata_file.exists():
self._positions[name] = position
return self._positions
def reload_positions(self, reload_timepoints=False):
"""Re-read positions from backing files. Restores any positions removed
e.g. in filtering operations. If reload_timepoints is True, any removed
timepoints from all positions will also be reloaded too. Any unsaved
position or timepoint annotations / metatada will be lost."""
self._positions = None
positions = self.positions
if reload_timepoints:
            for position in positions.values():  # positions is a dict keyed by name; iterate its Position values
                position.reload_timepoints()
@property
def all_timepoints(self):
"""Iterator over all timepoints in all positions in the experiment."""
return flatten(self)
def add_position(self, name, coords):
"""Add a position at the given xyz coordinates to the experiment."""
position = self.positions[name] = Position(self, name)
self._positions = dict(sorted(self.positions.items())) # make sure positions dict is in sorted order by keys
self.metadata['positions'][name] = coords
return position
def write_to_disk(self):
"""Write all metadata and annotations for the experiment and all positions/timepoints."""
self.write_metadata()
for position in self:
position.write_metadata()
position.write_annotations()
def filter(self, position_filter=None, timepoint_filter=None):
"""Delete positions/timepoints from an Experiment instance (but importantly
not from on disk) based on filter functions.
Example: To retain only non-excluded timepoints and positions, and to
further retain only positions that have been fully stage-annotated
up to the time of death:
experiment.filter(position_filter=(filter_excluded_positions, filter_staged),
timepoint_filter=filter_excluded_timepoints)
Parameters:
position_filter: function or list of functions to be called on each
Position object contained in the Experiment as
position_filter(position). Each function must return bool (to
keep or remove the whole position) or a list of bools, one for
each timepoint in the position.
timepoint_filter: function or list of functions to be called on each
Timepoint object contained in the Experiment as
timepoint_filter(timepoint). The filter must return a bool
representing whether to keep or remove the timepoint. If every
timepoint is removed, the position will be as well.
Returns: filtered_positions, filtered_timepoints
filtered_positions: list of Position instances removed (including
those removed because all timepoints for that position were removed)
filtered_timepoints: list of Timepoint instances removed (including
those whose removal caused the removal of the entire Position)
"""
if position_filter is None:
position_filter = []
elif callable(position_filter):
position_filter = [position_filter]
if timepoint_filter is None:
timepoint_filter = []
elif callable(timepoint_filter):
timepoint_filter = [timepoint_filter]
filtered_positions = []
filtered_timepoints = []
for position in list(self):
results = []
for pf in position_filter:
result = pf(position)
if isinstance(result, bool):
result = [result] * len(position)
results.append(result)
for tf in timepoint_filter:
results.append([tf(timepoint) for timepoint in position])
            # now transpose with zip(*results), yielding a list of results
            # for each timepoint rather than a list of results for each filter function,
            # and then use all() to boolean-and the results together.
keep_timepoints = list(map(all, zip(*results)))
for timepoint, keep in zip(list(position), keep_timepoints):
if not keep:
filtered_timepoints.append(position.timepoints.pop(timepoint.name))
if not any(keep_timepoints):
filtered_positions.append(self.positions.pop(position.name))
return filtered_positions, filtered_timepoints
def purge_filtered(self, filtered_positions, filtered_timepoints, dry_run=False, backup_dirname=None):
"""Delete positions and timepoints from disk that had been filtered.
Parameters:
filtered_positions: list of positions to remove, as returned by filter()
filtered_timepoints: list of timepoints to remove, as returned by filter()
NB: timepoints may already be in one of the filtered_positions; this
duplication will be handled correctly.
dry_run: passed on to position/timepoint.purge_from_disk() calls.
backup_dirname: passed on to position.purge_from_disk() call.
"""
for position in filtered_positions:
position.purge_from_disk(dry_run, backup_dirname)
filtered_positions = set(filtered_positions)
for timepoint in filtered_timepoints:
if timepoint.position not in filtered_positions:
timepoint.purge_from_disk(dry_run)
def purge_timepoint(self, timepoint_name, dry_run=False):
"""Remove a specific named timepoint from every position on disk and in memory"""
for position in self:
if timepoint_name in position.timepoints:
position[timepoint_name].purge_from_disk(dry_run=dry_run)
if not dry_run:
del position[timepoint_name]
timepoint_idx = self.metadata['timepoints'].index(timepoint_name)
if not dry_run:
for metadata_list in ['durations', 'timestamps', 'timepoints']:
del self.metadata[metadata_list][timepoint_idx]
for metadata_dict in ['brightfield metering', 'fluorescent metering', 'humidity', 'temperature']:
del self.metadata[metadata_dict][timepoint_name]
self.write_metadata()
def filter_excluded(position_or_timepoint):
"""Position or timepoint filter for Experiment.filter() to remove excluded positions/timepoints."""
# if no annotation present, assume not excluded
return not position_or_timepoint.annotations.get('exclude', False)
def filter_staged(position):
"""Position filter for Experiment.filter() to include only worms that have been
stage-annotated fully, are noted as "dead", and have at least one non-dead timepoint."""
stages = [timepoint.annotations.get('stage') for timepoint in position]
# NB: all(stages) below is True iff there is a non-None, non-empty-string
# annotation for each stage.
return all(stages) and stages[-1] == 'dead' and stages[0] != 'dead'
def filter_to_be_staged(position):
"""Position filter for Experiment.filter() to include only worms that still need to be
stage-annotated fully."""
stages = [timepoint.annotations.get('stage') for timepoint in position]
# NB: all(stages) below is True iff there is a non-None, non-empty-string
# annotation for each stage.
return not all(stages) or stages[-1] != 'dead'
def make_living_filter(keep_eggs, keep_dead):
"""Return a position filter for Experiment.filter() that retains only the
last keep_eggs timepoints staged as 'egg' and the first keep_dead timepoints
staged as 'dead'.
"""
def living_filter(position):
"""Position filter to exclude all timepoints annotated as 'egg' or 'dead', except the last {keep_eggs} 'egg'
and/or the first {keep_dead} 'dead' annotations. (The non-excluded 'egg' and 'dead' allow us to define the hatch and
death times precisely.)"""
stages = [timepoint.annotations.get('stage') for timepoint in position]
trim_eggs = max(0, stages.count('egg') - keep_eggs)
trim_dead = max(0, stages.count('dead') - keep_dead)
retain = len(position) - trim_eggs - trim_dead
return [False] * trim_eggs + [True] * retain + [False] * trim_dead
living_filter.__doc__ = living_filter.__doc__.format(**locals())
return living_filter
filter_living_timepoints = make_living_filter(keep_eggs=1, keep_dead=1)
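# A minimal sketch of the mask produced above, using a hypothetical
# stand-in for Position (a plain list of objects exposing only the
# .annotations attribute the filter reads):
class _FakeTimepoint:
    def __init__(self, stage):
        self.annotations = {'stage': stage}

def _demo_living_filter():
    stages = ['egg', 'egg', 'larva', 'adult', 'dead', 'dead']
    position = [_FakeTimepoint(s) for s in stages]
    mask = filter_living_timepoints(position)
    # only the last 'egg' and the first 'dead' survive, bracketing the
    # hatch and death times
    assert mask == [False, True, True, True, True, False]
    return mask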
def filter_has_pose(timepoint):
"""Timepoint filter for Experiment.filter() to include only worms where the
centerline and widths have been fully defined."""
pose = timepoint.annotations.get('pose')
# make sure pose is not None, and center/width tcks are both not None
return pose is not None and pose[0] is not None and pose[1] is not None
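# A hedged sketch of composing the filters above (assumes an Experiment
# instance as defined earlier in this module):
# >>> removed_pos, removed_tps = experiment.filter(
# ...     position_filter=[filter_excluded, filter_staged,
# ...                      filter_living_timepoints],
# ...     timepoint_filter=filter_has_pose)
# >>> experiment.purge_filtered(removed_pos, removed_tps, dry_run=True)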
class Position(_DataclassBase):
"""Class that represents a specific Position within an experiment."""
_FIELDS = ('experiment', 'name')
def __init__(self, experiment, name):
"""To add a new position to an Experiment instance, use add_position() instead
of constructing a Position directly.
"""Return the weights of the specified indeces or, if None, return all.
Parameters
----------
normalize : boolean or float > 0
If True, the weights will be normalized so that their mean is 1.
If a float is provided, the mean of the weights will be equal
to *normalize*. So *True* and *1* yield the same result.
index : |index_type|
|index_docstring|
Return
------
out: 1-D pandas Series
Return the weights as pandas Series
"""
index = self._index if index is None else list(index)
length = len(self) if index is None else len(index)
normalize = 1 if normalize is True else normalize
normalize = 0 if normalize is None or normalize is False else normalize
second_storage = kwargs.get("second_storage")
normalize_1 = 1
normalize_2 = 1
# HACK
weights_ratio = normalize
# TODO: implement if targets are different
if weights_ratio > 0 and second_storage is not None:
weights_1 = self.get_weights(index=index)
weights_2 = second_storage.get_weights()
sum_weight_1 = float(sum(weights_1))
sum_weight_2 = float(sum(weights_2))
ratio_1 = weights_ratio * sum_weight_2 / sum_weight_1
self.logger.info("ratio_1 = " + str(ratio_1))
if ratio_1 >= 1:
ratio_2 = 1.0
else:
ratio_2 = 1.0 / ratio_1
ratio_1 = 1.0
normalize_1 = ratio_1
normalize_2 = ratio_2
elif weights_ratio > 0 and second_storage is None:
normalize_1 = weights_ratio
else:
normalize_1 = normalize_2 = False
weights_out = self._get_weights(index=index, normalize=normalize_1)
if dev_tool.is_in_primitive(weights_out, (None, 1)):
weights_out = pd.Series(data=np.ones(length), index=index) * normalize_1
if second_storage is not None:
weights_2 = second_storage.get_weights(normalize=normalize_2)
weights_out = np.concatenate((weights_out, weights_2))
return weights_out
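# Worked example of the ratio logic above (illustrative numbers): with
# weights_ratio = 1, sum(weights_1) = 50 and sum(weights_2) = 200,
# ratio_1 = 1 * 200 / 50 = 4 >= 1, so normalize_1 = 4 and normalize_2 = 1;
# the first storage is then renormalized to a four times larger mean
# weight before the two weight sets are concatenated.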
def _get_weights(self, index=None, normalize=True):
"""Return pandas Series of weights or None, 1."""
# initialize values
index = self._index if index is None else list(index)
length = len(self) if index is None else len(index)
# TODO: allow other primitive weights
if dev_tool.is_in_primitive(self._weights, (None, 1)):
weights_out = self._weights
if normalize != 1 or normalize is not True:
weights_out = pd.Series(np.ones(length), index=index)
else:
normalize = False
elif index is None:
weights_out = self._weights
else:
weights_out = self._weights.loc[index]
weights_out = copy.deepcopy(weights_out)
if normalize or normalize > 0:
normalize = 1 if normalize is True else normalize
weights_out *= normalize / weights_out.mean()
return weights_out
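# Worked example of the mean normalization above: weights (1, 2, 5) have
# mean 8/3, so normalize=1 rescales them by 3/8 to (0.375, 0.75, 1.875),
# whose mean is exactly 1; normalize=2 would simply double that result.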
@property
def weights(self):
return self.get_weights(index=None, normalize=False)
@weights.setter
def weights(self, sample_weights):
"""Set the weights of the sample.
Parameters
----------
sample_weights : |sample_weights_type|
|sample_weights_docstring|
"""
self.set_weights(sample_weights=sample_weights)
def set_weights(self, sample_weights, index=None):
"""Set the weights of the sample.
Parameters
----------
sample_weights : |sample_weights_type|
|sample_weights_docstring|
index : 1-D array or list or None
The indices for the weights to be set. Only the given indices will
be set/used as weights.
"""
index = self._index if index is None else index
if isinstance(sample_weights, (str, dict)) and self._data_type == "root":
assert (
isinstance(sample_weights, list) and (len(sample_weights) == 1)
) or isinstance(sample_weights, str), "Can only be one branch"
assert isinstance(
self._data, dict
), "data should be root-dict but is no more..."
tmp_root = copy.deepcopy(self._data)
if isinstance(sample_weights, str):
sample_weights = {"branches": sample_weights}
tmp_root.update(sample_weights)
sample_weights = data_tools.to_ndarray(tmp_root)
self._set_weights(sample_weights=sample_weights, index=index)
def _set_weights(self, sample_weights, index=None):
"""Set the weights"""
index = self.index if index is None else index
length = len(self) if index is None else len(index)
if dev_tool.is_in_primitive(sample_weights, (None, 1)):
if index is None or len(self) == len(index):
self._weights = 1
return
else:
sample_weights = pd.Series(np.ones(len(index)), index=index)
# else:
# sample_weights = np.ones(length)
elif isinstance(sample_weights, pd.Series):
sample_weights = sample_weights[index]
else:
sample_weights = pd.Series(sample_weights, index=index, dtype="f8")
if len(self) == length and index is None:
self._weights = sample_weights
else:
if dev_tool.is_in_primitive(self._weights, (None, 1)):
self._weights = pd.Series(np.ones(len(self)), index=self._index)
self._weights.update(sample_weights)
def set_root_selection(self, selection, exception_if_failure=True):
"""Set the selection in a root-file. Only possible if a root-file is provided."""
warnings.warn("Method set_root_selection very unsafe currently!")
meta_cfg.warning_occured()
if self._data_type == "root":
self.data["selection"] = selection
self.set_data(self.data, columns=self.columns)
self.data_name_addition += "INDEX CRASHED!"
elif exception_if_failure:
raise RuntimeError("selection could not be applied, no root-dict")
else:
self.logger.error("selection not applied, no root-dict")
def pandasDF(self, columns=None, index=None):
"""Return a pandas DataFrame representation of the data
Return a pandas DataFrame.
Parameters
----------
columns : str
Arguments for the :py:func:`~root_numpy.root2rec`
function.
index : |index_type|
|index_docstring|
"""
# initialize variables
index = None if index is None else list(index)
if columns is not None:
    columns = data_tools.to_list(columns)
    columns = dev_tool.entries_to_str(columns)
# create data
data_out = self._make_df(columns=columns, index=index, copy=True)
# TODO: leave away below?!
# if not data_out.index.tolist() == range(len(data_out)): # if not, convert the indices to
# data_out.reset_index(drop=True, inplace=True)
return data_out
def _make_df(self, columns=None, index=None, copy=False):
"""Return a DataFrame from the internal data. Does some dirty, internal work."""
# initialize data
# TODO: remove trailing comment?
data = self._data # if dev_tool.is_in_primitive(data) else data
columns = self.columns if columns is None else data_tools.to_list(columns)
index = self._index if index is None else data_tools.to_list(index)
if self._data_type == "root":
# update root dictionary
# TODO: change keyword branches or something, due to incompatibility with root_pandas
temp_root_dict = dict(data, **{"branches": columns})
for key, val in list(temp_root_dict.items()):
if dev_tool.is_in_primitive(val, None):
temp_root_dict[key] = self.data.get(key)
data = data_tools.to_pandas(temp_root_dict, columns=columns, index=index)
# if index is not None:
# data.set_index([index], inplace=True, verify_integrity=True)
# TODO: remove below?
# elif self._data_type == 'array':
# data = pd.DataFrame(data, index=index, columns=columns, copy=copy)
elif self._data_type == "df":
if columns is not None:
data = data[columns]
else:
raise NotImplementedError("Unknown/not yet implemented data type")
assert isinstance(data, pd.DataFrame), "data did not convert correctly"
data = data if index is None else data.loc[index]
if isinstance(self.column_alias, dict) and len(self.column_alias) > 0:
data.rename(columns=self.column_alias, inplace=True, copy=False)
return data
# def get_labels(self, columns=None, as_list=False):
# """Return the human readable branch-labels of the data.
#
# Parameters
# ----------
# columns : list with str or str
# The labels of the columns to return
# as_list : boolean
# If true, the labels will be returned as a list instead of a dict.
#
# Return
# ------
# out : list or dict
# Return a list or dict containing the labels.
# """
# if columns is None:
# columns = self.columns
# columns = data_tools.to_list(columns)
# if as_list:
# labels_out = [self._label_dic.get(col, col) for col in columns]
# else:
# labels_out = {key: self._label_dic.get(key) for key in columns}
# return labels_out
# TODO: delete?
# def set_labels(self, data_labels, replace=False):
# """Set the human readable data-labels (for the columns).
#
# Sometimes you want to change the labels(names) of columns. This can be
# done by passing a dictionary containing the column as key and a
# human-readable name as value.
#
# Parameters
# ----------
# data_labels : dict
# It has the form: {column: name}
# replace : boolean
# """
# if data_labels is None:
# return
# assert isinstance(data_labels, dict), "Not a dictionary"
# self._set_data_labels(data_labels=data_labels, replace=replace)
#
# def _set_data_labels(self, data_labels, replace):
# """Update the data labels"""
# if replace:
# self._label_dic = data_labels
# else:
# self._label_dic.update(data_labels)
@property
def targets(self):
return self.get_targets()
@targets.setter
def targets(self, targets):
self.set_targets(targets=targets)
def get_targets(self, index=None):
"""Return the targets of the data as a pandas Series."""
# assign defaults
index = self._index if index is None else list(index)
length = len(self) if index is None else len(index)
# get targets via internal method
out_targets = self._get_targets(index=index)
# expand primitive ("simple") targets to a full-length Series for output
if isinstance(out_targets, (int, float)) or out_targets is None:
if self._target is None:
self.logger.warning("Target list consists of None!")
out_targets = dev_tool.make_list_fill_var([], length, self._target)
out_targets = pd.Series(out_targets, index=index)
return out_targets
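# Worked example of the expansion above: with a primitive self._target of 1
# and three rows, get_targets() returns pd.Series([1, 1, 1]) on the storage
# index instead of the bare primitive kept internally.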
def _get_targets(self, index=None):
"""Return targets as pandas Series or primitive type."""
# assign defaults
index = self._index if index is None else list(index)
# length = len(self) if index is None else len(index)
if index is None or dev_tool.is_in_primitive(self._target, (-1, 0, 1, None)):
out_targets = self._target
else:
out_targets = self._target.loc[index]
return out_targets
def set_targets(self, targets, index=None):
"""Set the targets of the data. Either an array-like object or {0, 1}."""
if not dev_tool.is_in_primitive(targets, (-1, 0, 1, None)):
assert len(self) == len(targets), "Invalid targets"
self._set_target(target=targets, index=index)
def _set_target(self, target, index=None):
"""Set the target. Attention with Series, index must be the same as data-index."""
index = self._index if dev_tool.is_in_primitive(index) else index
if
lbl.set_verticalalignment('center')
# Grid lines around the pixels
if grid:
offset = -.5
xlim = [-.5, len(df.columns)]
ylim = [-.5, len(df.index)]
segments = []
for x in range(xlim[1] + 1):  # one vertical edge per column boundary
    xdata = [x + offset, x + offset]
    ydata = ylim
    segment = list(zip(xdata, ydata))
    segments.append(segment)
for y in range(ylim[1] + 1):  # one horizontal edge per row boundary
    xdata = xlim
    ydata = [y + offset, y + offset]
    segment = list(zip(xdata, ydata))
    segments.append(segment)
bingrid = mpl.collections.LineCollection(segments, color='w', linewidths=1)
ax.add_collection(bingrid)
if showvals:
x_basis = np.arange(len(df.columns))
y_basis = np.arange(len(df.index))
x, y = np.meshgrid(x_basis, y_basis)
for c, r in zip(x.flatten(), y.flatten()):
val = df.iloc[r, c]
ax.text(c, r, val, va='center', ha='center', color='white')
return ax
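# A self-contained sketch of the pixel-grid construction above (the
# enclosing heatmap helper is truncated in this excerpt, so the demo
# rebuilds the segments directly; all names here are illustrative):
def _demo_pixel_grid(n_rows=2, n_cols=3):
    import numpy as np
    from matplotlib import pyplot as plt
    from matplotlib.collections import LineCollection
    offset = -.5
    xlim = [-.5, n_cols]
    ylim = [-.5, n_rows]
    segments = []
    for x in range(n_cols + 1):  # vertical edges between columns
        segments.append(list(zip([x + offset, x + offset], ylim)))
    for y in range(n_rows + 1):  # horizontal edges between rows
        segments.append(list(zip(xlim, [y + offset, y + offset])))
    fig, ax = plt.subplots()
    ax.imshow(np.arange(n_rows * n_cols).reshape(n_rows, n_cols))
    ax.add_collection(LineCollection(segments, color='w', linewidths=1))
    return ax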
def axes_extent(axs, pad=0.0):
"""
Get the full extent of a group of axes, including axes labels, tick labels,
and titles.
"""
import itertools as it
import matplotlib as mpl
def axes_parts(ax):
yield ax
for label in ax.get_xticklabels():
if label.get_text():
yield label
for label in ax.get_yticklabels():
if label.get_text():
yield label
xlabel = ax.get_xaxis().get_label()
ylabel = ax.get_yaxis().get_label()
for label in (xlabel, ylabel, ax.title):
if label.get_text():
yield label
items = it.chain.from_iterable(axes_parts(ax) for ax in axs)
extents = [item.get_window_extent() for item in items]
#mpl.transforms.Affine2D().scale(1.1)
extent = mpl.transforms.Bbox.union(extents)
extent = extent.expanded(1.0 + pad, 1.0 + pad)
return extent
def extract_axes_extents(fig, combine=False, pad=0.0):
# Make sure we draw the axes first so we can
# extract positions from the text objects
import matplotlib as mpl
fig.canvas.draw()
# Group axes that belong together
atomic_axes = []
seen_ = set([])
for ax in fig.axes:
if ax not in seen_:
atomic_axes.append([ax])
seen_.add(ax)
dpi_scale_trans_inv = fig.dpi_scale_trans.inverted()
axes_bboxes_ = [axes_extent(axs, pad) for axs in atomic_axes]
axes_extents_ = [extent.transformed(dpi_scale_trans_inv) for extent in axes_bboxes_]
# axes_extents_ = axes_bboxes_
if combine:
# Grab include extents of figure text as well
# FIXME: This might break on OSX
# http://stackoverflow.com/questions/22667224/bbox-backend
renderer = fig.canvas.get_renderer()
for mpl_text in fig.texts:
bbox = mpl_text.get_window_extent(renderer=renderer)
extent_ = bbox.expanded(1.0 + pad, 1.0 + pad)
extent = extent_.transformed(dpi_scale_trans_inv)
# extent = extent_
axes_extents_.append(extent)
axes_extents = mpl.transforms.Bbox.union(axes_extents_)
else:
axes_extents = axes_extents_
# if True:
# axes_extents.x0 = 0
# # axes_extents.y1 = 0
return axes_extents
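# A minimal sketch of cropping a saved figure with the helpers above
# (Agg backend and output path are illustrative assumptions):
def _demo_extract_extent(out_path='extent_demo.png'):
    import matplotlib
    matplotlib.use('Agg')  # headless backend so the sketch runs anywhere
    from matplotlib import pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1])
    ax.set_title('extent demo')
    extent = extract_axes_extents(fig, combine=True, pad=0.05)
    fig.savefig(out_path, bbox_inches=extent)
    return extent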
def adjust_subplots(left=None, right=None, bottom=None, top=None, wspace=None,
hspace=None, fig=None):
"""
Kwargs:
left (float): left side of the subplots of the figure
right (float): right side of the subplots of the figure
bottom (float): bottom of the subplots of the figure
top (float): top of the subplots of the figure
wspace (float): width reserved for blank space between subplots
hspace (float): height reserved for blank space between subplots
"""
from matplotlib import pyplot as plt
kwargs = dict(left=left, right=right, bottom=bottom, top=top,
wspace=wspace, hspace=hspace)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
if fig is None:
fig = plt.gcf()
subplotpars = fig.subplotpars
adjust_dict = subplotpars.__dict__.copy()
del adjust_dict['validate']
adjust_dict.update(kwargs)
fig.subplots_adjust(**adjust_dict)
def render_figure_to_image(fig, **savekw):
import io
import cv2
import matplotlib as mpl
axes_extents = extract_axes_extents(fig)
extent = mpl.transforms.Bbox.union(axes_extents)
with io.BytesIO() as stream:
# This call takes 23% - 15% of the time depending on settings
fig.savefig(stream, bbox_inches=extent, **savekw)
# fig.savefig(stream, **savekw)
stream.seek(0)
data = np.frombuffer(stream.getvalue(), dtype=np.uint8)
im_bgra = cv2.imdecode(data, cv2.IMREAD_UNCHANGED)
return im_bgra
def savefig2(fig, fpath, **kwargs):
"""
Crops to the union of the axes extents and saves the figure with transparency
"""
import matplotlib as mpl
if 'transparent' not in kwargs:
kwargs['transparent'] = True
if 'bbox_inches' not in kwargs:
    axes_extents = extract_axes_extents(fig)
    extent = mpl.transforms.Bbox.union(axes_extents)
    # savefig crops via bbox_inches; it has no 'extent' keyword
    kwargs['bbox_inches'] = extent
fig.savefig(fpath, **kwargs)
def copy_figure_to_clipboard(fig):
"""
References:
https://stackoverflow.com/questions/17676373/python-matplotlib-pyqt-copy-image-to-clipboard
"""
print('Copying figure %d to the clipboard' % fig.number)
import cv2
import matplotlib as mpl
app = mpl.backends.backend_qt5.qApp
QtGui = mpl.backends.backend_qt5.QtGui
im_bgra = render_figure_to_image(fig, transparent=True)
im_rgba = cv2.cvtColor(im_bgra, cv2.COLOR_BGRA2RGBA)
im = im_rgba
QImage = QtGui.QImage
qim = QImage(im.data, im.shape[1], im.shape[0], im.strides[0], QImage.Format_RGBA8888)
clipboard = app.clipboard()
clipboard.setImage(qim)
# size = fig.canvas.size()
# width, height = size.width(), size.height()
# qim = QtGui.QImage(fig.canvas.buffer_rgba(), width, height, QtGui.QImage.Format_ARGB32)
# QtWidgets = mpl.backends.backend_qt5.QtWidgets
# pixmap = QtWidgets.QWidget.grab(fig.canvas)
# clipboard.setPixmap(pixmap)
def dict_intersection(dict1, dict2):
r"""
Args:
dict1 (dict):
dict2 (dict):
Returns:
dict: mergedict_
CommandLine:
python -m utool.util_dict --exec-dict_intersection
Example:
>>> # ENABLE_DOCTEST
>>> dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
>>> dict2 = {'b': 2, 'c': 3, 'd': 5, 'e': 21, 'f': 42}
>>> mergedict_ = dict_intersection(dict1, dict2)
>>> print(ub.repr2(mergedict_, nl=0))
{'b': 2, 'c': 3}
"""
isect_keys = set(dict1.keys()).intersection(set(dict2.keys()))
# maintain order if possible
if isinstance(dict1, ub.odict):
isect_keys_ = [k for k in dict1.keys() if k in isect_keys]
_dict_cls = ub.odict
else:
isect_keys_ = isect_keys
_dict_cls = dict
dict_isect = _dict_cls(
(k, dict1[k]) for k in isect_keys_ if dict1[k] == dict2[k]
)
return dict_isect
def _dark_background(ax=None, doubleit=False, force=False):
r"""
Args:
ax (None): (default = None)
doubleit (bool): (default = False)
CommandLine:
python -m .draw_func2 --exec-_dark_background --show
Example:
>>> # ENABLE_DOCTEST
>>> autompl()
>>> fig = figure()
>>> _dark_background()
>>> show_if_requested()
"""
import matplotlib as mpl
from matplotlib import pyplot as plt
def is_using_style(style):
style_dict = mpl.style.library[style]
return len(dict_intersection(style_dict, mpl.rcParams)) == len(style_dict)
if force:
from mpl_toolkits.mplot3d import Axes3D
BLACK = np.array(( 0, 0, 0, 255)) / 255.0
# Should use mpl style dark background instead
bgcolor = BLACK * .9
if ax is None:
ax = plt.gca()
if isinstance(ax, Axes3D):
    # set_axis_bgcolor was removed in matplotlib 2.x; use set_facecolor
    ax.set_facecolor(bgcolor)
    ax.tick_params(colors='white')
    return
xy, width, height = _get_axis_xy_width_height(ax)
if doubleit:
halfw = (doubleit) * (width / 2)
halfh = (doubleit) * (height / 2)
xy = (xy[0] - halfw, xy[1] - halfh)
width *= (doubleit + 1)
height *= (doubleit + 1)
rect = mpl.patches.Rectangle(xy, width, height, lw=0, zorder=0)
rect.set_clip_on(True)
rect.set_fill(True)
rect.set_color(bgcolor)
rect.set_zorder(-99999999999)
rect = ax.add_patch(rect)
def _get_axis_xy_width_height(ax=None, xaug=0, yaug=0, waug=0, haug=0):
""" gets geometry of a subplot """
from matplotlib import pyplot as plt
if ax is None:
ax = plt.gca()
autoAxis = ax.axis()
xy = (autoAxis[0] + xaug, autoAxis[2] + yaug)
width = (autoAxis[1] - autoAxis[0]) + waug
height = (autoAxis[3] - autoAxis[2]) + haug
return xy, width, height
_LEGEND_LOCATION = {
'upper right': 1,
'upper left': 2,
'lower left': 3,
'lower right': 4,
'right': 5,
'center left': 6,
'center right': 7,
'lower center': 8,
'upper center': 9,
'center': 10,
}
def set_figtitle(figtitle, subtitle='', forcefignum=True, incanvas=True,
size=None, fontfamily=None, fontweight=None,
fig=None):
r"""
Args:
figtitle (?):
subtitle (str): (default = '')
forcefignum (bool): (default = True)
incanvas (bool): (default = True)
fontfamily (None): (default = None)
fontweight (None): (default = None)
size (None): (default = None)
fig (None): (default = None)
CommandLine:
python -m .custom_figure set_figtitle --show
Example:
>>> # DISABLE_DOCTEST
>>> autompl()
>>> fig = figure(fnum=1, doclf=True)
>>> result = set_figtitle(figtitle='figtitle', fig=fig)
>>> # xdoc: +REQUIRES(--show)
>>> show_if_requested()
"""
from matplotlib import pyplot as plt
if figtitle is None:
figtitle = ''
if fig is None:
fig = plt.gcf()
figtitle = ub.ensure_unicode(figtitle)
subtitle = ub.ensure_unicode(subtitle)
if incanvas:
if subtitle != '':
subtitle = '\n' + subtitle
prop = {
'family': fontfamily,
'weight': fontweight,
'size': size,
}
prop = {k: v for k, v in prop.items() if v is not None}
sup = fig.suptitle(figtitle + subtitle)
if prop:
fontproperties = sup.get_fontproperties().copy()
for key, val in prop.items():
getattr(fontproperties, 'set_' + key)(val)
sup.set_fontproperties(fontproperties)
# fontproperties = mpl.font_manager.FontProperties(**prop)
else:
fig.suptitle('')
# Set title in the window
window_figtitle = ('fig(%d) ' % fig.number) + figtitle
window_figtitle = window_figtitle.replace('\n', ' ')
fig.canvas.set_window_title(window_figtitle)
def legend(loc='best', fontproperties=None, size=None, fc='w', alpha=1,
ax=None, handles=None):
r"""
Args:
loc (str): (default = 'best')
fontproperties (None): (default = None)
size (None): (default = None)
Ignore:
>>> # ENABLE_DOCTEST
>>> autompl()
>>> loc = 'best'
>>> xdata = np.linspace(-6, 6)
>>> ydata = np.sin(xdata)
>>> plt.plot(xdata, ydata, label='sin')
>>> fontproperties = None
>>> size = None
>>> result = legend(loc, fontproperties, size)
>>> print(result)
>>> show_if_requested()
"""
from matplotlib import pyplot as plt
assert loc in _LEGEND_LOCATION or loc == 'best', (
'invalid loc. try one of %r' % (_LEGEND_LOCATION,))
if ax is None:
ax = plt.gca()
if fontproperties is None:
prop = {}
if size is not None:
prop['size'] = size
# prop['weight'] = 'normal'
# prop['family'] = 'sans-serif'
else:
prop = fontproperties
legendkw = dict(loc=loc)
if prop:
legendkw['prop'] = prop
if handles is not None:
legendkw['handles'] = handles
legend = ax.legend(**legendkw)
if legend:
legend.get_frame().set_fc(fc)
legend.get_frame().set_alpha(alpha)
def distinct_colors(N, brightness=.878, randomize=True,
:param int page: [``optional``] : Sets the page of results to retrieve
from the server.
:param bool include_total_count: [``optional``] : Return the total
number of results for a query. This should typically be used only
for the first page of a large result set.
:param str out_format: [``optional``] : The format in which the
response output should be rendered. Available formats include
``dict``, ``json``, and ``xml``. For ``dict``, the return type is a
Python dictionary. For the other formats, the return type is a
``unicode``.
:return: Response data.
:rtype: dict or unicode
"""
req_dict = {}
self._add_query_param(req_dict, query, " ")
self._add_days_back_param(req_dict, days_back)
self._add_page_param(req_dict, page)
self._add_server_param(req_dict, server)
self._add_country_param(req_dict, country)
self._add_include_total_count_param(req_dict, include_total_count)
DomainToolsApiClient._add_string_param_by_name(
req_dict, DomainToolsApiClient._PARAM_SEARCH_TYPE, search_type)
DomainToolsApiClient._add_string_param_by_name(
req_dict, DomainToolsApiClient._PARAM_ORG, org)
return self._invoke_service(req_dict,
self._REQ_TOPIC_IP_REGISTRANT_MONITOR,
out_format)
def iris(self, domain=None, ip_address=None, email=None, nameserver=None,
registrar=None, registrant=None, registrant_org=None,
out_format="dict"):
"""
Retrieves Iris pivot engine domain data for any provided search terms,
ANDed together. See
`DXL service method <https://github.com/opendxl/opendxl-domaintools-service-python/wiki/Service-Methods#iris>`__
and `DomainTools API <https://www.domaintools.com/resources/api-documentation/iris/>`__
documentation for more information.
:param domain: [``optional``] : One or more domains.
:type domain: str or list(str) or tuple(str) or set(str)
:param str ip_address: [``optional``] : A single full IP address - for
example: ``192.168.3.11`` - that will be matched to the A record
for a domain name.
:param str email: [``optional``] : A single email address. The results
will match email addresses in the Admin, Billing, Registrant, and
Technical Contacts, along with SSL, Whois Records, and DNS/SOA
records.
:param str nameserver: [``optional``] : The exact name of the server
you wish to query.
:param str registrar: [``optional``] : Word search on the domain
registrar.
:param str registrant: [``optional``] : Word search on the Whois
registrant field.
:param str registrant_org: [``optional``] : Word search on the Whois
registrant organization field.
:param str out_format: [``optional``] : The format in which the
response output should be rendered. Available formats include
``dict``, ``json``, and ``xml``. For ``dict``, the return type is a
Python dictionary. For the other formats, the return type is a
``unicode``.
:return: Response data.
:rtype: dict or unicode
"""
req_dict = {}
self._add_domain_param(req_dict, domain, ",")
self._add_ip_param(req_dict, ip_address)
DomainToolsApiClient._add_string_param_by_name(
req_dict, DomainToolsApiClient._PARAM_EMAIL, email)
DomainToolsApiClient._add_string_param_by_name(
req_dict, DomainToolsApiClient._PARAM_NAMESERVER, nameserver)
DomainToolsApiClient._add_string_param_by_name(
req_dict, DomainToolsApiClient._PARAM_REGISTRAR, registrar)
DomainToolsApiClient._add_string_param_by_name(
req_dict, DomainToolsApiClient._PARAM_REGISTRANT, registrant)
DomainToolsApiClient._add_string_param_by_name(
req_dict, DomainToolsApiClient._PARAM_REGISTRANT_ORG,
registrant_org)
return self._invoke_service(req_dict, self._REQ_TOPIC_IRIS, out_format)
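# A hedged usage sketch (assumes a DomainToolsApiClient already constructed
# around a connected DXL client, as elsewhere in this module):
# >>> client = DomainToolsApiClient(dxl_client)
# >>> response = client.iris(domain='domaintools.com', out_format='json')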
def name_server_monitor(self, query, days_back=None, page=None,
out_format="dict"):
"""
Retrieves activity for monitored Name Servers which match the hostname
supplied in the ``query`` parameter. See
`DXL service method <https://github.com/opendxl/opendxl-domaintools-service-python/wiki/Service-Methods#name-server-monitor>`__
and `DomainTools API <https://www.domaintools.com/resources/api-documentation/name-server-monitor/>`__
documentation for more information.
:param str query: Hostname of the Name Server to query. For example:
``dynect.net``.
:param int days_back: [``optional``] : Use this parameter in
exceptional circumstances where you need to search domains
registered prior to the current date.
:param int page: [``optional``] : Sets the page of results to retrieve
from the server.
:param str out_format: [``optional``] : The format in which the
response output should be rendered. Available formats include
``dict``, ``json``, and ``xml``. For ``dict``, the return type is a
Python dictionary. For the other formats, the return type is a
``unicode``.
:return: Response data.
:rtype: dict or unicode
"""
req_dict = {}
self._add_query_param(req_dict, query)
self._add_days_back_param(req_dict, days_back)
self._add_page_param(req_dict, page)
return self._invoke_service(req_dict,
self._REQ_TOPIC_NAME_SERVER_MONITOR,
out_format)
def parsed_whois(self, query, out_format="dict"):
"""
Retrieves parsed information extracted from the raw Whois record for
the domain supplied in the ``query`` parameter. See
`DXL service method <https://github.com/opendxl/opendxl-domaintools-service-python/wiki/Service-Methods#parsed-whois>`__
and `DomainTools API <https://www.domaintools.com/resources/api-documentation/parsed-whois/>`__
documentation for more information.
:param str query: Domain name for which the parsed Whois data is
desired. For example: ``domaintools.com``.
:param str out_format: [``optional``] : The format in which the
response output should be rendered. Available formats include
``dict``, ``json``, and ``xml``. For ``dict``, the return type is a
Python dictionary. For the other formats, the return type is a
``unicode``.
:return: Response data.
:rtype: dict or unicode
"""
req_dict = {}
self._add_query_param(req_dict, query)
return self._invoke_service(req_dict, self._REQ_TOPIC_PARSED_WHOIS,
out_format)
def phisheye(self, query, days_back=None, out_format="dict"):
"""
Retrieves daily monitor results from the DomainTools PhishEye product
for the term supplied in the ``query`` parameter. See
`DXL service method <https://github.com/opendxl/opendxl-domaintools-service-python/wiki/Service-Methods#phisheye-domain-list>`__
and `DomainTools API <https://www.domaintools.com/resources/api-documentation/phisheye/>`__
documentation for more information.
:param str query: Term for which the day's domains are desired.
:param int days_back: [``optional``] : Use this parameter in
exceptional circumstances where you need to search domains
registered prior to the current date.
:param str out_format: [``optional``] : The format in which the
response output should be rendered. Available formats include
``dict``, ``json``, and ``xml``. For ``dict``, the return type is a
Python dictionary. For the other formats, the return type is a
``unicode``.
:return: Response data.
:rtype: dict or unicode
"""
req_dict = {}
self._add_query_param(req_dict, query)
self._add_days_back_param(req_dict, days_back)
return self._invoke_service(req_dict, self._REQ_TOPIC_PHISHEYE,
out_format)
def phisheye_term_list(self, include_inactive=None, out_format="dict"):
"""
Retrieves a list of terms setup for the DomainTools PhishEye product
for this account. See
`DXL service method <https://github.com/opendxl/opendxl-domaintools-service-python/wiki/Service-Methods#phisheye-term-list>`__
and `DomainTools API <https://www.domaintools.com/resources/api-documentation/phisheye/>`__
documentation for more information.
:param bool include_inactive: [``optional``] : Use this parameter to
display terms that may have been inactivated in users' lists.
:param str out_format: [``optional``] : The format in which the
response output should be rendered. Available formats include
``dict``, ``json``, and ``xml``. For ``dict``, the return type is a
Python dictionary. For the other formats, the return type is a
``unicode``.
:return: Response data.
:rtype: dict or unicode
"""
req_dict = {}
DomainToolsApiClient._add_boolean_param_by_name(
req_dict, DomainToolsApiClient._PARAM_INCLUDE_INACTIVE,
include_inactive)
return self._invoke_service(req_dict,
self._REQ_TOPIC_PHISHEYE_TERM_LIST,
out_format)
def registrant_monitor(self, query, exclude=None, days_back=None,
limit=None, out_format="dict"):
"""
Retrieves information from the ownership (Whois) records of domain
names for search terms specified in the ``query`` parameter. See
`DXL service method <https://github.com/opendxl/opendxl-domaintools-service-python/wiki/Service-Methods#registrant-monitor>`__
and `DomainTools API <https://www.domaintools.com/resources/api-documentation/registrant-monitor/>`__
documentation for more information.
:param query: One or more search terms.
:type query: str or list(str) or tuple(str) or set(str)
:param exclude: [``optional``] : Whois records with these words will be
excluded from the result set.
:type exclude: str or list(str) or tuple(str) or set(str)
:param int days_back: [``optional``] : Use this parameter in
exceptional circumstances where you need to search domains
registered prior to the current date.
:param int limit: [``optional``] : Limit the number of matched domain
names that are returned in your result set.
:param str out_format: [``optional``] : The format in which the
response output should be rendered. Available formats include
``dict``, ``json``, and ``xml``. For ``dict``, the return type is a
Python dictionary. For the other formats, the return type is a
``unicode``.
:return: Response data.
:rtype: dict or unicode
"""
req_dict = {}
self._add_query_param(req_dict, query, "|")
self._add_exclude_param(req_dict, exclude)
self._add_days_back_param(req_dict, days_back)
self._add_limit_param(req_dict, limit)
return self._invoke_service(req_dict,
self._REQ_TOPIC_REGISTRANT_MONITOR,
out_format)
def reputation(self, query, include_reasons=None, out_format="dict"):
"""
Retrieves reputation information for the domain specified in the
``query`` parameter. See
`DXL service method <https://github.com/opendxl/opendxl-domaintools-service-python/wiki/Service-Methods#reputation>`__
and `DomainTools API <https://www.domaintools.com/resources/api-documentation/reputation/>`__
documentation for more information.
:param str query: Input domain for which the risk score is desired.
:param bool include_reasons: [``optional``] : Return a list of reasons
for the risk score determination.
:param str out_format: [``optional``] : The format in which the
response output should be rendered. Available formats include
``dict``, ``json``, and ``xml``. For ``dict``, the return type is a
Python dictionary. For the other formats, the return type is a
``unicode``.
:return: Response data.
:rtype: dict or unicode
"""
req_dict = {}
self._add_query_param(req_dict, query)
DomainToolsApiClient._add_boolean_param_by_name(
req_dict, DomainToolsApiClient._PARAM_INCLUDE_REASONS,
include_reasons)
return self._invoke_service(req_dict, self._REQ_TOPIC_REPUTATION,
out_format)
def reverse_ip(self, domain, limit=None, out_format="dict"):
"""
Retrieves a list of domain names that share the same Internet host (i.e. the same IP address) as the given domain. See
`DXL service method <https://github.com/opendxl/opendxl-domaintools-service-python/wiki/Service-Methods#reverse-ip>`__
and `DomainTools API <https://www.domaintools.com/resources/api-documentation/reverse-ip/>`__
documentation for more information.
:param str domain: Domain name for which the list of co-hosted
domain names is desired.
:param int limit: [``optional``] : Limit the number of matched domain
names that are returned in your result set.
:param str out_format: [``optional``] : The format in which the
response output should be rendered. Available formats include
``dict``, ``json``, and ``xml``. For ``dict``, the return type is a
Python dictionary. For the other formats, the return type is a
``unicode``.
:return: Response data.
:rtype: dict or unicode
"""
req_dict = {}
either a PSL, PSLX, or BLAST-XML file!'.format(str(database)))
else:
raise FileNotFoundError()
except FileNotFoundError:
raise SearchError('Database file "{}" was not found!'.format(str(database)))
return rec
def id_search(id_rec, id_type='brute', verbose=2, indent=0, custom_regex=None, regex_only=False):
"""
EX:
gi =
refseq_accession = 'XP_010883249.1'
scaffold = 'scaffold_145\t[:1033526-1034566](-)\t190'
id =
chr = 'chrX[:3047971-3259961](-)119'
seq_range =
assembly1 = 'KN678312.1 [:9787-29116](+) 478'
assembly2 = 'KN678312.1 [:9787-29116](+) 478'
symbol = 'TP53'
symbol = 'INS [:259-568](+) (161)'
strand = '+'
:param id_rec:
:param id_type:
:param custom_regex:
:param regex_only:
:param verbose:
:param indent:
:return:
"""
# Define the regex patterns (raw strings so \A, \d and especially \b stay
# regex escapes; in a plain string '\b' is a backspace character)
p = dict(gi=re.compile(r'(\Agi[| _:]+[0-9.]+)'
                       r'([| \t:_])?\[?(:?\d+-?\d+)?\]?([| \t:_])?(.*)'),
         accession=re.compile(r'(\A[AXNYZ][MWRCPGTZ][| _:]+[0-9.]+|\Aref[| _:]+[0-9.]+)'
                              r'([| \t:_])?\[?(:?\d+-?\d+)?\]?([| \t:_])?(.*)'),
         scaffold=re.compile(r'(\Ascaffold[| _:]+[0-9.]+)'
                             r'([| \t:_])?\[?(:?\d+-?\d+)?\]?([| \t:_])?(.*)'),
         id=re.compile(r'(\Aid[| _:]*[0-9.]+)'
                       r'([| \t:_])?\[?(:?\d+-?\d+)?\]?([| \t:_])?(.*)'),
         chr=re.compile(r'(\Achr[| _:]*[A-Za-z0-9.]+)'
                        r'([| \t:_])??\[?(:?\d+-?\d+)?\]?([| \t:_])?(.*)'),
         assembly=re.compile(r'(\A[A-Za-z]+[0-9.]+)'
                             r'([| \t:_])?\[?(:?\d+-?\d+)?\]?([| \t:_])?(.*)'),
         assembly_broad=re.compile(r'(\b[ALYB]+[0-9.]+)'
                                   r'([| \t:_])?\[?(:?\d+-?\d+)?\]?([| \t:_])?(.*)'),
         symbol=re.compile(r'(\A\S+)([| \t:_])?\[?(:?\d+-?\d+)?\]?([| \t:_])?(.*)'),
         seq_range=re.compile(r':?(\d+)-(\d+)'),
         strand=re.compile(r'(\([-+0N]\))'),
         score=re.compile(r'\d\d*')
         )
if custom_regex is not None:
p = {'custom': custom_regex}
id_type = 'custom'
# Begin search:
if verbose > 1:
print('ID Loaded, performing regex search for identifiers...', indent=indent)
print('ID type: ', id_type, indent=indent)
if id_type == 'brute':
for tmp_type in ['accession', 'gi', 'scaffold', 'id', 'chr', 'assembly', 'assembly_broad', 'symbol']:
if bool(p[tmp_type].findall(id_rec)):
if verbose > 1:
print('Brute Force was set, tested strings for all pre-registered IDs.', indent=indent)
print('ID was selected as type {0}!'.format(tmp_type), indent=indent + 1)
if regex_only:
return p[tmp_type]
else:
return id_search(id_rec=id_rec, id_type=tmp_type, verbose=verbose, indent=indent)
raise IDError('Couldn\'t identify the id type of line: {}!'.format(id_rec))
else:
try:
item_parts = p[id_type].findall(id_rec)[0]
if verbose > 1:
print('Successfully found {0}, compiling list!'.format(id_type), indent=indent)
print('Item:\t', '\t'.join(item_parts), indent=indent + 1)
except IndexError:
raise IDError('Could not identify patterns in {0} with id_type={1}, '
'is the id_search sequence correct?'.format(id_rec, id_type))
try:
item_parts = list(item_parts)
item_parts[0] = item_parts[0] if not isinstance(item_parts[0], str) else ''.join(item_parts[0])
if item_parts[2]:
try:
sr_tuple = p['seq_range'].findall(item_parts[2])[0]
if verbose > 1:
print('Found sequence delimiters in IDs!', indent=indent)
print(sr_tuple, indent=indent + 1)
except IndexError:
raise IDError('A positive match for a sequence range was found '
'({0}), yet no hits were identified! Confirm that '
'the regex is correct and try again!'.format(item_parts[2]))
else:
sr_tuple = (0, -1)
if item_parts[4]:
try:
strand = p['strand'].findall(item_parts[4])[0]
except IndexError:
strand = '(N)'
try:
score = p['score'].findall(item_parts[4])[0]
except IndexError:
score = 0
else:
strand = '(N)'
score = '0'
if verbose > 1:
if strand != '(N)':
print('Strand info found: {0}'.format(strand), indent=indent)
if score != '0':
print('Score info found: {0}'.format(score), indent=indent)
seq_range = (int(sr_tuple[0]), int(sr_tuple[1]), strand, int(score))
return p, item_parts[0], seq_range, id_type
except IndexError:
raise IDError('Could not identify patterns in {0} with id_type={1}, '
'is the id_search sequence correct?'.format(id_rec, id_type))
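# A small sketch exercising the brute-force path above (verbose=0 keeps the
# custom indented print calls quiet; the accession string is illustrative):
def _demo_id_search():
    regexes, ident, seq_range, matched_type = id_search(
        'KN678312.1 [:9787-29116](+) 478', id_type='brute', verbose=0)
    assert matched_type == 'assembly'
    assert ident == 'KN678312.1'
    assert seq_range[:2] == (9787, 29116)  # strand '(+)' and score 478 follow
    return ident, seq_range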
def percent_identity_searchio(hit, is_protein=True):
"""Calculates percent identity based on entire hit. Adapted from UCSC BLAT FAQ and Biopython."""
size_mul = 3 if is_protein else 1
qali_size = size_mul * sum([i[-1] - i[0] for i in merge_ranges([(hsp.query_start, hsp.query_end) for hsp in hit])])
tali_size = sum([i[-1] - i[0] for i in merge_ranges([(hsp.hit_start, hsp.hit_end) for hsp in hit])])
ali_size = min(qali_size, tali_size)
if ali_size <= 0:
return 0
size_dif = qali_size - tali_size
size_dif = 0 if size_dif < 0 else size_dif
sum_match = sum([i.match_num for i in hit])
sum_rep = sum([i.match_rep_num for i in hit])
sum_mismatch = sum([i.mismatch_num for i in hit])
total = size_mul * (sum_match + sum_rep + sum_mismatch)
if total != 0:
millibad = (1000 * (sum([i.mismatch_num for i in hit]) * size_mul + sum([i.query_gap_num for i in hit]) +
round(3 * log(1 + size_dif)))) / total
else:
raise Exception('Somehow your total in the percent_identity function was 0, so you broke the script!')
perc_ident = 100 - (millibad * 0.1)
return perc_ident
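# Worked example of the BLAT-style formula above (protein, so size_mul = 3):
# 90 matches, 10 mismatches, no repeats, no query gaps and equally sized
# alignments (size_dif = 0) give total = 3 * 100 = 300 and
# millibad = 1000 * (10 * 3) / 300 = 100, hence 100 - 100 * 0.1 = 90%
# identity, as expected for 90/100 matching columns.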
def get_searchdb(search_type, species, db_loc, verbose=1, indent=0):
"""Finds and returns the appropriate search database for the given species and search type.
This function automates the process of selecting the search database needed by the selected search program,
like BLAST or BLAT, so that the user does not need to preoccupy themselves with providing said information
for a large number of species. For BLAST* that depend on protein databases (BLASTP and BLASTX), the function
searches for files matching the form 'Genus_species_protein.*' in the given directory; for BLAST* that depend
on DNA databases (BLASTN, TBLASTN, and TBLASTX), it instead looks for files 'Genus_species_genome.*'.
If '-transcript' is added to the end of any of the DNA-dependent BLAST*, then instead the function will
search for files in the style of 'Genus_species_transcript.*'. In the case of BLAT searches, the program will
similarly search for 'Genus_species*.2bit', or for 'Genus_species*transcript.2bit' if '-transcript' is added
after the search type.
In all usage cases, if the program does not find files matching the 'Genus_species' format, it will try to
find the files using a case-insensitive search using the 6-letter abbreviated form of the species name.
Usage::
>>> get_searchdb('blastp', 'Homo sapiens', '/path/to/search/files')
/path/to/search/files/Homo_sapiens_protein.*
>>> get_searchdb('tblastn', 'Homo sapiens', '/path/to/search/files')
/path/to/search/files/HomSap_genome.*
>>> get_searchdb('blastn-transcript', 'Homo sapiens', '/path/to/search/files')
/path/to/search/files/HomSap_transcript.*
>>> get_searchdb('blat', 'Homo sapiens', '/path/to/search/files')
/path/to/search/files/HomSap.2bit
>>> get_searchdb('blat-transcript', 'Homo sapiens', '/path/to/search/files')
/path/to/search/files/HomSap_transcript.2bit
Arguments::
:param str search_type: The name of the search method (blast or blat, and sub-type: blastp, blastn, blat, tblat...)
:param str species: Name of species associated with the database. If there is a space, it will be replaced with an
underscore.
:param str db_loc: Path to folder containing collection of search databases.
:param int verbose: How verbose should the output be. Zero suppresses all output, 2 is max verbosity.
:param int indent: Indent level for printed output.
:return str: Path to the identified search database.
"""
if verbose:
print('Search DB set to auto, choosing search_db...', indent=indent)
species = species.replace(' ', '_')
if verbose > 1:
print('Search DB location set to: ', db_loc, indent=indent)
db_type_dict = {
'blastx': "protein",
'blastp': "protein",
'blastn': "genome",
'tblastn': "genome",
'tblastx': "genome",
'blastn-transcript': "transcript",
'tblastn-transcript': "transcript",
'tblastx-transcript': "transcript",
'blat': "blat",
'tblat': "blat",
'blat-transcript': 'blat-transcript',
'tblat-transcript': 'tblat-transcript'
}
try:
db_type = db_type_dict[search_type]
except KeyError:
print('Unable to determine search db type!', indent=indent)
raise SearchError('Improper search type given ({})!'.format(search_type))
if verbose > 1:
print('DB type: ', db_type, indent=indent)
db_path = Path(db_loc).absolute()
if not db_path.exists():
db_path = Path(db_loc)
if db_path.exists() and db_path.is_dir():
if db_type == 'blat':
glob_path = [i for i in
db_path.glob('{0}*.2bit'.format(species.replace(' ', '_')))] # Todo: generalize extension
elif db_type in ['blat-transcript', 'tblat-transcript']:
glob_path = [i for i in db_path.glob('{0}*transcript.2bit'.format(species.replace(' ', '_')))]
else:
glob_path = [i for i in db_path.glob('{0}_{1}*'.format(species.replace(' ', '_'), db_type))]
if not glob_path:
if verbose:
print('No DB found! Trying again with abbreviated species name', indent=indent)
species_abbv = ''.join([i[0:3] for i in species.title().split('_')])
# making it insensitive to case for Glob
species_abbv_insensitive = ''.join(['[{0}{1}]'.format(c.lower(),
c.upper()) for c in species_abbv if c.isalpha()])
if verbose:
print('Abbreviated species name: ', species_abbv, indent=indent)
print('RegEx species abbreviation: ', species_abbv_insensitive, indent=indent)
if db_type == 'blat':
glob_path = [i for i in db_path.glob('{0}*.2bit'.format(species_abbv_insensitive))]
elif db_type in ['blat-transcript', 'tblat-transcript']:
glob_path = [i for i in db_path.glob('{0}*transcript.2bit'.format(species_abbv_insensitive))]
else:
glob_path = [i for i in db_path.glob('{0}_{1}*'.format(species_abbv_insensitive, db_type))]
try:
if verbose:
print(glob_path, indent=indent)
if isinstance(glob_path, list):
search_db = sorted(glob_path, reverse=True)[0]
else:
search_db = glob_path
except IndexError:
print('WARNING: COULD NOT FIND DATABASE! ABORTING!', indent=indent)
raise DatabaseNotFoundError('', 'No databases were found!')
else:
raise DatabaseNotFoundError('DB_Path {} does not exist!'.format(str(db_path)))
if verbose:
print('{0} DB chosen: {1}'.format(search_type, str(search_db)), indent=indent)
return search_db
def blat_server(twobit, order='start', host='localhost', port=20000, type='blat', log='/dev/null', species=None,
search_db_loc='/usr/db/blat', verbose=1, indent=0, try_limit=10, **kwargs):
"""Convenience function that controls a gfServer. Still in alpha.
This function serves as a python wrapper for the Bash gfServer command. The user can either provide a .2bit file,
or else can provide a species and set 'twobit="auto"' to have the function use 'get_searchdb()' to find a .2bit file
automatically. By default, the function is set to start up a new gfServer instance, but using the 'order' parameter,
the user can execute any of the standard gfServer commands such as 'stop' and 'status'.
To start a gfServer, the function first probes the selected port (default is 20000) to ensure it's unused; if it is
currently in use, the program then goes port-by-port in ascending order until it finds an empty port to use for the
server. Then, it simply calls the gfServer command with all the
import copy
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
cmap = colors.ListedColormap(['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00', '#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
norm = colors.Normalize(0, 9)
class Object():
def __init__(self, points=None, low_coord=None, high_coord=None, cohesion_type='contact', color=None):
    self.cohesion_type = cohesion_type
    self.low = low_coord  # coordinate of the top-left corner of the smallest rectangle the object fits in
    self.high = high_coord  # bottom-right
    # a None default avoids the shared-mutable-default pitfall; sorted() copies the input
    self.points = sorted(points) if points is not None else []  # list of the pixels (i, j, c) belonging to the object ((i, j) = coordinates, c = color)
    self.color = color  # color of the object, defined if it is unicolored
self.color = color # color of the object, defined if it is unicolored
def __repr__(self):
s = '\nObject: c_type: '+self.cohesion_type+', color: '+format(self.color)+', low: '+format(self.low)+', high: '+format(self.high)+'\nPoints: '
for p in self.points:
s += format(p)+" "
return s
def same(self, other, mode = 'both'):
'''
Returns true if:
- self and other have the same shape if mode == 'shape'
- self and other have the same color if mode == 'color'
- self and other are the same if mode == 'both'
The answer doesn't depend on the two objects' locations
'''
self.points.sort()
other.points.sort()
if mode == 'both':
return self.points == other.points
elif mode == 'shape':
    if self.nb_points() != other.nb_points():
        return False
    for (i, j, _), (i_, j_, _) in zip(self.points, other.points):
        if i != i_ or j != j_:
            return False
    return True
elif mode == 'color':
    if self.color is None and other.color is None:
        return False
    else:
        return self.color == other.color
def nb_points(self):
return len(self.points)
def rectangle_size(self): #(height, width) of the smallest rectangle the object fits in
if self.points == []: return 0, 0
return self.high[0] - self.low[0] + 1, self.high[1] - self.low[1] + 1
def is_rectangle(self):
n, m = self.rectangle_size()
return self.nb_points() == n * m
def display(self, mode = 'display'):
'''
If mode == 'display', plots the object (call plt.show() afterward);
otherwise, returns an image representing the object
'''
if self.color != 0:
    img = np.zeros(self.rectangle_size())
else:
    img = np.ones(self.rectangle_size())
for i, j, c in self.points:
img[i][j] = c
if mode == 'display':
_, ax = plt.subplots(1)
ax.invert_yaxis()
ax.pcolormesh(img, cmap=cmap, norm=norm, edgecolor='xkcd:dark gray', linewidth=.01)
ax.set_title('Location : {}, {}'.format(self.low[0], self.low[1]))
else:
return img
def duplicate(self):
return copy.deepcopy(self)
def translate(self, i, j, mode = 'absolute'): # relative and absolute translation
if mode == 'relative':
self.low = self.low[0] + i, self.low[1] + j
self.high = self.high[0] + i, self.high[1] + j
else:
x, y = self.rectangle_size()
self.low = i, j
self.high = i + x - 1, j + y - 1
return self
def change_color(self, c):
if c is None: return None
self.color = c
self.points = [(i, j, c) for i, j, _ in self.points]
return self
def symmetry_x(self):
if self.points == []:
return self
n = self.high[0] - self.low[0]
self.points = [(int(2 * (n / 2) - i), j, c) for i, j, c in self.points]
return self
def symmetry_y(self):
if self.points == []:
return self
n = self.high[1] - self.low[1]
self.points = [(i, int(2 * (n / 2) - j), c) for i, j, c in self.points]
return self
def rotate(self):
if self.points == []:
return self
a, b = self.rectangle_size()
a, b = (a - 1) / 2, (b - 1) / 2
self.points = [(a + b - j, b - a + i, c) for i, j, c in self.points]
a, b = 0, 0
for i, j, _ in self.points:
if i - int(i) != 0: a = 0.5
if j - int(j) != 0: b = 0.5
self.points = [(int(i + a), int(j + b), c) for i, j, c in self.points]
a, b = min(self.points, key=lambda x: x[0])[0], min(self.points, key=lambda x: x[1])[1]
self.points = [(i - a, j - b, c) for i, j, c in self.points]
self.high = self.low[0] + max(self.points, key=lambda x: x[0])[0], self.low[1] + max(self.points, key=lambda x: x[1])[1]
return self
def __eq__(self, other): # tests if self and other are the same objects and at the same location
return self.low == other.low and self.same(other, 'both')
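# A small sketch of the Object API above (a hypothetical 1x2 object of
# color 3 anchored at (2, 2)):
def _demo_object_ops():
    obj = Object(points=[(0, 0, 3), (0, 1, 3)], low_coord=(2, 2),
                 high_coord=(2, 3), color=3)
    dup = obj.duplicate().translate(0, 0).change_color(4)
    assert obj.same(dup, mode='shape')      # same pixel layout
    assert not obj.same(dup, mode='color')  # color 3 vs 4
    assert obj.is_rectangle() and obj.rectangle_size() == (1, 2)
    return dup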
def find_object_aux(grid, checked, cohesion_type, background_color, n, m, i, j, points):
if cohesion_type[:7] == 'contact':
to_check = []
if j < m-1:
to_check.append((i, j+1))
if i < n-1 and cohesion_type[:16] == 'contact by point':
to_check.append((i+1, j+1))
if i < n-1:
to_check.append((i+1, j))
if j > 0 and cohesion_type[:16] == 'contact by point':
to_check.append((i+1, j-1))
if i > 0:
to_check.append((i-1, j))
if j < m-1 and cohesion_type[:16] == 'contact by point':
to_check.append((i-1, j+1))
if j > 0:
to_check.append((i, j-1))
if i > 0 and cohesion_type[:16] == 'contact by point':
to_check.append((i-1, j-1))
for x, y in to_check:
if checked[x, y] == 0 and grid[x][y] != background_color and ((grid[x][y] == grid[i][j] and cohesion_type[-5:] == 'color') or cohesion_type[-5:] != 'color'):
checked[x, y] = 1
points.append((x, y, grid[x][y]))
points = find_object_aux(grid, checked, cohesion_type, background_color, n, m, x, y, points)
return points
def find_objects_color(grid, background_color):
objects = []
for c in range(10):
if c != background_color:
points = [(i, j, c) for i in range(len(grid)) for j in range(len(grid[0])) if grid[i][j] == c]
if points != []:
obj = Object(points=points, cohesion_type='color', color=c)
obj.low = min(obj.points, key=lambda x: x[0])[0], min(obj.points, key=lambda x: x[1])[1]
obj.high = max(obj.points, key=lambda x: x[0])[0], max(obj.points, key=lambda x: x[1])[1]
obj.points = [(i - obj.low[0], j - obj.low[1], c) for i, j, c in obj.points]
objects.append(obj)
return objects
def find_objects(grid, cohesion_type = 'contact by point and color', background_color = 0):
'''
reads a grid and returns the objects seen on it given a cohesion type and a background color
'''
objects = []
n, m = len(grid), len(grid[0])
checked = np.zeros((n, m))
if cohesion_type[:7] == 'contact':
for i in range(n):
for j in range(m):
if checked[i, j] == 0 and grid[i][j] != background_color:
checked[i, j] = 1
points = find_object_aux(grid, checked, cohesion_type, background_color, n, m, i, j, [(i, j, grid[i][j])])
if cohesion_type[-5:] == 'color':
color = grid[i][j]
else:
color = None
obj = Object(points=points, cohesion_type=cohesion_type, color=color)
obj.low = min(obj.points, key=lambda x: x[0])[0], min(obj.points, key=lambda x: x[1])[1]
obj.high = max(obj.points, key=lambda x: x[0])[0], max(obj.points, key=lambda x: x[1])[1]
obj.points = [(i - obj.low[0], j - obj.low[1], c) for i, j, c in obj.points]
objects.append(obj)
if cohesion_type == 'color':
objects = find_objects_color(grid, background_color)
return objects
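# A minimal sketch on a hypothetical 3x3 grid: the two 1-pixels touch
# vertically, while the lone 2-pixel only touches them diagonally with a
# different color, so two objects come back:
def _demo_find_objects():
    grid = [[0, 1, 0],
            [0, 1, 0],
            [2, 0, 0]]
    objects = find_objects(grid, cohesion_type='contact by point and color')
    assert len(objects) == 2
    return objects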
def objects_to_grid(objects, n = None, m = None, supple=False, background_color = 0):
'''
puts objects on a blank grid given a background color
(n, m), if defined, is the size of the grid
if supple == True, the grid may grow beyond (n, m) to receive objects that would exceed this size;
otherwise, they are cropped or ignored
'''
if objects == []: return [[]]
if n is None:
    n = min(max(objects, key=lambda obj: obj.high[0]).high[0] + 1, 30)
if m is None:
    m = min(max(objects, key=lambda obj: obj.high[1]).high[1] + 1, 30)
if supple:
n = min(max(n, max(objects, key=lambda obj: obj.high[0]).high[0] + 1), 30)
m = min(max(m, max(objects, key=lambda obj: obj.high[1]).high[1] + 1), 30)
if n <= 0 or m <= 0: return [[]]
grid = np.ones((n, m)) * background_color
for obj in objects:
for i, j, c in obj.points:
if 0 <= i + obj.low[0] < n and 0 <= j + obj.low[1] < m and 0 <= c <= 9:
grid[i + obj.low[0]][j + obj.low[1]] = c
return grid
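# A round-trip sketch: decompose a grid into objects, then rebuild it; the
# low/high bookkeeping in find_objects makes this lossless here:
def _demo_round_trip():
    grid = [[0, 1, 0],
            [0, 1, 0],
            [2, 0, 0]]
    rebuilt = objects_to_grid(find_objects(grid), n=3, m=3)
    assert np.array_equal(rebuilt, np.array(grid))
    return rebuilt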
# def add_pixel(obj, i, j, c):
# obj.points.append((i, j, c))
# obj.update()
# return obj
# def delete_pixel(obj, i, j):
# obj.points = [(x, y, c) for x, y, c in obj.points if x != i or y != j]
# obj.update()
# return obj
def display(img):
'''
plots an image with the coloring of ARC
'''
fig, ax = plt.subplots(1)
ax.invert_yaxis()
ax.pcolormesh(img, cmap=cmap, norm=norm, edgecolor='xkcd:dark gray', linewidth=.01)
return fig
def fill(i, j, n, m, c):
'''
creates a rectangle in (i, j) of color c, height n+1 and width m+1
'''
| |
import logging
from datetime import datetime
import xml.etree.ElementTree as ET
from indra.statements import *
from indra.statements.statements import Migration
from indra.statements.context import MovementContext
from indra.util import UnicodeXMLTreeBuilder as UTB
logger = logging.getLogger(__name__)
class CWMSError(Exception):
pass
POLARITY_DICT = {'CC': {'ONT::CAUSE': 1,
'ONT::INFLUENCE': 1},
'EVENT': {'ONT::INCREASE': 1,
'ONT::MODULATE': None,
'ONT::DECREASE': -1,
'ONT::INHIBIT': -1,
'ONT::TRANSFORM': None,
'ONT::STIMULATE': 1,
'ONT::ARRIVE': None,
'ONT::DEPART': None,
'ONT::MOVE': None,
'ONT::BE': None},
'EPI': {'ONT::ASSOCIATE': None}}
class CWMSProcessor(object):
"""The CWMSProcessor currently extracts causal relationships between
terms (nouns) in EKB. In the future, this processor can be extended to
extract other types of relations, or to extract relations involving
events.
For more details on the TRIPS EKB XML format, see
http://trips.ihmc.us/parser/cgi/drum
Parameters
----------
xml_string : str
A TRIPS extraction knowledge base (EKB) in XML format as a string.
Attributes
----------
tree : xml.etree.ElementTree.Element
An ElementTree object representation of the TRIPS EKB XML.
doc_id: str
Document ID
statements : list[indra.statements.Statement]
A list of INDRA Statements that were extracted from the EKB.
sentences : dict[str: str]
The list of all sentences in the EKB with their IDs
paragraphs : dict[str: str]
The list of all paragraphs in the EKB with their IDs
par_to_sec : dict[str: str]
A map from paragraph IDs to their associated section types
"""
def __init__(self, xml_string):
self.statements = []
# Parse XML
try:
self.tree = ET.XML(xml_string, parser=UTB())
except ET.ParseError:
logger.error('Could not parse XML string')
self.tree = None
return
# Get the document ID from the EKB tag.
self.doc_id = self.tree.attrib.get('id')
# Store all paragraphs and store all sentences in a data structure
paragraph_tags = self.tree.findall('input/paragraphs/paragraph')
sentence_tags = self.tree.findall('input/sentences/sentence')
self.paragraphs = {p.attrib['id']: p.text for p in paragraph_tags}
self.sentences = {s.attrib['id']: s.text for s in sentence_tags}
self.par_to_sec = {p.attrib['id']: p.attrib.get('sec-type')
for p in paragraph_tags}
# Keep a list of events that are part of relations and events
# subsumed by other events
self.relation_events = set()
self.subsumed_events = set()
# Keep a list of unhandled events for development purposes
self._unhandled_events = set()
self._preprocess_events()
def _preprocess_events(self):
events = self.tree.findall("EVENT/[type]")
for event in events:
affected = event.find("*[@role=':AFFECTED']")
if affected is not None:
affected_id = affected.attrib.get('id')
if affected_id:
self.subsumed_events.add(affected_id)
def extract_causal_relations(self):
"""Extract Influence Statements from the EKB."""
relations = self.tree.findall("CC/[type]")
for relation in relations:
st = self.influence_from_relation(relation)
if st:
self.statements.append(st)
events = self.tree.findall("EVENT/[type]")
for event in events:
st = self.influence_from_event(event)
if st:
self.statements.append(st)
        # In some EKBs we get two redundant relations over the same arguments;
        # we eliminate these here
self._remove_multi_extraction_artifacts()
# Print unhandled event types
logger.debug('Unhandled event types: %s' %
(', '.join(sorted(self._unhandled_events))))
def extract_events(self):
"""Extract standalone Events from the EKB."""
events = [(1, self.tree.findall("EVENT/[type='ONT::INCREASE']")),
(-1, self.tree.findall("EVENT/[type='ONT::DECREASE']"))]
for polarity, event_list in events:
for event_term in event_list:
event_id = event_term.attrib.get('id')
if event_id in self.subsumed_events or \
event_id in self.relation_events:
continue
event = self.event_from_event(event_term)
if event:
                    # Set the polarity based on the one implied by the
                    # increase/decrease event type
event.delta.set_polarity(polarity)
self.statements.append(event)
self._remove_multi_extraction_artifacts()
def extract_migrations(self):
ev_types = ['ONT::MOVE', 'ONT::DEPART', 'ONT::ARRIVE']
events = []
for et in ev_types:
evs = self.tree.findall("EVENT/[type='%s']" % et)
events += evs
for event_term in events:
event_id = event_term.attrib.get('id')
if event_id in self.subsumed_events or \
event_id in self.relation_events:
continue
event = self.migration_from_event(event_term)
if event is not None:
self.statements.append(event)
self._remove_multi_extraction_artifacts()
def extract_correlations(self):
correlations = self.tree.findall("EPI/[type='ONT::ASSOCIATE']")
for cor in correlations:
st = self._association_from_element(cor, 'EPI', 'NEUTRAL1',
'NEUTRAL2', False)
if st:
self.statements.append(st)
# self._remove_multi_extraction_artifacts()
def _influence_from_element(self, element, element_type, subj_arg,
obj_arg, is_arg):
components = self._statement_components_from_element(
element, element_type, subj_arg, obj_arg, is_arg)
if components is None:
return None
subj, obj, evidence, rel_type = components
# If the object polarity is not given explicitly, we set it
# based on the one implied by the relation
if obj.delta.polarity is None:
obj.delta.set_polarity(POLARITY_DICT[element_type][rel_type])
st = Influence(subj, obj, evidence=[evidence])
return st
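    # For instance (sketch), a CC element expressing "drought causes famine"
    # yields Influence(subj=drought event, obj=famine event); if the outcome
    # carries no explicit polarity, ONT::CAUSE implies +1 via POLARITY_DICT.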
def influence_from_relation(self, relation):
"""Return an Influence from a CC element in the EKB."""
return self._influence_from_element(relation, 'CC', 'FACTOR',
'OUTCOME', True)
def influence_from_event(self, event):
"""Return an Influence from an EVENT element in the EKB."""
return self._influence_from_element(event, 'EVENT', 'AGENT',
'AFFECTED', False)
def _statement_components_from_element(self, element, element_type,
member1_arg, member2_arg, is_arg):
element_id = element.attrib.get('id')
rel_type = element.find('type').text
if rel_type not in POLARITY_DICT[element_type]:
self._unhandled_events.add(rel_type)
return None
member1_id, member1_term = self._get_term_by_role(
element, member1_arg, is_arg)
member2_id, member2_term = self._get_term_by_role(
element, member2_arg, is_arg)
if member1_term is None or member2_term is None:
return None
member1 = self.get_event_or_migration(member1_term)
member2 = self.get_event_or_migration(member2_term)
if member1 is None or member2 is None:
return None
self.relation_events |= {member1_id, member2_id, element_id}
evidence = self._get_evidence(element)
return member1, member2, evidence, rel_type
def _association_from_element(self, element, element_type, member1_arg,
member2_arg, is_arg):
components = self._statement_components_from_element(
element, element_type, member1_arg, member2_arg, is_arg)
if components is None:
return None
member1, member2, evidence, _ = components
st = Association([member1, member2], evidence=[evidence])
return st
def event_from_event(self, event_term):
"""Return an Event from an EVENT element in the EKB."""
arg_id, arg_term = self._get_term_by_role(event_term, 'AFFECTED',
False)
if arg_term is None:
return None
# Make an Event statement if it is a standalone event
evidence = self._get_evidence(event_term)
event = self._get_event(arg_term, evidence=[evidence])
if event is None:
return None
event.context = self.get_context(event_term)
return event
def migration_from_event(self, event_term):
"""Return a Migration event from an EVENT element in the EKB."""
# First process at event level
migration_grounding = ('wm/concept/causal_factor/'
'social_and_political/migration')
concept_name = 'migration'
concept_db_refs = {'WM': migration_grounding}
# Get the element's text and use it to construct a Concept
element_text_element = event_term.find('text')
if element_text_element is not None:
element_text = element_text_element.text
concept_db_refs['TEXT'] = element_text
concept_name = sanitize_name(element_text)
concept = Concept(concept_name, db_refs=concept_db_refs)
evidence = self._get_evidence(event_term)
time = self._extract_time(event_term)
# Locations can be at different levels, keep expanding the list
locs = self._get_migration_locations(event_term)
neutral_id, neutral_term = self._get_term_by_role(event_term,
'NEUTRAL',
is_arg=False)
if neutral_term is not None:
locs = self._get_migration_locations(neutral_term, locs, 'origin')
# Arguments can be under AGENT or AFFECTED
agent_arg_id, agent_arg_term = self._get_term_by_role(
event_term, 'AGENT', False)
affected_arg_id, affected_arg_term = self._get_term_by_role(
event_term, 'AFFECTED', False)
if agent_arg_term is None and affected_arg_term is None:
context = MovementContext(locations=locs, time=time)
event = Migration(concept, context=context, evidence=[evidence])
return event
# If there are argument terms, extract more data from them
# Try to get the quantitative state associated with the event
size = None
for arg_term in [agent_arg_term, affected_arg_term]:
if arg_term is not None:
size_arg = arg_term.find('size')
if size_arg is not None and size_arg.attrib.get('id'):
size = self._get_size(size_arg.attrib['id'])
break
# Get more locations from arguments and inevents
if agent_arg_term is not None:
locs = self._get_migration_locations(
agent_arg_term, locs, 'destination')
inevent_term = self._get_inevent_term(agent_arg_term)
if inevent_term is not None:
locs = self._get_migration_locations(inevent_term, locs)
if time is None:
time = self._extract_time(inevent_term)
if size is None:
size = self._get_size_and_entity(inevent_term)
other_event_term = self._get_other_event_term(agent_arg_term)
if other_event_term is not None:
locs = self._get_migration_locations(other_event_term, locs)
if time is None:
time = self._extract_time(other_event_term)
if size is None:
size = self._get_size_and_entity(other_event_term)
if affected_arg_term is not None:
locs = self._get_migration_locations(
affected_arg_term, locs, 'destination')
context = MovementContext(locations=locs, time=time)
event = Migration(
concept, delta=size, context=context, evidence=[evidence])
return event
def _get_inevent_term(self, arg_term):
refset_arg = arg_term.find('refset')
if refset_arg is None:
return None
refset_id = refset_arg.attrib['id']
refset_term = self.tree.find("*[@id='%s']" % refset_id)
if refset_term is None:
return None
features = refset_term.find('features')
if features is None:
return None
inevent = features.find('inevent')
if inevent is None:
return None
inevent_id = inevent.attrib['id']
self.subsumed_events.add(inevent_id)
inevent_term = self.tree.find("*[@id='%s']" % inevent_id)
return inevent_term
def _get_other_event_term(self, arg_term):
refset_arg = arg_term.find('refset')
potential_events = self.tree.findall("EVENT/[type].//arg1/..") + \
self.tree.findall("EVENT/[type].//arg2/..")
for ev in potential_events:
arg1 = ev.find('arg1')
arg2 = ev.find('arg2')
for arg in [arg1, arg2]:
if arg is not None:
if refset_arg is not None:
if arg.attrib.get('id') == refset_arg.attrib.get('id'):
event_id = ev.attrib['id']
self.subsumed_events.add(event_id)
event_term = self.tree.find("*[@id='%s']"
% event_id)
return event_term
else:
# Refset might be on a different level
if arg.attrib.get('id'):
term = self.tree.find("*[@id='%s']" % arg.attrib['id'])
arg_refset_arg = term.find('refset')
if arg_refset_arg is not None:
if arg_refset_arg.attrib.get('id') == \
arg_term.attrib.get('id'):
event_id = ev.attrib['id']
self.subsumed_events.add(event_id)
event_term = self.tree.find("*[@id='%s']"
% event_id)
return event_term
return None
def _get_migration_locations(self, event_term, existing_locs=None,
default_role='unknown'):
if existing_locs is None:
existing_locs = []
new_locs = []
loc = self._extract_geoloc(event_term, arg_link='location')
if loc is not None:
new_locs.append({'location': loc,
'role': default_role})
loc = self._extract_geoloc(event_term, arg_link='to-location')
if loc is not None:
new_locs.append({'location': loc,
'role': 'destination'})
loc = self._extract_geoloc(event_term, arg_link='from-location')
if loc is not None:
new_locs.append({'location': loc,
'role': 'origin'})
for loc in new_locs:
            if loc not in existing_locs:
                existing_locs.append(loc)
        return existing_locs
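# Hypothetical usage sketch (assumes `ekb_xml` holds a valid TRIPS EKB string):
# cp = CWMSProcessor(ekb_xml)
# cp.extract_causal_relations()
# cp.extract_migrations()
# statements = cp.statements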
import datetime
import json
import responses
from django.contrib.auth.models import User
from django.core.management import call_command
from django.db.models.signals import post_save
from django.test import TestCase
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APIClient
from rest_hooks.models import model_saved
from ndoh_hub import utils, utils_tests
from registrations.models import Registration, Source, SubscriptionRequest
from registrations.signals import psh_validate_subscribe
from .models import Change
from .signals import psh_validate_implement
from .tasks import (
remove_personally_identifiable_fields,
restore_personally_identifiable_fields,
validate_implement,
)
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
def override_get_today():
return datetime.datetime.strptime("20150817", "%Y%m%d").date()
def mock_get_active_subs_mcpre_mcpost_pmtct_nc(registrant_id):
pmtct_prebirth_sub_id = "subscriptionid-pmtct-prebirth-000000"
momconnect_prebirth_sub_id = "subscriptionid-momconnect-prebirth-0"
nurseconnect_sub_id = "subscriptionid-nurseconnect-00000000"
momconnect_postbirth_sub_id = "subscriptionid-momconnect-postbirth-"
responses.add(
responses.GET,
"http://sbm/api/v1/subscriptions/?active=True&identity={}".format(
registrant_id
),
json={
"next": None,
"previous": None,
"results": [
{ # pmtct_prebirth.patient.1 subscription
"id": pmtct_prebirth_sub_id,
"identity": registrant_id,
"active": True,
"completed": False,
"lang": "eng_ZA",
"url": "http://sbm/api/v1/subscriptions/{}".format(
pmtct_prebirth_sub_id
),
"messageset": 11,
"next_sequence_number": 11,
"schedule": 101,
"process_status": 0,
"version": 1,
"metadata": {},
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693272Z",
},
{ # momconnect_prebirth.hw_full.1 subscription
"id": momconnect_prebirth_sub_id,
"identity": registrant_id,
"active": True,
"completed": False,
"lang": "eng_ZA",
"url": "http://sbm/api/v1/subscriptions/{}".format(
momconnect_prebirth_sub_id
),
"messageset": 21,
"next_sequence_number": 21,
"schedule": 121,
"process_status": 0,
"version": 1,
"metadata": {},
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693272Z",
},
{ # nurseconnect.hw_full.1 subscription
"id": nurseconnect_sub_id,
"identity": registrant_id,
"active": True,
"completed": False,
"lang": "eng_ZA",
"url": "http://sbm/api/v1/subscriptions/{}".format(
nurseconnect_sub_id
),
"messageset": 61,
"next_sequence_number": 6,
"schedule": 161,
"process_status": 0,
"version": 1,
"metadata": {},
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693272Z",
},
{ # momconnect_postbirth.hw_full.1 subscription
"id": momconnect_postbirth_sub_id,
"identity": registrant_id,
"active": True,
"completed": False,
"lang": "eng_ZA",
"url": "http://sbm/api/v1/subscriptions/{}".format(
momconnect_postbirth_sub_id
),
"messageset": 32,
"next_sequence_number": 32,
"schedule": 132,
"process_status": 0,
"version": 1,
"metadata": {},
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693272Z",
},
],
},
status=200,
content_type="application/json",
match_querystring=True,
)
return [
pmtct_prebirth_sub_id,
momconnect_prebirth_sub_id,
nurseconnect_sub_id,
momconnect_postbirth_sub_id,
]
def mock_get_active_subs_whatsapp(registrant_id, messagesets):
whatsapp_prebirth_sub_id = "subscriptionid-whatsapp-prebirth-0"
responses.add(
responses.GET,
"http://sbm/api/v1/subscriptions/?active=True&identity={}".format(
registrant_id
),
json={
"next": None,
"previous": None,
"results": [
{ # whatsapp_momconnect_prebirth.hw_full.1 subscription
"id": whatsapp_prebirth_sub_id,
"identity": registrant_id,
"active": True,
"completed": False,
"lang": "eng_ZA",
"url": "http://sbm/api/v1/subscriptions/{}".format(
whatsapp_prebirth_sub_id
),
"messageset": messageset,
"next_sequence_number": 21,
"schedule": 121,
"process_status": 0,
"version": 1,
"metadata": {},
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693272Z",
}
for messageset in messagesets
],
},
status=200,
content_type="application/json",
match_querystring=True,
)
return whatsapp_prebirth_sub_id
def mock_get_messagesets(messagesets):
"""
Mocks the request for getting the list of messagesets using responses.
`messagesets` is a list of short names for the messagesets that should be
returned.
"""
response = [
{
"id": i,
"short_name": short_name,
"content_type": "text",
"notes": "",
"next_set": 7,
"default_schedule": 1,
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693272Z",
}
for i, short_name in enumerate(messagesets)
]
responses.add(
responses.GET,
"http://sbm/api/v1/messageset/",
status=200,
json={"next": None, "previous": None, "results": response},
content_type="application/json",
)
def mock_get_messageset(messageset_id, short_name):
responses.add(
responses.GET,
"http://sbm/api/v1/messageset/{}/".format(messageset_id),
json={
"id": messageset_id,
"short_name": short_name,
"content_type": "text",
"notes": "",
"next_set": 7,
"default_schedule": 1,
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693272Z",
},
status=200,
content_type="application/json",
)
def mock_search_messageset(messageset_id, short_name):
responses.add(
responses.GET,
"http://sbm/api/v1/messageset/?short_name={}".format(short_name),
json={
"next": None,
"previous": None,
"results": [
{
"id": messageset_id,
"short_name": short_name,
"content_type": "text",
"notes": "",
"next_set": 7,
"default_schedule": 1,
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693272Z",
}
],
},
status=200,
content_type="application/json",
match_querystring=True,
)
def mock_get_all_messagesets():
responses.add(
responses.GET,
"http://sbm/api/v1/messageset/",
json={
"next": None,
"previous": None,
"results": [
{
"id": 21,
"short_name": "momconnect_prebirth.hw_full.1",
"content_type": "text",
"notes": "",
"next_set": 7,
"default_schedule": 1,
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693272Z",
},
{
"id": 61,
"short_name": "nurseconnect.hw_full.1",
"content_type": "text",
"notes": "",
"next_set": 7,
"default_schedule": 1,
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693272Z",
},
{
"id": 62,
"short_name": "nurseconnect_childm.hw_full.1",
"content_type": "text",
"notes": "",
"next_set": 7,
"default_schedule": 1,
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693272Z",
},
{
"id": 11,
"short_name": "pmtct_prebirth.patient.1",
"content_type": "text",
"notes": "",
"next_set": 7,
"default_schedule": 1,
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693272Z",
},
{
"id": 98,
"short_name": "whatsapp_momconnect_prebirth.hw_full.1",
"content_type": "text",
"notes": "",
"next_set": 7,
"default_schedule": 1,
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693272Z",
},
{
"id": 99,
"short_name": "whatsapp_pmtct_prebirth.hw_full.1",
"content_type": "text",
"notes": "",
"next_set": 7,
"default_schedule": 1,
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693272Z",
},
],
},
status=200,
content_type="application/json",
match_querystring=True,
)
def mock_get_subscriptions(querystring=None, results=None):
    """
    Uses responses to mock the request for getting a list of subscriptions.
    `querystring` is the querystring to use for filtering
    `results` is the list of subscriptions returned in the response
    """
    if results is None:
        results = []
    url = "http://sbm/api/v1/subscriptions/{}".format(querystring)
responses.add(
responses.GET,
url,
json={"next": None, "previous": None, "results": results},
status=200,
content_type="application/json",
match_querystring=bool(querystring),
)
def mock_get_active_subs_mc(registrant_id):
momconnect_prebirth_sub_id = "subscriptionid-momconnect-prebirth-0"
mock_get_subscriptions(
"?active=True&identity={}".format(registrant_id),
[
{ # momconnect_prebirth.hw_full.1 subscription
"id": momconnect_prebirth_sub_id,
"identity": registrant_id,
"active": True,
"completed": False,
"lang": "eng_ZA",
"url": "http://sbm/api/v1/subscriptions/{}".format(
momconnect_prebirth_sub_id
),
"messageset": 21,
"next_sequence_number": 21,
"schedule": 121,
"process_status": 0,
"version": 1,
"metadata": {},
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693272Z",
}
],
)
return [momconnect_prebirth_sub_id]
def mock_get_active_subscriptions_none(registrant_id, messageset=None):
qs = "?active=True&identity={}".format(registrant_id)
if messageset is not None:
qs += "&messageset={}".format(messageset)
mock_get_subscriptions(qs)
return []
def mock_update_subscription(subscription_id, identity_id=None):
responses.add(
responses.PATCH,
"http://sbm/api/v1/subscriptions/{}/".format(subscription_id),
json={
"id": subscription_id,
"identity": identity_id,
"active": True,
"completed": False,
"lang": "eng_ZA",
"url": "http://sbm/api/v1/subscriptions/{}".format(subscription_id),
"messageset": 32,
"next_sequence_number": 32,
"schedule": 132,
"process_status": 0,
"version": 1,
"metadata": {},
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693272Z",
},
status=200,
content_type="application/json",
)
def mock_get_subscription(subscription_id, identity_id=None):
responses.add(
responses.GET,
"http://sbm/api/v1/subscriptions/{}/".format(subscription_id),
json={
"id": subscription_id,
"identity": identity_id,
"active": True,
"completed": False,
"lang": "eng_ZA",
"url": "http://sbm/api/v1/subscriptions/{}".format(subscription_id),
"messageset": 32,
"next_sequence_number": 32,
"schedule": 132,
"process_status": 0,
"version": 1,
"metadata": {},
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693272Z",
},
status=200,
content_type="application/json",
)
def mock_get_active_nurseconnect_subscriptions(registrant_id):
nurseconnect_sub_id = "subscriptionid-nurseconnect-00000000"
mock_get_subscriptions(
"?active=True&messageset=61&identity={}".format(registrant_id),
[
{ # nurseconnect.hw_full.1 subscription
"id": nurseconnect_sub_id,
"identity": registrant_id,
"active": True,
"completed": False,
"lang": "eng_ZA",
"url": "http://sbm/api/v1/subscriptions/{}".format(nurseconnect_sub_id),
"messageset": 61,
"next_sequence_number": 11,
"schedule": 161,
"process_status": 0,
"version": 1,
"metadata": {},
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693272Z",
}
],
)
return [nurseconnect_sub_id]
def mock_get_active_nurseconnect_childm_subscriptions(registrant_id):
nurseconnect_sub_id = "subscriptionid-nurseconnect-00000000"
mock_get_subscriptions(
"?active=True&messageset=62&identity={}".format(registrant_id),
[
{ # nurseconnect.hw_full.1 subscription
"id": nurseconnect_sub_id,
"identity": registrant_id,
"active": True,
"completed": False,
"lang": "eng_ZA",
"url": "http://sbm/api/v1/subscriptions/{}".format(nurseconnect_sub_id),
"messageset": 62,
"next_sequence_number": 11,
"schedule": 161,
"process_status": 0,
"version": 1,
"metadata": {},
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693272Z",
}
],
)
return [nurseconnect_sub_id]
def mock_deactivate_subscriptions(subscription_ids):
for subscription_id in subscription_ids:
responses.add(
responses.PATCH,
"http://sbm/api/v1/subscriptions/{}/".format(subscription_id),
json={"active": False},
status=200,
content_type="application/json",
)
return
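# Hypothetical usage sketch: the helpers above register canned HTTP responses,
# so a test method decorated with @responses.activate can exercise code that
# talks to the subscription store without real network I/O, e.g.:
# @responses.activate
# def test_example(self):
#     sub_ids = mock_get_active_subs_mc("mother01-63e2-4acc-9b94-26663b9bc267")
#     mock_deactivate_subscriptions(sub_ids)
#     ...  # code under test issues the GET/PATCH requests matched above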
class APITestCase(TestCase):
def setUp(self):
self.adminclient = APIClient()
self.normalclient = APIClient()
self.otherclient = APIClient()
utils.get_today = override_get_today
class AuthenticatedAPITestCase(APITestCase):
def _replace_post_save_hooks_change(self):
def has_listeners():
return post_save.has_listeners(Change)
assert has_listeners(), (
"Change model has no post_save listeners. Make sure"
" helpers cleaned up properly in earlier tests."
)
post_save.disconnect(receiver=psh_validate_implement, sender=Change)
post_save.disconnect(receiver=model_saved, dispatch_uid="instance-saved-hook")
assert not has_listeners(), (
"Change model still has post_save listeners. Make sure"
" helpers cleaned up properly in earlier tests."
)
def _restore_post_save_hooks_change(self):
def has_listeners():
return post_save.has_listeners(Change)
assert not has_listeners(), (
"Change model still has post_save listeners. Make sure"
" helpers removed them properly in earlier tests."
)
post_save.connect(psh_validate_implement, sender=Change)
def _replace_post_save_hooks_registration(self):
def has_listeners():
return post_save.has_listeners(Registration)
assert has_listeners(), (
"Registration model has no post_save listeners. Make sure"
" helpers cleaned up properly in earlier tests."
)
post_save.disconnect(
receiver=psh_validate_subscribe,
sender=Registration,
dispatch_uid="psh_validate_subscribe",
)
post_save.disconnect(receiver=model_saved, dispatch_uid="instance-saved-hook")
assert not has_listeners(), (
"Registration model still has post_save listeners. Make sure"
" helpers cleaned up properly in earlier tests."
)
def _restore_post_save_hooks_registration(self):
def has_listeners():
return post_save.has_listeners(Registration)
assert not has_listeners(), (
"Registration model still has post_save listeners. Make sure"
" helpers removed them properly in earlier tests."
)
post_save.connect(
psh_validate_subscribe,
sender=Registration,
dispatch_uid="psh_validate_subscribe",
)
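    # The _replace/_restore helpers above disconnect and reconnect the
    # post_save signal receivers so that saving Change/Registration fixtures
    # in tests does not trigger the real validate/implement tasks.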
def make_source_adminuser(self):
data = {
"name": "test_source_adminuser",
"authority": "hw_full",
"user": User.objects.get(username="testadminuser"),
}
return Source.objects.create(**data)
def make_source_normaluser(self):
data = {
"name": "test_source_normaluser",
"authority": "patient",
"user": User.objects.get(username="testnormaluser"),
}
return Source.objects.create(**data)
def make_change_adminuser(self):
data = {
"registrant_id": "mother01-63e2-4acc-9b94-26663b9bc267",
"action": "pmtct_loss_switch",
"data": {"test_adminuser_change": "test_adminuser_changed"},
"source": self.make_source_adminuser(),
}
return Change.objects.create(**data)
def make_change_normaluser(self):
data = {
"registrant_id": "mother01-63e2-4acc-9b94-26663b9bc267",
"action": "pmtct_loss_switch",
"data": {"test_normaluser_change": "test_normaluser_changed"},
"source": self.make_source_normaluser(),
}
return Change.objects.create(**data)
def make_registration_pmtct_prebirth(self):
registration_data = {
"reg_type": "pmtct_prebirth",
"registrant_id": "mother01-63e2-4acc-9b94-26663b9bc267",
"source": self.make_source_normaluser(),
"data": {
"operator_id": "mother01-63e2-4acc-9b94-26663b9bc267",
"language": "eng_ZA",
"mom_dob": "1999-01-27",
"edd": "2016-11-30",
},
}
return Registration.objects.create(**registration_data)
def make_registration_pmtct_postbirth(self):
registration_data = {
"reg_type": "pmtct_postbirth",
"registrant_id": "mother01-63e2-4acc-9b94-26663b9bc267",
"source": self.make_source_normaluser(),
"data": {
"operator_id": "mother01-63e2-4acc-9b94-26663b9bc267",
"language": "eng_ZA",
"mom_dob": "1999-01-27",
"baby_dob": "2016-01-01",
},
}
return Registration.objects.create(**registration_data)
def make_registration_nurseconnect(self, anonymised=False):
if anonymised:
data = {
"operator_id": "nurse001-63e2-4acc-9b94-26663b9bc267",
"uuid_registrant": "nurse001-63e2-4acc-9b94-26663b9bc267",
"uuid_device": "nurse001-63e2-4acc-9b94-26663b9bc267",
"faccode": "123456",
}
else:
data = {
"operator_id": "mother01-63e2-4acc-9b94-26663b9bc267",
"msisdn_registrant": "+27821112222",
"msisdn_device": "+27821112222",
"faccode": "123456",
}
registration_data = {
"reg_type": "nurseconnect",
"registrant_id": "nurse001-63e2-4acc-9b94-26663b9bc267",
"source": self.make_source_adminuser(),
"data": data,
}
return Registration.objects.create(**registration_data)
def make_registration_momconnect_prebirth(self):
registration_data = {
"reg_type": "pmtct_prebirth",
"registrant_id": "mother01-63e2-4acc-9b94-26663b9bc267",
"source": self.make_source_normaluser(),
"data": {
"operator_id": "mother01-63e2-4acc-9b94-26663b9bc267",
"language": "eng_ZA",
"mom_dob": "1999-01-27",
"edd": "2016-11-30",
},
}
return Registration.objects.create(**registration_data)
def make_registration_whatsapp_pmtct_prebirth(self):
registration_data = {
"reg_type": "whatsapp_pmtct_prebirth",
"registrant_id": "mother01-63e2-4acc-9b94-26663b9bc267",
"source": self.make_source_normaluser(),
"data": {
"operator_id": "mother01-63e2-4acc-9b94-26663b9bc267",
"language": "eng_ZA",
"mom_dob": "1999-01-27",
"edd": "2016-11-30",
},
}
return Registration.objects.create(**registration_data)
def make_registration_whatsapp_prebirth(self):
registration_data = {
"reg_type": "whatsapp_prebirth",
"registrant_id": "mother01-63e2-4acc-9b94-26663b9bc267",
"source": self.make_source_normaluser(),
"data": {
"operator_id": "mother01-63e2-4acc-9b94-26663b9bc267",
"language": "eng_ZA",
"mom_dob": "1999-01-27",
"edd": "2016-11-30",
},
}
return Registration.objects.create(**registration_data)
def setUp(self):
super(AuthenticatedAPITestCase, self).setUp()
self._replace_post_save_hooks_change()
self._replace_post_save_hooks_registration()
# Normal User setup
self.normalusername = "testnormaluser"
self.normalpassword = "<PASSWORD>"
the motivation for having it on Alignment __init__
is that it's easy for users to construct Alignment objects directly.
Parameters:
data: Data to convert into a SequenceCollection
Names: Order of Names in the alignment. Should match the
names of the sequences (after processing by
label_to_name if present).
Alphabet: Alphabet to use for the alignment (primarily important
for DenseAlignment)
MolType: MolType to be applied to the Alignment and to each seq.
Name: Name of the SequenceCollection.
Info: Info object to be attached to the alignment itself.
conversion_f: Function to convert string into sequence.
is_array: True if input is an array, False otherwise.
force_same_data: True if data will be used as the same object.
remove_duplicate_names: True if duplicate names are to be silently
deleted instead of raising errors.
label_to_name: if present, converts name into f(name).
"""
#read all the data in if we were passed a generator
if isinstance(data, GeneratorType):
data = list(data)
#set the Name
self.Name = Name
#figure out alphabet and moltype
self.Alphabet, self.MolType = \
self._get_alphabet_and_moltype(Alphabet, MolType, data)
if not isinstance(Info, InfoClass):
if Info:
Info = InfoClass(Info)
else:
Info = InfoClass()
self.Info = Info
#if we're forcing the same data, skip the validation
if force_same_data:
self._force_same_data(data, Names)
curr_seqs = data
#otherwise, figure out what we got and coerce it into the right type
else:
per_seq_names, curr_seqs, name_order = \
self._names_seqs_order(conversion_f, data, Names, is_array, \
label_to_name, remove_duplicate_names, \
Alphabet=self.Alphabet)
self.Names = name_order
#will take only the seqs and names that are in name_order
if per_seq_names != name_order:
good_indices = []
for n in name_order:
good_indices.append(per_seq_names.index(n))
if hasattr(curr_seqs, 'astype'): #it's an array
#much faster to check than to raise exception in this case
curr_seqs = take(curr_seqs, good_indices, axis=0)
else:
curr_seqs = [curr_seqs[i] for i in good_indices]
per_seq_names = name_order
#create NamedSeqs dict for fast lookups
if not suppress_named_seqs:
self.NamedSeqs = self._make_named_seqs(self.Names, curr_seqs)
#Sequence objects behave like sequences of chars, so no difference
#between Seqs and SeqData. Note that this differs for Alignments,
#so be careful which you use if writing methods that should work for
#both SequenceCollections and Alignments.
self._set_additional_attributes(curr_seqs)
def __str__(self):
"""Returns self in FASTA-format, respecting name order."""
return ''.join(['>%s\n%s\n' % (name, self.getGappedSeq(name))
for name in self.Names])
def _make_named_seqs(self, names, seqs):
"""Returns NamedSeqs: dict of name:seq."""
name_seq_tuples = zip(names, seqs)
for n, s in name_seq_tuples:
s.Name = n
return dict(name_seq_tuples)
def _set_additional_attributes(self, curr_seqs):
"""Sets additional attributes based on current seqs: class-specific."""
self.SeqData = curr_seqs
self._seqs = curr_seqs
try:
self.SeqLen = max(map(len, curr_seqs))
except ValueError: #got empty sequence, for some reason?
self.SeqLen = 0
def _force_same_data(self, data, Names):
"""Forces dict that was passed in to be used as self.NamedSeqs"""
self.NamedSeqs = data
self.Names = Names or data.keys()
def copy(self):
"""Returns deep copy of self."""
result = self.__class__(self, MolType=self.MolType, Info=self.Info)
return result
def _get_alphabet_and_moltype(self, Alphabet, MolType, data):
"""Returns Alphabet and MolType, giving MolType precedence."""
if Alphabet is None and MolType is None:
if hasattr(data, 'MolType'):
MolType = data.MolType
elif hasattr(data, 'Alphabet'):
Alphabet = data.Alphabet
#check for containers
else:
curr_item = self._get_container_item(data)
if hasattr(curr_item, 'MolType'):
MolType = curr_item.MolType
elif hasattr(curr_item, 'Alphabet'):
Alphabet = curr_item.Alphabet
else:
MolType = self.MolType #will be BYTES by default
if Alphabet is not None and MolType is None:
MolType = Alphabet.MolType
if MolType is not None and Alphabet is None:
try:
Alphabet = MolType.Alphabets.DegenGapped
except AttributeError:
Alphabet = MolType.Alphabet
return Alphabet, MolType
def _get_container_item(self, data):
"""Checks container for item with Alphabet or MolType"""
curr_item = None
if hasattr(data, 'itervalues'):
curr_item = data.itervalues().next()
else:
try:
curr_item = iter(data).next()
except:
pass
return curr_item
def _strip_duplicates(self, names, seqs):
"""Internal function to strip duplicates from list of names"""
if len(set(names)) == len(names):
return set(), names, seqs
#if we got here, there are duplicates
unique_names = {}
duplicates = {}
fixed_names = []
fixed_seqs = []
for n, s in zip(names, seqs):
if n in unique_names:
duplicates[n] = 1
else:
unique_names[n] = 1
fixed_names.append(n)
fixed_seqs.append(s)
if type(seqs) is ndarray:
fixed_seqs = array(fixed_seqs, seqs.dtype)
return duplicates, fixed_names, fixed_seqs
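    # Example (sketch): names ['a', 'b', 'a'] with seqs ['A', 'C', 'G'] return
    # ({'a': 1}, ['a', 'b'], ['A', 'C']) -- the first occurrence of a name wins.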
def _names_seqs_order(self, conversion_f, data, Names, is_array, \
label_to_name, remove_duplicate_names, Alphabet=None):
"""Internal function to figure out names, seqs, and name_order."""
#figure out conversion function and whether it's an array
if not conversion_f:
input_type = self._guess_input_type(data)
is_array = input_type in self.IsArray
conversion_f = self.InputHandlers[input_type]
#set seqs and names as properties
if Alphabet:
seqs, names = conversion_f(data, Alphabet=Alphabet)
else:
seqs, names = conversion_f(data)
if names and label_to_name:
names = map(label_to_name, names)
curr_seqs = self._coerce_seqs(seqs, is_array)
#if no names were passed in as Names, if we obtained them from
#the seqs we should use them, but otherwise we should use the
#default names
if Names is None:
if (names is None) or (None in names):
per_seq_names = name_order = \
self.DefaultNameFunction(len(curr_seqs))
else: #got names from seqs
per_seq_names = name_order = names
else:
#otherwise, names were passed in as Names: use this as the order
#if we got names from the sequences, but otherwise assign the
#names to successive sequences in order
if (names is None) or (None in names):
per_seq_names = name_order = Names
else: #got names from seqs, so assume name_order is in Names
per_seq_names = names
name_order = Names
#check for duplicate names
duplicates, fixed_names, fixed_seqs = \
self._strip_duplicates(per_seq_names, curr_seqs)
if duplicates:
if remove_duplicate_names:
per_seq_names, curr_seqs = fixed_names, fixed_seqs
#if name_order doesn't have the same names as per_seq_names,
#replace it with per_seq_names
if (set(name_order) != set(per_seq_names)) or\
(len(name_order) != len(per_seq_names)):
name_order = per_seq_names
else:
                raise ValueError(
                    "Some names were not unique. Duplicates are:\n" +
                    str(sorted(duplicates.keys())))
return per_seq_names, curr_seqs, name_order
def _coerce_seqs(self, seqs, is_array):
"""Controls how seqs are coerced in _names_seqs_order.
Override in subclasses where this behavior should differ.
"""
if is_array:
seqs = map(str, map(self.MolType.ModelSeq, seqs))
return map(self.MolType.Sequence, seqs)
def _guess_input_type(self, data):
"""Guesses input type of data; returns result as key of InputHandlers.
First checks whether data is an Alignment, then checks for some common
string formats, then tries to do it based on string or array properties.
Returns 'empty' if check fails, i.e. if it can't recognize the sequence
as a specific type. Note that bad sequences are not guaranteed to
return 'empty', and may be recognized as another type incorrectly.
"""
if isinstance(data, DenseAlignment):
return 'dense_aln'
if isinstance(data, Alignment):
return 'aln'
if isinstance(data, SequenceCollection):
return 'collection'
if isinstance(data, dict):
return 'dict'
if isinstance(data, str):
if data.startswith('>'):
return 'fasta'
else:
return 'generic'
first = None
try:
first = data[0]
except (IndexError, TypeError):
pass
try:
first = iter(data).next()
except (IndexError, TypeError, StopIteration):
pass
if first is None:
return 'empty'
try:
if isinstance(first, ModelSequence): #model sequence base type
return 'model_seqs'
elif hasattr(first, 'dtype'): #array object
return 'array'
elif isinstance(first, str) and first.startswith('>'):
return 'fasta'
else:
try:
dict(data)
return 'kv_pairs'
except (TypeError, ValueError):
pass
return 'generic'
        except (IndexError, TypeError):
return 'empty'
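    # Example (sketch): a dict like {'seq1': 'ACGT'} is guessed as 'dict', the
    # string '>seq1\nACGT\n' as 'fasta', and a list of (name, seq) 2-tuples as
    # 'kv_pairs'.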
def __cmp__(self, other):
"""cmp first tests as dict, then as str."""
c = cmp(self.NamedSeqs, other)
if not c:
return 0
else:
return cmp(str(self), str(other))
def keys(self):
"""keys uses self.Names, which defaults to known keys if None.
Note: returns copy, not original.
"""
return self.Names[:]
def values(self):
"""values returns values corresponding to self.Names."""
return [self.NamedSeqs[n] for n in self.Names]
def items(self):
"""items returns (name, value) pairs."""
return [(n, self.NamedSeqs[n]) for n in self.Names]
def iterSeqs(self, seq_order=None):
"""Iterates over values (sequences) in the alignment, in order.
seq_order: list of keys giving the order in which seqs will be returned.
Defaults to self.Names. Note that only these sequences will be
returned, and that KeyError will be raised if there are sequences
in order that have been deleted from the Alignment. If self.Names
is None, returns the sequences in the same order as
self.NamedSeqs.values().
        Use map(f, self.seqs()) to apply the constructor f to each seq.
<filename>ai4good/models/abm/np_impl/model.py<gh_stars>10-100
import random
import numpy as np
import numba as nb
from ai4good.models.abm.np_impl.parameters import Parameters
from ai4good.utils.logger_util import get_logger
logger = get_logger(__name__)
# very small float number to account for floating precision loss
SMALL_ERROR = 0.0000001
FEMALE = 0
MALE = 1
# Individuals remain in the symptomatic or 1st asymptomatic states for 5 days, and are infectious during this period.
# This period of "5 days" as defined by <NAME> is parameterized in `SYMP_PERIOD`
SYMP_PERIOD = 5
# Disease states of the agents
INF_SUSCEPTIBLE = 0 # Agent is Ok
INF_EXPOSED = 1 # Agent got in contact with another infected agent and now has virus inside them
INF_PRESYMPTOMATIC = 2 # Agent is infected but is not showing symptoms yet
INF_SYMPTOMATIC = 3 # Agent is infected and showing early symptoms
INF_MILD = 4 # Agent is infected and showing mild severity
INF_SEVERE = 5 # Agent is infected and has severe condition
INF_ASYMPTOMATIC1 = 6 # Agent is infected but not showing symptoms (first phase)
INF_ASYMPTOMATIC2 = 7 # Agent is infected but not showing symptoms (second phase)
INF_RECOVERED = 8 # Agent has recovered or died due to infection. Recovered agents will not contract infection again
INF_DECEASED = 9 # Agent has passed away due to infection severity
# Activity routes for the agents
ACTIVITY_HOUSEHOLD = 0 # Agent is inside their household
ACTIVITY_WANDERING = 1 # Agent is wandering outside their household
ACTIVITY_TOILET = 2 # Agent is in the toilet queue
ACTIVITY_FOOD_LINE = 3 # Agent is in the food line queue
ACTIVITY_QUARANTINED = 4 # Agent is under quarantine. Infection spread in quarantine is similar to that of household
ACTIVITY_HOSPITALIZED = 5 # Agent is hospitalized. Infection spread is not modelled inside hospitals
# Number of features of each agent
A_FEATURES = 16
# Features of each agent
A_X = 0 # x co-ordinate at any given point in time
A_Y = 1 # y co-ordinate at any given point in time
A_AGE = 2 # Age of the agent
A_GENDER = 3 # Gender of the agent
A_DISEASE = 4 # Disease state of the agent
A_INCUBATION_PERIOD = 5 # Incubation period of the agent
A_HOME_RANGE = 6 # Home range of the agent around their household
A_ETHNICITY = 7 # Ethnicity of the agent
A_HOUSEHOLD_X = 8 # x co-ordinate of the household where agent lives
A_HOUSEHOLD_Y = 9 # y co-ordinate of the household where agent lives
A_TOILET = 10 # Id of the toilet closest to the agent's household
A_FOOD_LINE = 11 # Id of the food line closest to the agent's household
A_ACTIVITY = 12 # Current activity route of the agent such as wandering, inside household, in toilet, etc.
# Number of days in the current disease state. After reaching a new disease state, this counter is reset to 0.
# Exception: counter does not reset when disease state reaches `INF_EXPOSED` in order to track incubation period.
A_DAY_COUNTER = 13
# Flag 0/1 to check if agent is asymptomatic
# All children under the age of 16 become asymptomatic (ref), and others become asymptomatic with probability 0.178
# (Mizumoto et al. 2020)
A_IS_ASYMPTOMATIC = 14
# The activity of agent before they went to a queue. Agents will be sent back to this activity after dequeue
A_ACTIVITY_BEFORE_QUEUE = 15
class OptimizedOps(object):
"""
Helper class with numba optimized static methods
"""
def __init__(self):
pass
@staticmethod
@nb.njit
def position_blocks(grid_size: int, camp_size: float) -> np.array:
"""
Uniform placement of blocks (typically food line or toilet) in the camp.
Parameters
----------
grid_size: Size of the square grid where placement of food line/toilet happens
camp_size: Side length of the square sized camp
Returns
-------
out: An array of length grid_size * grid_size (x, y) co-ordinates of the blocks
"""
# since the placement will be uniform and equidistant, there will be a fixed distance between two blocks along
# an axis. We call this distance as step
step = camp_size / grid_size
# bottom left position of the first block. This serves as both the x and y co-ordinate since camp is a square
pos0 = step / 2.0
# output position matrix
out = np.zeros(shape=(grid_size * grid_size, 2))
k = 0 # counter for out array
for i in range(grid_size): # along x-axis
for j in range(grid_size): # along y-axis
# new position calculated by moving `step` distance along each axis
out[k, :] = [pos0 + i * step, pos0 + j * step]
k += 1 # increment counter
# return the co-ordinates array
return out
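    # Example (sketch): position_blocks(2, 100.0) places four blocks at
    # (25, 25), (25, 75), (75, 25) and (75, 75).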
@staticmethod
@nb.njit(fastmath=True)
def find_nearest(pos, others, condn=None):
# Find and return the index of the entity nearest to subject positioned at `pos`
# The co-ordinates of the entities are defined in `others` array (?, 2)
# Additionally, an optional `condn` boolean array can be used to filter `others`
d_min = 10000000000.0 # a large number in terms of distance
d_min_index = -1 # index in `others` which is nearest to the subject positioned at `pos`
# number of entities around subject positioned at `pos`
n = others.shape[0]
for i in range(n): # iterate all entities in `others` array
# distance between entity `i` and subject
dij = (others[i, 0] - pos[0]) ** 2 + (others[i, 1] - pos[1]) ** 2
# dij = dij ** 0.5 : this step is not needed since relative distance is needed
# update nearest entity based on distance
if dij < d_min and (condn is None or condn[i] == 1):
d_min = dij
d_min_index = i
# return index of the nearest entity and the nearest distance associated with that entity
return d_min_index, d_min
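    # Note: the returned distance is squared (the sqrt is skipped because only
    # the relative ordering matters); callers needing the actual distance
    # should take `d_min ** 0.5`.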
@staticmethod
@nb.njit
def showing_symptoms(disease_state):
# return boolean array with True if agents are showing symptoms else False
n = disease_state.shape[0]
out = np.zeros_like(disease_state, dtype=np.int32)
for i in range(n):
out[i] = disease_state[i] in (INF_SYMPTOMATIC, INF_MILD, INF_SEVERE)
return out
@staticmethod
@nb.njit
def is_infected(disease_states):
# return boolean array denoting if agents are infected or not
n = disease_states.shape[0]
out = np.zeros_like(disease_states, dtype=np.int32)
for i in range(n):
out[i] = disease_states[i] not in (INF_SUSCEPTIBLE, INF_EXPOSED, INF_RECOVERED, INF_DECEASED)
return out
class Camp:
"""
Base class for camp operations.
"""
def __init__(self, params: Parameters, camp_size: float):
self.camp_size = camp_size
self.params = params
self.num_people = params.num_people
self.agents: np.array = None
self.toilet_queue = {}
self.food_line_queue = {}
def set_agents(self, agents: np.array) -> None:
self.agents = agents
@staticmethod
@nb.njit(parallel=True)
def simulate_households(agents: np.array, prob_spread: float, activity: int) -> (np.array, int):
"""
Function to send people to household and simulate infection dynamics in those households.
This function is optimized using numba.
Parameters
----------
agents: A Numpy array containing data of agents who will go inside their households at current simulation step
prob_spread: The probability of infection transmission if a susceptible and infectious agent interact in house
activity: The activity inside the household. It can be either ACTIVITY_HOUSEHOLD (normal household visit) or
ACTIVITY_QUARANTINED (for when under quarantine)
Returns
-------
out: Updated agents array and the number of new infections
"""
n = agents.shape[0] # number of agents inside their households
num_new_infections = 0 # number of new infections caused by household interactions
# Loop through each pair (i,j) of agents who are inside their households.
# If agent i and agent j shares household AND agent i is susceptible AND agent j is infectious
# Then, infection can spread from agent j to agent i with some probability
for i in nb.prange(n):
# Update current activity route
agents[i, A_ACTIVITY] = activity
# Update current location of agent to household location
agents[i, A_X] = agents[i, A_HOUSEHOLD_X]
agents[i, A_Y] = agents[i, A_HOUSEHOLD_Y]
# Agent i will be infected iff he/she is currently susceptible
if agents[i, A_DISEASE] != INF_SUSCEPTIBLE:
# Skip if agent i is not susceptible
continue
num_infectious_hh = 0 # number of infectious households of agent i
for j in nb.prange(n):
# Agent j will infect agent i if agent j is infectious
if agents[j, A_DISEASE] in (INF_SUSCEPTIBLE, INF_EXPOSED, INF_RECOVERED, INF_DECEASED):
# Skip if agent j is not infectious
continue
# Distance between households of i and j
                dij = (agents[i, A_HOUSEHOLD_X] - agents[j, A_HOUSEHOLD_X]) ** 2
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import sys
import time
import re
import tensorflow as tf
from vocab_utils import Vocab
from SentenceMatchDataStream import SentenceMatchDataStream
from SentenceMatchModelGraph import SentenceMatchModelGraph
import namespace_utils
FLAGS = None
def collect_vocabs(train_path, with_POS=False, with_NER=False):
all_labels = set()
all_words = set()
all_POSs = None
all_NERs = None
if with_POS: all_POSs = set()
if with_NER: all_NERs = set()
infile = open(train_path, 'rt', encoding="utf8")
for line in infile:
line = line.strip()
if line.startswith('-'): continue
items = re.split("\t", line)
label = items[0]
sentence1 = re.split("\\s+",items[1].lower())
sentence2 = re.split("\\s+",items[2].lower())
all_labels.add(label)
all_words.update(sentence1)
all_words.update(sentence2)
if with_POS:
all_POSs.update(re.split("\\s+",items[3]))
all_POSs.update(re.split("\\s+",items[4]))
if with_NER:
all_NERs.update(re.split("\\s+",items[5]))
all_NERs.update(re.split("\\s+",items[6]))
infile.close()
all_chars = set()
for word in all_words:
for char in word:
all_chars.add(char)
return (all_words, all_chars, all_labels, all_POSs, all_NERs)
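# Hypothetical input line (tab-separated): label, sentence 1, sentence 2, and,
# when with_POS/with_NER are set, POS tags (items 3-4) and NER tags (items 5-6):
# entailment\tA man eats .\tSomeone is eating .\tDT NN VBZ .\tNN VBZ VBG .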
def evaluate(dataStream, valid_graph, sess, outpath=None, label_vocab=None, mode='prediction',char_vocab=None, POS_vocab=None, NER_vocab=None):
if outpath is not None: outfile = open(outpath, 'wt')
total_tags = 0.0
correct_tags = 0.0
dataStream.reset()
for batch_index in range(dataStream.get_num_batch()):
cur_dev_batch = dataStream.get_batch(batch_index)
(label_batch, sent1_batch, sent2_batch, label_id_batch, word_idx_1_batch, word_idx_2_batch,
char_matrix_idx_1_batch, char_matrix_idx_2_batch, sent1_length_batch, sent2_length_batch,
sent1_char_length_batch, sent2_char_length_batch,
POS_idx_1_batch, POS_idx_2_batch, NER_idx_1_batch, NER_idx_2_batch) = cur_dev_batch
feed_dict = {
valid_graph.get_truth(): label_id_batch,
valid_graph.get_question_lengths(): sent1_length_batch,
valid_graph.get_passage_lengths(): sent2_length_batch,
valid_graph.get_in_question_words(): word_idx_1_batch,
valid_graph.get_in_passage_words(): word_idx_2_batch,
# valid_graph.get_question_char_lengths(): sent1_char_length_batch,
# valid_graph.get_passage_char_lengths(): sent2_char_length_batch,
# valid_graph.get_in_question_chars(): char_matrix_idx_1_batch,
# valid_graph.get_in_passage_chars(): char_matrix_idx_2_batch,
}
if char_vocab is not None:
feed_dict[valid_graph.get_question_char_lengths()] = sent1_char_length_batch
feed_dict[valid_graph.get_passage_char_lengths()] = sent2_char_length_batch
feed_dict[valid_graph.get_in_question_chars()] = char_matrix_idx_1_batch
feed_dict[valid_graph.get_in_passage_chars()] = char_matrix_idx_2_batch
if POS_vocab is not None:
feed_dict[valid_graph.get_in_question_poss()] = POS_idx_1_batch
feed_dict[valid_graph.get_in_passage_poss()] = POS_idx_2_batch
if NER_vocab is not None:
feed_dict[valid_graph.get_in_question_ners()] = NER_idx_1_batch
feed_dict[valid_graph.get_in_passage_ners()] = NER_idx_2_batch
total_tags += len(label_batch)
correct_tags += sess.run(valid_graph.get_eval_correct(), feed_dict=feed_dict)
if outpath is not None:
if mode =='prediction':
predictions = sess.run(valid_graph.get_predictions(), feed_dict=feed_dict)
for i in range(len(label_batch)):
outline = label_batch[i] + "\t" + label_vocab.getWord(predictions[i]) + "\t" + sent1_batch[i] + "\t" + sent2_batch[i] + "\n"
                    outfile.write(outline)  # outfile is opened in text mode, so write str directly
else:
probs = sess.run(valid_graph.get_prob(), feed_dict=feed_dict)
for i in range(len(label_batch)):
outfile.write(label_batch[i] + "\t" + output_probs(probs[i], label_vocab) + "\n")
if outpath is not None: outfile.close()
    try:
        accuracy = correct_tags / total_tags * 100
    except ZeroDivisionError:
        accuracy = 0
return accuracy
def output_probs(probs, label_vocab):
out_string = ""
for i in range(probs.size):
out_string += " {}:{}".format(label_vocab.getWord(i), probs[i])
return out_string.strip()
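# Example (sketch): with a label vocab of ('entailment', 'contradiction') and
# probs [0.9, 0.1], output_probs returns "entailment:0.9 contradiction:0.1".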
def main(_):
print('Configurations:')
print(FLAGS)
train_path = FLAGS.train_path
dev_path = FLAGS.dev_path
test_path = FLAGS.test_path
word_vec_path = FLAGS.word_vec_path
log_dir = FLAGS.model_dir
if not os.path.exists(log_dir):
os.makedirs(log_dir)
path_prefix = log_dir + "/SentenceMatch.{}".format(FLAGS.suffix)
namespace_utils.save_namespace(FLAGS, path_prefix + ".config.json")
# build vocabs
word_vocab = Vocab(word_vec_path, fileformat='txt3')
best_path = path_prefix + '.best.model'
char_path = path_prefix + ".char_vocab"
label_path = path_prefix + ".label_vocab"
POS_path = path_prefix + ".POS_vocab"
NER_path = path_prefix + ".NER_vocab"
has_pre_trained_model = False
POS_vocab = None
NER_vocab = None
if os.path.exists(best_path):
has_pre_trained_model = True
label_vocab = Vocab(label_path, fileformat='txt2')
char_vocab = Vocab(char_path, fileformat='txt2')
if FLAGS.with_POS: POS_vocab = Vocab(POS_path, fileformat='txt2')
if FLAGS.with_NER: NER_vocab = Vocab(NER_path, fileformat='txt2')
else:
print('Collect words, chars and labels ...')
(all_words, all_chars, all_labels, all_POSs, all_NERs) = collect_vocabs(train_path, with_POS=FLAGS.with_POS, with_NER=FLAGS.with_NER)
print('Number of words: {}'.format(len(all_words)))
print('Number of labels: {}'.format(len(all_labels)))
label_vocab = Vocab(fileformat='voc', voc=all_labels,dim=2)
label_vocab.dump_to_txt2(label_path)
print('Number of chars: {}'.format(len(all_chars)))
char_vocab = Vocab(fileformat='voc', voc=all_chars,dim=FLAGS.char_emb_dim)
char_vocab.dump_to_txt2(char_path)
if FLAGS.with_POS:
print('Number of POSs: {}'.format(len(all_POSs)))
POS_vocab = Vocab(fileformat='voc', voc=all_POSs,dim=FLAGS.POS_dim)
POS_vocab.dump_to_txt2(POS_path)
if FLAGS.with_NER:
print('Number of NERs: {}'.format(len(all_NERs)))
NER_vocab = Vocab(fileformat='voc', voc=all_NERs,dim=FLAGS.NER_dim)
NER_vocab.dump_to_txt2(NER_path)
print('word_vocab shape is {}'.format(word_vocab.word_vecs.shape))
print('tag_vocab shape is {}'.format(label_vocab.word_vecs.shape))
num_classes = label_vocab.size()
print('Build SentenceMatchDataStream ... ')
trainDataStream = SentenceMatchDataStream(train_path, word_vocab=word_vocab, char_vocab=char_vocab,
POS_vocab=POS_vocab, NER_vocab=NER_vocab, label_vocab=label_vocab,
batch_size=FLAGS.batch_size, isShuffle=True, isLoop=True, isSort=True,
max_char_per_word=FLAGS.max_char_per_word, max_sent_length=FLAGS.max_sent_length)
devDataStream = SentenceMatchDataStream(dev_path, word_vocab=word_vocab, char_vocab=char_vocab,
POS_vocab=POS_vocab, NER_vocab=NER_vocab, label_vocab=label_vocab,
batch_size=FLAGS.batch_size, isShuffle=False, isLoop=True, isSort=True,
max_char_per_word=FLAGS.max_char_per_word, max_sent_length=FLAGS.max_sent_length)
testDataStream = SentenceMatchDataStream(test_path, word_vocab=word_vocab, char_vocab=char_vocab,
POS_vocab=POS_vocab, NER_vocab=NER_vocab, label_vocab=label_vocab,
batch_size=FLAGS.batch_size, isShuffle=False, isLoop=True, isSort=True,
max_char_per_word=FLAGS.max_char_per_word, max_sent_length=FLAGS.max_sent_length)
print('Number of instances in trainDataStream: {}'.format(trainDataStream.get_num_instance()))
print('Number of instances in devDataStream: {}'.format(devDataStream.get_num_instance()))
print('Number of instances in testDataStream: {}'.format(testDataStream.get_num_instance()))
print('Number of batches in trainDataStream: {}'.format(trainDataStream.get_num_batch()))
print('Number of batches in devDataStream: {}'.format(devDataStream.get_num_batch()))
print('Number of batches in testDataStream: {}'.format(testDataStream.get_num_batch()))
sys.stdout.flush()
if FLAGS.wo_char: char_vocab = None
best_accuracy = 0.0
init_scale = 0.01
with tf.Graph().as_default():
initializer = tf.random_uniform_initializer(-init_scale, init_scale)
# with tf.name_scope("Train"):
with tf.variable_scope("Model", reuse=None, initializer=initializer):
train_graph = SentenceMatchModelGraph(num_classes, word_vocab=word_vocab, char_vocab=char_vocab,POS_vocab=POS_vocab, NER_vocab=NER_vocab,
dropout_rate=FLAGS.dropout_rate, learning_rate=FLAGS.learning_rate, optimize_type=FLAGS.optimize_type,
lambda_l2=FLAGS.lambda_l2, char_lstm_dim=FLAGS.char_lstm_dim, context_lstm_dim=FLAGS.context_lstm_dim,
aggregation_lstm_dim=FLAGS.aggregation_lstm_dim, is_training=True, MP_dim=FLAGS.MP_dim,
context_layer_num=FLAGS.context_layer_num, aggregation_layer_num=FLAGS.aggregation_layer_num,
fix_word_vec=FLAGS.fix_word_vec,with_filter_layer=FLAGS.with_filter_layer, with_highway=FLAGS.with_highway,
word_level_MP_dim=FLAGS.word_level_MP_dim,
with_match_highway=FLAGS.with_match_highway, with_aggregation_highway=FLAGS.with_aggregation_highway,
highway_layer_num=FLAGS.highway_layer_num,with_lex_decomposition=FLAGS.with_lex_decomposition,
lex_decompsition_dim=FLAGS.lex_decompsition_dim,
with_left_match=(not FLAGS.wo_left_match), with_right_match=(not FLAGS.wo_right_match),
with_full_match=(not FLAGS.wo_full_match), with_maxpool_match=(not FLAGS.wo_maxpool_match),
with_attentive_match=(not FLAGS.wo_attentive_match), with_max_attentive_match=(not FLAGS.wo_max_attentive_match))
tf.summary.scalar("Training Loss", train_graph.get_loss()) # Add a scalar summary for the snapshot loss.
# with tf.name_scope("Valid"):
with tf.variable_scope("Model", reuse=True, initializer=initializer):
valid_graph = SentenceMatchModelGraph(num_classes, word_vocab=word_vocab, char_vocab=char_vocab, POS_vocab=POS_vocab, NER_vocab=NER_vocab,
dropout_rate=FLAGS.dropout_rate, learning_rate=FLAGS.learning_rate, optimize_type=FLAGS.optimize_type,
lambda_l2=FLAGS.lambda_l2, char_lstm_dim=FLAGS.char_lstm_dim, context_lstm_dim=FLAGS.context_lstm_dim,
aggregation_lstm_dim=FLAGS.aggregation_lstm_dim, is_training=False, MP_dim=FLAGS.MP_dim,
context_layer_num=FLAGS.context_layer_num, aggregation_layer_num=FLAGS.aggregation_layer_num,
fix_word_vec=FLAGS.fix_word_vec,with_filter_layer=FLAGS.with_filter_layer, with_highway=FLAGS.with_highway,
word_level_MP_dim=FLAGS.word_level_MP_dim,
with_match_highway=FLAGS.with_match_highway, with_aggregation_highway=FLAGS.with_aggregation_highway,
highway_layer_num=FLAGS.highway_layer_num, with_lex_decomposition=FLAGS.with_lex_decomposition,
lex_decompsition_dim=FLAGS.lex_decompsition_dim,
with_left_match=(not FLAGS.wo_left_match), with_right_match=(not FLAGS.wo_right_match),
with_full_match=(not FLAGS.wo_full_match), with_maxpool_match=(not FLAGS.wo_maxpool_match),
with_attentive_match=(not FLAGS.wo_attentive_match), with_max_attentive_match=(not FLAGS.wo_max_attentive_match))
        init_op = tf.global_variables_initializer()  # renamed to avoid shadowing the scope initializer above
vars_ = {}
for var in tf.global_variables():
if "word_embedding" in var.name: continue
# if not var.name.startswith("Model"): continue
vars_[var.name.split(":")[0]] = var
saver = tf.train.Saver(vars_)
sess = tf.Session()
        sess.run(init_op)
if has_pre_trained_model:
print("Restoring model from " + best_path)
saver.restore(sess, best_path)
print("DONE!")
print('Start the training loop.')
train_size = trainDataStream.get_num_batch()
max_steps = train_size * FLAGS.max_epochs
total_loss = 0.0
start_time = time.time()
for step in range(max_steps):
# read data
cur_batch = trainDataStream.nextBatch()
(label_batch, sent1_batch, sent2_batch, label_id_batch, word_idx_1_batch, word_idx_2_batch,
char_matrix_idx_1_batch, char_matrix_idx_2_batch, sent1_length_batch, sent2_length_batch,
sent1_char_length_batch, sent2_char_length_batch,
POS_idx_1_batch, POS_idx_2_batch, NER_idx_1_batch, NER_idx_2_batch) = cur_batch
feed_dict = {
train_graph.get_truth(): label_id_batch,
train_graph.get_question_lengths(): sent1_length_batch,
train_graph.get_passage_lengths(): sent2_length_batch,
train_graph.get_in_question_words(): word_idx_1_batch,
train_graph.get_in_passage_words(): word_idx_2_batch,
# train_graph.get_question_char_lengths(): sent1_char_length_batch,
# train_graph.get_passage_char_lengths(): sent2_char_length_batch,
# train_graph.get_in_question_chars(): char_matrix_idx_1_batch,
# train_graph.get_in_passage_chars(): char_matrix_idx_2_batch,
}
if char_vocab is not None:
feed_dict[train_graph.get_question_char_lengths()] = sent1_char_length_batch
feed_dict[train_graph.get_passage_char_lengths()] = sent2_char_length_batch
feed_dict[train_graph.get_in_question_chars()] = char_matrix_idx_1_batch
feed_dict[train_graph.get_in_passage_chars()] = char_matrix_idx_2_batch
if POS_vocab is not None:
feed_dict[train_graph.get_in_question_poss()] = POS_idx_1_batch
feed_dict[train_graph.get_in_passage_poss()] = POS_idx_2_batch
if NER_vocab is not None:
feed_dict[train_graph.get_in_question_ners()] = NER_idx_1_batch
feed_dict[train_graph.get_in_passage_ners()] = NER_idx_2_batch
_, loss_value = sess.run([train_graph.get_train_op(), train_graph.get_loss()], feed_dict=feed_dict)
total_loss += loss_value
if step % 100==0:
print('{} '.format(step), end="")
sys.stdout.flush()
# Save a checkpoint and evaluate the model periodically.
if (step + 1) % trainDataStream.get_num_batch() == 0 or (step + 1) == max_steps:
print()
# Print status to stdout.
duration = time.time() - start_time
start_time = time.time()
print('Step %d: loss = %.2f (%.3f sec)' % (step, total_loss, duration))
total_loss = 0.0
# Evaluate against the validation set.
print('Validation Data Eval:')
accuracy = evaluate(devDataStream, valid_graph, sess,char_vocab=char_vocab, POS_vocab=POS_vocab, NER_vocab=NER_vocab)
print("Current accuracy is %.2f" % accuracy)
if accuracy>best_accuracy:
best_accuracy = accuracy
saver.save(sess, best_path)
print("Best accuracy on dev set is %.2f" % best_accuracy)
# decoding
print('Decoding on the test set:')
init_scale = 0.01
with tf.Graph().as_default():
initializer = tf.random_uniform_initializer(-init_scale, init_scale)
with tf.variable_scope("Model", reuse=False, initializer=initializer):
valid_graph = SentenceMatchModelGraph(num_classes, word_vocab=word_vocab, char_vocab=char_vocab, POS_vocab=POS_vocab, NER_vocab=NER_vocab,
dropout_rate=FLAGS.dropout_rate, learning_rate=FLAGS.learning_rate, optimize_type=FLAGS.optimize_type,
lambda_l2=FLAGS.lambda_l2, char_lstm_dim=FLAGS.char_lstm_dim, context_lstm_dim=FLAGS.context_lstm_dim,
aggregation_lstm_dim=FLAGS.aggregation_lstm_dim, is_training=False, MP_dim=FLAGS.MP_dim,
context_layer_num=FLAGS.context_layer_num, aggregation_layer_num=FLAGS.aggregation_layer_num,
fix_word_vec=FLAGS.fix_word_vec,with_filter_layer=FLAGS.with_filter_layer, with_highway=FLAGS.with_highway,
word_level_MP_dim=FLAGS.word_level_MP_dim,
with_match_highway=FLAGS.with_match_highway, with_aggregation_highway=FLAGS.with_aggregation_highway,
highway_layer_num=FLAGS.highway_layer_num, with_lex_decomposition=FLAGS.with_lex_decomposition,
lex_decompsition_dim=FLAGS.lex_decompsition_dim,
with_left_match=(not FLAGS.wo_left_match), with_right_match=(not FLAGS.wo_right_match),
with_full_match=(not FLAGS.wo_full_match), with_maxpool_match=(not FLAGS.wo_maxpool_match),
with_attentive_match=(not FLAGS.wo_attentive_match), with_max_attentive_match=(not FLAGS.wo_max_attentive_match))
vars_ = {}
        for var in tf.global_variables():
if "word_embedding" in var.name: continue
if not var.name.startswith("Model"): continue
vars_[var.name.split(":")[0]] = var
saver = tf.train.Saver(vars_)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
step = 0
saver.restore(sess, best_path)
accuracy = evaluate(testDataStream, valid_graph, sess,char_vocab=char_vocab,POS_vocab=POS_vocab, NER_vocab=NER_vocab)
print("Accuracy for test set is %.2f" % accuracy)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--train_path', type=str, help='Path to the train set.')
parser.add_argument('--dev_path', type=str, help='Path to the dev set.')
parser.add_argument('--test_path', type=str, help='Path to the test set.')
    parser.add_argument('--word_vec_path', type=str, help='Path to the pre-trained word vector model.')
parser.add_argument('--model_dir', type=str, help='Directory to save model files.')
parser.add_argument('--batch_size', type=int, default=60, help='Number of instances in each batch.')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Learning rate.')
parser.add_argument('--lambda_l2', type=float, default=0.0, help='The coefficient of L2 regularizer.')
parser.add_argument('--dropout_rate', type=float, default=0.1, help='Dropout ratio.')
parser.add_argument('--max_epochs', type=int, default=10, help='Maximum epochs for training.')
parser.add_argument('--optimize_type', type=str, default='adam', help='Optimizer type.')
    parser.add_argument('--char_emb_dim', type=int, default=20, help='Number of dimensions for character embeddings.')
    parser.add_argument('--char_lstm_dim', type=int, default=100, help='Number of dimensions for character-composed embeddings.')
    parser.add_argument('--context_lstm_dim', type=int, default=100, help='Number of dimensions for the context representation layer.')
    parser.add_argument('--aggregation_lstm_dim', type=int, default=100, help='Number of dimensions for the aggregation layer.')
parser.add_argument('--MP_dim', type=int, default=10, help='Number of perspectives for matching vectors.')
parser.add_argument('--max_char_per_word', type=int, default=10, help='Maximum number of characters for each word.')
parser.add_argument('--max_sent_length', type=int, default=100, help='Maximum number of words within each sentence.')
parser.add_argument('--aggregation_layer_num', type=int, default=1, help='Number of LSTM layers for aggregation layer.')
parser.add_argument('--context_layer_num', type=int, default=1, help='Number of LSTM layers for context representation layer.')
parser.add_argument('--highway_layer_num', type=int, default=1, help='Number of highway layers.')
parser.add_argument('--suffix', type=str, default='normal', required=True, help='Suffix of the model name.')
parser.add_argument('--fix_word_vec', default=False, help='Fix pre-trained word embeddings during training.', action='store_true')
parser.add_argument('--with_highway', default=False, help='Utilize highway layers.', action='store_true')
parser.add_argument('--with_filter_layer', default=False, help='Utilize filter layer.', action='store_true')
parser.add_argument('--word_level_MP_dim', type=int, default=-1, help='Number of perspectives for word-level matching.')
parser.add_argument('--with_match_highway', default=False, help='Utilize highway layers for matching layer.', action='store_true')
parser.add_argument('--with_aggregation_highway', default=False, help='Utilize highway layers for aggregation layer.', action='store_true')
parser.add_argument('--with_lex_decomposition', default=False, help='Utilize lexical decomposition features.', action='store_true')
    parser.add_argument('--lex_decompsition_dim', type=int, default=-1, help='Number of dimensions for lexical decomposition.')
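    # Illustrative invocation (script name and file paths are hypothetical;
    # all remaining flags keep the defaults declared above):
    #   python SentenceMatchTrainer.py --train_path train.tsv --dev_path dev.tsv \
    #       --test_path test.tsv --word_vec_path wordvec.txt --model_dir models --suffix quora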
"""
Frame encoders and decoders for each frame type. Note that we have encoders
for frames that the server does not use; it's for testing, and in case someone
wants to write a Python Minerva client.
"""
import re
import sys
import operator
from simplejson import dumps
from simplejson.decoder import JSONDecodeError
from minerva.objcheck import (
ensureBool, ensureNonNegIntLimit, strToNonNegLimit, strToIntInRange)
from strfrag import StringFragment
from minerva.mutils import strictDecoder, StrictDecodeError
from minerva.window import SACK
_postImportVars = vars().keys()
class attrdict(dict):
"""
A dict that can be modified by setting and getting attributes.
This may be broken in funny ways; use with care.
"""
__slots__ = ()
def __setattr__(self, key, value):
self[key] = value
def __getattribute__(self, key):
return self[key]
# Property key names for the hello frame.
class HelloFrameArguments(object):
transportNumber = 'tnum'
protocolVersion = 'ver'
httpFormat = 'format'
requestNewStream = 'new'
streamId = 'id'
streamingResponse = 'ming'
needPaddingBytes = 'pad'
maxReceiveBytes = 'maxb'
maxOpenTime = 'maxt'
maxInactivity = 'maxia'
useMyTcpAcks = 'tcpack'
succeedsTransport = 'eeds'
sack = 'sack'
lastSackSeenByClient = 'seenack'
# Make globals that refbinder can optimize away
_hfa = HelloFrameArguments
Hello_transportNumber = _hfa.transportNumber
Hello_protocolVersion = _hfa.protocolVersion
Hello_httpFormat = _hfa.httpFormat
Hello_requestNewStream = _hfa.requestNewStream
Hello_streamId = _hfa.streamId
Hello_streamingResponse = _hfa.streamingResponse
Hello_needPaddingBytes = _hfa.needPaddingBytes
Hello_maxReceiveBytes = _hfa.maxReceiveBytes
Hello_maxOpenTime = _hfa.maxOpenTime
Hello_maxInactivity = _hfa.maxInactivity
Hello_useMyTcpAcks = _hfa.useMyTcpAcks
Hello_succeedsTransport = _hfa.succeedsTransport
Hello_sack = _hfa.sack
Hello_lastSackSeenByClient = _hfa.lastSackSeenByClient
del _hfa
FORMAT_XHR, FORMAT_HTMLFILE = 2, 3
class InvalidFrame(Exception):
pass
class InvalidHello(InvalidFrame):
pass
class CannotEncode(Exception):
pass
def helloDataToHelloFrame(helloData):
"""
Convert arbitrary JSON-decoded blob of objects into a L{HelloFrame}.
Raises L{InvalidHello} if there were errors in the blob of objects.
"""
if not isinstance(helloData, dict):
raise InvalidHello("helloData not a dict")
# simplejson without speedups will always give us unicode instead of str
# objects. (With speedups, it gives you a str when possible.)
for k, v in helloData.iteritems():
if isinstance(v, unicode):
try:
helloData[k] = v.encode('ascii')
except UnicodeEncodeError:
raise InvalidHello("could not encode value for key "
"%r to ascii; was %r" % (k, v))
obj = attrdict()
# sack is always optional.
if Hello_sack in helloData:
try:
sack = helloData[Hello_sack]
if not isinstance(sack, str):
raise TypeError
obj.sack = sackStringToSack(sack)
except (KeyError, TypeError, InvalidSackString):
raise InvalidHello("bad sack")
else:
obj.sack = None
try:
lastSackSeen = helloData[Hello_lastSackSeenByClient]
if not isinstance(lastSackSeen, str):
raise TypeError
obj.lastSackSeenByClient = sackStringToSack(lastSackSeen)
except (KeyError, TypeError, InvalidSackString):
raise InvalidHello("bad lastSackSeenByClient")
try:
# Any line here can raise KeyError; additional exceptions marked with 'e:'
# requestNewStream is always optional. If missing or False/0,
# transport is intended to attach to an existing stream.
obj.requestNewStream = ensureBool( # e: ValueError
helloData[Hello_requestNewStream]) if \
Hello_requestNewStream in helloData else False
obj.transportNumber = ensureNonNegIntLimit( # e: ValueError, TypeError
helloData[Hello_transportNumber], 2**53)
obj.protocolVersion = helloData[Hello_protocolVersion]
obj.streamingResponse = ensureBool( # e: ValueError
helloData[Hello_streamingResponse])
# Rules for streamId: must be 20-30 inclusive bytes, must not
# contain codepoints > 127
obj.streamId = helloData[Hello_streamId]
if not isinstance(obj.streamId, str) or not 20 <= len(obj.streamId) <= 30:
raise InvalidHello("bad streamId")
except (KeyError, TypeError, ValueError):
raise InvalidHello(
"problem with requestNewStream, transportNumber, "
"protocolVersion, streamingResponse, or streamId")
if obj.protocolVersion != 2:
raise InvalidHello("bad protocolVersion")
# Hello_succeedsTransport is always optional. If missing, the client does not
# want to get S2C strings over this transport. If None, the client does,
# but the transport does not succeed an existing primary transport. If a
# number, the transport might succeed an existing primary transport.
if Hello_succeedsTransport in helloData:
obj.succeedsTransport = helloData[Hello_succeedsTransport]
if obj.succeedsTransport is not None:
try:
obj.succeedsTransport = ensureNonNegIntLimit(
obj.succeedsTransport, 2**53)
except (TypeError, ValueError):
raise InvalidHello("bad succeedsTransport")
try:
obj.httpFormat = helloData[Hello_httpFormat]
if not obj.httpFormat in (FORMAT_XHR, FORMAT_HTMLFILE):
raise InvalidHello("bad httpFormat")
except KeyError:
obj.httpFormat = None
# needPaddingBytes is always optional. If missing, 0.
if Hello_needPaddingBytes in helloData:
try:
obj.needPaddingBytes = ensureNonNegIntLimit(
helloData[Hello_needPaddingBytes], 16*1024) # e: ValueError, TypeError
except (TypeError, ValueError):
raise InvalidHello("bad needPaddingBytes")
else:
obj.needPaddingBytes = 0
# maxReceiveBytes is optional. If missing, no limit.
try:
obj.maxReceiveBytes = ensureNonNegIntLimit(
helloData[Hello_maxReceiveBytes], 2**53) # e: ValueError, TypeError
except KeyError:
obj.maxReceiveBytes = 2**53
except (TypeError, ValueError):
raise InvalidHello("bad maxReceiveBytes")
# maxOpenTime is optional. If missing, no limit.
# Time is in seconds.
try:
obj.maxOpenTime = ensureNonNegIntLimit(
helloData[Hello_maxOpenTime], 2**53) # e: ValueError, TypeError
except KeyError:
obj.maxOpenTime = None
except (TypeError, ValueError):
raise InvalidHello("bad maxOpenTime")
# maxInactivity is required. If 0, no heartbeat.
# Time is in seconds.
try:
obj.maxInactivity = ensureNonNegIntLimit(
helloData[Hello_maxInactivity], 600) # e: ValueError, TypeError
except (KeyError, TypeError, ValueError):
raise InvalidHello("bad maxInactivity")
return HelloFrame(obj)
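# A minimal sketch (field values invented) of a blob the function above accepts:
# only the required keys plus lastSackSeenByClient are present, so every
# optional field falls back to its default.
def _demoHelloData():
    helloData = {
        'tnum': 0,         # transportNumber
        'ver': 2,          # protocolVersion; anything but 2 raises InvalidHello
        'ming': True,      # streamingResponse
        'id': 'x' * 25,    # streamId must be a 20-30 byte str
        'maxia': 30,       # maxInactivity in seconds, capped at 600
        'seenack': '|-1',  # lastSackSeenByClient; "|-1" means "nothing seen yet"
    }
    return helloDataToHelloFrame(helloData)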
# The other frame classes are a tuple; this one is not, and some users
# like L{mserver.sanitizeHelloFrame} do mutate it.
class HelloFrame(object):
def __init__(self, obj):
self.__dict__ = obj
def __eq__(self, other):
return False if type(self) != type(other) else self.__dict__ == other.__dict__
def __ne__(self, other):
return True if type(self) != type(other) else self.__dict__ != other.__dict__
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.__dict__)
@classmethod
def decode(cls, frameString):
"""
C{frameString} is a L{StringFragment} that ends with "H".
"""
try:
# Reach into the private attributes, we know what we're doing.
helloData, stoppedAt = strictDecoder.raw_decode(
frameString._string, frameString._pos)
except (JSONDecodeError, StrictDecodeError, RuntimeError):
# Note: RuntimeError raised if stack overflows
raise InvalidHello("corrupt JSON")
# `- 1` because we expect to stop before the trailing "H"
if stoppedAt != frameString._pos + frameString.size - 1:
raise InvalidHello("trailing garbage")
return helloDataToHelloFrame(helloData)
def _yieldMapping(self):
for k, v in self.__dict__.iteritems():
argByte = getattr(HelloFrameArguments, k, None)
if argByte is None:
raise CannotEncode("Don't know argByte for %r" % (k,))
# We allow the user to pass either a string or a SACK,
# hopefully they'll pass a SACK.
if isinstance(v, SACK):
yield argByte, SackFrame(v).encode()[:-1]
else:
yield argByte, v
def encode(self):
return dumps(dict(self._yieldMapping()), separators=(',', ':'), allow_nan=False) + 'H'
def wantsStrings(self):
"""
Returns a C{bool} indicating whether this HelloFrame indicates that
client wants to receive strings.
"""
return hasattr(self, 'succeedsTransport')
class StringFrame(tuple):
__slots__ = ()
_MARKER = object()
string = property(operator.itemgetter(1))
def __new__(cls, string):
"""
C{string} is a L{StringFragment} or C{str}. Note: a StringFrame
instantiated with a L{StringFragment} is unequal to one with a
C{str}, even if they represent the same string.
"""
if not isinstance(string, StringFragment):
string = StringFragment(string, 0, len(string))
return tuple.__new__(cls, (cls._MARKER, string))
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, str(self[1]))
@classmethod
def decode(cls, frameString):
"""
C{frameString} is a L{StringFragment} that ends with " ".
        Restricted string validation is not performed here, to accommodate
a future extension that allows expanding the allowed byte/char
range mid-stream.
"""
return cls(frameString[:-1])
def encode(self):
return str(self.string) + ' '
class CommentFrame(tuple):
__slots__ = ()
_MARKER = object()
comment = property(operator.itemgetter(1))
def __new__(cls, comment):
"""
C{comment} is a C{str}.
"""
return tuple.__new__(cls, (cls._MARKER, comment))
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self[1])
@classmethod
def decode(cls, frameString):
"""
C{frameString} is a L{StringFragment} that ends with "^".
"""
return cls(str(frameString[:-1]))
def encode(self):
return str(self.comment) + '^'
class SeqNumFrame(tuple):
__slots__ = ()
_MARKER = object()
seqNum = property(operator.itemgetter(1))
def __new__(cls, seqNum):
"""
C{seqNum} is an L{int} or L{long}.
"""
return tuple.__new__(cls, (cls._MARKER, seqNum))
def __repr__(self):
return '%s(%d)' % (self.__class__.__name__, self[1])
@classmethod
def decode(cls, frameString):
"""
C{frameString} is a L{StringFragment} that ends with "N".
"""
try:
seqNum = strToNonNegLimit(str(frameString[:-1]), 2**53)
except ValueError:
raise InvalidFrame("bad seqNum")
return cls(seqNum)
def encode(self):
return '%dN' % self.seqNum
class InvalidSackString(Exception):
pass
def sackStringToSack(sackString):
"""
C{sackString} is a C{str}. Returns a L{window.SACK}.
"""
try:
# If not enough args for split, Python raises ValueError
joinedSackList, ackNumberStr = sackString.rsplit('|', 1)
ackNumber = strToIntInRange(ackNumberStr, -1, 2**53)
sackList = tuple(strToNonNegLimit(s, 2**53) for s in joinedSackList.split(',')) if joinedSackList else ()
except ValueError:
raise InvalidSackString("bad sack")
return SACK(ackNumber, sackList)
def sackToSackString(sack):
"""
C{sack} is a L{window.SACK}. Returns a C{str}.
"""
return ','.join(str(s) for s in sack.sackList) + '|' + str(sack.ackNumber)
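# Quick illustration of the wire format handled above (values invented):
# an ack number of 3 with out-of-order strings 5 and 7 serializes to "5,7|3".
def _demoSackString():
    assert sackToSackString(SACK(3, (5, 7))) == "5,7|3"
    sack = sackStringToSack("5,7|3")
    assert sack.ackNumber == 3 and tuple(sack.sackList) == (5, 7)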
class SackFrame(tuple):
__slots__ = ()
_MARKER = object()
sack = property(operator.itemgetter(1))
def __new__(cls, sack):
"""
        C{sack} is a L{window.SACK}.
"""
assert isinstance(sack, SACK), type(sack)
return tuple.__new__(cls, (cls._MARKER, sack))
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self[1])
@classmethod
def decode(cls, frameString):
"""
C{frameString} is a L{StringFragment} that ends with "A".
"""
try:
return cls(sackStringToSack(str(frameString[:-1])))
except InvalidSackString:
raise InvalidFrame("bad sackList or ackNumber")
def encode(self):
return sackToSackString(self.sack) + 'A'
class StreamStatusFrame(tuple):
__slots__ = ()
_MARKER = object()
lastSackSeen = property(operator.itemgetter(1))
def __new__(cls, lastSackSeen):
"""
        C{lastSackSeen} is a L{window.SACK}.
"""
assert isinstance(lastSackSeen, SACK), type(lastSackSeen)
return tuple.__new__(cls, (cls._MARKER, lastSackSeen))
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self[1])
@classmethod
def decode(cls, frameString):
"""
        C{frameString} is a L{StringFragment} that ends with "T".
"""
try:
return cls(sackStringToSack(str(frameString[:-1])))
except InvalidSackString:
raise InvalidFrame("bad sackList or ackNumber")
def encode(self):
return sackToSackString(self.lastSackSeen) + 'T'
class StreamCreatedFrame(tuple):
__slots__ = ()
_MARKER = object()
def __new__(cls):
"""
No arguments.
"""
return tuple.__new__(cls, (cls._MARKER,))
def __repr__(self):
return '%s()' % (self.__class__.__name__,)
@classmethod
def decode(cls, frameString):
"""
C{frameString} is a L{StringFragment} that ends with "C".
"""
if len(frameString) != 1:
raise InvalidFrame("leading garbage")
return cls()
def encode(self):
return 'C'
class YouCloseItFrame(tuple):
__slots__ = ()
_MARKER = object()
def __new__(cls):
"""
No arguments.
"""
return tuple.__new__(cls, (cls._MARKER,))
def __repr__(self):
return '%s()' % (self.__class__.__name__,)
@classmethod
def decode(cls, frameString):
"""
C{frameString} is a L{StringFragment} that ends with "Y".
"""
if len(frameString) != 1:
raise InvalidFrame("leading garbage")
return cls()
def encode(self):
return 'Y'
RESTRICTED_STRING_RE = re.compile(r"\A[ -~]*\Z")
def isRestrictedString(string):
"""
Return C{True} if C{str} C{string}'s bytes are within inclusive
range 0x20 " " - 0x7E "~"
@param string: The string to validate.
@type string: C{str} or C{buffer}
"""
return not not RESTRICTED_STRING_RE.match(string)
class ResetFrame(tuple):
"""
A reset frame indicates this side has given up on the stream.
A reset frame from the server implies a transport kill as well.
"""
__slots__ = ()
_MARKER = object()
reasonString = property(operator.itemgetter(1))
applicationLevel = property(operator.itemgetter(2))
def __new__(cls, reasonString, applicationLevel):
"""
@param reasonString: why the stream reset.
ASCII (0x20-0x7E)-only C{str}, max 255 bytes.
"""
return tuple.__new__(cls, (cls._MARKER, reasonString, applicationLevel))
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self[1], self[2])
@classmethod
def decode(cls, frameString):
"""
C{frameString} is a L{StringFragment} that ends with "!".
"""
reasonString, applicationLevelStr = str(frameString[:-1]).rsplit('|', 1)
try:
applicationLevel = {'0': False, '1': True}[applicationLevelStr]
except KeyError:
raise InvalidFrame("bad applicationLevel")
if len(reasonString) > 255 or not isRestrictedString(reasonString):
raise InvalidFrame("reasonString too long or has illegal bytes")
return cls(reasonString, applicationLevel)
def encode(self):
return self.reasonString + '|' + str(int(self.applicationLevel)) + '!'
class TransportKillFrame(tuple):
__slots__ = ()
_MARKER = object()
reason = property(operator.itemgetter(1))
# No such stream
stream_attach_failure = "stream_attach_failure"
# Peer acked strings that we never sent
acked_unsent_strings = "acked_unsent_strings"
# Peer sent frames that we don't understand
invalid_frame_type_or_arguments = "invalid_frame_type_or_arguments"
# Peer sent data that could not even be decoded to frames
# (only applies to some decoders).
frame_corruption = "frame_corruption"
# Peer has caused our receive window to overflow
rwin_overflow = "rwin_overflow"
allReasons = set([
stream_attach_failure, acked_unsent_strings,
invalid_frame_type_or_arguments, frame_corruption,
rwin_overflow])
def __new__(cls, reason):
"""
@param reason: a valid transport kill reason
@type reason: str
"""
return tuple.__new__(cls, (cls._MARKER, reason))
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self[1])
@classmethod
def decode(cls, frameString):
"""
        C{frameString} is a L{StringFragment} that ends with "K".
# -*- coding: utf-8 -*-
# Copyright 2015 moco_beta
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import io
import pickle
import gzip
from struct import pack
from .fst import Matcher, create_minimum_transducer, compileFST, unpack_uint
import traceback
import logging
import sys
import re
import itertools
import pkgutil
import zlib
import base64
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARN)
handler = logging.StreamHandler()
handler.setLevel(logging.WARN)
formatter = logging.Formatter('%(asctime)s\t%(name)s - %(levelname)s\t%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
try:
from functools import lru_cache
except ImportError:
from functools import wraps
def lru_cache(**kwargs):
def _dummy(function):
@wraps(function)
def __dummy(*args, **kwargs):
return function(*args, **kwargs)
return __dummy
return _dummy
PY3 = sys.version_info[0] == 3
SYSDIC_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'sysdic')
MODULE_FST_DATA = 'fst_data%d.py'
MODULE_ENTRIES_EXTRA = 'entries_extra%d.py'
MODULE_ENTRIES_COMPACT = 'entries_compact%d.py'
MODULE_ENTRIES_BUCKETS = 'entries_buckets.py'
MODULE_CONNECTIONS = 'connections%d.py'
MODULE_CHARDEFS = 'chardef.py'
MODULE_UNKNOWNS = 'unknowns.py'
FILE_USER_FST_DATA = 'user_fst.data'
FILE_USER_ENTRIES_DATA = 'user_entries.data'
def save_fstdata(data, dir, part=0):
_save_as_module(os.path.join(dir, MODULE_FST_DATA % part), data, binary=True)
def start_save_entries(dir, bucket_num):
for i in range(0, bucket_num):
_start_entries_as_module(os.path.join(dir, MODULE_ENTRIES_COMPACT % i))
_start_entries_as_module(os.path.join(dir, MODULE_ENTRIES_EXTRA % i))
def end_save_entries(dir, bucket_num):
for i in range(0, bucket_num):
_end_entries_as_module(os.path.join(dir, MODULE_ENTRIES_COMPACT % i))
_end_entries_as_module(os.path.join(dir, MODULE_ENTRIES_EXTRA % i))
def save_entry(dir, bucket_idx, morph_id, entry):
_save_entry_as_module_compact(os.path.join(dir, MODULE_ENTRIES_COMPACT % bucket_idx), morph_id, entry)
_save_entry_as_module_extra(os.path.join(dir, MODULE_ENTRIES_EXTRA % bucket_idx), morph_id, entry)
def save_entry_buckets(dir, buckets):
_save_as_module(os.path.join(dir, MODULE_ENTRIES_BUCKETS), buckets)
def save_connections(connections, dir=u'.'):
# split whole connections to 2 buckets to reduce memory usage while installing.
# TODO: find better ways...
bucket_size = (len(connections) // 2) + 1
offset = 0
for i in range(1, 3):
_save_as_module(os.path.join(dir, MODULE_CONNECTIONS % i),
connections[offset:offset+bucket_size])
offset += bucket_size
def save_chardefs(chardefs, dir=u'.'):
_save_as_module(os.path.join(dir, MODULE_CHARDEFS), chardefs)
def save_unknowns(unknowns, dir=u'.'):
_save_as_module(os.path.join(dir, MODULE_UNKNOWNS), unknowns)
def _save(file, data, compresslevel):
if not data:
return
with gzip.open(file, 'wb', compresslevel) as f:
f.write(data)
f.flush()
def _load(file):
if not os.path.exists(file):
return None
with gzip.open(file, 'rb') as f:
data = f.read()
return data
def _load_package_data(package, resource):
try:
rawdata = pkgutil.get_data(package, resource)
except IOError:
return None
return zlib.decompress(rawdata, zlib.MAX_WBITS | 16)
def _save_as_module(file, data, binary=False):
if not data:
return
with open(file, 'w') as f:
f.write(u'DATA=')
        if binary:
            f.write('"')
            # base64.b64encode returns bytes; decode so the text-mode file accepts it on PY3
            f.write(base64.b64encode(data).decode('ascii') if PY3 else base64.b64encode(data))
            f.write('"')
else:
f.write(str(data).replace('\\\\', '\\') if PY3 else unicode(data))
f.flush()
def _start_entries_as_module(file):
idx_file = re.sub(r'\.py$', '_idx.py', file)
with open(file, 'w') as f:
with open(idx_file, 'w') as f_idx:
f.write("# -*- coding: utf-8 -*-\n")
f.write('DATA={')
f_idx.write('DATA={')
def _end_entries_as_module(file):
idx_file = re.sub(r'\.py$', '_idx.py', file)
with open(file, 'a') as f:
with open(idx_file, 'a') as f_idx:
f.write('}\n')
f_idx.write('}\n')
f.flush()
f_idx.flush()
def _save_entry_as_module_compact(file, morph_id, entry):
idx_file = re.sub(r'\.py$', '_idx.py', file)
with open(file, 'a') as f:
with open(idx_file, 'a') as f_idx:
f.write('%d:(' % morph_id)
_pos1 = f.tell()
f_idx.write('%d:%d,' % (morph_id, _pos1))
s = u"u'%s',%s,%s,%d" % (
entry[0].encode('unicode_escape').decode('ascii') if PY3 else entry[0].encode('unicode_escape'),
entry[1],
entry[2],
entry[3])
f.write(s)
f.write('),')
def _save_entry_as_module_extra(file, morph_id, entry):
idx_file = re.sub(r'\.py$', '_idx.py', file)
with open(file, 'a') as f:
with open(idx_file, 'a') as f_idx:
f.write('%d:(' % morph_id)
_pos1 = f.tell()
f_idx.write('%d:%d,' % (morph_id, _pos1))
s = u"u'%s',u'%s',u'%s',u'%s',u'%s',u'%s'" % (
entry[4].encode('unicode_escape').decode('ascii') if PY3 else entry[4].encode('unicode_escape'),
entry[5].encode('unicode_escape').decode('ascii') if PY3 else entry[5].encode('unicode_escape'),
entry[6].encode('unicode_escape').decode('ascii') if PY3 else entry[6].encode('unicode_escape'),
entry[7].encode('unicode_escape').decode('ascii') if PY3 else entry[7].encode('unicode_escape'),
entry[8].encode('unicode_escape').decode('ascii') if PY3 else entry[8].encode('unicode_escape'),
entry[9].encode('unicode_escape').decode('ascii') if PY3 else entry[9].encode('unicode_escape'))
f.write(s)
f.write('),')
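# For orientation, a sketch of what the two writers above emit (offsets invented):
#   entries_compact0.py:     DATA={0:(u'\u3059\u3082\u3082',1285,1285,7546),...}
#   entries_compact0_idx.py: DATA={0:8,...}
# Each *_idx.py module maps morph_id to the byte offset of its entry so the
# mmap-based readers below can seek straight to it instead of parsing the file.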
class Dictionary(object):
u"""
Base dictionary class
"""
def __init__(self, compiledFST, entries, connections):
self.compiledFST = compiledFST
self.matcher = Matcher(compiledFST)
self.entries = entries
self.connections = connections
def lookup(self, s):
(matched, outputs) = self.matcher.run(s)
if not matched:
return []
try:
res = []
for e in outputs:
num = unpack_uint(e)
res.append((num,) + self.entries[num][:4])
return res
except Exception as e:
logger.error('Cannot load dictionary data. The dictionary may be corrupted?')
logger.error('input=%s' % s)
logger.error('outputs=%s' % str(outputs) if PY3 else unicode(outputs))
            logger.error(traceback.format_exc())
sys.exit(1)
def lookup_extra(self, num):
try:
return self.entries[num][4:]
        except Exception:
            logger.error('Cannot load dictionary data. The dictionary may be corrupted?')
            logger.error('num=%d' % num)
            logger.error(traceback.format_exc())
            sys.exit(1)
def get_trans_cost(self, id1, id2):
return self.connections[id1][id2]
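# A minimal usage sketch (variable names hypothetical): entries maps a morph id
# to a 10-tuple starting with (surface, left_id, right_id, cost), and lookup()
# yields one (morph_id, surface, left_id, right_id, cost) per matched entry.
#
#   dic = Dictionary(compiled_fst, entries, connections)
#   for morph_id, surface, left_id, right_id, cost in dic.lookup(u'...'):
#       trans_cost = dic.get_trans_cost(left_id, right_id)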
class MMapDictionary(object):
u"""
    Base MMap dictionary class
"""
def __init__(self, compiledFST, entries_compact, entries_extra, open_files, connections):
self.compiledFST = compiledFST
self.matcher = Matcher(compiledFST)
self.entries_compact = entries_compact
self.entries_extra = entries_extra
self.open_files = open_files
self.connections = connections
def lookup(self, s):
(matched, outputs) = self.matcher.run(s)
if not matched:
return []
try:
matched_entries = []
for e in outputs:
idx = unpack_uint(e)
bucket = next(filter(lambda b: idx >= b[0] and idx < b[1], self.entries_compact.keys())) if PY3 \
else filter(lambda b: idx >= b[0] and idx < b[1], self.entries_compact.keys())[0]
mm, mm_idx = self.entries_compact[bucket]
_pos1s = mm_idx[idx] + 2
_pos1e = mm.find(b"',", _pos1s) if PY3 else mm.find("',", _pos1s)
_pos2s = _pos1e + 2
_pos2e = mm.find(b",", _pos2s) if PY3 else mm.find(",", _pos2s)
_pos3s = _pos2e + 1
_pos3e = mm.find(b",", _pos3s) if PY3 else mm.find(",", _pos3s)
_pos4s = _pos3e + 1
_pos4e = mm.find(b")", _pos4s) if PY3 else mm.find(")", _pos4s)
_entry = (mm[_pos1s:_pos1e].decode('unicode_escape'), int(mm[_pos2s:_pos2e]), int(mm[_pos3s:_pos3e]), int(mm[_pos4s:_pos4e]))
matched_entries.append((idx,) + _entry)
return matched_entries
except Exception as e:
logger.error('Cannot load dictionary data. The dictionary may be corrupted?')
logger.error('input=%s' % s)
logger.error('outputs=%s' % str(outputs) if PY3 else unicode(outputs))
            logger.error(traceback.format_exc())
sys.exit(1)
def lookup_extra(self, idx):
try:
bucket = next(filter(lambda b: idx >= b[0] and idx < b[1], self.entries_extra.keys())) if PY3 \
else filter(lambda b: idx >= b[0] and idx < b[1], self.entries_extra.keys())[0]
mm, mm_idx = self.entries_extra[bucket]
_pos1s = mm_idx[idx] + 2
_pos1e = mm.find(b"',u'", _pos1s) if PY3 else mm.find("',u'", _pos1s)
_pos2s = _pos1e + 4
_pos2e = mm.find(b"',u'", _pos2s) if PY3 else mm.find("',u'", _pos2s)
_pos3s = _pos2e + 4
_pos3e = mm.find(b"',u'", _pos3s) if PY3 else mm.find("',u'", _pos3s)
_pos4s = _pos3e + 4
_pos4e = mm.find(b"',u'", _pos4s) if PY3 else mm.find("',u'", _pos4s)
_pos5s = _pos4e + 4
_pos5e = mm.find(b"',u'", _pos5s) if PY3 else mm.find("',u'", _pos5s)
_pos6s = _pos5e + 4
_pos6e = mm.find(b"')", _pos6s) if PY3 else mm.find("')", _pos6s)
return (
mm[_pos1s:_pos1e].decode('unicode_escape'), mm[_pos2s:_pos2e].decode('unicode_escape'), mm[_pos3s:_pos3e].decode('unicode_escape'),
mm[_pos4s:_pos4e].decode('unicode_escape'), mm[_pos5s:_pos5e].decode('unicode_escape'), mm[_pos6s:_pos6e].decode('unicode_escape')
)
except Exception as e:
logger.error('Cannot load extra info. The dictionary may be corrupted?')
logger.error('idx=%d' % idx)
            logger.error(traceback.format_exc())
sys.exit(1)
def get_trans_cost(self, id1, id2):
return self.connections[id1][id2]
def __del__(self):
for mm, mm_idx in self.entries_compact.values():
mm.close()
if self.entries_extra:
for mm, mm_idx in self.entries_extra.values():
mm.close()
for fp in self.open_files:
fp.close()
class UnknownsDictionary(object):
def __init__(self, chardefs, unknowns):
self.char_categories = chardefs[0]
self.char_ranges = chardefs[1]
self.unknowns = unknowns
@lru_cache(maxsize=1024)
def get_char_categories(self, c):
res = {}
for chr_range in self.char_ranges:
if chr_range['from'] <= c <= chr_range['to']:
cate = chr_range['cate']
                compat_cates = chr_range['compat_cates'] if 'compat_cates' in chr_range else []
                res[cate] = compat_cates
if not res:
res = {u'DEFAULT': []}
return res
def unknown_invoked_always(self, cate):
if cate in self.char_categories:
return self.char_categories[cate]['INVOKE']
return False
def unknown_grouping(self, cate):
if cate in self.char_categories:
return self.char_categories[cate]['GROUP']
return False
def unknown_length(self, cate):
if cate in self.char_categories:
return self.char_categories[cate]['LENGTH']
return -1
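# Shape of the chardef structures consumed above (illustrative values only):
#   char_categories: {u'KANJI': {'INVOKE': False, 'GROUP': False, 'LENGTH': 2}, ...}
#   char_ranges:     [{'from': u'\u4e00', 'to': u'\u9fa5', 'cate': u'KANJI',
#                      'compat_cates': [u'KANJINUMERIC']}, ...]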
class SystemDictionary(Dictionary, UnknownsDictionary):
u"""
System dictionary class
"""
def __init__(self, all_fstdata, entries, connections, chardefs, unknowns):
Dictionary.__init__(self, all_fstdata, entries, connections)
UnknownsDictionary.__init__(self, chardefs, unknowns)
class MMapSystemDictionary(MMapDictionary, UnknownsDictionary):
u"""
MMap System dictionary class
"""
def __init__(self, all_fstdata, mmap_entries, connections, chardefs, unknowns):
MMapDictionary.__init__(self, all_fstdata, mmap_entries[0], mmap_entries[1], mmap_entries[2], connections)
UnknownsDictionary.__init__(self, chardefs, unknowns)
class UserDictionary(Dictionary):
u"""
User dictionary class (uncompiled)
"""
def __init__(self, user_dict, enc, type, connections):
"""
Initialize user defined dictionary object.
:param user_dict: user dictionary file (CSV format)
:param enc: character encoding
:param type: user dictionary type. supported types are 'ipadic' and 'simpledic'
:param connections: connection cost matrix. expected value is SYS_DIC.connections
.. seealso:: See http://mocobeta.github.io/janome/en/#use-with-user-defined-dictionary for details for user dictionary.
"""
build_method = getattr(self, 'build' + type)
compiledFST, entries = build_method(user_dict, enc)
Dictionary.__init__(self, [compiledFST], entries, connections)
def buildipadic(self, user_dict, enc):
surfaces = []
entries = {}
with io.open(user_dict, encoding=enc) as f:
for line in f:
line = line.rstrip()
surface, left_id, right_id, cost, \
pos_major, pos_minor1, pos_minor2, pos_minor3, \
infl_type, infl_form, base_form, reading, phonetic = \
line.split(',')
part_of_speech = ','.join([pos_major, pos_minor1, pos_minor2, pos_minor3])
morph_id = len(surfaces)
surfaces.append((surface.encode('utf8'), pack('I', morph_id)))
entries[morph_id] = (surface, int(left_id), int(right_id), int(cost), part_of_speech, infl_type, infl_form, base_form, reading, phonetic)
inputs = sorted(surfaces) # inputs must be sorted.
assert len(surfaces) == len(entries)
processed, fst = create_minimum_transducer(inputs)
compiledFST = compileFST(fst)
return compiledFST, entries
def buildsimpledic(self, user_dict, enc):
import sys
surfaces = []
entries = {}
with io.open(user_dict, encoding=enc) as f:
            for line in f:
this object directly, use :meth:`DSSSubpopulationAnalyses.get_analysis(feature)` instead
"""
def __init__(self, analysis, prediction_type):
self._internal_dict = analysis
self.computed_as_type = analysis["computed_as_type"]
self.modalities = [DSSSubpopulationModality(analysis["feature"], self.computed_as_type, m, prediction_type) for m in analysis.get("modalities", [])]
def get_raw(self):
"""
Gets the raw dictionary of the subpopulation analysis
:rtype: dict
"""
return self._internal_dict
def __repr__(self):
return "{cls}(computed_as_type={type}, feature={feature}, modalities_count={modalities_count})".format(
cls=self.__class__.__name__,
type=self.computed_as_type,
feature=self._internal_dict["feature"],
modalities_count=len(self.modalities))
def get_computation_params(self):
"""
Gets computation params
"""
return {
"nbRecords": self._internal_dict["nbRecords"],
"randomState": self._internal_dict["randomState"],
"onSample": self._internal_dict["onSample"]
}
def list_modalities(self):
"""
List definitions of modalities
"""
return [m.definition for m in self.modalities]
def get_modality_data(self, definition):
"""
Retrieves modality from definition
:param definition: definition of modality to retrieve. Can be:
* :class:`dataikuapi.dss.ml.DSSSubpopulationModalityDefinition`
* `dataikuapi.dss.ml.DSSSubpopulationModalityDefinition.MISSING_VALUES`
to retrieve modality corresponding to missing values
* for category modality, can be a str corresponding to the value of the modality
* for numeric modality, can be a number inside the modality
:returns: the modality
:rtype: :class:`dataikuapi.dss.ml.DSSSubpopulationModality`
"""
if definition == DSSSubpopulationModalityDefinition.MISSING_VALUES:
for m in self.modalities:
if m.definition.missing_values:
return m
raise ValueError("No 'missing values' modality found")
if isinstance(definition, DSSSubpopulationModalityDefinition):
modality_candidates = [m for m in self.modalities if m.definition.index == definition.index]
if len(modality_candidates) == 0:
raise ValueError("Modality with index '%s' not found" % definition.index)
return modality_candidates[0]
for m in self.modalities:
if m.definition.contains(definition):
return m
raise ValueError("Modality not found: %s" % definition)
class DSSSubpopulationAnalyses(object):
"""
Object to read details of subpopulation analyses of a trained model
Do not create this object directly, use :meth:`DSSTrainedPredictionModelDetails.get_subpopulation_analyses()` instead
"""
def __init__(self, data, prediction_type):
self._internal_dict = data
self.prediction_type = prediction_type
self.analyses = []
for analysis in data.get("subpopulationAnalyses", []):
self.analyses.append(DSSSubpopulationAnalysis(analysis, prediction_type))
def get_raw(self):
"""
Gets the raw dictionary of subpopulation analyses
:rtype: dict
"""
return self._internal_dict
def __repr__(self):
return "{cls}(prediction_type={type}, analyses={analyses})".format(cls=self.__class__.__name__,
type=self.prediction_type,
analyses=self.list_analyses())
def get_global(self):
"""
Retrieves information and performance on the full dataset used to compute the subpopulation analyses
"""
return DSSSubpopulationGlobal(self._internal_dict["global"], self.prediction_type)
def list_analyses(self):
"""
Lists all features on which subpopulation analyses have been computed
"""
return [analysis.get_raw()["feature"] for analysis in self.analyses]
def get_analysis(self, feature):
"""
Retrieves the subpopulation analysis for a particular feature
"""
try:
return next(analysis for analysis in self.analyses if analysis.get_raw()["feature"] == feature)
except StopIteration:
raise ValueError("Subpopulation analysis for feature '%s' cannot be found" % feature)
class DSSPartialDependence(object):
"""
Object to read details of partial dependence of a trained model
Do not create this object directly, use :meth:`DSSPartialDependencies.get_partial_dependence(feature)` instead
"""
def __init__(self, data):
self._internal_dict = data
def get_raw(self):
"""
Gets the raw dictionary of the partial dependence
:rtype: dict
"""
return self._internal_dict
def __repr__(self):
return "{cls}(feature={feature})".format(cls=self.__class__.__name__, feature=self._internal_dict["feature"])
def get_computation_params(self):
"""
Gets computation params
"""
return {
"nbRecords": self._internal_dict["nbRecords"],
"randomState": self._internal_dict["randomState"],
"onSample": self._internal_dict["onSample"]
}
class DSSPartialDependencies(object):
"""
Object to read details of partial dependencies of a trained model
Do not create this object directly, use :meth:`DSSTrainedPredictionModelDetails.get_partial_dependencies()` instead
"""
def __init__(self, data):
self._internal_dict = data
self.partial_dependencies = []
for pd in data.get("partialDependencies", []):
self.partial_dependencies.append(DSSPartialDependence(pd))
def get_raw(self):
"""
Gets the raw dictionary of partial dependencies
:rtype: dict
"""
return self._internal_dict
def __repr__(self):
return "{cls}(features={features})".format(cls=self.__class__.__name__, features=self.list_features())
def list_features(self):
"""
Lists all features on which partial dependencies have been computed
"""
return [partial_dep.get_raw()["feature"] for partial_dep in self.partial_dependencies]
def get_partial_dependence(self, feature):
"""
Retrieves the partial dependencies for a particular feature
"""
try:
return next(pd for pd in self.partial_dependencies if pd.get_raw()["feature"] == feature)
except StopIteration:
raise ValueError("Partial dependence for feature '%s' cannot be found" % feature)
class DSSClustersFacts(object):
def __init__(self, clusters_facts):
self.clusters_facts = clusters_facts
def get_raw(self):
"""Gets the raws facts data structure"""
return self.clusters_facts
def get_cluster_size(self, cluster_index):
"""Gets the size of a cluster identified by its index"""
return self.clusters_facts["clusters"][cluster_index]["size"]
def get_facts_for_cluster(self, cluster_index):
"""
Gets all facts for a cluster identified by its index. Returns a list of dicts
:rtype: list
"""
return self.clusters_facts["clusters"][cluster_index]["facts"]
def get_facts_for_cluster_and_feature(self, cluster_index, feature_name):
"""
Gets all facts for a cluster identified by its index, limited to a feature name. Returns a list of dicts
:rtype: list
"""
return [x for x in self.get_facts_for_cluster(cluster_index) if x["feature_label"] == feature_name]
class DSSTrainedClusteringModelDetails(DSSTrainedModelDetails):
"""
Object to read details of a trained clustering model
Do not create this object directly, use :meth:`DSSMLTask.get_trained_model_details()` instead
"""
def __init__(self, details, snippet, saved_model=None, saved_model_version=None, mltask=None, mltask_model_id=None):
DSSTrainedModelDetails.__init__(self, details, snippet, saved_model, saved_model_version, mltask, mltask_model_id)
def get_raw(self):
"""
Gets the raw dictionary of trained model details
"""
return self.details
def get_train_info(self):
"""
Returns various information about the train process (size of the train set, quick description, timing information)
:rtype: dict
"""
return self.details["trainInfo"]
def get_facts(self):
"""
        Gets the 'cluster facts' data, i.e. the structure behind the screen "for cluster X, average of Y is Z times higher than average"
:rtype: :class:`DSSClustersFacts`
"""
return DSSClustersFacts(self.details["facts"])
def get_performance_metrics(self):
"""
Returns all performance metrics for this clustering model.
:returns: a dict of performance metrics values
:rtype: dict
"""
import copy
clean_snippet = copy.deepcopy(self.snippet)
for x in ["fullModelId", "algorithm", "trainInfo", "userMeta", "backendType", "sessionId", "sessionDate", "facts"]:
if x in clean_snippet:
del clean_snippet[x]
return clean_snippet
def get_preprocessing_settings(self):
"""
Gets the preprocessing settings that were used to train this model
:rtype: dict
"""
return self.details["preprocessing"]
def get_modeling_settings(self):
"""
Gets the modeling (algorithms) settings that were used to train this model.
Note: the structure of this dict is not the same as the modeling params on the ML Task
(which may contain several algorithm)
:rtype: dict
"""
return self.details["modeling"]
def get_actual_modeling_params(self):
"""
Gets the actual / resolved parameters that were used to train this model.
:return: A dictionary, which contains at least a "resolved" key
:rtype: dict
"""
return self.details["actualParams"]
def get_scatter_plots(self):
"""
Gets the cluster scatter plot data
:return: a DSSScatterPlots object to interact with the scatter plots
:rtype: :class:`dataikuapi.dss.ml.DSSScatterPlots`
"""
scatters = self.mltask.client._perform_json(
"GET", "/projects/%s/models/lab/%s/%s/models/%s/scatter-plots" % (self.mltask.project_key, self.mltask.analysis_id, self.mltask.mltask_id, self.mltask_model_id))
return DSSScatterPlots(scatters)
class DSSMLTask(object):
@staticmethod
def from_full_model_id(client, fmi, project_key=None):
match = re.match(r"^A-(\w+)-(\w+)-(\w+)-(s[0-9]+)-(pp[0-9]+(-part-(\w+)|-base)?)-(m[0-9]+)$", fmi)
if match is None:
            raise DataikuException("Invalid model id: {}".format(fmi))
else:
if project_key is None:
project_key = match.group(1)
return DSSMLTask(client, project_key, match.group(2), match.group(3))
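    # Example of the accepted full model id format (all segments hypothetical):
    #   "A-PROJECTKEY-ab1cd2ef-gh3ij4kl-s1-pp1-m1"
    # groups 1-3 of the regex become the project key, analysis id and ML task id.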
"""A handle to interact with a MLTask for prediction or clustering in a DSS visual analysis"""
def __init__(self, client, project_key, analysis_id, mltask_id):
self.client = client
self.project_key = project_key
self.analysis_id = analysis_id
self.mltask_id = mltask_id
def delete(self):
"""
Delete the present ML task
"""
return self.client._perform_json(
"DELETE", "/projects/%s/models/lab/%s/%s/" % (self.project_key, self.analysis_id, self.mltask_id))
def wait_guess_complete(self):
"""
Waits for guess to be complete. This should be called immediately after the creation of a new ML Task
(if the ML Task was created with wait_guess_complete=False),
before calling ``get_settings`` or ``train``
"""
while True:
status = self.get_status()
if status.get("guessing", "???") == False:
break
time.sleep(0.2)
def get_status(self):
"""
Gets the status of this ML Task
:return: a dict
"""
return self.client._perform_json(
"GET", "/projects/%s/models/lab/%s/%s/status" % (self.project_key, self.analysis_id, self.mltask_id))
def get_settings(self):
"""
Gets the settings of this ML Tasks
:return: a DSSMLTaskSettings object to interact with the settings
:rtype: :class:`dataikuapi.dss.ml.DSSMLTaskSettings`
"""
settings = self.client._perform_json(
"GET", "/projects/%s/models/lab/%s/%s/settings" % (self.project_key, self.analysis_id, self.mltask_id))
if settings["taskType"] == "PREDICTION":
return DSSPredictionMLTaskSettings(self.client, self.project_key, self.analysis_id, self.mltask_id, settings)
else:
return DSSClusteringMLTaskSettings(self.client, self.project_key, self.analysis_id, self.mltask_id, settings)
def train(self, session_name=None, session_description=None, run_queue=False):
"""
Trains models for this ML Task
:param str session_name: name for the session
:param str session_description: description for the session
This method waits for train to complete. If you want to train asynchronously, use :meth:`start_train` and :meth:`wait_train_complete`
This method returns the list of trained model identifiers. It returns models that have been trained for this train
session, not all trained models for this ML task. To get all identifiers for all models trained across all training sessions,
use :meth:`get_trained_models_ids`
These identifiers can be used for :meth:`get_trained_model_snippet`, :meth:`get_trained_model_details` and :meth:`deploy_to_flow`
:return: A list of model identifiers
:rtype: list of strings
"""
train_ret = self.start_train(session_name, session_description, run_queue)
self.wait_train_complete()
return self.get_trained_models_ids(session_id = train_ret["sessionId"])
def ensemble(self, model_ids=None, method=None):
"""
Create an ensemble model of a set of models
:param list model_ids: A list of model identifiers (defaults to `[]`)
:param str method: the ensembling method. One of: AVERAGE, PROBA_AVERAGE, MEDIAN, VOTE, LINEAR_MODEL, LOGISTIC_MODEL
        This method waits for the ensemble train to complete. If you want to train asynchronously, use :meth:`start_ensembling` and :meth:`wait_train_complete`
a problem. Skipping.", newname)
def getGuildInfo(self, guildname):
'''
Lookup guild by name. If such a guild exists (and the API is available)
the info as specified on https://wiki.guildwars2.com/wiki/API:2/guild/:id is returned.
Else, None is returned.
'''
ids = request("https://api.guildwars2.com/v2/guild/search?name=%s" % (urllib.parse.quote(guildname),))
return None if ids is None or len(ids) == 0 else request("https://api.guildwars2.com/v2/guild/%s" % (ids[0]))
def removeGuild(self, name):
'''
Removes a guild from the TS. That is:
- deletes their guild channel and all their subchannels by force
- removes the group from TS by force
- remove the auto-assignment for that group from the DB
name: name of the guild as in the game
'''
SUCCESS = 0
INVALID_GUILD_NAME = 1
NO_DB_ENTRY = 2
INVALID_PARAMETERS = 5
if name is None:
return INVALID_PARAMETERS
ginfo = self.getGuildInfo(name)
if ginfo is None:
return INVALID_GUILD_NAME
with self.dbc.lock:
g = self.dbc.cursor.execute("SELECT ts_group FROM guilds WHERE guild_name = ?", (name,)).fetchone()
groupname = g[0] if g is not None else None
if groupname is None:
return NO_DB_ENTRY
ts3conn = self.ts_connection
tag = ginfo.get("tag")
# FROM DB
log.debug("Deleting guild '%s' from DB.", name)
with self.dbc.lock:
self.dbc.cursor.execute("DELETE FROM guilds WHERE guild_name = ?", (name,))
self.dbc.conn.commit()
# CHANNEL
channelname = "%s [%s]" % (name, tag)
channel, ex = ts3conn.ts3exec(lambda tsc: tsc.query("channelfind", pattern = channelname).first(), signal_exception_handler)
if channel is None:
log.debug("No channel '%s' to delete.", channelname)
else:
log.debug("Deleting channel '%s'.", channelname)
ts3conn.ts3exec(lambda tsc: tsc.exec_("channeldelete", cid = channel.get("cid"), force = 1))
# GROUP
groups, ex = ts3conn.ts3exec(lambda tsc: tsc.query("servergrouplist").all())
group = next((g for g in groups if g.get("name") == groupname), None)
if group is None:
log.debug("No group '%s' to delete.", groupname)
else:
log.debug("Deleting group '%s'.", groupname)
ts3conn.ts3exec(lambda tsc: tsc.exec_("servergroupdel", sgid = group.get("sgid"), force = 1))
return SUCCESS
def createGuild(self, name, tag, groupname, contacts):
'''
Creates a guild in the TS.
- retrieves and uploads their emblem as icon
- creates guild channel with subchannels as read from the config with the icon
- creates a guild group with the icon and appropriate permissions
- adds in automatic assignment of the guild group upon re-verification
- adds the contact persons as initial channel description
- gives the contact role to the contact persons if they can be found in the DB
name: name of the guild as is seen ingame
tag: their tag
groupname: group that should be used for them. Useful if the tag is already taken
contacts: list of account names (Foo.1234) that should be noted down as contact and receive the special role for the new channel
returns: 0 for success or an error code indicating the problem (see below)
'''
SUCCESS = 0
DUPLICATE_TS_GROUP = 1
DUPLICATE_DB_ENTRY = 2
DUPLICATE_TS_CHANNEL = 3
MISSING_PARENT_CHANNEL = 4
INVALID_PARAMETERS = 5
if (name is None or tag is None or groupname is None or contacts is None
or len(name) < 3 or len(tag) < 2 or len(groupname) < 3
or not isinstance(contacts, list)):
return INVALID_PARAMETERS
ts3conn = self.ts_connection
channelname = "%s [%s]" % (name, tag)
channel_description = self.create_guild_channel_description(contacts, name, tag)
log.info("Creating guild '%s' with tag '%s', guild group '%s', and contacts '%s'." % (name, tag, groupname, ", ".join(contacts)))
# lock for the whole block to avoid constant interference
# locking the ts3conn is vital to properly do the TS3FileTransfer
# down the line.
with ts3conn.lock, self.dbc.lock:
#############################################
# CHECK IF GROUPS OR CHANNELS ALREADY EXIST #
#############################################
log.debug("Doing preliminary checks.")
groups, ex = ts3conn.ts3exec(lambda tsc: tsc.query("servergrouplist").all(), default_exception_handler)
group = next((g for g in groups if g.get("name") == groupname), None)
if group is not None:
# group already exists!
log.debug("Can not create a group '%s', because it already exists. Aborting guild creation.", group)
return DUPLICATE_TS_GROUP
with self.dbc.lock:
dbgroups = self.dbc.cursor.execute("SELECT ts_group, guild_name FROM guilds WHERE ts_group = ?", (groupname,)).fetchall()
if(len(dbgroups) > 0):
log.debug("Can not create a DB entry for TS group '%s', as it already exists. Aborting guild creation.", groupname)
return DUPLICATE_DB_ENTRY
channel, ex = ts3conn.ts3exec(lambda tsc: tsc.query("channelfind", pattern = channelname).first(), signal_exception_handler)
if channel is not None:
# channel already exists!
log.debug("Can not create a channel '%s', as it already exists. Aborting guild creation.", channelname)
return DUPLICATE_TS_CHANNEL
parent, ex = ts3conn.ts3exec(lambda tsc: tsc.query("channelfind", pattern = Config.guilds_parent_channel).first(), signal_exception_handler)
if parent is None:
# parent channel does not exist!
log.debug("Can not find a parent-channel '%s' for guilds. Aborting guild creation.", Config.guilds_parent_channel)
return MISSING_PARENT_CHANNEL
log.debug("Checks complete.")
#Icon uploading
icon_id = self.handle_guild_icon(name, ts3conn) #Returns None if no icon
##################################
# CREATE CHANNEL AND SUBCHANNELS #
##################################
log.debug("Creating guild channels...")
pid = parent.get("cid")
info, ex = ts3conn.ts3exec(lambda tsc: tsc.query("channelinfo", cid = pid).all(), signal_exception_handler)
# assert channel and group both exist and parent channel is available
all_guild_channels = [c for c in ts3conn.ts3exec(lambda tc: tc.query("channellist").all(), signal_exception_handler)[0] if c.get("pid") == pid]
all_guild_channels.sort(key=lambda c: c.get("channel_name"), reverse = True)
# Assuming the channels are already in order on the server,
# find the first channel whose name is alphabetically smaller than the new channel name.
# The sort_order of channels actually specifies after which channel they should be
# inserted. Giving 0 as sort_order puts them in first place after the parent.
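            # e.g. with existing children ["Zeta [Z]", "Alpha [A]"] (already in
            # reverse order) and a new channel "Mid [M]": "Zeta [Z]" > "Mid [M]"
            # is skipped, "Alpha [A]" stops the scan, so sort_order becomes
            # Alpha's cid and the new channel lands right after it.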
found_place = False
sort_order = 0
i = 0
while i < len(all_guild_channels) and not found_place:
if all_guild_channels[i].get("channel_name") > channelname:
i += 1
else:
sort_order = int(all_guild_channels[i].get("cid"))
found_place = True
cinfo, ex = ts3conn.ts3exec(lambda tsc: tsc.query("channelcreate"
, channel_name = channelname
, channel_description = channel_description
, cpid = pid
, channel_flag_permanent = 1
, channel_maxclients = 0
, channel_order = sort_order
, channel_flag_maxclients_unlimited = 0)
.first(), signal_exception_handler)
perms = [("i_channel_needed_join_power", 25),
("i_channel_needed_subscribe_power", 25),
("i_channel_needed_modify_power", 45),
("i_channel_needed_delete_power", 75)
]
if icon_id is not None:
perms.append(("i_icon_id", icon_id))
def channeladdperm(cid, permsid, permvalue):
return ts3conn.ts3exec(lambda tsc: tsc.exec_("channeladdperm"
, cid = cid
, permsid = permsid
, permvalue = permvalue
, permnegated = 0
, permskip = 0)
, signal_exception_handler)
for p,v in perms:
_, ex = channeladdperm(cinfo.get("cid"), p, v)
for c in Config.guild_sub_channels:
# FIXME: error check
res, ex = ts3conn.ts3exec(lambda tsc: tsc.query("channelcreate"
, channel_name = c
, cpid = cinfo.get("cid")
, channel_flag_permanent = 1)
.first(), signal_exception_handler)
###################
# CREATE DB GROUP #
###################
# must exist in DB before creating group to have it available when reordering groups.
log.debug("Creating entry in database for auto assignment of guild group...")
with self.dbc.lock:
self.dbc.cursor.execute("INSERT INTO guilds(ts_group, guild_name) VALUES(?,?)", (groupname, name))
self.dbc.conn.commit()
#######################
# CREATE SERVER GROUP #
#######################
log.debug("Creating and configuring server group...")
            resp, ex = ts3conn.ts3exec(lambda tsc: tsc.query("servergroupadd", name = groupname).first(), signal_exception_handler)
            # check the error before touching resp, which may be None on failure
            if ex is not None and ex.resp.error["id"] == "1282":
                log.warning("Duplication error while trying to create the group '%s' for the guild %s [%s]." % (groupname, name, tag))
            guildgroupid = resp.get("sgid")
def servergroupaddperm(sgid, permsid, permvalue):
return ts3conn.ts3exec(lambda tsc: tsc.exec_("servergroupaddperm"
, sgid = sgid
, permsid = permsid
, permvalue = permvalue
, permnegated = 0
, permskip = 0)
, signal_exception_handler)
perms = [
("b_group_is_permanent", 1),
("i_group_show_name_in_tree", 1),
("i_group_needed_modify_power", 75),
("i_group_needed_member_add_power", 50),
("i_group_needed_member_remove_power", 50),
("i_group_sort_id", Config.guilds_sort_id),
]
if icon_id is not None:
perms.append(("i_icon_id", icon_id))
for p,v in perms:
x,ex = servergroupaddperm(guildgroupid, p, v)
groups.append({"sgid": resp.get("sgid"), "name": groupname}) # the newly created group has to be added to properly iterate over the guild groups
guildgroups = []
with self.dbc.lock:
guildgroups = [g[0] for g in self.dbc.cursor.execute("SELECT ts_group FROM guilds ORDER BY ts_group").fetchall()]
for i in range(len(guildgroups)):
g = next((g for g in groups if g.get("name") == guildgroups[i]), None)
if g is None:
# error! Group deleted from TS, but not from DB!
log.warning("Found guild '%s' in the database, but no coresponding server group! Skipping this entry, but it should be fixed!", guildgroups[i])
else:
tp = Config.guilds_maximum_talk_power - i
if tp < 0:
log.warning("Talk power for guild %s is below 0.", g.get("name"))
# sort guild groups to have users grouped by their guild tag alphabetically in channels
x,ex = servergroupaddperm(g.get("sgid"), "i_client_talk_power", tp)
################
# ADD CONTACTS #
################
log.debug("Adding contacts...")
cgroups, ex = ts3conn.ts3exec(lambda tsc: tsc.query("channelgrouplist").all(), default_exception_handler)
| |
DEBUG = False
UNIT_TEST_DEBUG = False
import re
import os
import string
from os import path
PACKAGE_NAME = "ClassesAndTests"
try:
import sublime
import sublime_plugin
except ImportError:
try:
from src.mocking.sublime import sublime
from src.mocking import sublime_plugin
except ImportError:
from .src.mocking.sublime import sublime
from .src.mocking import sublime_plugin
if UNIT_TEST_DEBUG:
DEBUG = True
print("CreateMissingFunctions: sublime and sublime_plugin not imported in " + __file__)
else:
DEBUG = False
try:
from src.CommandExecutionThread import CommandExecutionThread
from src.LiveUnitTesting import LiveUnitTesting
from src.UnitTestFunctions import UnitTestFunctions
from src.FileComponents import FileComponents
from src.Std import Std
from src.FileSystem import FileSystem
from src.MirroredDirectory import MirroredDirectory
except ImportError:
from .src.CommandExecutionThread import CommandExecutionThread
from .src.LiveUnitTesting import LiveUnitTesting
from .src.UnitTestFunctions import UnitTestFunctions
from .src.FileComponents import FileComponents
from .src.Std import Std
from .src.FileSystem import FileSystem
from .src.MirroredDirectory import MirroredDirectory
def plugin_loaded():
global settings
settings = sublime.load_settings(PACKAGE_NAME+ '.sublime-settings')
global PACKAGE_DIR
global TEMPLATES_DIR
PACKAGE_DIR = os.path.join(sublime.packages_path(), PACKAGE_NAME)
TEMPLATES_DIR = os.path.join(PACKAGE_DIR, "templates")
else:
plugin_loaded()
class CreateMissingFunctionsCommand(sublime_plugin.TextCommand):
def initializeDependencies(self):
if not hasattr(self, "fileSystem"):
self.fileSystem = FileSystem()
def run(self, edit):
self.initializeDependencies()
window = self.view.window()
if window is None:
return
self.classView = UnitTestFunctions.getClassView(window, self.view)
self.edit = edit
self._runUnitTest()
def _runUnitTest(self):
if DEBUG: print("Running tests to determine if all functions have been declared:")
classView = self.classView
liveUnitTest = LiveUnitTesting(UnitTestFunctions.getCommandFolders(settings))
liveUnitTest.updateTempFiles(classView)
command = liveUnitTest.getCommand()
argument = liveUnitTest.getArgument()
thread = CommandExecutionThread(command, argument)
thread.start()
self._handleCommandThread(thread)
def _handleCommandThread(self, thread):
if thread.is_alive():
sublime.set_timeout(lambda: self._handleCommandThread(thread), 100)
else:
functionName, functionType = self._getFunctionName(thread.result)
if functionName is not None:
self._insertFunction(functionName, functionType)
else:
if DEBUG: print("No functions have to be declared.")
def _getFunctionName(self, testResult):
result = None
phpMatches = re.findall("(?<=Fatal\\serror:)(?:[\\s\\w\\\\]+undefined\\smethod)(?:[\\s\\w\\\\]+::)([\\w]+)(?=\\(\\))", testResult)
if len(phpMatches) > 0:
return phpMatches[0], "php"
pyMatches = re.findall("(?<=AttributeError:)(?:[\\s\\w]+')(\\w+)(?=')", testResult)
if len(pyMatches) > 0:
return pyMatches[0], "py"
sqlMatches = re.findall("(?<=StorageAPIException)(?:[:\s\w\[\]\d]+\.)([\w_\d]+)", testResult)
if len(sqlMatches) > 0:
sqlType = re.findall("(?<=StorageAPIException)(?:[:\s\w\[\]\d]+)(FUNCTION)", testResult)
if len(sqlType) > 0:
if sqlType[0] == 'FUNCTION':
sqlType = "sqlFunc"
else: sqlType = "sqlPro"
return sqlMatches[0], sqlType
return None, None # always return a 2-tuple so callers can unpack (functionName, functionType)
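# Sketch of the interpreter messages the three regexes above are meant to
# match (sample strings for illustration, not captured from a real run):
# PHP: "Fatal error: Call to undefined method Foo::bar()" -> ("bar", "php")
# Python: "AttributeError: 'Foo' object has no attribute 'bar'" -> ("bar", "py")
# SQL: a StorageAPIException shaped like "StorageAPIException: [1305] FUNCTION db.my_func does not exist" -> ("my_func", "sqlFunc")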
# TODO: possible duplicate... merge with _getParameterNames
def _getParameterNamesFromString(self, string, functionName):
result = None
parameters = re.findall("(?<=->"+functionName+")(?:[\s]*\()([\w\d_,\s\$]+)(?=\))", string)
if len(parameters) > 0:
paramList = [x.strip()[1:] for x in parameters[0].split(",")]
return paramList
def _createFunctionBody(self, functionName, parametList):
parametString = "("+" INT UNSIGNED, ".join(parametList) + " INT UNSIGNED)"
return """
CREATE FUNCTION """ + functionName + " " + parametString + """
RETURNS INT UNSIGNED
BEGIN
RETURN """ + parametList[0] + """;
END //
"""
def _createProcedureBody(self, functionName, parametList):
parametString = "(in "+" INT UNSIGNED, in ".join(parametList) + " INT UNSIGNED)"
return """
CREATE PROCEDURE """ + functionName + " " + parametString + """
BEGIN
SELECT COUNT(*) FROM users;
END //
"""
def _insertSqlFunction(self, functionName, paramList, testFileName):
self._setDbFiles(testFileName)
content = self.fileSystem.getFileContent(self.classFunctionsDir)
functionBody = self._createFunctionBody(functionName, paramList)
content = self._replaceDelimiter(content, functionBody)
self.fileSystem.replaceFile(self.classFunctionsDir, content)
return
def _insertSqlProcedure(self, functionName, paramList, testFileName):
self._setDbFiles(testFileName)
content = self.fileSystem.getFileContent(self.classFunctionsDir)
functionBody = self._createProcedureBody(functionName, paramList)
content = self._replaceDelimiter(content, functionBody)
self.fileSystem.replaceFile(self.classFunctionsDir, content)
return
def _replaceDelimiter(self, fileContent, newContent):
fileContent = re.sub(r'DELIMITER ;', "\n" + newContent + "\n\nDELIMITER ;", fileContent)
return fileContent
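# For example (hypothetical content): if fileContent ends with "DELIMITER ;"
# and newContent is "CREATE FUNCTION f ... END //", the substitution splices
# the new body in just before that closing marker, yielding
# "...\nCREATE FUNCTION f ... END //\n\nDELIMITER ;".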
def _getParameterNamesFromView(self, view, functionName):
content = view.substr(sublime.Region(0, view.size()))
return self._getParameterNamesFromString(content, functionName)
def _insertFunction(self, functionName, functionType):
classView = self.classView
classFileName = self.classView.file_name()
md = MirroredDirectory()
md.fileSystem = self.fileSystem
md.set(classFileName)
testFileName = md.getTestFileName()
if functionType == "sqlFunc":
print("creating sql function " + functionName)
paramList = self._getParameterNamesFromView(classView, functionName)
self._insertSqlFunction(functionName, paramList, testFileName)
return
elif functionType == "sqlPro":
print("creating sql function " + functionName)
paramList = self._getParameterNamesFromView(classView, functionName)
self._insertSqlProcedure(functionName, paramList, testFileName)
return
insertionPoint = self._getInsertPoint(classView)
if insertionPoint is not None:
indentation = Std.getLineIndentAsWhitespace(classView.substr(classView.line(insertionPoint)))
parameters = self._getParameterNames(functionName, testFileName)
insertionString = self._getFunctionBody(classFileName, functionName, indentation, parameters)
self.classView.sel().clear()
self.classView.sel().add(sublime.Region(insertionPoint))
self.classView.run_command("insert_snippet", {"contents": insertionString })
if self.view != self.classView:
sublime.active_window().run_command("toggle_sources_tests")
extension = FileComponents(classView.file_name()).getExtension()
if extension != "py":
# for some odd reason in .py scripts this would create multiple functions with
# the same name.... I might have to hook into the on_change event
sublime.set_timeout(lambda: self._runUnitTest(), 200)
# if db test case in php, create templates for db setup
if self._isPhpDbTestCase(testFileName):
self._createDbTestCaseFilesIfNotExist(testFileName)
else:
print("File is not formatted correctly. A class{} needs to be inside a namespace{}")
def _isPhpDbTestCase(self, testFileDir):
result = None
fileContent = self.fileSystem.getFileContent(testFileDir)
pyMatches = re.findall("DbTestCase?\s{", fileContent)
if len(pyMatches) > 0: return True
return False
def _setDbFiles(self, testFile):
md = MirroredDirectory()
md.fileSystem = self.fileSystem
md.set(testFile)
classFile = md.getFileName()
self.className = os.path.basename(classFile).split(".")[0]
self.testDataDir = path.join(FileComponents(testFile).getDir(), FileComponents(testFile).getFile() + "Data")
self.classDataDir = path.join(FileComponents(classFile).getDir(), FileComponents(classFile).getFile() + "Data")
self.testSetupDir = path.join(self.testDataDir, "setup.sql")
self.classSetupDir = path.join(self.classDataDir,"setup.json")
self.classFunctionsDir = path.join(self.classDataDir,"functions.sql")
self.classTablesDir = path.join(self.classDataDir,"tables.sql")
# print(self.testDataDir)
# print(self.classDataDir)
def _replaceDbFileVariables(self, className, string):
replacement = "blabl"
searchString = "CAMEL_CASE"
replacement = className
string = re.sub('\{\$'+searchString+'\}', replacement, string)
searchString = "CAMEL_CASE_FIRST_LOWER"
replacement = className[:1].lower() + className[1:]
string = re.sub('\{\$'+searchString+'\}', replacement, string)
searchString = "SNAKE_CASE"
replacement = self.camelToSnake(className)
string = re.sub('\{\$'+searchString+'\}', replacement, string)
return string
def camelToSnake(self, name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
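# Worked examples for the two substitutions above (traced by hand):
# camelToSnake("CamelCase")       -> "camel_case"
# camelToSnake("getHTTPResponse") -> "get_http_response"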
def _createDbTestCaseFilesIfNotExist(self, testFile):
self._setDbFiles(testFile)
if not self.fileSystem.isdir(self.classDataDir):
self.fileSystem.createFolder(self.testDataDir)
self.fileSystem.createFolder(self.classDataDir)
testSetupContent = self._templateContentGetter("setup.sql")
classSetupContent = self._templateContentGetter("setup.json")
classFunctionsContent = self._templateContentGetter("functions.sql")
classFunctionsContent = self._replaceDbFileVariables(self.className, classFunctionsContent)
classTablesContent = self._templateContentGetter("tables.sql")
classTablesContent = self._replaceDbFileVariables(self.className, classTablesContent)
self.fileSystem.createFile(self.testSetupDir, testSetupContent)
self.fileSystem.createFile(self.classSetupDir, classSetupContent)
self.fileSystem.createFile(self.classFunctionsDir, classFunctionsContent)
self.fileSystem.createFile(self.classTablesDir, classTablesContent)
return True
return False
def _templateContentGetter(self, name):
templatePath = os.path.join(TEMPLATES_DIR, "php", "dbTestCase", name)
templateContent = self.fileSystem.getFileContent(templatePath)
return templateContent
def _getInsertPoint(self, view):
extension = FileComponents(view.file_name()).getExtension()
insertionPoint = None
if extension == "php":
region = view.find("\\}[^\\}]*\\}[^\\}]*\\z", 0)
if region is not None:
insertionPoint = region.begin()
elif extension == "py":
region = view.line(view.size())
if region is not None:
insertionPoint = region.end()
return insertionPoint
def _getFunctionBody(self, fileName, functionName, indent, parameters=[]):
md = MirroredDirectory()
md.fileSystem = self.fileSystem
md.set(fileName)
testFileName = md.getTestFileName()
extension = FileComponents(fileName).getExtension()
if len(parameters) > 0:
parameterString = ', '.join(parameters)
else:
parameterString = ""
out = ""
tabCounter = 2
if extension == "php":
parameterString = parameterString.replace("$", "\$")
if DEBUG: print("Creating php function \"" + functionName + "()\"")
indent2 = indent + indent
if parameterString != "":
parameterDescriptionString = ""
for parameter in parameters:
# TODO: parameter type detection for php types
#print("parameter: " + parameter)
parameterType = self._getParameterType(testFileName, functionName, parameter)
parameterDescriptionString += indent + " * @param " + parameterType + " " + parameter + "${" + str(tabCounter) + ":__parameterDescription__}\n"
tabCounter += 1
else:
parameterDescriptionString = ""
out += "\n"
out += indent + "/**\n"
out += indent + " * ${1:__functionDescription__}\n"
out += parameterDescriptionString
out += indent + " */\n"
out += indent + "public function " + functionName + "(" + parameterString + ") \{\n"
out += indent2+ "${" + str(tabCounter) + "://FunctionBody};\n"
tabCounter += 1
out += indent2+ "return${" + str(tabCounter) + ":};\n"
tabCounter += 1
out += indent + "\}${" + str(tabCounter) + ":}\n"
elif extension == "py":
if DEBUG: print("Creating py function \"" + functionName + "()\"")
indent = " " # ignoring the indentation passed with indent
indent2 = indent + indent
if parameterString != "":
parameterString = ', ' + parameterString
parameterDescriptionString = "\n"
for parameter in parameters:
parameterType = self._getParameterType(testFileName, functionName, parameter)
parameterDescriptionString += indent + "@param " + parameterType + " " + parameter + " ${" + str(tabCounter) + ":__parameterDescription__}\n"
tabCounter += 1
else:
parameterDescriptionString = ""
out += "\n"
out += "def " + functionName + "(self" + parameterString + "):\n"
out += indent + "\"\"\" ${1:__functionDescription__}\n"
out += parameterDescriptionString
out += "\n"
out += indent + "returns: ${" + str(tabCounter) + ":__returnTypeDescription__}\n"
tabCounter += 1
out += indent + "\"\"\"\n"
out += indent + "${" + str(tabCounter) + ":# FunctionBody}\n"
tabCounter += 1
out += indent + "return${" + str(tabCounter) + ":}\n"
else:
out = None
return out
def _getParameterNames(self, functionName, fileDir):
fileName, extension = os.path.splitext(fileDir)
if extension == ".py":
testFileContent = self.fileSystem.getFileContent(fileDir)
regexString = "(?<=\\." + functionName + "\\()[^\\)]+"
match = re.search(regexString, testFileContent)
if match:
rawParameterString = match.group()
parameterString = re.sub('\\s', '', rawParameterString)
parameters = str.split(parameterString, ',')
return parameters
else:
return []
elif extension == ".php":
testFileContent = self.fileSystem.getFileContent(fileDir)
regexString = "(?<=->" + functionName + "\\()[^\\)]+"
match = re.search(regexString, testFileContent)
if match:
rawParameterString = match.group()
parameterString = re.sub('\\s', '', rawParameterString)
parameters = str.split(parameterString, ',')
return parameters
else:
return []
return [] # unknown extension: assume no parameters so callers can safely call len()
def _getParameterType(self, fileDir, functionName, parameterName):
| |
for _ in range(34)]
# change first one to total_loss
all_losses[0] = total_loss
# all_loss_weights - 34 entries, one per model output (1 capsnet output + 33 attribute outputs)
# Each attribute output contributes with weight 0.5; the main capsnet output keeps weight 1.
all_loss_weights = [0.5 for _ in range(34)]
all_loss_weights[0] = 1.
model.compile(optimizer=optimizers.Adam(lr=args.lr),
loss=all_losses,
loss_weights=all_loss_weights,
metrics={'capsnet': 'accuracy'})
# Training without data augmentation (preferred):
# Combine y_train and y_train_output to get a uniform vector
y_train_list=[0] # Dummy value to avoid index out of bounds in next line
y_train_list[0] = y_train
# To reshape y_train_output from [None,33] to [33,None]
y_train_output = np.array([np.array(_) for _ in zip(*y_train_output)])
for output in y_train_output:
y_train_list.append(output)
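# Tiny made-up example of the zip(*...) transpose used above (2 features
# instead of 33, for brevity):
# [[a1, b1], [a2, b2], [a3, b3]] -> [[a1, a2, a3], [b1, b2, b3]]
# i.e. one array per output head rather than one row per sample.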
# Combine y_test and y_test_output to get a uniform vector
y_test_list=[0] # Dummy value to avoid index out of bounds in next line
y_test_list[0] = y_test
# To reshape y_test_output from [None,33] to [33,None]
y_test_output = np.array([np.array(_) for _ in zip(*y_test_output)])
for output in y_test_output:
y_test_list.append(output)
model.fit([x_train, y_train], y_train_list,
batch_size=args.batch_size,
epochs=args.epochs,
validation_data=[[x_test, y_test], y_test_list],
callbacks=[log, tb, checkpoint, lr_decay])
'''
# Begin: Training with data augmentation ---------------------------------------------------------------------#
def train_generator(x, y, batch_size, shift_fraction=0.):
train_datagen = ImageDataGenerator(width_shift_range=shift_fraction,
height_shift_range=shift_fraction)  # shift up to 2 pixels for MNIST
generator = train_datagen.flow(x, y, batch_size=batch_size)
while 1:
x_batch, y_batch = generator.next()
yield ([x_batch, y_batch], [y_batch, x_batch])
# Training with data augmentation. If shift_fraction=0., also no augmentation.
model.fit_generator(generator=train_generator(x_train, y_train, args.batch_size, args.shift_fraction),
steps_per_epoch=int(y_train.shape[0] / args.batch_size),
epochs=args.epochs,
validation_data=[[x_test, y_test], [y_test, y_test_output]],
callbacks=[log, tb, checkpoint, lr_decay])
# End: Training with data augmentation -----------------------------------------------------------------------#
'''
model.save_weights(args.save_dir + '/trained_model.h5')
print('Trained model saved to \'%s/trained_model.h5\'' % args.save_dir)
from utils import plot_log
plot_log(args.save_dir + '/log.csv', show=True)
return model
def test(model, data, args):
x_test, y_test, y_test_output = data
y_pred, y_test_output_pred = model.predict(x_test, batch_size=100)
print('-'*30 + 'Begin: test' + '-'*30)
print('Test acc:', np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1))/y_test.shape[0])
# Test accuracy for the predicted output words
# To be done
def manipulate_latent(model, data, args):
print('-'*30 + 'Begin: manipulate' + '-'*30)
x_test, y_test = data
index = np.argmax(y_test, 1) == args.digit
number = np.random.randint(low=0, high=sum(index) - 1)
x, y = x_test[index][number], y_test[index][number]
x, y = np.expand_dims(x, 0), np.expand_dims(y, 0)
noise = np.zeros([1, 10, 16])
x_recons = []
for dim in range(16):
for r in [-0.25, -0.2, -0.15, -0.1, -0.05, 0, 0.05, 0.1, 0.15, 0.2, 0.25]:
tmp = np.copy(noise)
tmp[:,:,dim] = r
x_recon = model.predict([x, y, tmp])
x_recons.append(x_recon)
x_recons = np.concatenate(x_recons)
img = combine_images(x_recons, height=16)
image = img*255
Image.fromarray(image.astype(np.uint8)).save(args.save_dir + '/manipulate-%d.png' % args.digit)
print('manipulated result saved to %s/manipulate-%d.png' % (args.save_dir, args.digit))
print('-' * 30 + 'End: manipulate' + '-' * 30)
def load_mnist():
# the data, shuffled and split between train and test sets
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print("X Train:", x_train)
x_train = x_train.reshape(-1, 28, 28, 1).astype('float32') / 255.
x_test = x_test.reshape(-1, 28, 28, 1).astype('float32') / 255.
y_train = to_categorical(y_train.astype('float32'))
y_test = to_categorical(y_test.astype('float32'))
return (x_train, y_train), (x_test, y_test)
def get_file_name(path):
head, tail = ntpath.split(path)
return str(tail) or str(ntpath.basename(head))
def build_output(features):
# Builds the output according to the output format in the hierarchy_( train | eval )_model
# Length of output vector
output = [0 for _ in range(33)]
# -------- Volatile (under processing) ------------------------- <BEGIN>
nothing_present_flag = True
# Order - face,eyes,mouth,snout,ears,whiskers,nose,teeth,beak,tongue,body,wings,paws,tail,legs,surface,arm_rest,base,pillows,cushions,drawer,knob,mattress,colour,brown,black,grey,white,purple,pink,yellow,turqoise,unknown
if 'face' in features:
output[0]=1
nothing_present_flag = False
if 'eyes' in features:
output[1]=1
output[0]=1
nothing_present_flag = False
if 'mouth' in features:
output[2]=1
output[0]=1
nothing_present_flag = False
if 'snout' in features:
output[3]=1
output[0]=1
nothing_present_flag = False
if 'ears' in features:
output[4]=1
output[0]=1
nothing_present_flag = False
if 'whiskers' in features:
output[5]=1
output[0]=1
nothing_present_flag = False
if 'nose' in features:
output[6]=1
output[0]=1
nothing_present_flag = False
if 'teeth' in features:
output[7]=1
output[0]=1
nothing_present_flag = False
if 'beak' in features:
output[8]=1
output[0]=1
nothing_present_flag = False
if 'tongue' in features:
output[9]=1
output[0]=1
nothing_present_flag = False
if 'body' in features:
output[10]=1
nothing_present_flag = False
if 'wings' in features:
output[11]=1
output[10]=1
nothing_present_flag = False
if 'paws' in features:
output[12]=1
output[10]=1
nothing_present_flag = False
if 'tail' in features:
output[13]=1
output[10]=1
nothing_present_flag = False
if 'legs' in features:
output[14]=1
output[10]=1
nothing_present_flag = False
if 'surface' in features:
output[15]=1
output[10]=1
nothing_present_flag = False
if 'arm rests' in features:
output[16]=1
output[10]=1
nothing_present_flag = False
if 'base' in features:
output[17]=1
output[10]=1
nothing_present_flag = False
if 'pillows' in features:
output[18]=1
output[10]=1
nothing_present_flag = False
if 'cushions' in features:
output[19]=1
output[10]=1
nothing_present_flag = False
if 'drawers' in features:
output[20]=1
output[10]=1
nothing_present_flag = False
if 'knobs' in features:
output[21]=1
output[10]=1
nothing_present_flag = False
if 'mattress' in features:
output[22]=1
output[10]=1
nothing_present_flag = False
if 'colour' in features:
output[23]=1
nothing_present_flag = False
if 'brown' in features:
output[24]=1
output[23]=1
nothing_present_flag = False
if 'black' in features:
output[25]=1
output[23]=1
nothing_present_flag = False
if 'grey' in features:
output[26]=1
output[23]=1
nothing_present_flag = False
if 'white' in features:
output[27]=1
output[23]=1
nothing_present_flag = False
if 'purple' in features:
output[28]=1
output[23]=1
nothing_present_flag = False
if 'pink' in features:
output[29]=1
output[23]=1
nothing_present_flag = False
if 'yellow' in features:
output[30]=1
output[23]=1
nothing_present_flag = False
if 'turqoise' in features:
output[31]=1
output[23]=1
nothing_present_flag = False
# Other "similar" cases
if 'eye' in features:
output[1]=0.5
output[0]=1
nothing_present_flag = False
if 'ear' in features:
output[4]=0.5
output[0]=1
nothing_present_flag = False
if 'wing' in features:
output[11]=0.5
output[10]=1
nothing_present_flag = False
if 'paw' in features:
output[12]=0.5
output[10]=1
nothing_present_flag = False
if 'leg' in features:
output[14]=0.5
output[10]=1
nothing_present_flag = False
if 'rectangular surface' in features:
output[15]=1
output[10]=1
nothing_present_flag = False
if 'circular surface' in features:
output[15]=2
output[10]=1
nothing_present_flag = False
if 'arm rest' in features:
output[16]=0.5
output[10]=1
nothing_present_flag = False
if 'pillow' in features:
output[18]=0.5
output[10]=1
nothing_present_flag = False
if 'cushion' in features:
output[19]=0.5
output[10]=1
nothing_present_flag = False
if 'drawer' in features:
output[20]=0.5
output[10]=1
nothing_present_flag = False
if 'knob' in features:
output[21]=0.5
output[10]=1
nothing_present_flag = False
if 'silver' in features:
output[27]=0.5
output[23]=1
nothing_present_flag = False
if 'transparent' in features:
output[27]=0
output[23]=1
nothing_present_flag = False
if 'golden' in features:
output[30]=0.5
output[23]=1
nothing_present_flag = False
# For 'unknown' case
if nothing_present_flag:
output[:-1] = [0]*(len(output)-1)
output[-1]=1
output = np.array(output)
# -------- Volatile (under processing) ------------------------- <END>
return output
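# Worked example (hypothetical feature string): build_output("face, eyes, brown")
# sets output[0] (face), output[1] (eyes), output[24] (brown) and output[23]
# (colour, implied by brown). Caveat: the "similar" checks use substring
# matching, so 'eye' is also found inside "eyes" and overwrites output[1]
# with 0.5 after the exact match; the same holds for 'ear'/'ears',
# 'wing'/'wings', 'paw'/'paws' and 'leg'/'legs'.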
def load_custom_dataset(dataset_path):
# Function to use custom dataset
x_train = []
x_test = []
y_train = []
y_test = []
y_train_output = []
y_test_output = []
classes = {'animals':['cats', 'dogs', 'fox', 'hyenas', 'wolves'],'birds':['ducks','eagles','hawks','parrots','sparrows'],'furniture':['chair','table', 'sofa','nightstand', 'bed']}
class_encodings = {'cats':0, 'dogs':1, 'fox':2, 'hyenas':3, 'wolves':4, 'ducks':5, 'eagles':6, 'hawks':7, 'parrots':8, 'sparrows':9, 'chair':10, 'table':11, 'sofa':12, 'nightstand':13, 'bed':14}
# classes = {'animals':['cats', 'dogs', 'foxes', 'hyenas', 'wolves'],'birds':['ducks','eagles','parrots','sparrows'],'furniture':['chair','sofa','table']}
# class_encodings = {'cats':0, 'dogs':1, 'foxes':2, 'hyenas':3, 'wolves':4, 'ducks':5, 'eagles':6, 'parrots':8, 'sparrows':9, 'chair':10, 'sofa':11, 'table':12}
for class_ in classes:
dataset_path = "../Dataset/"+class_[0].upper()+class_[1:]+'/'
y_train_dataframe = pd.read_csv("./csv_folder/"+class_+'.csv', encoding = "ISO-8859-1")
for sub_class in classes[class_]:
print("Processing class", sub_class+"..")
img_dir = dataset_path+str(sub_class)+'/'
data_path = os.path.join(img_dir,'*g')
files = glob.glob(data_path)
for current_file in files:
random_number = random.randint(1,10)
if random_number in (3, 7): # ~20% of images go to the test split
img = cv2.imread(current_file)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.resize(img, (input_image_height, input_image_width))
# y_test_output logic with x_test and y_test append
for index, row in y_train_dataframe.iterrows():
if get_file_name(current_file) == row['File Name']:
x_test.append(img)
# if(sub_class == 'table'):
y_test.append(class_encodings[sub_class])
y_test_features = row['Features']
y_test_output.append(build_output(y_test_features))
break
else:
img = cv2.imread(current_file)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.resize(img, (input_image_height, input_image_width))
# y_train_output logic with x_train and y_train append
for index, row in y_train_dataframe.iterrows():
if get_file_name(current_file) == row['File Name']:
x_train.append(img)
y_train.append(class_encodings[sub_class])
y_train_features = row['Features']
y_train_output.append(build_output(y_train_features))
break
x_train = np.array(x_train)
y_train = np.array(y_train)
y_train_output = np.array(y_train_output)
x_train = x_train.reshape(-1, 28, 28, 1).astype('float32') / 255.
y_train = to_categorical(y_train.astype('float32'))
x_test = np.array(x_test)
y_test = np.array(y_test)
y_test_output = np.array(y_test_output)
x_test = x_test.reshape(-1, 28, 28, 1).astype('float32') / 255.
y_test = to_categorical(y_test.astype('float32'))
print("Y Set:", len(np.unique(np.argmax(y_train, 1))), "=", np.unique(np.argmax(y_train, 1)))
return (x_train, y_train, y_train_output), (x_test, y_test, y_test_output)
if __name__ == "__main__":
import os
import argparse
from keras.preprocessing.image import ImageDataGenerator
from keras import callbacks
# setting the hyper parameters
parser = argparse.ArgumentParser(description="Capsule Network on MNIST.")
parser.add_argument('--epochs', default=1, type=int)
parser.add_argument('--batch_size', default=128, type=int)
parser.add_argument('--dataset', default=dataset_path, type=str, help="Relative path to the custom dataset to use")
parser.add_argument('--lr', default=0.001, type=float,
help="Initial learning rate")
parser.add_argument('--lr_decay', default=0.9, type=float,
help="The value multiplied by lr at each epoch. Set a larger | |
eq:
counter_phosphorylated_form += 1
self.differential_equations[i] = eq + f" + v[{line_num:d}]"
if counter_unphosphorylated_form == 0:
self.differential_equations.append(
f"dydt[V.{unphosphorylated_form}] = - v[{line_num:d}]"
)
if counter_phosphorylated_form == 0:
self.differential_equations.append(
f"dydt[V.{phosphorylated_form}] = + v[{line_num:d}]"
)
def dephosphorylate(self, line_num: int, line: str) -> None:
"""
Examples
--------
>>> 'B dephosphorylates pA --> uA'
Notes
-----
* Parameters
.. math:: V, K
* Rate equation
.. math:: v = V * [B] * [pA] / (K + [pA])
* Differential equation
.. math::
d[uA]/dt = + v
d[pA]/dt = - v
"""
description = self._preprocessing(sys._getframe().f_code.co_name, line_num, line, "V", "K")
phosphatase = description[0].strip(" ")
if " --> " in description[1]:
phosphorylated_form = description[1].split(" --> ")[0].strip(" ")
unphosphorylated_form = description[1].split(" --> ")[1].strip(" ")
else:
raise ValueError(
f"line{line_num:d}: "
"Use '-->' to specify the name of the dephosphorylated "
"(or deactivated) protein."
)
if phosphorylated_form == unphosphorylated_form:
raise ValueError(f"line{line_num:d}: {unphosphorylated_form} <- Use a different name.")
self._set_species(phosphatase, phosphorylated_form, unphosphorylated_form)
self.reactions.append(
f"v[{line_num:d}] = "
f"x[C.V{line_num:d}] * y[V.{phosphatase}] * y[V.{phosphorylated_form}] / "
f"(x[C.K{line_num:d}] + y[V.{phosphorylated_form}])"
)
counter_phosphorylated_form, counter_unphosphorylated_form = (0, 0)
for i, eq in enumerate(self.differential_equations):
if f"dydt[V.{phosphorylated_form}]" in eq:
counter_phosphorylated_form += 1
self.differential_equations[i] = eq + f" - v[{line_num:d}]"
elif f"dydt[V.{unphosphorylated_form}]" in eq:
counter_unphosphorylated_form += 1
self.differential_equations[i] = eq + f" + v[{line_num:d}]"
if counter_phosphorylated_form == 0:
self.differential_equations.append(
f"dydt[V.{phosphorylated_form}] = - v[{line_num:d}]"
)
if counter_unphosphorylated_form == 0:
self.differential_equations.append(
f"dydt[V.{unphosphorylated_form}] = + v[{line_num:d}]"
)
def transcribe(self, line_num: int, line: str) -> None:
"""
Examples
--------
>>> 'B transcribes a'
>>> 'B1 & B2 transcribe a' # (AND-gate)
>>> 'B transcribes a, repressed by C' # (Negative regulation)
Notes
-----
* Parameters
.. math:: V, K, n, (KF, nF)
* Rate equation
.. math::
v = V * [B] ^ {n} / (K ^ {n} + [B] ^ {n})
v = V * ([B1] * [B2]) ^ {n} / (K ^ {n} + ([B1] * [B2]) ^ {n})
v = V * [B] ^ {n} / (K ^ {n} + [B] ^ {n} + ([C] / KF) ^ {nF})
* Differential equation
.. math:: d[a]/dt = + v
"""
description = self._preprocessing(
sys._getframe().f_code.co_name, line_num, line, "V", "K", "n", "KF", "nF"
)
repressor: Optional[str] = None
ratio = self._word2scores(", repressed by", description[1])
if not ratio or max(ratio) < 1.0:
self.parameters.remove(f"KF{line_num:d}")
self.parameters.remove(f"nF{line_num:d}")
mRNA = description[1].strip()
if " " in mRNA:
# A space in the mRNA name indicates a typo or a malformed ', repressed by' clause
raise ValueError(
f"line{line_num:d}: "
"Add ', repressed by XXX' to describe negative regulation from XXX."
)
else:
# Add negative regulation from repressor
mRNA = description[1].split(", repressed by")[0].strip()
repressor = description[1].split(", repressed by")[1].strip()
if " & " not in description[0]:
TF = description[0].strip(" ")
self._set_species(mRNA, TF)
if repressor is not None:
self._set_species(repressor)
self.reactions.append(
f"v[{line_num:d}] = "
f"x[C.V{line_num:d}] * y[V.{TF}] ** x[C.n{line_num:d}] / "
f"(x[C.K{line_num:d}] ** x[C.n{line_num:d}] + "
f"y[V.{TF}] ** x[C.n{line_num:d}]"
+ (
")"
if repressor is None
else f" + (y[V.{repressor}] / x[C.KF{line_num:d}]) ** x[C.nF{line_num:d}])"
)
)
else:
# AND-gate
TFs = [TF.strip(" ") for TF in description[0].split(" & ")]
self._set_species(mRNA, *TFs)
if repressor is not None:
self._set_species(repressor)
self.reactions.append(
f"v[{line_num:d}] = "
f"x[C.V{line_num:d}] * ({'y[V.' + '] * y[V.'.join(TFs) + ']'}) ** x[C.n{line_num:d}] / "
f"(x[C.K{line_num:d}] ** x[C.n{line_num:d}] + "
f"({'y[V.' + '] * y[V.'.join(TFs) + ']'}) ** x[C.n{line_num:d}]"
+ (
")"
if repressor is None
else f" + (y[V.{repressor}] / x[C.KF{line_num:d}]) ** x[C.nF{line_num:d}])"
)
)
counter_mRNA = 0
for i, eq in enumerate(self.differential_equations):
if f"dydt[V.{mRNA}]" in eq:
counter_mRNA += 1
self.differential_equations[i] = eq + f" + v[{line_num:d}]"
if counter_mRNA == 0:
self.differential_equations.append(f"dydt[V.{mRNA}] = + v[{line_num:d}]")
def is_translated(self, line_num: int, line: str) -> None:
"""
Examples
--------
>>> 'a is translated into A'
Notes
-----
* Parameters
.. math:: kf
* Rate equation
.. math:: v = kf * [a]
* Differential equation
.. math:: d[A]/dt = + v
"""
description = self._preprocessing(sys._getframe().f_code.co_name, line_num, line, "kf")
mRNA = description[0].strip(" ")
protein = description[1].strip(" ")
self._set_species(mRNA, protein)
self.reactions.append(f"v[{line_num:d}] = x[C.kf{line_num:d}] * y[V.{mRNA}]")
counter_protein = 0
for i, eq in enumerate(self.differential_equations):
if f"dydt[V.{protein}]" in eq:
counter_protein += 1
self.differential_equations[i] = eq + f" + v[{line_num:d}]"
if counter_protein == 0:
self.differential_equations.append(f"dydt[V.{protein}] = + v[{line_num:d}]")
def synthesize(self, line_num: int, line: str) -> None:
"""
Examples
--------
>>> 'B synthesizes A'
Notes
-----
* Parameters
.. math:: kf
* Rate equation
.. math:: v = kf * [B]
* Differential equation
.. math:: d[A]/dt = + v
"""
description = self._preprocessing(sys._getframe().f_code.co_name, line_num, line, "kf")
catalyst = description[0].strip(" ")
product = description[1].strip(" ")
self._set_species(catalyst, product)
self.reactions.append(f"v[{line_num:d}] = x[C.kf{line_num:d}] * y[V.{catalyst}]")
counter_product = 0
for i, eq in enumerate(self.differential_equations):
if f"dydt[V.{product}]" in eq:
counter_product += 1
self.differential_equations[i] = eq + f" + v[{line_num:d}]"
if counter_product == 0:
self.differential_equations.append(f"dydt[V.{product}] = + v[{line_num:d}]")
def is_synthesized(self, line_num: int, line: str) -> None:
"""
Examples
--------
>>> 'A is synthesized'
Notes
-----
* Parameters
.. math:: kf
* Rate equation
.. math:: v = kf
* Differential equation
.. math:: d[A]/dt = + v
"""
description = self._preprocessing(sys._getframe().f_code.co_name, line_num, line, "kf")
chemical_species = description[0].strip(" ")
self._set_species(chemical_species)
self.reactions.append(f"v[{line_num:d}] = x[C.kf{line_num:d}]")
counter_chemical_species = 0
for i, eq in enumerate(self.differential_equations):
if f"dydt[V.{chemical_species}]" in eq:
counter_chemical_species += 1
self.differential_equations[i] = eq + f" + v[{line_num:d}]"
if counter_chemical_species == 0:
self.differential_equations.append(f"dydt[V.{chemical_species}] = + v[{line_num:d}]")
def degrade(self, line_num: int, line: str) -> None:
"""
Examples
--------
>>> 'B degrades A'
Notes
-----
* Parameters
.. math:: kf
* Rate equation
.. math:: v = kf * [B]
* Differential equation
.. math:: d[A]/dt = - v
"""
description = self._preprocessing(sys._getframe().f_code.co_name, line_num, line, "kf")
protease = description[0].strip(" ")
protein = description[1].strip(" ")
self._set_species(protease, protein)
self.reactions.append(f"v[{line_num:d}] = x[C.kf{line_num:d}] * y[V.{protease}]")
counter_protein = 0
for i, eq in enumerate(self.differential_equations):
if f"dydt[V.{protein}]" in eq:
counter_protein += 1
self.differential_equations[i] = eq + f" - v[{line_num:d}]"
if counter_protein == 0:
self.differential_equations.append(f"dydt[V.{protein}] = - v[{line_num:d}]")
def is_degraded(self, line_num: int, line: str) -> None:
"""
Examples
--------
>>> 'A is degraded'
Notes
-----
* Parameters
.. math:: kf
* Rate equation
.. math:: v = kf * [A]
* Differential equation
.. math:: d[A]/dt = - v
"""
description = self._preprocessing(sys._getframe().f_code.co_name, line_num, line, "kf")
chemical_species = description[0].strip(" ")
self._set_species(chemical_species)
self.reactions.append(f"v[{line_num:d}] = x[C.kf{line_num:d}] * y[V.{chemical_species}]")
counter_chemical_species = 0
for i, eq in enumerate(self.differential_equations):
if f"dydt[V.{chemical_species}]" in eq:
counter_chemical_species += 1
self.differential_equations[i] = eq + f" - v[{line_num:d}]"
if counter_chemical_species == 0:
self.differential_equations.append(f"dydt[V.{chemical_species}] = - v[{line_num:d}]")
def translocate(self, line_num: int, line: str) -> None:
r"""
Examples
--------
>>> 'A_at_cyt translocates from cytoplasm to nucleus (V_cyt, V_nuc) <--> A_at_nuc'
>>> 'A_at_cyt is translocated from cytoplasm to nucleus (V_cyt, V_nuc) <--> A_at_nuc'
Notes
-----
* Parameters
.. math:: kf, kr, (V_{pre}, V_{post})
* Rate equation
.. math:: v = kf * [A\_at\_pre] - kr * (V_{post} / V_{pre}) * [A\_at\_post]
* Differential equation
.. math::
d[A\_at\_pre]/dt = - v
d[A\_at\_post]/dt = + v * (V_{pre} / V_{post})
"""
description = self._preprocessing(
sys._getframe().f_code.co_name, line_num, line, "kf", "kr"
)
pre_translocation = description[0].strip(" ")
if " <--> " in description[1]:
post_translocation = description[1].split(" <--> ")[1].strip(" ")
elif " --> " in description[1]:
warnings.warn(
f"line{line_num:d}: Use '<-->' instead of '-->' for reversible reaction rules.",
FutureWarning,
)
post_translocation = description[1].split(" --> ")[1].strip(" ")
else:
raise ValueError(
f"line{line_num:d}: "
"Use '<-->' to specify the name of the species after translocation."
)
if pre_translocation == post_translocation:
raise ValueError(f"line{line_num:d}: {post_translocation} <- Use a different name.")
# Information about compartment volumes
if "(" in description[1] and ")" in description[1]:
[pre_volume, post_volume] = description[1].split("(")[-1].split(")")[0].split(",")
if not self._isfloat(pre_volume.strip(" ")) or not self._isfloat(
post_volume.strip(" ")
):
raise ValueError("pre_volume and post_volume must be float or int.")
else:
[pre_volume, post_volume] = ["1", "1"]
self._set_species(pre_translocation, post_translocation)
self.reactions.append(
f"v[{line_num:d}] = x[C.kf{line_num:d}] * y[V.{pre_translocation}] - "
f"x[C.kr{line_num:d}] * y[V.{post_translocation}]"
)
if float(pre_volume.strip(" ")) != float(post_volume.strip(" ")):
self.reactions[-1] = (
f"v[{line_num:d}] = "
f"x[C.kf{line_num:d}] * y[V.{pre_translocation}] - "
f"x[C.kr{line_num:d}] * "
f"({post_volume.strip()} / {pre_volume.strip()}) * "
f"y[V.{post_translocation}]"
)
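# Example with unequal compartment volumes (line number 8 assumed):
# 'A_at_cyt translocates from cytoplasm to nucleus (1.0, 0.2) <--> A_at_nuc'
# produces
# v[8] = x[C.kf8] * y[V.A_at_cyt] - x[C.kr8] * (0.2 / 1.0) * y[V.A_at_nuc]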
counter_pre_translocation, counter_post_translocation = (0, 0)
for i, eq in enumerate(self.differential_equations):
if f"dydt[V.{pre_translocation}]" in eq:
counter_pre_translocation += 1
self.differential_equations[i] | |
<reponame>j-wilson/Ax
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from dataclasses import dataclass
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
import numpy as np
from ax.core.objective import ScalarizedObjective
from ax.core.observation import ObservationData, ObservationFeatures
from ax.core.optimization_config import (
MultiObjectiveOptimizationConfig,
OptimizationConfig,
)
from ax.core.outcome_constraint import (
ComparisonOp,
ObjectiveThreshold,
OutcomeConstraint,
ScalarizedOutcomeConstraint,
)
from ax.core.search_space import SearchSpace
from ax.core.types import TConfig
from ax.exceptions.core import UnsupportedError, UserInputError
from ax.modelbridge.transforms.base import Transform
from ax.modelbridge.transforms.utils import get_data
from ax.utils.common.logger import get_logger
from ax.utils.common.typeutils import checked_cast
if TYPE_CHECKING:
# import as module to make sphinx-autodoc-typehints happy
from ax import modelbridge as modelbridge_module # noqa F401 # pragma: no cover
logger = get_logger(__name__)
@dataclass
class WinsorizationConfig:
"""Dataclass for storing Winsorization configuration parameters
Attributes:
lower_quantile_margin: Winsorization will increase any metric value below this
quantile to this quantile's value.
upper_quantile_margin: Winsorization will decrease any metric value above this
quantile to this quantile's value. NOTE: this quantile will be inverted before
any operations, e.g., a value of 0.2 will decrease values above the 80th
percentile to the value of the 80th percentile.
lower_boundary: If this value is less than the metric value corresponding to
``lower_quantile_margin``, set metric values below ``lower_boundary`` to
``lower_boundary`` and leave larger values unaffected.
upper_boundary: If this value is greater than the metric value corresponding to
``upper_quantile_margin``, set metric values above ``upper_boundary`` to
``upper_boundary`` and leave smaller values unaffected.
"""
lower_quantile_margin: float = 0.0
upper_quantile_margin: float = 0.0
lower_boundary: Optional[float] = None
upper_boundary: Optional[float] = None
OLD_KEYS = ["winsorization_lower", "winsorization_upper", "percentile_bounds"]
AUTO_WINS_QUANTILE = -1 # This shouldn't be in the [0, 1] range
DEFAULT_CUTOFFS = (-float("inf"), float("inf"))
class Winsorize(Transform):
"""Clip the mean values for each metric to lay within the limits provided in
the config. The config can contain either or both of two keys:
- ``"winsorization_config"``, corresponding to either a single
``WinsorizationConfig``, which, if provided will be used for all metrics; or
a mapping ``Dict[str, WinsorizationConfig]`` between each metric name and its
``WinsorizationConfig``.
- ``"optimization_config"``, which can be used to determine default winsorization
settings if ``"winsorization_config"`` does not provide them for a given
metric.
For example,
``{"winsorization_config": WinsorizationConfig(lower_quantile_margin=0.3)}``
will specify the same 30% winsorization from below for all metrics, whereas
```
{
"winsorization_config":
{
"metric_1": WinsorizationConfig(lower_quantile_margin=0.2),
"metric_2": WinsorizationConfig(upper_quantile_margin=0.1),
}
}
```
will winsorize 20% from below for metric_1 and 10% from above for metric_2.
Additional metrics won't be winsorized.
You can also determine the winsorization cutoffs automatically without having an
``OptimizationConfig`` by passing in AUTO_WINS_QUANTILE for the quantile you want
to winsorize. For example, to automatically winsorize large values:
``"m1": WinsorizationConfig(upper_quantile_margin=AUTO_WINS_QUANTILE)``.
This may be useful when fitting models in a notebook where there is no corresponding
``OptimizationConfig``.
Additionally, you can pass in winsorization boundaries ``lower_boundary`` and
``upper_boundary`` that specify a maximum allowable amount of winsorization. This
is discouraged and will eventually be deprecated as we strongly encourage
that users allow ``Winsorize`` to automatically infer these boundaries from
the optimization config.
"""
def __init__(
self,
search_space: SearchSpace,
observation_features: List[ObservationFeatures],
observation_data: List[ObservationData],
modelbridge: Optional["modelbridge_module.base.ModelBridge"] = None,
config: Optional[TConfig] = None,
) -> None:
if len(observation_data) == 0:
raise ValueError("Winsorize transform requires non-empty observation data.")
if config is None:
raise ValueError(
"Transform config for `Winsorize` transform must be specified and "
"non-empty when using winsorization."
)
all_metric_values = get_data(observation_data=observation_data)
# Check for legacy config
use_legacy = False
old_present = set(OLD_KEYS).intersection(config.keys())
if old_present:
warnings.warn(
"Winsorization received an out-of-date `transform_config`, containing "
f"the following deprecated keys: {old_present}. Please update the "
"config according to the docs of "
"`ax.modelbridge.transforms.winsorize.Winsorize`.",
DeprecationWarning,
)
use_legacy = True
# Get winsorization and optimization configs
winsorization_config = config.get("winsorization_config", {})
opt_config = config.get("optimization_config", {})
if "optimization_config" in config:
if not isinstance(opt_config, OptimizationConfig):
raise UserInputError(
"Expected `optimization_config` of type `OptimizationConfig` but "
f"got type `{type(opt_config)}."
)
opt_config = checked_cast(OptimizationConfig, opt_config)
self.cutoffs = {}
for metric_name, metric_values in all_metric_values.items():
if use_legacy:
self.cutoffs[metric_name] = _get_cutoffs_from_legacy_transform_config(
metric_name=metric_name,
metric_values=metric_values,
transform_config=config,
)
else:
self.cutoffs[metric_name] = _get_cutoffs_from_transform_config(
metric_name=metric_name,
metric_values=metric_values,
winsorization_config=winsorization_config, # pyre-ignore[6]
optimization_config=opt_config, # pyre-ignore[6]
)
def transform_observation_data(
self,
observation_data: List[ObservationData],
observation_features: List[ObservationFeatures],
) -> List[ObservationData]:
"""Winsorize observation data in place."""
for obsd in observation_data:
for idx, metric_name in enumerate(obsd.metric_names):
if metric_name not in self.cutoffs: # pragma: no cover
raise ValueError(f"Cannot winsorize unknown metric {metric_name}")
# Clip on the winsorization bounds.
obsd.means[idx] = max(obsd.means[idx], self.cutoffs[metric_name][0])
obsd.means[idx] = min(obsd.means[idx], self.cutoffs[metric_name][1])
return observation_data
def _get_cutoffs_from_transform_config(
metric_name: str,
metric_values: List[float],
winsorization_config: Union[WinsorizationConfig, Dict[str, WinsorizationConfig]],
optimization_config: Optional[OptimizationConfig],
) -> Tuple[float, float]:
# (1) Use the same config for all metrics if one WinsorizationConfig was specified
if isinstance(winsorization_config, WinsorizationConfig):
return _quantiles_to_cutoffs(
metric_name=metric_name,
metric_values=metric_values,
metric_config=winsorization_config,
)
# (2) If `winsorization_config` is a dict, use it if `metric_name` is a key,
# and the corresponding value is a WinsorizationConfig.
if isinstance(winsorization_config, dict) and metric_name in winsorization_config:
metric_config = winsorization_config[metric_name]
if not isinstance(metric_config, WinsorizationConfig):
raise UserInputError(
"Expected winsorization config of type "
f"`WinsorizationConfig` but got {metric_config} of type "
f"{type(metric_config)} for metric {metric_name}."
)
return _quantiles_to_cutoffs(
metric_name=metric_name,
metric_values=metric_values,
metric_config=metric_config,
)
# (3) For constraints and objectives that don't have a pre-specified config we
# choose the cutoffs automatically using the optimization config (if supplied).
# We ignore ScalarizedOutcomeConstraint and ScalarizedObjective for now. An
# exception is raised if we encounter relative constraints.
if optimization_config:
if metric_name in optimization_config.objective.metric_names:
if isinstance(optimization_config.objective, ScalarizedObjective):
warnings.warn(
"Automatic winsorization isn't supported for ScalarizedObjective. "
"Specify the winsorization settings manually if you want to "
f"winsorize metric {metric_name}."
)
return DEFAULT_CUTOFFS # Don't winsorize a ScalarizedObjective
elif optimization_config.is_moo_problem:
# We deal with a multi-objective function the same way as we deal
# with an output constraint. It may be worth investigating setting
# the winsorization cutoffs based on the Pareto frontier in the future.
optimization_config = checked_cast(
MultiObjectiveOptimizationConfig, optimization_config
)
objective_threshold = _get_objective_threshold_from_moo_config(
optimization_config=optimization_config, metric_name=metric_name
)
if objective_threshold:
return _get_auto_winsorization_cutoffs_outcome_constraint(
metric_values=metric_values,
outcome_constraints=objective_threshold,
)
warnings.warn(
"Automatic winsorization isn't supported for an objective in "
"`MultiObjective` without objective thresholds. Specify the "
"winsorization settings manually if you want to winsorize "
f"metric {metric_name}."
)
return DEFAULT_CUTOFFS # Don't winsorize if there is no threshold
else: # Single objective
return _get_auto_winsorization_cutoffs_single_objective(
metric_values=metric_values,
minimize=optimization_config.objective.minimize,
)
# Get all outcome constraints for metric_name that aren't relative or scalarized
outcome_constraints = _get_outcome_constraints_from_config(
optimization_config=optimization_config, metric_name=metric_name
)
if outcome_constraints:
return _get_auto_winsorization_cutoffs_outcome_constraint(
metric_values=metric_values,
outcome_constraints=outcome_constraints,
)
# If none of the above, we don't winsorize.
return DEFAULT_CUTOFFS
def _get_outcome_constraints_from_config(
optimization_config: OptimizationConfig, metric_name: str
) -> List[OutcomeConstraint]:
"""Get all outcome constraints (non-scalarized) for a given metric."""
# Check for scalarized outcome constraints for the given metric
if any(
isinstance(oc, ScalarizedOutcomeConstraint)
and metric_name in [metric.name for metric in oc.metrics]
for oc in optimization_config.outcome_constraints
):
warnings.warn(
"Automatic winsorization isn't supported for a "
"`ScalarizedOutcomeConstraint`. Specify the winsorization settings "
f"manually if you want to winsorize metric {metric_name}."
)
# Filter scalarized outcome constraints
outcome_constraints = [
oc
for oc in optimization_config.outcome_constraints
if not isinstance(oc, ScalarizedOutcomeConstraint)
and oc.metric.name == metric_name
]
# Raise an error if there are relative constraints
if any(oc.relative for oc in outcome_constraints):
raise UnsupportedError(
"Automatic winsorization doesn't support relative outcome constraints. "
"Make sure a `Derelativize` transform is applied first."
)
return outcome_constraints
def _get_objective_threshold_from_moo_config(
optimization_config: MultiObjectiveOptimizationConfig, metric_name: str
) -> List[ObjectiveThreshold]:
"""Get the non-relative objective threshold for a given metric."""
objective_thresholds = [
ot
for ot in optimization_config.objective_thresholds
if ot.metric.name == metric_name
]
if any(oc.relative for oc in objective_thresholds):
raise UnsupportedError(
"Automatic winsorization doesn't support relative objective thresholds. "
"Make sure a `Derelevatize` transform is applied first."
)
return objective_thresholds
def _get_tukey_cutoffs(Y: np.ndarray, lower: bool) -> float:
"""Compute winsorization cutoffs similarly to Tukey boxplots.
See https://mathworld.wolfram.com/Box-and-WhiskerPlot.html for more details.
"""
q1 = np.percentile(Y, q=25, interpolation="lower")
q3 = np.percentile(Y, q=75, interpolation="higher")
iqr = q3 - q1
return q1 - 1.5 * iqr if lower else q3 + 1.5 * iqr
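# Worked example with made-up values: for Y = [1, 2, 3, 4, 100],
# q1 = 2, q3 = 4 and iqr = 2, so the upper cutoff is 4 + 1.5 * 2 = 7 and the
# lower cutoff is 2 - 1.5 * 2 = -1; the outlier 100 would be clipped to 7.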
def _get_auto_winsorization_cutoffs_single_objective(
metric_values: List[float], minimize: bool
) -> Tuple[float, float]:
"""Automatic winsorization for a single objective.
We use a heuristic similar to what is used for Tukey box-plots in order to determine
what is an outlier. If we are | |
<filename>CS/CSC148/exercises/ex2/ex2.py
"""CSC148 Exercise 2: Inheritance and Introduction to Stacks
=== CSC148 Fall 2016 ===
<NAME>, <NAME>, and <NAME>
Department of Computer Science,
University of Toronto
=== Module description ===
This file contains starter code for Exercise 2.
It is divided into two parts:
- Task 1, which contains a set of classes that build on your work from
last week
- Task 2, which contains the skeleton of a simple function involving a Stack
data structure.
Notes:
1. When you override a method, you generally do not need to include a
method docstring, unless there are subclass-specific details to describe.
While PyCharm will complain about a missing docstring, you may ignore this
warning *for this specific case*.
2. A lot of starter code has been provided! Read through it carefully
before starting. You may also find it interesting to compare our work
against what you did for Exercise 1.
"""
# You will find these imports useful. Please do not import any others,
# or python_ta will deduct marks.
from math import sqrt # sqrt used to calculate diagonal distances
import random # used to generate random numbers
##############################################################################
# Task 1: Cars and other vehicles
##############################################################################
class SuperDuperManager:
"""A class responsible for keeping track of all cars in the system.
"""
# @param dict[str, Vehicle] _vehicles:
# A map of unique string identifiers to the corresponding vehicles.
# For example, _vehicles['a01'] would be a vehicle corresponding to
# the id_ 'a01'.
def __init__(self):
"""Initialize a new SuperDuperManager.
Initially there are no vehicles in the system.
@param SuperDuperManager self:
@rtype: None
"""
self._vehicles = {}
def add_vehicle(self, vehicle_type, id_, fuel):
"""Add a new vehicle to the system of the given type.
The new vehicle is identified by the string <id_>,
and has initial amount of fuel <fuel>.
Do nothing if there is already a vehicle with the given id.
Precondition: <vehicle_type> is one of 'Car', 'Helicopter', or
'UnreliableMagicCarpet'.
@param SuperDuperManager self:
@param str vehicle_type:
@param str id_:
@param int fuel:
@rtype: None
"""
# Check to make sure the identifier isn't already used.
if id_ not in self._vehicles:
if vehicle_type == 'Car':
self._vehicles[id_] = Car(fuel)
elif vehicle_type == 'Helicopter':
self._vehicles[id_] = Helicopter(fuel)
elif vehicle_type == 'UnreliableMagicCarpet':
self._vehicles[id_] = UnreliableMagicCarpet(fuel)
def move_vehicle(self, id_, new_x, new_y):
"""Move a vehicle with the given id.
The vehicle called <id_> should be moved to position (<new_x>, <new_y>).
Do nothing if there is no vehicle with the given id,
or if the corresponding vehicle does not have enough fuel to move.
@param SuperDuperManager self: SuperDuperManager
@param str id_:
@param int new_x:
@param int new_y:
@rtype: None
"""
if id_ in self._vehicles:
self._vehicles[id_].move(new_x, new_y)
def get_vehicle_position(self, id_):
"""Return the position of the vehicle with the given id.
Return a tuple of the (x, y) position of the vehicle.
Return None if there is no vehicle with the given id.
@param SuperDuperManager self: SuperDuperManager
@param str id_: str
@rtype: (int, int) | None
"""
if id_ in self._vehicles:
return self._vehicles[id_].position
def get_vehicle_fuel(self, id_):
"""Return the amount of fuel of the vehicle with the given id.
Return None if there is no vehicle with the given id.
@param SuperDuperManager self:
@param str id_:
@rtype: int | None
"""
if id_ in self._vehicles:
return self._vehicles[id_].fuel
class Vehicle:
""" A superclass for a vehicle in the Super Duper system.
Note that this interface specifies *two* public attributes,
and *two* public methods (the constructor is not considered public).
Of the public methods, a default implementation is given for move,
but not fuel_needed.
It also defines a constructor that should be called by each of its
subclasses.
=== Attributes ===
@param tuple(int) position:
The position of this vehicle.
@param int fuel:
The amount of fuel remaining for this vehicle.
=== Representation invariants ===
fuel >= 0
"""
def __init__(self, new_fuel, new_position):
"""Initialize a new Vehicle with the given fuel and position.
Precondition: new_fuel >= 0
@param Vehicle self: Vehicle itself
@param int new_fuel: fuel amount
@param (int, int) new_position: destination coordinates
@rtype: None
"""
self.fuel = new_fuel
self.position = new_position
def fuel_needed(self, new_x, new_y):
"""Return how much fuel would be used to move to the given position.
Note: the amount returned may be larger than self.fuel,
indicating that this vehicle may not move to the given position.
@param Vehicle self: Vehicle itself
@param int new_x: destination's x coordinate
@param int new_y: destination's y coordinate
@rtype: float
"""
raise NotImplementedError
def move(self, new_x, new_y):
"""Move this vehicle to a new position.
Do nothing if this vehicle does not have enough fuel to move.
@param Vehicle self: Vehicle itself
@param int new_x: destination's x coordinate
@param int new_y: destination's y coordinate
@rtype: None
"""
needed = self.fuel_needed(new_x, new_y)
if needed <= self.fuel:
self.position = (new_x, new_y)
self.fuel -= needed
# TODO: Implement this class (you can use your work from Exercise 1)
class Car(Vehicle):
"""A Car in the Super Duper system.
Car original position is at (0, 0).
A Car can only move vertically and horizontally, and uses
one unit of fuel per unit distance travelled.
Do nothing if the Car does not have enough fuel to move.
=== Attributes ===
@param tuple(int) position:
The position of this Car.
@param int fuel:
The amount of fuel remaining for this Car.
=== Representation invariants ===
fuel >= 0
"""
def __init__(self, fuel, position=(0, 0)):
"""
Initialize a new Car with the given fuel and position.
Precondition: new_fuel >= 0
@param Car self: Car itself.
@param int fuel: fuel amount.
@param (int, int) position: original position.
@rtype: None
"""
Vehicle.__init__(self, fuel, position)
def fuel_needed(self, new_x, new_y):
"""Return how much fuel would be used to move to the given position.
Note: the amount returned may be larger than self.fuel,
indicating that this Car may not move to the given position.
@param Car self: Car itself.
@param int new_x: destination's x coordinate
@param int new_y: destination's y coordinate
@rtype: float
"""
distance = abs(new_x - self.position[0]) + abs(new_y - self.position[1])
return distance
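# Doctest-style sketch (illustrative values):
# >>> c = Car(10)          # starts at (0, 0) with 10 units of fuel
# >>> c.fuel_needed(3, 4)  # Manhattan distance |3| + |4|
# 7
# >>> c.move(3, 4); c.fuel
# 3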
# TODO: Implement this class. Note: We've imported the sqrt function for you.
class Helicopter(Vehicle):
"""
A helicopter. Can travel diagonally between points.
Helicopter original position is (3, 5).
After each move, amount of fuel will round down to the nearest integer.
Do nothing if Helicopter does not have enough fuel to move.
=== Attributes ===
@param tuple(int) position:
The position of this vehicle.
@param int fuel:
The amount of fuel remaining for this vehicle.
=== Representation invariants ===
fuel >= 0
"""
def __init__(self, fuel, position=(3, 5)):
"""
Create a Helicopter with fuel amount and default position
Precondition: new_fuel >= 0
@param Helicopter self: Helicopter itself.
@param int fuel: fuel amount.
@param (int, int) position: original position.
@rtype: None
"""
Vehicle.__init__(self, fuel, position)
def fuel_needed(self, new_x, new_y):
"""Return how much fuel would be used to move to the given position.
Note: the amount returned may be larger than self.fuel,
indicating that this vehicle may not move to the given position.
@param Helicopter self: Helicopter itself
@param int new_x: destination's x coordinates
@param int new_y: destination's y coordinates
@rtype: float
"""
return sqrt((abs(new_x - self.position[0]))**2 +
(abs(new_y - self.position[1]))**2)
def move(self, new_x, new_y):
"""Move this Helicopter to a new position.
Do nothing if this Helicopter does not have enough fuel to move.
@param Helicopter self: Helicopter itself
@param int new_x: destination's x coordinate
@param int new_y: destination's y coordinate
@rtype: None
"""
needed = self.fuel_needed(new_x, new_y)
if needed <= self.fuel:
self.position = (new_x, new_y)
self.fuel = int(self.fuel - needed)
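# Sketch of the Helicopter's Euclidean fuel model with flooring after each
# move (illustrative values; sqrt is imported at module top per the exercise
# note above):
# >>> heli = Helicopter(10)  # starts at (3, 5)
# >>> heli.fuel_needed(6, 9)  # sqrt(3**2 + 4**2)
# 5.0
# >>> heli.move(6, 9)
# >>> heli.position, heli.fuel  # int(10 - 5.0) == 5
# ((6, 9), 5)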
# TODO: Implement this class. Note: We've imported the random module for you.
class UnreliableMagicCarpet(Vehicle):
"""
An unreliable magic carpet.
An UnreliableMagicCarpet is created at a random position (x, y), where
x and y are integers between 0 and 10 inclusive.
Does not need to use fuel to travel, but ends up in a random position
within two horizontal and vertical units from the target destination.
<gh_stars>10-100
# Copyright (c) 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.nn as nn
import torch
import torch.nn.functional as F
from .common import no_dropout, no_layer_norm
class ChildSumTreeLSTMCell(nn.Module):
# https://arxiv.org/abs/1503.00075
def __init__(self, input_size, hidden_size, learn_init_state=False):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.iou_linear = nn.Linear(input_size + hidden_size, hidden_size * 3)
self.f_input_linear = nn.Linear(
input_size, hidden_size, bias=False)
self.f_hidden_linear = nn.Linear(hidden_size, hidden_size)
self.init_hidden = nn.Parameter(torch.zeros(hidden_size), learn_init_state)
self.init_cell = nn.Parameter(torch.zeros(hidden_size), learn_init_state)
# self.reset_parameters()
# def reset_parameters(self):
# stdv = 1.0 / math.sqrt(self.hidden_size)
# for weight in self.parameters():
# weight.data.uniform_(-stdv, stdv)
def forward(self, input, states=None):
"""
:param input: 1-dimensional tensor
:param states: pairs of 1-dimensional hiddens and cells.
ex. ((h_0, c_0), (h_1, c_1), (h_2, c_2))
:returns: 1-dimensional hidden and cell
"""
# states: pairs of (cell, hidden)
if states is None:
states = ((self.init_hidden, self.init_cell),)
num_children = len(states)
hiddens, cells = zip(*states)
hidden_tensor = torch.cat([hidden.view(1, self.hidden_size)
for hidden in hiddens], dim=0)
cell_tensor = torch.cat([cell.view(1, self.hidden_size)
for cell in cells], dim=0)
io_linear, u_linear = torch.split(
self.iou_linear(torch.cat([input, hidden_tensor.sum(0)], dim=0)),
self.hidden_size * 2, dim=0)
i, o = torch.split(torch.sigmoid(io_linear), self.hidden_size, dim=0)
u = torch.tanh(u_linear)
f = torch.sigmoid(self.f_input_linear(input).repeat(num_children, 1) +
self.f_hidden_linear(hidden_tensor))
new_cell = i * u + (f * cell_tensor).sum(0)
new_h = o * torch.tanh(new_cell)
return new_h, new_cell
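# Minimal usage sketch (names and sizes illustrative): compose a two-child
# node bottom-up; shapes follow the forward() docstring (1-dimensional tensors).
# cell = ChildSumTreeLSTMCell(input_size=8, hidden_size=16)
# leaf1 = cell(torch.randn(8))  # states=None -> initial (h, c)
# leaf2 = cell(torch.randn(8))
# h_root, c_root = cell(torch.randn(8), states=(leaf1, leaf2))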
class LayerNormChildSumTreeLSTMCell(nn.Module):
"""Combination of tree LSTM and layer normalization & recurrent dropout without memory loss"""
def __init__(self, input_size, hidden_size, dropout=None, layer_norm_enabled=True, learn_init_state=False, cell_ln=None):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.iou_linear = nn.Linear(
input_size + hidden_size, hidden_size * 3, bias=not layer_norm_enabled)
self.f_input_linear = nn.Linear(
input_size, hidden_size, bias=False)
self.f_hidden_linear = nn.Linear(
hidden_size, hidden_size, bias=not layer_norm_enabled)
self.init_hidden = nn.Parameter(torch.zeros(hidden_size), learn_init_state)
self.init_cell = nn.Parameter(torch.zeros(hidden_size), learn_init_state)
if dropout is not None:
if isinstance(dropout, nn.Dropout):
self.dropout = dropout
elif dropout > 0:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = no_dropout
self.layer_norm_enabled = layer_norm_enabled
if layer_norm_enabled:
self.f_ln = nn.LayerNorm(hidden_size)
self.iou_ln_layers = nn.ModuleList(
nn.LayerNorm(hidden_size) for _ in range(3))
# self.iou_ln_layers.append(
# nn.LayerNorm(hidden_size) if u_ln is None else u_ln)
self.cell_ln = nn.LayerNorm(
hidden_size) if cell_ln is None else cell_ln
else:
assert cell_ln is None
# assert u_ln is cell_ln is None
self.f_ln = no_layer_norm
self.iou_ln_layers = (no_layer_norm,) * 3
self.cell_ln = no_layer_norm
def forward(self, input, states=None):
"""
:param input: 1-dimensional tensor
:param states: pairs of 1-dimensional hiddens and cells.
ex. ((h_0, c_0), (h_1, c_1), (h_2, c_2))
:returns: 1-dimensional hidden and cell
"""
# states: pairs of (cell, hidden)
if states is None:
states = ((self.init_hidden, self.init_cell),)
# num_children = len(states)
hiddens, cells = zip(*states)
hidden_tensor = torch.stack(hiddens, dim=0)
cell_tensor = torch.stack(cells, dim=0)
f_linear_tensor = self.f_input_linear(input) + \
self.f_hidden_linear(hidden_tensor)
# f_linear_tensor = self.f_input_linear(input).repeat(num_children, 1) + \
# self.f_hidden_linear(hidden_tensor)
iou_linear_tensors = torch.split(
self.iou_linear(torch.cat([input, hidden_tensor.sum(0)], dim=0)),
self.hidden_size, dim=0)
# if self.layer_norm_enabled:
f_linear_tensor = torch.stack(
[self.f_ln(tensor) for tensor in f_linear_tensor],
dim=0)
iou_linear_tensors = tuple(ln(tensor) for ln, tensor in
zip(self.iou_ln_layers, iou_linear_tensors))
f = torch.sigmoid(f_linear_tensor)
i, o = (torch.sigmoid(tensor) for tensor in iou_linear_tensors[:2])
u = self.dropout(torch.tanh(iou_linear_tensors[2]))
new_cell = self.cell_ln(i * u + (f * cell_tensor).sum(0))
new_h = o * torch.tanh(new_cell)
return new_h, new_cell
class AttentiveChildSumTreeLSTMCell(nn.Module):
"""
The model is based on "Modelling Sentence Pairs with Tree-structured Attentive Encoder, COLING 2016"
And, layer normalization & recurrent dropout without memory loss is applied.
(Also, elementwise attention is added as an option.)
"""
def __init__(self, input_size, hidden_size, external_size=None, dropout=None,
attention_enabled=True, elementwise=False, layer_norm_enabled=True,
learn_init_state=False):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
if attention_enabled:
assert external_size
self.external_size = external_size
self.attention_input_linear = nn.Linear(
hidden_size +
external_size, (hidden_size + external_size) // 2,
bias=False)
self.attention_linear = nn.Linear(
(hidden_size + external_size) // 2, hidden_size if elementwise else 1,
bias=False)
self.merge_linear = nn.Linear(
hidden_size, hidden_size, bias=not layer_norm_enabled)
if layer_norm_enabled:
self.merge_ln = nn.LayerNorm(hidden_size)
else:
self.merge_ln = no_layer_norm
self.forward_merge = self.forward_attentive_merge
else:
self.forward_merge = self.forward_sum_merge
self.iou_linear = nn.Linear(
input_size + hidden_size, hidden_size * 3, bias=not layer_norm_enabled)
self.f_input_linear = nn.Linear(
input_size, hidden_size, bias=False)
self.f_hidden_linear = nn.Linear(
hidden_size, hidden_size, bias=not layer_norm_enabled)
self.init_hidden = nn.Parameter(torch.zeros(hidden_size), learn_init_state)
self.init_cell = nn.Parameter(torch.zeros(hidden_size), learn_init_state)
if dropout is not None:
if isinstance(dropout, nn.Dropout):
self.dropout = dropout
elif dropout > 0:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = no_dropout
self.layer_norm_enabled = layer_norm_enabled
if layer_norm_enabled:
self.f_ln = nn.LayerNorm(hidden_size)
self.iou_ln_layers = nn.ModuleList(
nn.LayerNorm(hidden_size) for _ in range(3))
self.cell_ln = nn.LayerNorm(hidden_size)
else:
self.f_ln = no_layer_norm
self.iou_ln_layers = (no_layer_norm,) * 3
self.cell_ln = no_layer_norm
def forward(self, input, states=None, external=None):
"""
:param input: 1-dimensional tensor
:param states: pairs of 1-dimensional hiddens and cells.
ex. ((h_0, c_0), (h_1, c_1), (h_2, c_2))
:external: 1-dimensional tensor
:returns: 1-dimensional hidden and cell
"""
if states is None:
states = ((self.init_hidden, self.init_cell),)
# assert not external
# external = enable_cuda(self, torch.zeros(self.hidden_size))
hiddens, cells = zip(*states)
hidden_tensor = torch.stack(hiddens, dim=0)
cell_tensor = torch.stack(cells, dim=0)
f_linear_tensor = self.f_input_linear(input) + \
self.f_hidden_linear(hidden_tensor)
iou_linear_tensors = torch.split(
self.iou_linear(torch.cat(
[input, self.forward_merge(hidden_tensor, external)], dim=0)),
self.hidden_size, dim=0)
f_linear_tensor = torch.stack(
[self.f_ln(tensor) for tensor in f_linear_tensor],
dim=0)
iou_linear_tensors = tuple(ln(tensor) for ln, tensor in
zip(self.iou_ln_layers, iou_linear_tensors))
f = torch.sigmoid(f_linear_tensor)
i, o = (torch.sigmoid(tensor) for tensor in iou_linear_tensors[:2])
u = self.dropout(torch.tanh(iou_linear_tensors[2]))
new_cell = self.cell_ln(i * u + (f * cell_tensor).sum(0))
new_h = o * torch.tanh(new_cell)
return new_h, new_cell
def forward_sum_merge(self, hidden_tensor, external):
return hidden_tensor.sum(0)
def forward_attentive_merge(self, hidden_tensor, external):
# if external is None:
# return self.forward_sum_merge(hidden_tensor, external)
attention_input = torch.tanh(self.attention_input_linear(
torch.cat([hidden_tensor, external.repeat(hidden_tensor.size(0), 1)], dim=1)))
attention = F.softmax(
self.attention_linear(attention_input),
dim=0)
merge_hidden = torch.tanh(self.merge_ln(self.merge_linear(
(hidden_tensor * attention).sum(0))))
return merge_hidden
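# Sketch of the attentive variant (illustrative sizes): child hiddens are
# softmax-weighted by attention conditioned on an external vector (e.g. the
# encoding of the other sentence in a pair) instead of being summed.
# cell = AttentiveChildSumTreeLSTMCell(input_size=8, hidden_size=16,
#                                      external_size=32)
# ext = torch.randn(32)
# child = cell(torch.randn(8), external=ext)
# h, c = cell(torch.randn(8), states=(child, child), external=ext)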
class LayerNormTreeGRUCell(nn.Module):
"""
The model is based on "Modelling Sentence Pairs with Tree-structured Attentive Encoder, COLING 2016"
And, layer normalization & recurrent dropout without memory loss is applied.
code reference: https://gist.github.com/udibr/7f46e790c9e342d75dcbd9b1deb9d940
"""
def __init__(self, input_size, hidden_size,
dropout=None, layer_norm_enabled=True, init_hidden_enabled=False, learn_init_state=False):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.z_linear = nn.Linear(
input_size + hidden_size, hidden_size, bias=not layer_norm_enabled)
self.r_input_linear = nn.Linear(
input_size, hidden_size, bias=False)
self.r_hidden_linear = nn.Linear(
hidden_size, hidden_size, bias=not layer_norm_enabled)
self.u_linear = nn.Linear(
input_size + hidden_size, hidden_size, bias=not layer_norm_enabled)
if dropout is not None:
if isinstance(dropout, nn.Dropout):
self.dropout = dropout
elif dropout > 0:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = no_dropout
self.layer_norm_enabled = layer_norm_enabled
if layer_norm_enabled:
self.z_ln = nn.LayerNorm(hidden_size)
self.r_ln = nn.LayerNorm(hidden_size)
self.u_ln = nn.LayerNorm(hidden_size)
else:
self.z_ln = no_layer_norm
self.r_ln = no_layer_norm
self.u_ln = no_layer_norm
self.init_hidden_enabled = init_hidden_enabled
if init_hidden_enabled:
self.init_hidden = nn.Parameter(torch.zeros(hidden_size), learn_init_state)
def forward(self, input, hiddens=None):
"""
:param input: 1-dimensional tensor
:param states: 1-dimensional hiddens.
ex. (h_0, h_1, h_2)
:returns: 1-dimensional hidden
"""
# states: pairs of (cell, hidden)
if hiddens is None:
hiddens = (self.init_hidden,)
# num_children = len(states)
hidden_tensor = torch.stack(hiddens, dim=0)
hidden_sum = hidden_tensor.sum(0)
r = torch.sigmoid(torch.stack(
[self.r_ln(tensor) for tensor in (
self.r_input_linear(input) + self.r_hidden_linear(hidden_tensor))],
dim=0))
z = torch.sigmoid(self.z_ln(self.z_linear(
torch.cat([input, hidden_sum], dim=0))))
u = torch.tanh(self.u_ln(self.u_linear(
torch.cat([input, (r * hidden_tensor).sum(0)], dim=0))))
h = z * hidden_sum + (1 - z) * u
return h
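# The tree-GRU update implemented above, written out (x is the input, h_k the
# child hiddens, LN the optional layer norm):
#   r_k = sigmoid(LN(W_r x + U_r h_k))        (one reset gate per child)
#   z   = sigmoid(LN(W_z [x; sum_k h_k]))
#   u   = tanh(LN(W_u [x; sum_k r_k * h_k]))
#   h   = z * sum_k h_k + (1 - z) * u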
class LayerNormNoInputBinaryTreeLSTMCell(nn.Module):
def __init__(self, hidden_size, dropout=None, layer_norm_enabled=True, learn_init_state=False, cell_ln=None):
super().__init__()
self.hidden_size = hidden_size
self.ffiou_linear = nn.Linear(
hidden_size * 2, hidden_size * 5, bias=not layer_norm_enabled)
if dropout is not None:
if isinstance(dropout, nn.Dropout):
self.dropout = dropout
elif dropout > 0:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = no_dropout
self.layer_norm_enabled = layer_norm_enabled
if layer_norm_enabled:
self.ffio_ln_layers = nn.ModuleList(
nn.LayerNorm(hidden_size) for _ in range(4))
self.u_ln = nn.LayerNorm(hidden_size)
# self.u_ln = nn.LayerNorm(
# hidden_size) if u_ln is None else u_ln
self.cell_ln = nn.LayerNorm(
hidden_size) if cell_ln is None else cell_ln
else:
# assert u_ln is cell_ln is None
assert cell_ln is None
self.ffio_ln_layers = (no_layer_norm,) * 4
self.u_ln = no_layer_norm
self.cell_ln = no_layer_norm
def forward(self, state0, state1):
"""
:param state0: (h0, c0); hidden and cell are 1-dimensional tensors
:param state1: (h1, c1)
:returns: (h_new, c_new)
"""
hiddens, cells = zip(state0, state1)
ffiou_linear_tensors = torch.split(
self.ffiou_linear(torch.cat(hiddens, dim=0)), self.hidden_size, dim=0)
f0, f1, i, o = (torch.sigmoid(ln(tensor)) for ln, tensor in
zip(self.ffio_ln_layers, ffiou_linear_tensors[:4]))
u = self.dropout(torch.tanh(self.u_ln(ffiou_linear_tensors[4])))
new_cell = self.cell_ln(i * u + f0 * cells[0] + f1 * cells[1])
new_h = o * torch.tanh(new_cell)
return new_h, new_cell
attach to a worker replica.
service_account (str):
The service account that the DeployedModel's container runs as. Specify the
email address of the service account. If this service account is not
specified, the container runs as a service account that doesn't have access
to the resource project.
Users deploying the Model must have the `iam.serviceAccounts.actAs`
permission on this service account.
explanation_metadata (explain.ExplanationMetadata):
Optional. Metadata describing the Model's input and output for explanation.
Both `explanation_metadata` and `explanation_parameters` must be
passed together when used. For more details, see
`Ref docs <http://tinyurl.com/1igh60kt>`
explanation_parameters (explain.ExplanationParameters):
Optional. Parameters to configure explaining for Model's predictions.
For more details, see `Ref docs <http://tinyurl.com/1an4zake>`
metadata (Sequence[Tuple[str, str]]):
Optional. Strings which should be sent along with the request as
metadata.
encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this Model and all sub-resources of this Model will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
Returns:
endpoint ("Endpoint"):
Endpoint with the deployed model.
"""
Endpoint._validate_deploy_args(
min_replica_count,
max_replica_count,
accelerator_type,
deployed_model_display_name,
traffic_split,
traffic_percentage,
explanation_metadata,
explanation_parameters,
)
return self._deploy(
endpoint=endpoint,
deployed_model_display_name=deployed_model_display_name,
traffic_percentage=traffic_percentage,
traffic_split=traffic_split,
machine_type=machine_type,
min_replica_count=min_replica_count,
max_replica_count=max_replica_count,
accelerator_type=accelerator_type,
accelerator_count=accelerator_count,
service_account=service_account,
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
metadata=metadata,
encryption_spec_key_name=encryption_spec_key_name
or initializer.global_config.encryption_spec_key_name,
sync=sync,
)
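# Hypothetical usage sketch, assuming this public method is Model.deploy and
# using only parameters documented in the docstring above (all values are
# illustrative):
# endpoint = my_model.deploy(
#     machine_type="n1-standard-4",
#     accelerator_type="NVIDIA_TESLA_T4",
#     accelerator_count=1,
#     min_replica_count=1,
#     max_replica_count=2,
# )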
@base.optional_sync(return_input_arg="endpoint", bind_future_to_self=False)
def _deploy(
self,
endpoint: Optional["Endpoint"] = None,
deployed_model_display_name: Optional[str] = None,
traffic_percentage: Optional[int] = 0,
traffic_split: Optional[Dict[str, int]] = None,
machine_type: Optional[str] = None,
min_replica_count: int = 1,
max_replica_count: int = 1,
accelerator_type: Optional[str] = None,
accelerator_count: Optional[int] = None,
service_account: Optional[str] = None,
explanation_metadata: Optional[explain.ExplanationMetadata] = None,
explanation_parameters: Optional[explain.ExplanationParameters] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = (),
encryption_spec_key_name: Optional[str] = None,
sync: bool = True,
) -> Endpoint:
"""Deploys model to endpoint. Endpoint will be created if unspecified.
Args:
endpoint ("Endpoint"):
Optional. Endpoint to deploy model to. If not specified, endpoint
display name will be model display name+'_endpoint'.
deployed_model_display_name (str):
Optional. The display name of the DeployedModel. If not provided
upon creation, the Model's display_name is used.
traffic_percentage (int):
Optional. Desired traffic to newly deployed model. Defaults to
0 if there are pre-existing deployed models. Defaults to 100 if
there are no pre-existing deployed models. Negative values should
not be provided. Traffic of previously deployed models at the endpoint
will be scaled down to accommodate new deployed model's traffic.
Should not be provided if traffic_split is provided.
traffic_split (Dict[str, int]):
Optional. A map from a DeployedModel's ID to the percentage of
this Endpoint's traffic that should be forwarded to that DeployedModel.
If a DeployedModel's ID is not listed in this map, then it receives
no traffic. The traffic percentage values must add up to 100, or
map must be empty if the Endpoint is to not accept any traffic at
the moment. Key for model being deployed is "0". Should not be
provided if traffic_percentage is provided.
machine_type (str):
Optional. The type of machine. Not specifying machine type will
result in the model being deployed with automatic resources.
min_replica_count (int):
Optional. The minimum number of machine replicas this deployed
model will be always deployed on. If traffic against it increases,
it may dynamically be deployed onto more replicas, and as traffic
decreases, some of these extra replicas may be freed.
max_replica_count (int):
Optional. The maximum number of replicas this deployed model may
be deployed on when the traffic against it increases. If requested
value is too large, the deployment will error, but if deployment
succeeds then the ability to scale the model to that many replicas
is guaranteed (barring service outages). If traffic against the
deployed model increases beyond what its replicas at maximum may
handle, a portion of the traffic will be dropped. If this value
is not provided, the smaller value of min_replica_count or 1 will
be used.
accelerator_type (str):
Optional. Hardware accelerator type. Must also set accelerator_count if used.
One of ACCELERATOR_TYPE_UNSPECIFIED, NVIDIA_TESLA_K80, NVIDIA_TESLA_P100,
NVIDIA_TESLA_V100, NVIDIA_TESLA_P4, NVIDIA_TESLA_T4
accelerator_count (int):
Optional. The number of accelerators to attach to a worker replica.
service_account (str):
The service account that the DeployedModel's container runs as. Specify the
email address of the service account. If this service account is not
specified, the container runs as a service account that doesn't have access
to the resource project.
Users deploying the Model must have the `iam.serviceAccounts.actAs`
permission on this service account.
explanation_metadata (explain.ExplanationMetadata):
Optional. Metadata describing the Model's input and output for explanation.
Both `explanation_metadata` and `explanation_parameters` must be
passed together when used. For more details, see
`Ref docs <http://tinyurl.com/1igh60kt>`
explanation_parameters (explain.ExplanationParameters):
Optional. Parameters to configure explaining for Model's predictions.
For more details, see `Ref docs <http://tinyurl.com/1an4zake>`
metadata (Sequence[Tuple[str, str]]):
Optional. Strings which should be sent along with the request as
metadata.
encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this Model and all sub-resources of this Model will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
Returns:
endpoint ("Endpoint"):
Endpoint with the deployed model.
"""
if endpoint is None:
display_name = self.display_name[:118] + "_endpoint"
endpoint = Endpoint.create(
display_name=display_name,
project=self.project,
location=self.location,
credentials=self.credentials,
encryption_spec_key_name=encryption_spec_key_name,
)
_LOGGER.log_action_start_against_resource("Deploying model to", "", endpoint)
Endpoint._deploy_call(
endpoint.api_client,
endpoint.resource_name,
self.resource_name,
endpoint._gca_resource.traffic_split,
deployed_model_display_name=deployed_model_display_name,
traffic_percentage=traffic_percentage,
traffic_split=traffic_split,
machine_type=machine_type,
min_replica_count=min_replica_count,
max_replica_count=max_replica_count,
accelerator_type=accelerator_type,
accelerator_count=accelerator_count,
service_account=service_account,
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
metadata=metadata,
)
_LOGGER.log_action_completed_against_resource("model", "deployed", endpoint)
endpoint._sync_gca_resource()
return endpoint
def batch_predict(
self,
job_display_name: str,
gcs_source: Optional[Union[str, Sequence[str]]] = None,
bigquery_source: Optional[str] = None,
instances_format: str = "jsonl",
gcs_destination_prefix: Optional[str] = None,
bigquery_destination_prefix: Optional[str] = None,
predictions_format: str = "jsonl",
model_parameters: Optional[Dict] = None,
machine_type: Optional[str] = None,
accelerator_type: Optional[str] = None,
accelerator_count: Optional[int] = None,
starting_replica_count: Optional[int] = None,
max_replica_count: Optional[int] = None,
generate_explanation: Optional[bool] = False,
explanation_metadata: Optional[explain.ExplanationMetadata] = None,
explanation_parameters: Optional[explain.ExplanationParameters] = None,
labels: Optional[Dict[str, str]] = None,
credentials: Optional[auth_credentials.Credentials] = None,
encryption_spec_key_name: Optional[str] = None,
sync: bool = True,
) -> jobs.BatchPredictionJob:
"""Creates a batch prediction job using this Model and outputs
prediction results to the provided destination prefix in the specified
`predictions_format`. One source and one destination prefix are
required.
Example usage:
my_model.batch_predict(
job_display_name="prediction-123",
gcs_source="gs://example-bucket/instances.csv",
instances_format="csv",
bigquery_destination_prefix="projectId.bqDatasetId.bqTableId"
)
Args:
job_display_name (str):
Required. The user-defined name of the BatchPredictionJob.
The name can be up to 128 characters long and can consist
of any UTF-8 characters.
gcs_source: Optional[Sequence[str]] = None
Google Cloud Storage URI(-s) to your instances to run
batch prediction on. They must match `instances_format`.
May contain wildcards. For more information on wildcards, see
https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames.
bigquery_source: Optional[str] = None
BigQuery URI to a table, up to 2000 characters long. For example:
`bq://projectId.bqDatasetId.bqTableId`
instances_format: str = "jsonl"
The format in which instances are provided. Must be one
of the formats listed in `Model.supported_input_storage_formats`.
Default is "jsonl" when using `gcs_source`. If a `bigquery_source`
is provided, this is overridden to "bigquery".
gcs_destination_prefix: Optional[str] = None
The Google Cloud Storage location of the directory where the
output is to be written to. In the given directory a new
directory is created. Its name is
``prediction-<model-display-name>-<job-create-time>``, where
timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
Inside of it files ``predictions_0001.<extension>``,
``predictions_0002.<extension>``, ...,
``predictions_N.<extension>`` are created where
``<extension>`` depends on chosen ``predictions_format``,
and N may equal 0001 and depends on the total number of
successfully predicted instances. If the Model has both
``instance`` and ``prediction`` schemata defined then each such
file contains
getCogNameString(self):
numCogs = self.getNumCogs()
if numCogs == 1:
return TTLocalizer.ASupervisor
else:
return TTLocalizer.SupervisorP
def doesCogCount(self, avId, cogDict, zoneId, avList):
return bool(CogQuest.doesCogCount(self, avId, cogDict, zoneId, avList) and cogDict['isSupervisor'])
class SupervisorNewbieQuest(SupervisorQuest, NewbieQuest):
def __init__(self, id, quest):
SupervisorQuest.__init__(self, id, quest)
self.checkNewbieLevel(self.quest[2])
def getNewbieLevel(self):
return self.quest[2]
def getString(self):
return NewbieQuest.getString(self)
def doesCogCount(self, avId, cogDict, zoneId, avList):
if SupervisorQuest.doesCogCount(self, avId, cogDict, zoneId, avList):
return self.getNumNewbies(avId, avList)
else:
return 0
class CFOQuest(CogQuest):
def __init__(self, id, quest):
CogQuest.__init__(self, id, quest)
self.checkNumCFOs(self.quest[1])
def getCogType(self):
return Any
def getCogNameString(self):
numCogs = self.getNumCogs()
if numCogs == 1:
return TTLocalizer.ACogCFO
else:
return TTLocalizer.CogCFOs
def doesCogCount(self, avId, cogDict, zoneId, avList):
return cogDict['isBoss'] > 0 and self.isLocationMatch(zoneId)
def doesCFOCount(self, avId, cogDict, zoneId, avList):
return self.doesCogCount(avId, cogDict, zoneId, avList)
class CFONewbieQuest(CFOQuest, NewbieQuest):
def __init__(self, id, quest):
CFOQuest.__init__(self, id, quest)
self.checkNewbieLevel(self.quest[2])
def getNewbieLevel(self):
return self.quest[2]
def getString(self):
return NewbieQuest.getString(self)
def doesCFOCount(self, avId, cogDict, zoneId, avList):
if CFOQuest.doesCFOCount(self, avId, cogDict, zoneId, avList):
return self.getNumNewbies(avId, avList)
else:
return 0
class CJQuest(CogQuest):
def __init__(self, id, quest):
CogQuest.__init__(self, id, quest)
self.checkNumCJs(self.quest[1])
def getCogType(self):
return Any
def getCogNameString(self):
numCogs = self.getNumCogs()
if numCogs == 1:
return TTLocalizer.ACogCJ
else:
return TTLocalizer.CogCJs
def doesCogCount(self, avId, cogDict, zoneId, avList):
return cogDict['isBoss'] > 0 and self.isLocationMatch(zoneId)
def doesCJCount(self, avId, cogDict, zoneId, avList):
return self.doesCogCount(avId, cogDict, zoneId, avList)
class CJNewbieQuest(CJQuest, NewbieQuest):
def __init__(self, id, quest):
CJQuest.__init__(self, id, quest)
self.checkNewbieLevel(self.quest[2])
def getNewbieLevel(self):
return self.quest[2]
def getString(self):
return NewbieQuest.getString(self)
def doesCJCount(self, avId, cogDict, zoneId, avList):
if CJQuest.doesCJCount(self, avId, cogDict, zoneId, avList):
return self.getNumNewbies(avId, avList)
else:
return 0
class CEOQuest(CogQuest):
def __init__(self, id, quest):
CogQuest.__init__(self, id, quest)
self.checkNumCEOs(self.quest[1])
def getCogType(self):
return Any
def getCogNameString(self):
numCogs = self.getNumCogs()
if numCogs == 1:
return TTLocalizer.ACogCEO
else:
return TTLocalizer.CogCEOs
def doesCogCount(self, avId, cogDict, zoneId, avList):
return cogDict['isBoss'] > 0 and self.isLocationMatch(zoneId)
def doesCEOCount(self, avId, cogDict, zoneId, avList):
return self.doesCogCount(avId, cogDict, zoneId, avList)
class CEONewbieQuest(CEOQuest, NewbieQuest):
def __init__(self, id, quest):
CEOQuest.__init__(self, id, quest)
self.checkNewbieLevel(self.quest[2])
def getNewbieLevel(self):
return self.quest[2]
def getString(self):
return NewbieQuest.getString(self)
def doesCEOCount(self, avId, cogDict, zoneId, avList):
if CEOQuest.doesCEOCount(self, avId, cogDict, zoneId, avList):
return self.getNumNewbies(avId, avList)
else:
return 0
class RescueQuest(VPQuest):
def __init__(self, id, quest):
VPQuest.__init__(self, id, quest)
def getNumToons(self):
return self.getNumCogs()
def getProgressString(self, avatar, questDesc):
if self.getCompletionStatus(avatar, questDesc) == COMPLETE:
return CompleteString
elif self.getNumToons() == 1:
return ''
else:
return TTLocalizer.QuestsRescueQuestProgress % {'progress': questDesc[4],
'numToons': self.getNumToons()}
def getObjectiveStrings(self):
numToons = self.getNumCogs()
if numToons == 1:
text = TTLocalizer.QuestsRescueQuestToonS
else:
text = TTLocalizer.QuestsRescueQuestRescueDesc % {'numToons': numToons}
return (text,)
def getString(self):
return TTLocalizer.QuestsRescueQuestRescue % self.getObjectiveStrings()[0]
def getSCStrings(self, toNpcId, progress):
if progress >= self.getNumToons():
return getFinishToonTaskSCStrings(toNpcId)
numToons = self.getNumToons()
if numToons == 1:
text = TTLocalizer.QuestsRescueQuestSCStringS
else:
text = TTLocalizer.QuestsRescueQuestSCStringP
toonLoc = self.getLocationName()
return text % {'toonLoc': toonLoc}
def getHeadlineString(self):
return TTLocalizer.QuestsRescueQuestHeadline
class RescueNewbieQuest(RescueQuest, NewbieQuest):
def __init__(self, id, quest):
RescueQuest.__init__(self, id, quest)
self.checkNewbieLevel(self.quest[2])
def getNewbieLevel(self):
return self.quest[2]
def getString(self):
return NewbieQuest.getString(self, newStr=TTLocalizer.QuestsRescueNewNewbieQuestObjective, oldStr=TTLocalizer.QuestsRescueOldNewbieQuestObjective)
def doesVPCount(self, avId, cogDict, zoneId, avList):
if RescueQuest.doesVPCount(self, avId, cogDict, zoneId, avList):
return self.getNumNewbies(avId, avList)
else:
return 0
class BuildingQuest(CogQuest):
trackCodes = ['c',
'l',
'm',
's',
'g']
trackNames = [TTLocalizer.Bossbot,
TTLocalizer.Lawbot,
TTLocalizer.Cashbot,
TTLocalizer.Sellbot,
TTLocalizer.Boardbot]
def __init__(self, id, quest):
CogQuest.__init__(self, id, quest)
self.checkNumBuildings(self.quest[1])
self.checkBuildingTrack(self.quest[2])
self.checkBuildingFloors(self.quest[3])
def getNumFloors(self):
return self.quest[3]
def getBuildingTrack(self):
return self.quest[2]
def getNumQuestItems(self):
return self.getNumBuildings()
def getNumBuildings(self):
return self.quest[1]
def getCompletionStatus(self, av, questDesc, npc = None):
questId, fromNpcId, toNpcId, rewardId, toonProgress = questDesc
questComplete = toonProgress >= self.getNumBuildings()
return getCompleteStatusWithNpc(questComplete, toNpcId, npc)
def getProgressString(self, avatar, questDesc):
if self.getCompletionStatus(avatar, questDesc) == COMPLETE:
return CompleteString
elif self.getNumBuildings() == 1:
return ''
else:
return TTLocalizer.QuestsBuildingQuestProgressString % {'progress': questDesc[4],
'num': self.getNumBuildings()}
def getObjectiveStrings(self):
count = self.getNumBuildings()
floors = TTLocalizer.QuestsBuildingQuestFloorNumbers[self.getNumFloors() - 1]
buildingTrack = self.getBuildingTrack()
if buildingTrack == Any:
type = TTLocalizer.Cog
else:
type = self.trackNames[self.trackCodes.index(buildingTrack)]
if count == 1:
if floors == '':
text = TTLocalizer.QuestsBuildingQuestDesc
else:
text = TTLocalizer.QuestsBuildingQuestDescF
elif floors == '':
text = TTLocalizer.QuestsBuildingQuestDescC
else:
text = TTLocalizer.QuestsBuildingQuestDescCF
return (text % {'count': count,
'floors': floors,
'type': type},)
def getString(self):
return TTLocalizer.QuestsBuildingQuestString % self.getObjectiveStrings()[0]
def getSCStrings(self, toNpcId, progress):
if progress >= self.getNumBuildings():
return getFinishToonTaskSCStrings(toNpcId)
count = self.getNumBuildings()
floors = TTLocalizer.QuestsBuildingQuestFloorNumbers[self.getNumFloors() - 1]
buildingTrack = self.getBuildingTrack()
if buildingTrack == Any:
type = TTLocalizer.Cog
else:
type = self.trackNames[self.trackCodes.index(buildingTrack)]
if count == 1:
if floors == '':
text = TTLocalizer.QuestsBuildingQuestDesc
else:
text = TTLocalizer.QuestsBuildingQuestDescF
elif floors == '':
text = TTLocalizer.QuestsBuildingQuestDescI
else:
text = TTLocalizer.QuestsBuildingQuestDescIF
objective = text % {'floors': floors,
'type': type}
location = self.getLocationName()
return TTLocalizer.QuestsBuildingQuestSCString % {'objective': objective,
'location': location}
def getHeadlineString(self):
return TTLocalizer.QuestsBuildingQuestHeadline
def doesCogCount(self, avId, cogDict, zoneId, avList):
return 0
def doesBuildingTypeCount(self, type):
buildingTrack = self.getBuildingTrack()
if buildingTrack == Any or buildingTrack == type:
return True
return False
def doesBuildingCount(self, avId, avList):
return 1
class BuildingNewbieQuest(BuildingQuest, NewbieQuest):
def __init__(self, id, quest):
BuildingQuest.__init__(self, id, quest)
self.checkNewbieLevel(self.quest[4])
def getNewbieLevel(self):
return self.quest[4]
def getString(self):
return NewbieQuest.getString(self)
def getHeadlineString(self):
return TTLocalizer.QuestsNewbieQuestHeadline
def doesBuildingCount(self, avId, avList):
return self.getNumNewbies(avId, avList)
class CogdoQuest(CogQuest):
trackCodes = ['c',
'l',
'm',
's',
'g']
trackNames = [TTLocalizer.Bossbot,
TTLocalizer.Lawbot,
TTLocalizer.Cashbot,
TTLocalizer.Sellbot,
TTLocalizer.Boardbot]
def __init__(self, id, quest):
CogQuest.__init__(self, id, quest)
self.checkNumCogdos(self.quest[1])
self.checkCogdoTrack(self.quest[2])
def getCogdoTrack(self):
return self.quest[2]
def getNumQuestItems(self):
return self.getNumCogdos()
def getNumCogdos(self):
return self.quest[1]
def getCompletionStatus(self, av, questDesc, npc = None):
questId, fromNpcId, toNpcId, rewardId, toonProgress = questDesc
questComplete = toonProgress >= self.getNumCogdos()
return getCompleteStatusWithNpc(questComplete, toNpcId, npc)
def getProgressString(self, avatar, questDesc):
if self.getCompletionStatus(avatar, questDesc) == COMPLETE:
return CompleteString
elif self.getNumCogdos() == 1:
return ''
else:
return TTLocalizer.QuestsCogdoQuestProgressString % {'progress': questDesc[4],
'num': self.getNumCogdos()}
def getObjectiveStrings(self):
count = self.getNumCogdos()
buildingTrack = self.getCogdoTrack()
if buildingTrack == Any:
type = TTLocalizer.Cog
else:
type = self.trackNames[self.trackCodes.index(buildingTrack)]
if count == 1:
text = TTLocalizer.QuestsCogdoQuestDesc
else:
text = TTLocalizer.QuestsCogdoQuestDescC
return (text % {'count': count,
'type': type},)
def getString(self):
return TTLocalizer.QuestsCogdoQuestString % self.getObjectiveStrings()[0]
def getSCStrings(self, toNpcId, progress):
if progress >= self.getNumCogdos():
return getFinishToonTaskSCStrings(toNpcId)
count = self.getNumCogdos()
buildingTrack = self.getCogdoTrack()
if buildingTrack == Any:
type = TTLocalizer.Cog
else:
type = self.trackNames[self.trackCodes.index(buildingTrack)]
if count == 1:
text = TTLocalizer.QuestsCogdoQuestDesc
else:
text = TTLocalizer.QuestsCogdoQuestDescI
objective = text % {'type': type,}
location = self.getLocationName()
return TTLocalizer.QuestsCogdoQuestSCString % {'objective': objective,
'location': location}
def getHeadlineString(self):
return TTLocalizer.QuestsCogdoQuestHeadline
def doesCogCount(self, avId, cogDict, zoneId, avList):
return 0
def doesCogdoCount(self, avId, avList):
return 1
def doesCogdoTypeCount(self, type):
CogdoTrack = self.getCogdoTrack()
if CogdoTrack == Any or CogdoTrack == type:
return True
return False
class CogdoNewbieQuest(CogdoQuest, NewbieQuest):
def __init__(self, id, quest):
CogdoQuest.__init__(self, id, quest)
self.checkNewbieLevel(self.quest[3])
def getNewbieLevel(self):
return self.quest[3]
def getString(self):
return NewbieQuest.getString(self)
def getHeadlineString(self):
return TTLocalizer.QuestsNewbieQuestHeadline
def doesCogdoCount(self, avId, avList):
return self.getNumNewbies(avId, avList)
class FactoryQuest(LocationBasedQuest):
factoryTypeNames = {FT_FullSuit: TTLocalizer.Cog,
FT_Leg: TTLocalizer.FactoryTypeLeg,
FT_Arm: TTLocalizer.FactoryTypeArm,
FT_Torso: TTLocalizer.FactoryTypeTorso}
def __init__(self, id, quest):
LocationBasedQuest.__init__(self, id, quest)
self.checkNumFactories(self.quest[1])
def getNumQuestItems(self):
return self.getNumFactories()
def getNumFactories(self):
return self.quest[1]
def getFactoryType(self):
loc = self.getLocation()
type = Any
if loc in ToontownGlobals.factoryId2factoryType:
type = ToontownGlobals.factoryId2factoryType[loc]
return type
def getCompletionStatus(self, av, questDesc, npc = None):
questId, fromNpcId, toNpcId, rewardId, toonProgress = questDesc
questComplete = toonProgress >= self.getNumFactories()
return getCompleteStatusWithNpc(questComplete, toNpcId, npc)
def getProgressString(self, avatar, questDesc):
if self.getCompletionStatus(avatar, questDesc) == COMPLETE:
return CompleteString
elif self.getNumFactories() == 1:
return ''
else:
return TTLocalizer.QuestsFactoryQuestProgressString % {'progress': questDesc[4],
'num': self.getNumFactories()}
def getObjectiveStrings(self):
count = self.getNumFactories()
factoryType = self.getFactoryType()
if factoryType == Any:
type = TTLocalizer.Cog
else:
type = FactoryQuest.factoryTypeNames[factoryType]
if count == 1:
text = TTLocalizer.QuestsFactoryQuestDesc
else:
text = TTLocalizer.QuestsFactoryQuestDescC
return (text % {'count': count,
'type': type},)
def getString(self):
return TTLocalizer.QuestsFactoryQuestString % self.getObjectiveStrings()[0]
def getSCStrings(self, toNpcId, progress):
if progress >= self.getNumFactories():
return getFinishToonTaskSCStrings(toNpcId)
factoryType = self.getFactoryType()
if factoryType == Any:
type = TTLocalizer.Cog
else:
type = FactoryQuest.factoryTypeNames[factoryType]
count = self.getNumFactories()
if count == 1:
text = TTLocalizer.QuestsFactoryQuestDesc
else:
text = TTLocalizer.QuestsFactoryQuestDescI
objective = text % {'type': type}
location = self.getLocationName()
return TTLocalizer.QuestsFactoryQuestSCString % {'objective': objective,
'location': location}
def getHeadlineString(self):
return TTLocalizer.QuestsFactoryQuestHeadline
def doesFactoryCount(self, avId, location, avList):
return self.isLocationMatch(location)
class FactoryNewbieQuest(FactoryQuest, NewbieQuest):
def __init__(self, id, quest):
FactoryQuest.__init__(self, id, quest)
self.checkNewbieLevel(self.quest[2])
def getNewbieLevel(self):
return self.quest[2]
def getString(self):
return NewbieQuest.getString(self)
def getHeadlineString(self):
return TTLocalizer.QuestsNewbieQuestHeadline
def doesFactoryCount(self, avId, location, avList):
if FactoryQuest.doesFactoryCount(self, avId, location, avList):
return self.getNumNewbies(avId, avList)
else:
return 0
class MintQuest(LocationBasedQuest):
def __init__(self, id, quest):
LocationBasedQuest.__init__(self, id, quest)
self.checkNumMints(self.quest[1])
def getNumQuestItems(self):
return self.getNumMints()
def getNumMints(self):
return self.quest[1]
def getCompletionStatus(self, av, questDesc, npc = None):
questId, fromNpcId, toNpcId, rewardId, toonProgress = questDesc
questComplete = toonProgress >= self.getNumMints()
return getCompleteStatusWithNpc(questComplete, toNpcId, npc)
def getProgressString(self, avatar, questDesc):
#!/usr/bin/env python
"""Helper for observatory and device computed attributes, including aggregate status values"""
__author__ = '<NAME>, <NAME>'
from pyon.core import bootstrap
from pyon.core.exception import BadRequest
from pyon.public import RT, PRED, log
from interface.objects import DeviceStatusType, AggregateStatusType
class ObservatoryUtil(object):
def __init__(self, process=None, container=None, enhanced_rr=None, device_status_mgr=None):
self.process = process
self.container = container or bootstrap.container_instance
self.RR2 = enhanced_rr
self.RR = enhanced_rr or self.container.resource_registry if self.container else None
self.device_status_mgr = device_status_mgr
# -------------------------------------------------------------------------
# Resource registry access
def _set_enhanced_rr(self, enhanced_rr=None):
self.RR2 = enhanced_rr
def _get_predicate_assocs(self, predicate):
if self.RR2:
if not self.RR2.has_cached_predicate(predicate):
self.RR2.cache_predicate(predicate)
assoc_list = self.RR2.get_cached_associations(predicate)
else:
assoc_list = self.container.resource_registry.find_associations(predicate=predicate, id_only=False)
return assoc_list
def _find_objects(self, subject, predicate, object_type='', id_only=False):
if self.RR2:
return self.RR2.find_objects(subject, predicate, object_type, id_only=id_only), None
else:
return self.container.resource_registry.find_objects(subject, predicate, object_type, id_only=id_only)
# -------------------------------------------------------------------------
# Observatory site traversal
def get_child_sites(self, parent_site_id=None, org_id=None, exclude_types=None, include_parents=True, id_only=True):
"""
Returns all child sites and parent site for a given parent site_id.
Returns all child sites and org for a given org_id.
Return type is a tuple (site_resources, site_children) of two elements.
- site_resources is a dict mapping site_id to Site object (or None if id_only==True).
- site_children is a dict mapping site_id to a list of direct child site_ids.
@param include_parents if True, walk up the parents all the way to the root and include them in the result
@param id_only if True, return only ids: values in site_resources are None instead of Site objects
"""
if parent_site_id and org_id:
raise BadRequest("Either parent_site_id OR org_id supported!")
if exclude_types is None:
exclude_types = []
parents = self._get_site_parents() # Note: root elements are not in list
if org_id:
obsite_ids,_ = self._find_objects(org_id, PRED.hasResource, RT.Observatory, id_only=True)
if not obsite_ids:
return {}, {}
parent_site_id = org_id
for obsite_id in obsite_ids:
parents[obsite_id] = ('Observatory', org_id, 'Org')
elif parent_site_id:
if parent_site_id not in parents:
parents[parent_site_id] = ('Observatory', None, 'Org')
else:
raise BadRequest("Must provide either parent_site_id or org_id")
matchlist = [] # sites with wanted parent
ancestors = {} # child ids for sites in result set
for site_id, (st, parent_id, pt) in parents.iteritems():
# Iterate through sites and find the ones with a wanted parent
if st in exclude_types:
continue
parent_stack = [site_id, parent_id]
while parent_id:
# Walk up to parents
if parent_id == parent_site_id:
matchlist.append(site_id)
# Fill out ancestors
par = parent_stack.pop()
while parent_stack:
ch = parent_stack.pop()
if par not in ancestors:
ancestors[par] = []
if ch not in ancestors[par]:
ancestors[par].append(ch)
par = ch
parent_id = None
else:
_,parent_id,_ = parents.get(parent_id, (None,None,None))
parent_stack.append(parent_id)
# Go all the way up to the roots
if include_parents:
matchlist.append(parent_site_id)
child_id = parent_site_id
parent = parents.get(child_id, None)
while parent:
st, parent_id, pt = parent
if parent_id:
matchlist.append(parent_id)
if parent_id not in ancestors:
ancestors[parent_id] = []
ancestors[parent_id].append(child_id)
child_id = parent_id
parent = parents.get(child_id, None)
if id_only:
child_site_dict = dict(zip(matchlist, [None]*len(matchlist)))
else:
all_res = self.RR.read_mult(matchlist) if matchlist else []
child_site_dict = dict(zip([res._id for res in all_res], all_res))
return child_site_dict, ancestors
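# Illustrative call (IDs hypothetical): fetch the subtree under one site.
# site_res, site_children = ObservatoryUtil().get_child_sites(
#     parent_site_id="site_1", id_only=False)
# site_res maps every site in the result set (children plus parents up to the
# root) to its resource object; site_children maps each site id to the ids of
# its direct children, so the tree can be walked top-down.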
def _get_site_parents(self):
"""Returns a dict mapping a site_id to site type and parent site_id."""
# This function makes one RR call retrieving all hasSite associations.
# @TODO: see if this can be done with an id_only=False argument
parents = {}
assoc_list = self._get_predicate_assocs(PRED.hasSite)
for assoc in assoc_list:
parents[assoc.o] = (assoc.ot, assoc.s, assoc.st)
return parents
def get_device_relations(self, site_list):
"""
Returns a dict of site_id or device_id mapped to list of (site/device type, device_id, device type)
tuples, or None, based on hasDevice associations.
This is a combination of 2 results: site->device(primary) and device(parent)->device(child)
"""
assoc_list = self._get_predicate_assocs(PRED.hasDevice)
res_dict = {}
site_devices = self.get_site_devices(site_list, assoc_list=assoc_list)
res_dict.update(site_devices)
# Add information for each device
device_ids = [tuple_list[0][1] for tuple_list in site_devices.values() if tuple_list]
for device_id in device_ids:
res_dict.update(self.get_child_devices(device_id, assoc_list=assoc_list))
return res_dict
def get_site_devices(self, site_list, assoc_list=None):
"""
Returns a dict of site_id mapped to a list of (site type, device_id, device type) tuples,
based on hasDevice association for given site_list.
"""
site_devices = self._get_site_devices(assoc_list=assoc_list)
res_sites = {}
for site_id in site_list:
sd_tup = site_devices.get(site_id, None)
res_sites[site_id] = [sd_tup] if sd_tup else []
return res_sites
def _get_site_devices(self, assoc_list=None):
"""
Returns a dict of site_id mapped to a list of (site type, device_id, device type) tuples,
based on hasDevice association for all sites.
"""
sites = {}
if not assoc_list:
assoc_list = self._get_predicate_assocs(PRED.hasDevice)
for assoc in assoc_list:
if assoc.st in [RT.PlatformSite, RT.InstrumentSite]:
sites[assoc.s] = (assoc.st, assoc.o, assoc.ot)
return sites
def get_child_devices(self, device_id, assoc_list=None):
"""Returns a dict of keys device_id and all children of device_id to
lists of 3-tuples (parent type, child id, child type
"""
child_devices = self._get_child_devices(assoc_list=assoc_list)
all_children = set([device_id])
def add_children(dev_id):
ch_list = child_devices.get(dev_id, [])
for _,ch_id,_ in ch_list:
all_children.add(ch_id)
add_children(ch_id)
add_children(device_id)
for dev_id in list(child_devices.keys()):
if dev_id not in all_children:
del child_devices[dev_id]
for dev_id in all_children:
if dev_id not in child_devices:
child_devices[dev_id] = []
return child_devices
def _get_child_devices(self, assoc_list=None):
"""
Returns a dict mapping a device_id to parent type, child device_id, child type based on hasDevice association.
"""
sites = {}
if not assoc_list:
assoc_list = self._get_predicate_assocs(PRED.hasDevice)
for assoc in assoc_list:
if assoc.st in [RT.PlatformDevice, RT.InstrumentDevice] and assoc.ot in [RT.PlatformDevice, RT.InstrumentDevice]:
if assoc.s not in sites:
sites[assoc.s] = []
sites[assoc.s].append((assoc.st, assoc.o, assoc.ot))
return sites
def get_site_root(self, res_id, site_parents=None, ancestors=None):
if ancestors:
site_parents = {}
for site_id, ch_ids in ancestors.iteritems():
if ch_ids:
for ch_id in ch_ids:
site_parents[ch_id] = ('', site_id, '')
parent_id = res_id
parent = site_parents.get(parent_id, None)
while parent:
_,pid,_ = parent
parent_id = pid
parent = site_parents.get(parent_id, None)
return parent_id
# -------------------------------------------------------------------------
# Finding data products
def get_resource_data_products(self, res_list, assoc_list=None):
"""
Returns a dict of resource id mapped to data product id based on hasSource association.
"""
device_dps = self._get_data_products_by_source(assoc_list=assoc_list)
res_dps = {}
for dev_id in res_list:
res_dps[dev_id] = device_dps.get(dev_id, None)
return res_dps
def _get_data_products_by_source(self, assoc_list=None):
"""
Returns a dict of resource_id (site/device) mapped to data product id based on hasSource association.
"""
data_products = {}
if not assoc_list:
assoc_list = self._get_predicate_assocs(PRED.hasSource)
for assoc in assoc_list:
if assoc.st == RT.DataProduct:
data_products.setdefault(assoc.o, []).append(assoc.s)
return data_products
def get_site_data_products(self, res_id, res_type=None, include_sites=False, include_devices=False, include_data_products=False):
"""
Determines efficiently all data products for the given site and child sites.
For given site_id, first determine all child sites (following child hasSite associations).
Then find all currently primary devices to all child sites (following hasDevice associations).
Then find all data products that are derived from the devices (following hasSource associations).
@retval A dict containing the following keys:
"site_resources": A dict mapping site_id to Site resource object (if include_sites==True) or None
"site_children": A dict mapping site/org id to list of site ids for children
"site_devices": A dict mapping site id to tuple (site type, device id, device type)
"device_resources": A dict mapping device_id to Device object (if include_devices==True)
"device_data_products": A dict mapping device_id to data_product_id
"data_product_resources": A dict mapping data_product_id to DataProduct resource object
"""
res_obj = None
if not res_type:
res_obj = self.RR.read(res_id)
res_type = res_obj._get_type()
device_list = []
child_sites, site_devices, site_ancestors = None, None, None
if res_type in [RT.Org, RT.Observatory, RT.Subsite, RT.PlatformSite, RT.InstrumentSite]:
if res_type == RT.Org:
child_sites, site_ancestors = self.get_child_sites(org_id=res_id, include_parents=False, id_only=not include_devices)
else:
child_sites, site_ancestors = self.get_child_sites(parent_site_id=res_id, include_parents=False, id_only=not include_devices)
child_sites[res_id] = res_obj or self.RR.read(res_id) if include_sites else None
site_devices = self.get_device_relations(child_sites.keys())
device_list = list({tup[1] for key,dev_list in site_devices.iteritems() if dev_list for tup in dev_list})
elif res_type in [RT.PlatformDevice, RT.InstrumentDevice]:
# See if current device has child devices
device_list = list(set(self.get_child_devices(res_id)))
else:
raise BadRequest("Unsupported resource type: %s" % res_type)
device_objs = self.RR.read_mult(device_list) if include_devices else None
res_list = device_list + (child_sites.keys() if child_sites is not None else [])
device_dps = self.get_resource_data_products(res_list)
if include_data_products:
dpid_list = list({dp_id for device_id, dp_list in device_dps.iteritems() if dp_list is not None for dp_id in dp_list if dp_id is not None})
dpo_list = self.RR.read_mult(dpid_list)
dp_objs = dict(zip(dpid_list, dpo_list))
else:
dp_objs = None
res_dict = dict(
site_resources=child_sites,
site_children=site_ancestors,
site_devices=site_devices,
device_resources=device_objs,
device_data_products=device_dps,
data_product_resources=dp_objs,
)
return res_dict
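# Sketch (hypothetical IDs): resolve everything below an Observatory down to
# its data products with a handful of registry calls.
# res = ObservatoryUtil().get_site_data_products(
#     "obs_1", RT.Observatory, include_sites=True, include_data_products=True)
# dp_ids = [dp for dps in res["device_data_products"].values()
#           if dps for dp in dps]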
# -------------------------------------------------------------------------
# Status roll up
def get_status_roll_ups(self, res_id, res_type=None, include_structure=False):
"""
For given parent device/site/org res_id compute the status roll ups.
The result is a dict of id with value dict of status values.
Includes all parents of given resource.
float]]:
"""Returns the geometries' bounding box.
Returns:
tuple (xmin, ymin, xmax, ymax) for the bounding box or None if the LineCollection
is empty
"""
if len(self._lines) == 0:
return None
else:
return (
float(min((line.real.min() for line in self._lines))),
float(min((line.imag.min() for line in self._lines))),
float(max((line.real.max() for line in self._lines))),
float(max((line.imag.max() for line in self._lines))),
)
def width(self) -> float:
"""Returns the total width of the geometries.
Returns:
the width (xmax - xmin) or 0.0 if the LineCollection is empty
"""
if self._lines:
return float(
max((line.real.max() for line in self._lines))
- min((line.real.min() for line in self._lines))
)
else:
return 0.0
def height(self) -> float:
"""Returns the total height of the geometries.
Returns:
the height (ymax - ymin) or 0.0 if the LineCollection is empty
"""
if self._lines:
return float(
max((line.imag.max() for line in self._lines))
- min((line.imag.min() for line in self._lines))
)
else:
return 0.0
def length(self) -> float:
"""Return the total length of the paths.
Returns:
the total length
"""
return sum(np.sum(np.abs(np.diff(line))) for line in self._lines)
def pen_up_trajectories(self) -> "LineCollection":
"""Returns a LineCollection containing the pen-up trajectories."""
return LineCollection(
([self._lines[i][-1], self._lines[i + 1][0]] for i in range(len(self._lines) - 1)),
)
def pen_up_length(self) -> Tuple[float, float, float]:
"""Returns statistics on the pen-up distance corresponding to the path.
The total, mean, and median distance are returned. The pen-up distance is the distance
between a path's end and the next path's beginning.
Returns:
tuple (total, mean, median) for the pen-up distances
"""
if len(self.lines) < 2:
return 0.0, 0.0, 0.0
ends = np.array([line[-1] for line in self.lines[:-1]])
starts = np.array([line[0] for line in self.lines[1:]])
dists = np.abs(starts - ends)
# noinspection PyTypeChecker
return float(np.sum(dists)), float(np.mean(dists)), float(np.median(dists))
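# Small worked example of the pen-up metrics (assumes lines are numpy arrays
# of complex numbers encoding (x, y) points, as elsewhere in vpype):
# lc = LineCollection([np.array([0 + 0j, 1 + 0j]), np.array([3 + 0j, 4 + 0j])])
# lc.pen_up_length()  # one jump from 1+0j to 3+0j -> (2.0, 2.0, 2.0)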
def segment_count(self) -> int:
"""Returns the total number of segment across all lines.
Returns:
the total number of segments in the geometries
"""
return sum(max(0, len(line) - 1) for line in self._lines)
class Document:
"""This class is the core data model of vpype and represent the data that is passed from
one command to the other. At its core, a Document is a collection of layers identified
by non-zero positive integers and each represented by a :py:class:`LineCollection`.
In addition, the Document class maintains a :py:attr:`page_size` attribute which describe
the physical size of the document. This attribute is not strictly linked to the actual
Document's content, but can be set based on it.
"""
def __init__(
self,
line_collection: LineCollection = None,
page_size: Optional[Tuple[float, float]] = None,
):
"""Create a Document, optionally providing a :py:class:`LayerCollection` for layer 1.
Args:
line_collection: if provided, used as layer 1
"""
self._layers: Dict[int, LineCollection] = {}
self._page_size: Optional[Tuple[float, float]] = page_size
if line_collection:
self.add(line_collection, 1)
def empty_copy(self) -> "Document":
"""Create an empty copy of this document with the same page size."""
return Document(page_size=self.page_size)
@property
def layers(self) -> Dict[int, LineCollection]:
"""Returns a reference to the layer dictionary.
Returns:
the internal layer dictionary
"""
return self._layers
@property
def page_size(self) -> Optional[Tuple[float, float]]:
"""Returns the page size or None if it hasn't been set."""
return self._page_size
@page_size.setter
def page_size(self, page_size: Optional[Tuple[float, float]]) -> None:
"""Sets the page size to a new value."""
self._page_size = page_size
def extend_page_size(self, page_size: Optional[Tuple[float, float]]) -> None:
"""Adjust the page sized according to the following logic:
- if ``page_size`` is None, the page size is unchanged
- if ``self.page_size`` is None, it is set to ``page_size``
- if both page sizes are not None, the page size is set to the largest value in
both direction
Args:
page_size: page dimension to use to update ``self.page_size``
"""
if page_size:
if self.page_size:
self.page_size = (
max(self.page_size[0], page_size[0]),
max(self.page_size[1], page_size[1]),
)
else:
self.page_size = page_size
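# Example of the merge rule above:
# doc = Document(page_size=(100.0, 200.0))
# doc.extend_page_size((150.0, 50.0))
# doc.page_size  # -> (150.0, 200.0): the max is taken per axis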
def ids(self) -> Iterable[int]:
"""Returns the list of layer IDs"""
return self._layers.keys()
def layers_from_ids(self, layer_ids: Iterable[int]) -> Iterator[LineCollection]:
"""Returns an iterator that yield layers corresponding to the provided IDs, provided
they exist. This is typically used to process a command's layer list option, in
combination with :py:func:`multiple_to_layer_ids`.
Non-existent layer IDs in the input are ignored.
Args:
layer_ids: iterable of layer IDs
Returns:
layer iterator
"""
return (self._layers[lid] for lid in layer_ids if lid in self._layers)
def exists(self, layer_id: int) -> bool:
"""Test existence of a layer.
Note that existence of a layer does not necessarily imply that it isn't empty.
Args:
layer_id: layer ID to test
Returns:
True if the layer ID exists
"""
return layer_id in self._layers
def __getitem__(self, layer_id: int):
return self._layers.__getitem__(layer_id)
def __setitem__(self, layer_id: int, value: LineCollectionLike):
if layer_id < 1:
raise ValueError(f"expected non-null, positive layer id, got {layer_id} instead")
if isinstance(value, LineCollection):
self._layers[layer_id] = value
else:
self._layers[layer_id] = LineCollection(value)
def free_id(self) -> int:
"""Returns the lowest unused layer id.
Returns:
the unused layer ID
"""
vid = 1
while vid in self._layers:
vid += 1
return vid
def add(self, lc: LineCollection, layer_id: Union[None, int] = None) -> None:
"""Add a the content of a :py:class:`LineCollection` to a given layer.
If the given layer is None, the input LineCollection is used to create a new layer
using the lowest available layer ID.
"""
if layer_id is None:
layer_id = 1
while layer_id in self._layers:
layer_id += 1
if layer_id in self._layers:
self._layers[layer_id].extend(lc)
else:
self._layers[layer_id] = lc
def extend(self, doc: "Document") -> None:
"""Extend a Document with the content of another Document.
        The layer structure of the source Document is maintained: geometries are either
        appended to the destination's corresponding layer, or a new layer is created,
        depending on whether that layer already exists in the destination Document.
The :py:attr:`page_size` attribute is adjusted using :meth:`extend_page_size`.
Args:
doc: source Document
"""
self.extend_page_size(doc.page_size)
for layer_id, layer in doc.layers.items():
self.add(layer, layer_id)
def is_empty(self) -> bool:
"""Returns True if all layers are empty.
Returns:
True if all layers are empty"""
for layer in self.layers.values():
if not layer.is_empty():
return False
return True
def pop(self, layer_id: int) -> LineCollection:
"""Removes a layer from the Document.
Args:
layer_id: ID of the layer to be removed
Returns:
the :py:class:`LineCollection` corresponding to the removed layer
"""
return self._layers.pop(layer_id)
def count(self) -> int:
"""Returns the total number of layers.
Returns:
            total number of layers"""
return len(self._layers.keys())
def translate(self, dx: float, dy: float) -> None:
"""Translates all line by a given offset.
Args:
dx: offset along X axis
dy: offset along Y axis
"""
for layer in self._layers.values():
layer.translate(dx, dy)
def scale(self, sx: float, sy: Optional[float] = None) -> None:
"""Scale the geometry.
The scaling is performed about the coordinates origin (0, 0). To scale around a
specific location, appropriate translations must be performed before and after the
scaling (see :func:`LineCollection.scale`).
Args:
sx: scale factor along x
sy: scale factor along y (if None, then sx is used)
"""
for layer in self._layers.values():
layer.scale(sx, sy)
def rotate(self, angle: float) -> None:
"""Rotate the Document's content..
The rotation is performed about the coordinates origin (0, 0). To rotate around a
specific location, appropriate translations must be performed before and after the
scaling (see :func:`LineCollection.rotate`).
Args:
angle: rotation angle (radian)
"""
for layer in self._layers.values():
layer.rotate(angle)
def bounds(
self, layer_ids: Union[None, Iterable[int]] = None
) -> Optional[Tuple[float, float, float, float]]:
"""Compute bounds of the document.
If layer_ids is provided, bounds are computed only for the corresponding IDs.
Note: the bounds are computed based on the actual geometries contained in this
:class:`Document` instance. The document's page size, if any, is not taken into account
by this calculation.
Args:
layer_ids: layers to consider in the bound calculation
Returns:
boundaries of the geometries
"""
if layer_ids is None:
layer_ids = self.ids()
a = np.array(
[
self._layers[vid].bounds()
for vid in layer_ids
if self.exists(vid) and len(self._layers[vid]) > 0
]
)
if len(a) > 0:
return a[:, 0].min(), a[:, 1].min(), a[:, 2].max(), a[:, 3].max()
else:
return None
    def crop(self, x1: float, y1: float, x2: float, y2: float) -> None:
        """Crop all layers to a rectangular area.
        Args:
            x1, y1: first corner of the crop area
            x2, y2: second corner of the crop area
        """
        for layer in self._layers.values():
            layer.crop(x1, y1, x2, y2)
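# usage sketch (illustrative; assumes LineCollection() constructs an empty
# layer, per the LineCollection API used throughout this module):
#
#     doc = Document(page_size=(210.0, 297.0))
#     doc.add(LineCollection(), 1)    # explicit layer ID
#     doc.add(LineCollection())       # lowest free ID, here 2
#     doc.translate(10.0, 20.0)       # applied to every layer
#     doc.bounds()                    # None while all layers are empty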
import copy
import enum
import json
import os
import iwp.labels
# module for all things related to Scalabel frames.
# NOTE: we encode slice parameters in the Scalabel frame's name to support a
# full round trip during labeling. without this, we do not have a way to
# convey metadata about the frame that is needed when we convert to/from
# canonical IWP label format with the following:
#
# 1. build Scalabel frames
# 2. set canonical IWP labels on the associated Scalabel frames
# 3. label using the Scalabel frames
# 4. extract IWP labels from Scalabel frames
# 5. normalize IWP labels to create new canonical labels
#
# enumeration of labeling strategies for Scalabel playlists:
#
# no_order - frames have no special ordering and may be presented in any
# order.
# xy_slices - frames are sorted such that each location within the data volume
# is grouped together. this results in all of one XY slice's data
# being temporally ordered before another XY slice is visited.
# z_stacks - frames are sorted such that XY slices within a single time step
# are grouped together. this results in full stacks of XY slices
# for a single time step being grouped together.
# variables - frames are sorted such that variables from a single XY slice
# are grouped together.
#
# NOTE: this does not appear to be a useful sort order and will
# likely be removed in the future.
#
@enum.unique
class LabelingStrategyType( enum.Enum ):
NO_ORDER = 1
XY_SLICES = 2
Z_STACKS = 3
VARIABLES = 4
def build_slice_name( experiment_name, variable_name, time_index, xy_slice_index ):
"""
Builds a unique name for a slice based on the experiment, variable, and location
within the dataset.
Takes 4 arguments:
experiment_name - String specifying the experiment that generated the slice.
variable_name - String specifying the variable associated with the slice.
time_index - Non-negative index specifying the time step associated with
the slice.
xy_slice_index - Non-negative index specifying the XY slice.
Returns 1 value:
slice_name - String containing the constructed name.
"""
return "{:s}-{:s}-z={:03d}-Nt={:03d}".format(
experiment_name,
variable_name,
xy_slice_index,
time_index )
def slice_name_to_components( slice_name ):
"""
Decomposes a slice name into a map of its unique components. This is the
inverse of build_slice_name(). Also handles slice_name's which have been
converted into a path or URL as a prefix.
Takes 1 argument:
slice_name - String specifying the slice's name, as generated by build_slice_name().
Returns 1 value:
slice_map - Dictionary containing the decomposed metadata from slice_name.
Has the following keys:
"experiment": Experiment name
"variable": Variable name
"z_index": XY slice index
"time_step_index": Time step index
"""
slice_components = slice_name.split( "-" )
# handle slice names that have been turned into paths with extensions.
if "." in slice_components[-1]:
slice_components[-1] = slice_components[-1].split( "." )[0]
# map the individual components to their names.
#
# NOTE: we use negative indexing to handle the case where the experiment
# name may contain one or more hyphens.
#
slice_map = {
"experiment": "-".join( slice_components[:-3] ),
"variable": slice_components[-3],
"z_index": int( slice_components[-2].split( "=" )[1] ),
"time_step_index": int( slice_components[-1].split( "=" )[1] )
}
return slice_map
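# round-trip sketch (illustrative values): build_slice_name() and
# slice_name_to_components() are inverses, even when the experiment name
# contains hyphens.
#
#     build_slice_name( "exp-01", "temperature", 12, 5 )
#         -> "exp-01-temperature-z=005-Nt=012"
#     slice_name_to_components( "exp-01-temperature-z=005-Nt=012" )
#         -> {"experiment": "exp-01", "variable": "temperature",
#             "z_index": 5, "time_step_index": 12}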
def build_slice_video_name( playlist_strategy, experiment_name, variable_name, time_index, xy_slice_index ):
"""
    Builds a video name for a slice based on the experiment, variable, and location
within the dataset. The structure of the returned name is governed by the
strategy specified which ultimately controls how the slice is sorted within a
playlist when loaded into Scalabel.ai.
The returned names' structures are as follows:
NO_ORDER - <experiment>
XY_SLICES - <experiment>-<variable>-z=<slice_index>
Z_STACKS - <experiment>-<variable>-Nt=<time_index>
VARIABLES - <experiment>-z=<slice_index>-Nt=<time_index>
Takes 5 arguments:
playlist_strategy - Enumeration of type iwp.labels.scalabel.LabelingStrategyType
that controls the sort order of a frame containing the
returned video name. See the description above for the
individual sort strategies.
experiment_name - String specifying the experiment that generated the slice.
variable_name - String specifying the variable associated with the slice.
time_index - Non-negative index specifying the time step associated with
the slice.
xy_slice_index - Non-negative index specifying the XY slice.
Returns 1 value:
video_name - String containing the constructed video name.
"""
# default to the experiment name which makes all slices equal to each other.
# this results in playlists retaining their natural ordering.
video_name = experiment_name
if playlist_strategy == LabelingStrategyType.XY_SLICES:
video_name = "{:s}-{:s}-z={:03d}".format(
experiment_name,
variable_name,
xy_slice_index )
elif playlist_strategy == LabelingStrategyType.Z_STACKS:
video_name = "{:s}-{:s}-Nt={:03d}".format(
experiment_name,
variable_name,
time_index )
    elif playlist_strategy == LabelingStrategyType.VARIABLES:
        video_name = "{:s}-z={:03d}-Nt={:03d}".format(
            experiment_name,
            xy_slice_index,
            time_index )
return video_name
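# sanity check of the per-strategy grouping (illustrative arguments):
#
#     build_slice_video_name( LabelingStrategyType.NO_ORDER,  "exp01", "temp", 12, 5 )
#         -> "exp01"
#     build_slice_video_name( LabelingStrategyType.XY_SLICES, "exp01", "temp", 12, 5 )
#         -> "exp01-temp-z=005"
#     build_slice_video_name( LabelingStrategyType.Z_STACKS,  "exp01", "temp", 12, 5 )
#         -> "exp01-temp-Nt=012"
#     build_slice_video_name( LabelingStrategyType.VARIABLES, "exp01", "temp", 12, 5 )
#         -> "exp01-z=005-Nt=012"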
def build_slice_path( data_root, data_suffix, experiment_name, variable_name, time_index, xy_slice_index, index_precision=3 ):
"""
Returns the on-disk path to a specific slice. The path generated has the following
form:
<root>/<variable>/<experiment>-<variable>-z=<slice>-Nt=<time><suffix>
<slice> and <time> are zero-padded integers formatted according to an
optional precision parameter.
Takes 7 arguments:
data_root - String specifying the root on-disk path for the slice.
data_suffix - String specifying the path suffix for the slice.
experiment_name - String specifying the experiment that generated the slice.
variable_name - String specifying the variable associated with the slice.
time_index - Non-negative index specifying the time step associated with
the slice.
xy_slice_index - Non-negative index specifying the XY slice.
index_precision - Optional non-negative integer specifying the precision used
when formatting "<slice>" and "<time>". If omitted, defaults
to 3.
Returns 1 value:
      slice_path - String specifying the constructed path.
"""
return "{:s}/{:s}/{:s}-{:s}-z={:0{index_precision}d}-Nt={:0{index_precision}d}.png".format(
data_root,
variable_name,
experiment_name,
variable_name,
xy_slice_index,
time_index,
index_precision=index_precision )
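# example path construction (illustrative arguments):
#
#     build_slice_path( "/data/iwp", ".png", "exp-01", "temperature", 12, 5 )
#         -> "/data/iwp/temperature/exp-01-temperature-z=005-Nt=012.png"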
def build_slice_url( url_prefix, slice_path, number_components=0 ):
"""
Returns the URL to a specific slice's on-disk path. The URL prefix is combined with
a portion of the specified path to create the slice URL.
Takes 3 arguments:
url_prefix - URL prefix of the slice.
slice_path - Path to the slice's item. May be either absolute or relative.
number_components - Optional, non-negative integer specifying the number of leading
components to remove from slice_path. No components are
removed when number_components is zero.
    Returns 1 value:
slice_url - Combination of url_prefix and slice_path with number_components-many
path components removed from slice_path.
"""
path_components = slice_path.split( "/" )
# handle absolute paths. these generate an empty component which should be
# ignored to make number_components consistent between absolute and relative
# paths.
if slice_path.startswith( "/" ):
path_components = path_components[1:]
if number_components < 0:
raise ValueError( "Invalid number of components specified! ({})".format(
number_components ) )
if number_components >= len( path_components ):
raise IndexError( "Can't remove {:d} components from {:s} - only has {:d}.".format(
number_components,
slice_path,
len( path_components ) ) )
return "{:s}/{:s}".format(
url_prefix,
"/".join( path_components[number_components:] ) )
def build_scalabel_frames( experiment_name,
variables_list,
time_range,
xy_slice_range,
data_root,
data_suffix,
url_prefix,
component_count,
labeling_strategy=LabelingStrategyType.NO_ORDER,
check_data_flag=False ):
"""
Builds a sequence of minimal, Scalabel frames according to the slice metadata provided.
Serializing the generated frames is sufficient for an Items list to start a new Scalabel.ai
video labeling project.
Frames are constructed in (Z, time, variable) order in the generated structure
    though they are sorted by Scalabel.ai when loaded. The labeling order within the
application is governed by the labeling strategy specified:
NO_ORDER - No particular order is specified. All frames are from the same
"video" and Scalabel.ai sorts by time stamp and frame name.
XY_SLICES - Frames are sorted by location within the dataset. Each XY slice
is from the same "video" which results in each of its time steps
being grouped together.
Z_STACKS - Frames are sorted by time within the dataset. Each time step is
from the same "video" which results in each of its XY slices being
grouped together (in a stack).
VARIABLES - Frames are sorted by time and location within the dataset. Each
XY slice, per time step, is from the same "video" which results in
each of its variables being grouped together. There is no guarantee
that adjacent slices, either in XY slice or time step order, are
consecutive.
NOTE: None of the frames generated have labels. These must be set by hand or with
set_iwp_labels().
The supplied experiment name is used as the underlying video name, and individual
frame names are constructed by build_slice_name(). Frame URLs are constructed by
build_slice_path() and build_slice_url().
    Raises FileNotFoundError if a datum associated with a frame does not exist on disk
    and check_data_flag is True.
    """
<reponame>muntaza/Open-Aset<filename>openaset/gedungbangunan/migrations/0001_initial.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('umum', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='FotoGedungBangunan',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column=b'id')),
('foto', models.FileField(help_text=b'PERINGATAN: Hanya Foto Aset dan Hasil Scan File Dokumen Kepemilikan, Bukan Foto Pengguna Aset!!!', upload_to=b'gedung_bangunan/', verbose_name=b'Foto', db_column=b'foto')),
('tanggal', models.DateField(help_text=b'Tanggal Foto yang di Upload', null=True, verbose_name=b'Tanggal', db_column=b'tanggal', blank=True)),
('catatan', models.CharField(help_text=b'Catatan Mengenai File yang di Upload', max_length=200, verbose_name=b'Catatan', db_column=b'catatan')),
],
options={
'db_table': 'foto_gedung_bangunan',
'verbose_name': 'Foto Gedung Bangunan',
'verbose_name_plural': 'Foto Gedung Bangunan',
},
),
migrations.CreateModel(
name='GedungBangunan',
fields=[
('id', models.AutoField(serialize=False, verbose_name=b'Register', primary_key=True, db_column=b'id')),
('nama_barang', models.CharField(max_length=300, verbose_name=b'Nama Barang', db_column=b'nama_barang')),
('tanggal_dokumen_gedung', models.DateField(null=True, verbose_name=b'Tanggal Dokumen Gedung', db_column=b'tanggal_dokumen_gedung', blank=True)),
('nomor_dokumen_gedung', models.CharField(max_length=300, null=True, verbose_name=b'Nomor Dokumen Gedung', db_column=b'nomor_dokumen_gedung', blank=True)),
('banyak_barang', models.IntegerField(default=1, verbose_name=b'Banyak Barang', db_column=b'banyak_barang')),
('keterangan', models.TextField(null=True, verbose_name=b'Keterangan', db_column=b'keterangan', blank=True)),
],
options={
'db_table': 'gedung_bangunan',
'verbose_name': 'Gedung Bangunan',
'verbose_name_plural': 'Gedung Bangunan',
},
),
migrations.CreateModel(
name='HargaGedungBangunan',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column=b'id')),
('luas_lantai', models.DecimalField(default=0, decimal_places=1, verbose_name=b'Luas Lantai (m2)', max_digits=10, db_column=b'luas_lantai')),
('harga_bertambah', models.DecimalField(default=0, decimal_places=0, verbose_name=b'Harga Bertambah', max_digits=15, db_column=b'harga_bertambah')),
('harga_berkurang', models.DecimalField(default=0, decimal_places=0, verbose_name=b'Harga Berkurang', max_digits=15, db_column=b'harga_berkurang')),
('catatan', models.CharField(help_text=b'Catatan pada Daftar Pengadaan', max_length=250, verbose_name=b'Catatan', db_column=b'catatan')),
('id_asal_usul', models.ForeignKey(db_column=b'id_asal_usul', verbose_name=b'Asal Usul', to='umum.AsalUsul')),
],
options={
'db_table': 'harga_gedung_bangunan',
'verbose_name': 'Harga Gedung Bangunan',
'verbose_name_plural': 'Harga Gedung Bangunan',
},
),
migrations.CreateModel(
name='KontrakGedungBangunan',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column=b'id')),
('pihak_ketiga', models.CharField(max_length=200, verbose_name=b'Pihak Ketiga', db_column=b'pihak_ketiga')),
('nomor_kontrak', models.CharField(max_length=200, null=True, verbose_name=b'Nomor Kontrak', db_column=b'nomor_kontrak', blank=True)),
('tanggal_kontrak', models.DateField(null=True, verbose_name=b'Tanggal Kontrak', db_column=b'tanggal_kontrak', blank=True)),
('nomor_sp2d', models.CharField(max_length=200, verbose_name=b'Nomor SP2D', db_column=b'nomor_sp2d')),
('tanggal_sp2d', models.DateField(null=True, verbose_name=b'Tanggal SP2D', db_column=b'tanggal_sp2d', blank=True)),
('id_skpd', models.ForeignKey(db_column=b'id_skpd', verbose_name=b'SKPD', to='umum.SKPD')),
],
options={
'db_table': 'kontrak_gedung_bangunan',
'verbose_name': 'Kontrak Gedung Bangunan',
'verbose_name_plural': 'Kontrak Gedung Bangunan',
},
),
migrations.CreateModel(
name='PenambahanUmur',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column=b'id')),
('kode_barang', models.CharField(max_length=50, verbose_name=b'Kode Barang', db_column=b'kode_barang')),
('umur', models.IntegerField(verbose_name=b'Umur', db_column=b'umur')),
],
options={
'ordering': ['kode_barang', 'persen'],
'db_table': 'penambahan_umur',
'verbose_name': 'Penambahan Umur',
'verbose_name_plural': 'Penambahan Umur',
},
),
migrations.CreateModel(
name='Persen',
fields=[
('persen', models.IntegerField(serialize=False, primary_key=True, db_column=b'persen')),
],
options={
'ordering': ['persen'],
'db_table': 'persen',
'verbose_name': 'Persen',
'verbose_name_plural': 'Persen',
},
),
migrations.CreateModel(
name='Ruangan',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column=b'id')),
('kode_ruangan', models.CharField(max_length=10, verbose_name=b'Kode Ruangan', db_column=b'kode_ruangan')),
('nama_ruangan', models.CharField(max_length=250, verbose_name=b'Nama Ruangan', db_column=b'nama_ruangan')),
],
options={
'ordering': ['id_gedung_bangunan', 'kode_ruangan'],
'db_table': 'ruangan',
'verbose_name': 'Ruangan',
'verbose_name_plural': 'Ruangan',
},
),
migrations.CreateModel(
name='StatusBeton',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column=b'id')),
('status_beton', models.CharField(unique=True, max_length=100, verbose_name=b'Status Beton', db_column=b'status_beton')),
],
options={
'db_table': 'status_beton',
'verbose_name': 'Status Beton',
'verbose_name_plural': 'Status Beton',
},
),
migrations.CreateModel(
name='StatusTingkat',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column=b'id')),
('status_tingkat', models.CharField(unique=True, max_length=100, verbose_name=b'Status Tingkat', db_column=b'status_tingkat')),
],
options={
'db_table': 'status_tingkat',
'verbose_name': 'Status Tingkat',
'verbose_name_plural': 'Status Tingkat',
},
),
migrations.CreateModel(
name='PemanfaatanGedungBangunan',
fields=[
('id', models.OneToOneField(primary_key=True, db_column=b'id_gedung_bangunan', serialize=False, to='gedungbangunan.GedungBangunan')),
('id_jenis_pemanfaatan', models.ForeignKey(db_column=b'id_jenis_pemanfaatan', verbose_name=b'Jenis Pemanfaatan', to='umum.JenisPemanfaatan')),
],
options={
'db_table': 'pemanfaatan_gedung_bangunan',
'verbose_name': 'Pemanfaatan Gedung Bangunan',
'verbose_name_plural': 'Pemanfaatan Gedung Bangunan',
},
),
migrations.CreateModel(
name='PenghapusanGedungBangunan',
fields=[
('id', models.OneToOneField(primary_key=True, db_column=b'id_gedung_bangunan', serialize=False, to='gedungbangunan.GedungBangunan')),
('id_sk_penghapusan', models.ForeignKey(db_column=b'id_sk_penghapusan', verbose_name=b'SK Penghapusan', to='umum.SKPenghapusan')),
],
options={
'db_table': 'penghapusan_gedung_bangunan',
'verbose_name': 'Penghapusan Gedung Bangunan',
'verbose_name_plural': 'Penghapusan Gedung Bangunan',
},
),
migrations.CreateModel(
name='SKPDAsalGedungBangunan',
fields=[
('id', models.OneToOneField(primary_key=True, db_column=b'id_gedung_bangunan', serialize=False, to='gedungbangunan.GedungBangunan')),
('id_skpd', models.ForeignKey(db_column=b'id_skpd', verbose_name=b'SKPD', to='umum.SKPD')),
],
options={
'db_table': 'skpd_asal_gedung_bangunan',
'verbose_name': 'SKPD Asal Gedung Bangunan',
'verbose_name_plural': 'SKPD Asal Gedung Bangunan',
},
),
migrations.CreateModel(
name='SKPDTujuanGedungBangunan',
fields=[
('id', models.OneToOneField(primary_key=True, db_column=b'id_gedung_bangunan', serialize=False, to='gedungbangunan.GedungBangunan')),
('id_skpd', models.ForeignKey(db_column=b'id_skpd', verbose_name=b'SKPD', to='umum.SKPD')),
],
options={
'db_table': 'skpd_tujuan_gedung_bangunan',
'verbose_name': 'SKPD Tujuan Gedung Bangunan',
'verbose_name_plural': 'SKPD Tujuan Gedung Bangunan',
},
),
migrations.CreateModel(
name='TahunBerkurangGedungBangunan',
fields=[
('id', models.OneToOneField(primary_key=True, db_column=b'id_gedung_bangunan', serialize=False, to='gedungbangunan.GedungBangunan')),
('tahun_berkurang', models.ForeignKey(db_column=b'tahun_berkurang', verbose_name=b'Tahun Berkurang', to='umum.Tahun')),
],
options={
'db_table': 'tahun_berkurang_gedung_bangunan',
'verbose_name': 'Tahun Berkurang Gedung Bangunan',
'verbose_name_plural': 'Tahun Berkurang Gedung Bangunan',
},
),
migrations.CreateModel(
name='TahunBerkurangUsulHapusGedung',
fields=[
('id', models.OneToOneField(primary_key=True, db_column=b'id_gedung_bangunan', serialize=False, to='gedungbangunan.GedungBangunan')),
('tahun_berkurang', models.ForeignKey(db_column=b'tahun_berkurang', verbose_name=b'Tahun Berkurang', to='umum.Tahun')),
],
options={
'db_table': 'tahun_berkurang_usul_hapus_gedung',
'verbose_name': 'Tahun Berkurang Usul Hapus Gedung',
'verbose_name_plural': 'Tahun Berkurang Usul Hapus Gedung',
},
),
migrations.AddField(
model_name='ruangan',
name='id_gedung_bangunan',
field=models.ForeignKey(db_column=b'id_gedung_bangunan', verbose_name=b'Gedung Bangunan', to='gedungbangunan.GedungBangunan'),
),
migrations.AddField(
model_name='penambahanumur',
name='persen',
field=models.ForeignKey(db_column=b'persen', verbose_name=b'Persen', to='gedungbangunan.Persen'),
),
migrations.AddField(
model_name='hargagedungbangunan',
name='id_gedung_bangunan',
field=models.ForeignKey(db_column=b'id_gedung_bangunan', verbose_name=b'Gedung Bangunan', to='gedungbangunan.GedungBangunan'),
),
migrations.AddField(
model_name='hargagedungbangunan',
name='id_kontrak_gedung_bangunan',
field=models.ForeignKey(db_column=b'id_kontrak_gedung_bangunan', verbose_name=b'Kontrak Gedung Bangunan', to='gedungbangunan.KontrakGedungBangunan'),
),
migrations.AddField(
model_name='hargagedungbangunan',
name='tahun',
field=models.ForeignKey(db_column=b'tahun', verbose_name=b'Tahun', to='umum.Tahun', help_text=b'Tahun Anggaran'),
),
migrations.AddField(
model_name='hargagedungbangunan',
name='tahun_mutasi',
field=models.ForeignKey(related_name='+', db_column=b'tahun_mutasi', to='umum.Tahun', blank=True, help_text=b'Tahun Mutasi', null=True, verbose_name=b'Tahun Mutasi'),
),
migrations.AddField(
model_name='gedungbangunan',
name='id_golongan_barang',
field=models.ForeignKey(db_column=b'id_golongan_barang', default=3, verbose_name=b'Golongan Barang', to='umum.GolonganBarang'),
),
migrations.AddField(
model_name='gedungbangunan',
name='id_keadaan_barang',
field=models.ForeignKey(db_column=b'id_keadaan_barang', default=1, verbose_name=b'Keadaan Barang', to='umum.KeadaanBarang'),
),
migrations.AddField(
model_name='gedungbangunan',
name='id_kode_barang',
field=models.ForeignKey(db_column=b'id_kode_barang', verbose_name=b'Kode Barang', to='umum.KodeBarang'),
),
migrations.AddField(
model_name='gedungbangunan',
name='id_mutasi_berkurang',
field=models.ForeignKey(db_column=b'id_mutasi_berkurang', default=5, verbose_name=b'Mutasi Berkurang', to='umum.MutasiBerkurang'),
),
migrations.AddField(
model_name='gedungbangunan',
name='id_satuan_barang',
field=models.ForeignKey(db_column=b'id_satuan_barang', verbose_name=b'Satuan Barang', to='umum.SatuanBarang'),
),
migrations.AddField(
model_name='gedungbangunan',
name='id_status_beton',
field=models.ForeignKey(db_column=b'id_status_beton', verbose_name=b'Status Beton', to='gedungbangunan.StatusBeton'),
),
migrations.AddField(
model_name='gedungbangunan',
name='id_status_tingkat',
field=models.ForeignKey(db_column=b'id_status_tingkat', verbose_name=b'Status Tingkat', to='gedungbangunan.StatusTingkat'),
),
migrations.AddField(
model_name='gedungbangunan',
name='id_sub_skpd',
field=models.ForeignKey(db_column=b'id_sub_skpd', verbose_name=b'SUB SKPD', to='umum.SUBSKPD'),
),
migrations.AddField(
model_name='gedungbangunan',
name='id_tanah',
field=models.ForeignKey(db_column=b'id_tanah', verbose_name=b'Tanah', to='umum.Tanah'),
),
migrations.AddField(
model_name='gedungbangunan',
name='tahun',
field=models.ForeignKey(db_column=b'tahun', verbose_name=b'Tahun Awal', to='umum.Tahun', help_text=b'Tahun Awal Kapitalisasi'),
),
migrations.AddField(
model_name='fotogedungbangunan',
name='id_gedung_bangunan',
field=models.ForeignKey(db_column=b'id_gedung_bangunan', verbose_name=b'Gedung Bangunan', to='gedungbangunan.GedungBangunan'),
),
migrations.CreateModel(
name='FotoGedungBangunanAwayan',
fields=[
],
options={
'verbose_name': '34 Foto Gedung Awayan',
'proxy': True,
'verbose_name_plural': '34 Foto Gedung Awayan',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanBAPPEDA',
fields=[
],
options={
'verbose_name': '21 Foto Gedung BAPPEDA',
'proxy': True,
'verbose_name_plural': '21 Foto Gedung BAPPEDA',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanBatumandi',
fields=[
],
options={
'verbose_name': '32 Foto Gedung Batumandi',
'proxy': True,
'verbose_name_plural': '32 Foto Gedung Batumandi',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanBatuPiring',
fields=[
],
options={
'verbose_name': '37 Foto Gedung Batu Piring',
'proxy': True,
'verbose_name_plural': '37 Foto Gedung Batu Piring',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanBKD',
fields=[
],
options={
'verbose_name': '19 Foto Gedung BKD',
'proxy': True,
'verbose_name_plural': '19 Foto Gedung BKD',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanBKPPD',
fields=[
],
options={
'verbose_name': '26 Foto Gedung BKPPD',
'proxy': True,
'verbose_name_plural': '26 Foto Gedung BKPPD',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanBPBD',
fields=[
],
options={
'verbose_name': '39 Foto Gedung BPBD',
'proxy': True,
'verbose_name_plural': '39 Foto Gedung BPBD',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanBPPD',
fields=[
],
options={
'verbose_name': '48 Foto Gedung BPPD',
'proxy': True,
'verbose_name_plural': '48 Foto Gedung BPPD',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanDinkes',
fields=[
],
options={
'verbose_name': '05 Foto Gedung Dinkes',
'proxy': True,
'verbose_name_plural': '05 Foto Gedung Dinkes',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanDinkesAwayan',
fields=[
],
options={
'verbose_name': '05 Foto Gedung Dinkes Awayan',
'proxy': True,
'verbose_name_plural': '05 Foto Gedung Dinkes Awayan',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanDinkesBatumandi',
fields=[
],
options={
'verbose_name': '05 Foto Gedung Dinkes Batumandi',
'proxy': True,
'verbose_name_plural': '05 Foto Gedung Dinkes Batumandi',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanDinkesHalong',
fields=[
],
options={
'verbose_name': '05 Foto Gedung Dinkes Halong',
'proxy': True,
'verbose_name_plural': '05 Foto Gedung Dinkes Halong',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanDinkesJuai',
fields=[
],
options={
'verbose_name': '05 Foto Gedung Dinkes Juai',
'proxy': True,
'verbose_name_plural': '05 Foto Gedung Dinkes Juai',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanDinkesKantor',
fields=[
],
options={
'verbose_name': '05 Foto Gedung Dinkes Kantor',
'proxy': True,
'verbose_name_plural': '05 Foto Gedung Dinkes Kantor',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanDinkesLampihong',
fields=[
],
options={
'verbose_name': '05 Foto Gedung Dinkes Lampihong',
'proxy': True,
'verbose_name_plural': '05 Foto Gedung Dinkes Lampihong',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanDinkesLokbatu',
fields=[
],
options={
'verbose_name': '05 Foto Gedung Dinkes Lokbatu',
'proxy': True,
'verbose_name_plural': '05 Foto Gedung Dinkes Lokbatu',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanDinkesParingin',
fields=[
],
options={
'verbose_name': '05 Foto Gedung Dinkes Paringin',
'proxy': True,
'verbose_name_plural': '05 Foto Gedung Dinkes Paringin',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanDinkesParinginSelatan',
fields=[
],
options={
'verbose_name': '05 Foto Gedung Dinkes Paringin Selatan',
'proxy': True,
'verbose_name_plural': '05 Foto Gedung Dinkes Paringin Selatan',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanDinkesPirsus',
fields=[
],
options={
'verbose_name': '05 Foto Gedung Dinkes Pirsus',
'proxy': True,
'verbose_name_plural': '05 Foto Gedung Dinkes Pirsus',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanDinkesRSUD',
fields=[
],
options={
'verbose_name': '05 Foto Gedung Dinkes RSUD',
'proxy': True,
'verbose_name_plural': '05 Foto Gedung Dinkes RSUD',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanDinkesTanahHabang',
fields=[
],
options={
'verbose_name': '05 Foto Gedung Dinkes Tanah Habang',
'proxy': True,
'verbose_name_plural': '05 Foto Gedung Dinkes Tanah Habang',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanDinkesTebingTinggi',
fields=[
],
options={
'verbose_name': '05 Foto Gedung Dinkes Tebing Tinggi',
'proxy': True,
'verbose_name_plural': '05 Foto Gedung Dinkes Tebing Tinggi',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanDinkesUren',
fields=[
],
options={
'verbose_name': '05 Foto Gedung Dinkes Uren',
'proxy': True,
'verbose_name_plural': '05 Foto Gedung Dinkes Uren',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanDisdik',
fields=[
],
options={
'verbose_name': '07 Foto Gedung Disdik',
'proxy': True,
'verbose_name_plural': '07 Foto Gedung Disdik',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanDisdikAwayan',
fields=[
],
options={
'verbose_name': '07 Foto Gedung Disdik Awayan',
'proxy': True,
'verbose_name_plural': '07 Foto Gedung Disdik Awayan',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanDisdikBatumandi',
fields=[
],
options={
'verbose_name': '07 Foto Gedung Disdik Batumandi',
'proxy': True,
'verbose_name_plural': '07 Foto Gedung Disdik Batumandi',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanDisdikHalong',
fields=[
],
options={
'verbose_name': '07 Foto Gedung Disdik Halong',
'proxy': True,
'verbose_name_plural': '07 Foto Gedung Disdik Halong',
},
bases=('gedungbangunan.fotogedungbangunan',),
),
migrations.CreateModel(
name='FotoGedungBangunanDisdikJuai',
fields=[
],
options={
'verbose_name': '07 Foto Gedung Disdik Juai',
                'proxy': True,
                'verbose_name_plural': '07 Foto Gedung Disdik Juai',
            },
            bases=('gedungbangunan.fotogedungbangunan',),
        ),
    ]
import torch
import math
from torch.nn import functional as F
class DenseBlock(torch.nn.Module):
def __init__(self, input_size, output_size, bias=True, activation='relu', norm='batch'):
super(DenseBlock, self).__init__()
self.fc = torch.nn.Linear(input_size, output_size, bias=bias)
self.norm = norm
        if self.norm == 'batch':
self.bn = torch.nn.BatchNorm1d(output_size)
elif self.norm == 'instance':
self.bn = torch.nn.InstanceNorm1d(output_size)
self.activation = activation
if self.activation == 'relu':
self.act = torch.nn.ReLU(True)
elif self.activation == 'prelu':
self.act = torch.nn.PReLU()
elif self.activation == 'lrelu':
self.act = torch.nn.LeakyReLU(0.2, True)
elif self.activation == 'tanh':
self.act = torch.nn.Tanh()
elif self.activation == 'sigmoid':
self.act = torch.nn.Sigmoid()
def forward(self, x):
if self.norm is not None:
out = self.bn(self.fc(x))
else:
out = self.fc(x)
if self.activation is not None:
return self.act(out)
else:
return out
class ConvBlock(torch.nn.Module):
def __init__(self, input_size, output_size, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm=None):
super(ConvBlock, self).__init__()
self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias)
self.norm = norm
        if self.norm == 'batch':
self.bn = torch.nn.BatchNorm2d(output_size)
elif self.norm == 'instance':
self.bn = torch.nn.InstanceNorm2d(output_size)
self.activation = activation
if self.activation == 'relu':
self.act = torch.nn.ReLU(True)
elif self.activation == 'prelu':
self.act = torch.nn.PReLU()
elif self.activation == 'lrelu':
self.act = torch.nn.LeakyReLU(0.2, True)
elif self.activation == 'tanh':
self.act = torch.nn.Tanh()
elif self.activation == 'sigmoid':
self.act = torch.nn.Sigmoid()
def forward(self, x):
if self.norm is not None:
out = self.bn(self.conv(x))
else:
out = self.conv(x)
if self.activation is not None:
return self.act(out)
else:
return out
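# ConvBlock bundles a convolution, an optional normalization, and an optional
# non-linearity into one module. Illustrative usage (a 3x3 'same' convolution
# with batch norm and LeakyReLU):
#
#     conv = ConvBlock(3, 64, kernel_size=3, stride=1, padding=1,
#                      activation='lrelu', norm='batch')
#     y = conv(torch.randn(8, 3, 32, 32))   # -> shape (8, 64, 32, 32)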
class DeconvBlock(torch.nn.Module):
def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='prelu', norm=None):
super(DeconvBlock, self).__init__()
self.deconv = torch.nn.ConvTranspose2d(input_size, output_size, kernel_size, stride, padding, bias=bias)
self.norm = norm
if self.norm == 'batch':
self.bn = torch.nn.BatchNorm2d(output_size)
elif self.norm == 'instance':
self.bn = torch.nn.InstanceNorm2d(output_size)
self.activation = activation
if self.activation == 'relu':
self.act = torch.nn.ReLU(True)
elif self.activation == 'prelu':
self.act = torch.nn.PReLU()
elif self.activation == 'lrelu':
self.act = torch.nn.LeakyReLU(0.2, True)
elif self.activation == 'tanh':
self.act = torch.nn.Tanh()
elif self.activation == 'sigmoid':
self.act = torch.nn.Sigmoid()
def forward(self, x):
if self.norm is not None:
out = self.bn(self.deconv(x))
else:
out = self.deconv(x)
if self.activation is not None:
return self.act(out)
else:
return out
class ResnetBlock(torch.nn.Module):
def __init__(self, num_filter, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm='batch'):
super(ResnetBlock, self).__init__()
self.conv1 = torch.nn.Conv2d(num_filter, num_filter, kernel_size, stride, padding, bias=bias)
self.conv2 = torch.nn.Conv2d(num_filter, num_filter, kernel_size, stride, padding, bias=bias)
self.norm = norm
if self.norm == 'batch':
self.bn = torch.nn.BatchNorm2d(num_filter)
elif norm == 'instance':
self.bn = torch.nn.InstanceNorm2d(num_filter)
self.activation = activation
if self.activation == 'relu':
self.act = torch.nn.ReLU(True)
elif self.activation == 'prelu':
self.act = torch.nn.PReLU()
elif self.activation == 'lrelu':
self.act = torch.nn.LeakyReLU(0.2, True)
elif self.activation == 'tanh':
self.act = torch.nn.Tanh()
elif self.activation == 'sigmoid':
self.act = torch.nn.Sigmoid()
def forward(self, x):
residual = x
if self.norm is not None:
out = self.bn(self.conv1(x))
else:
out = self.conv1(x)
if self.activation is not None:
out = self.act(out)
if self.norm is not None:
out = self.bn(self.conv2(out))
else:
out = self.conv2(out)
out = torch.add(out, residual)
if self.activation is not None:
out = self.act(out)
return out
class UpBlock(torch.nn.Module):
def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, bias=True, activation='prelu', norm=None):
super(UpBlock, self).__init__()
        self.up_conv1 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
        self.up_conv2 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
        self.up_conv3 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
def forward(self, x):
h0 = self.up_conv1(x)
l0 = self.up_conv2(h0)
h1 = self.up_conv3(l0 - x)
return h1 + h0
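# UpBlock is a DBPN-style up-projection unit: upsample (h0), project back down
# (l0), then upsample the back-projection residual (l0 - x) as a correction to
# h0. With the default kernel 8 / stride 4 / padding 2 the spatial size grows
# 4x (illustrative):
#
#     up = UpBlock(num_filter=64)
#     y = up(torch.randn(1, 64, 16, 16))   # -> shape (1, 64, 64, 64)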
class UpBlockPix(torch.nn.Module):
def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, scale=4, bias=True, activation='prelu', norm=None):
super(UpBlockPix, self).__init__()
self.up_conv1 = Upsampler(scale,num_filter)
        self.up_conv2 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
self.up_conv3 = Upsampler(scale,num_filter)
def forward(self, x):
h0 = self.up_conv1(x)
l0 = self.up_conv2(h0)
h1 = self.up_conv3(l0 - x)
return h1 + h0
class D_UpBlock(torch.nn.Module):
def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, num_stages=1, bias=True, activation='prelu', norm=None):
super(D_UpBlock, self).__init__()
        self.conv = ConvBlock(num_filter*num_stages, num_filter, 1, 1, 0, bias=bias, activation=activation, norm=None)
        self.up_conv1 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
        self.up_conv2 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
        self.up_conv3 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
def forward(self, x):
x = self.conv(x)
h0 = self.up_conv1(x)
l0 = self.up_conv2(h0)
h1 = self.up_conv3(l0 - x)
return h1 + h0
class D_UpBlockPix(torch.nn.Module):
def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, num_stages=1, scale=4, bias=True, activation='prelu', norm=None):
super(D_UpBlockPix, self).__init__()
        self.conv = ConvBlock(num_filter*num_stages, num_filter, 1, 1, 0, bias=bias, activation=activation, norm=None)
        self.up_conv1 = Upsampler(scale, num_filter)
        self.up_conv2 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
        self.up_conv3 = Upsampler(scale, num_filter)
def forward(self, x):
x = self.conv(x)
h0 = self.up_conv1(x)
l0 = self.up_conv2(h0)
h1 = self.up_conv3(l0 - x)
return h1 + h0
class DownBlock(torch.nn.Module):
def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, bias=True, activation='prelu', norm=None):
super(DownBlock, self).__init__()
        self.down_conv1 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
        self.down_conv2 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
        self.down_conv3 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
def forward(self, x):
l0 = self.down_conv1(x)
h0 = self.down_conv2(l0)
l1 = self.down_conv3(h0 - x)
return l1 + l0
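# DownBlock mirrors UpBlock as a down-projection unit: downsample (l0),
# project back up (h0), then downsample the residual (h0 - x) as a correction.
# With the defaults, a 64x64 feature map returns to 16x16 (illustrative):
#
#     down = DownBlock(num_filter=64)
#     y = down(torch.randn(1, 64, 64, 64))   # -> shape (1, 64, 16, 16)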
class DownBlockPix(torch.nn.Module):
def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, scale=4,bias=True, activation='prelu', norm=None):
super(DownBlockPix, self).__init__()
        self.down_conv1 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
        self.down_conv2 = Upsampler(scale, num_filter)
        self.down_conv3 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
def forward(self, x):
l0 = self.down_conv1(x)
h0 = self.down_conv2(l0)
l1 = self.down_conv3(h0 - x)
return l1 + l0
class D_DownBlock(torch.nn.Module):
def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, num_stages=1, bias=True, activation='prelu', norm=None):
super(D_DownBlock, self).__init__()
        self.conv = ConvBlock(num_filter*num_stages, num_filter, 1, 1, 0, bias=bias, activation=activation, norm=None)
        self.down_conv1 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
        self.down_conv2 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
        self.down_conv3 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
def forward(self, x):
x = self.conv(x)
l0 = self.down_conv1(x)
h0 = self.down_conv2(l0)
l1 = self.down_conv3(h0 - x)
return l1 + l0
class D_DownBlockPix(torch.nn.Module):
def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, num_stages=1, scale=4, bias=True, activation='prelu', norm=None):
super(D_DownBlockPix, self).__init__()
        self.conv = ConvBlock(num_filter*num_stages, num_filter, 1, 1, 0, bias=bias, activation=activation, norm=None)
        self.down_conv1 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
        self.down_conv2 = Upsampler(scale, num_filter)
        self.down_conv3 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
def forward(self, x):
x = self.conv(x)
l0 = self.down_conv1(x)
h0 = self.down_conv2(l0)
l1 = self.down_conv3(h0 - x)
return l1 + l0
class PSBlock(torch.nn.Module):
def __init__(self, input_size, output_size, scale_factor, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm='batch'):
super(PSBlock, self).__init__()
self.conv = torch.nn.Conv2d(input_size, output_size * scale_factor**2, kernel_size, stride, padding, bias=bias)
self.ps = torch.nn.PixelShuffle(scale_factor)
self.norm = norm
if self.norm == 'batch':
self.bn = torch.nn.BatchNorm2d(output_size)
elif norm == 'instance':
self.bn = torch.nn.InstanceNorm2d(output_size)
self.activation = activation
if self.activation == 'relu':
self.act = torch.nn.ReLU(True)
elif self.activation == 'prelu':
self.act = torch.nn.PReLU()
elif self.activation == 'lrelu':
self.act = torch.nn.LeakyReLU(0.2, True)
elif self.activation == 'tanh':
self.act = torch.nn.Tanh()
elif self.activation == 'sigmoid':
self.act = torch.nn.Sigmoid()
def forward(self, x):
if self.norm is not None:
out = self.bn(self.ps(self.conv(x)))
else:
out = self.ps(self.conv(x))
if self.activation is not None:
out = self.act(out)
return out
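# PSBlock implements sub-pixel convolution: the convolution expands the channel
# count by scale_factor**2 and PixelShuffle rearranges those channels into
# space. Illustrative:
#
#     ps = PSBlock(64, 64, scale_factor=2)
#     y = ps(torch.randn(1, 64, 16, 16))   # -> shape (1, 64, 32, 32)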
class Upsampler(torch.nn.Module):
def __init__(self, scale, n_feat, bn=False, act='prelu', bias=True):
super(Upsampler, self).__init__()
modules = []
for _ in range(int(math.log(scale, 2))):
modules.append(ConvBlock(n_feat, 4 * n_feat, 3, 1, 1, bias, activation=None, norm=None))
modules.append(torch.nn.PixelShuffle(2))
if bn: modules.append(torch.nn.BatchNorm2d(n_feat))
#modules.append(torch.nn.PReLU())
self.up = torch.nn.Sequential(*modules)
self.activation = act
if self.activation == 'relu':
self.act = torch.nn.ReLU(True)
elif self.activation == 'prelu':
self.act = torch.nn.PReLU()
elif self.activation == 'lrelu':
self.act = torch.nn.LeakyReLU(0.2, True)
elif self.activation == 'tanh':
self.act = torch.nn.Tanh()
elif self.activation == 'sigmoid':
self.act = torch.nn.Sigmoid()
def forward(self, x):
out = self.up(x)
if self.activation is not None:
out = self.act(out)
return out
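# Upsampler chains log2(scale) conv + PixelShuffle(2) stages, so `scale` is
# assumed to be a power of two (e.g. scale=4 -> two 2x stages). Illustrative:
#
#     up = Upsampler(scale=4, n_feat=64)
#     y = up(torch.randn(1, 64, 16, 16))   # -> shape (1, 64, 64, 64)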
class Upsample2xBlock(torch.nn.Module):
def __init__(self, input_size, output_size, bias=True, upsample='deconv', activation='relu', norm='batch'):
super(Upsample2xBlock, self).__init__()
scale_factor = 2
# 1. Deconvolution (Transposed convolution)
if upsample == 'deconv':
self.upsample = DeconvBlock(input_size, output_size,
kernel_size=4, stride=2, padding=1,
bias=bias, activation=activation, norm=norm)
# 2. Sub-pixel convolution (Pixel shuffler)
elif upsample == 'ps':
self.upsample = PSBlock(input_size, output_size, scale_factor=scale_factor,
bias=bias, activation=activation, norm=norm)
# 3. Resize and Convolution
elif upsample == 'rnc':
self.upsample = torch.nn.Sequential(
torch.nn.Upsample(scale_factor=scale_factor, mode='nearest'),
ConvBlock(input_size, output_size,
kernel_size=3, stride=1, padding=1,
bias=bias, activation=activation, norm=norm)
)
def forward(self, x):
out = self.upsample(x)
return out
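# The three `upsample` modes trade cost against checkerboard artifacts:
# 'deconv' (transposed convolution), 'ps' (sub-pixel convolution), and 'rnc'
# (nearest-neighbour resize followed by a convolution). Illustrative:
#
#     up = Upsample2xBlock(64, 64, upsample='rnc')
#     y = up(torch.randn(1, 64, 32, 32))   # -> shape (1, 64, 64, 64)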
class NonLocalBlock2D(torch.nn.Module):
def __init__(self, in_channels, inter_channels=None, dimension=2, sub_sample=True):
super(NonLocalBlock2D,self).__init__()
assert dimension in [2]
self.dimension = dimension
self.sub_sample = sub_sample
self.in_channels = in_channels
self.inter_channels = inter_channels
if self.inter_channels is None:
self.inter_channels = in_channels // 2
if self.inter_channels == 0:
self.inter_channels = 1
conv_2d = torch.nn.Conv2d
max_pool_layer = torch.nn.MaxPool2d(kernel_size=(2, 2))
self.g = conv_2d(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.W = conv_2d(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0)
torch.nn.init.constant_(self.W.weight, 0)
torch.nn.init.constant_(self.W.bias, 0)
self.theta = conv_2d(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
        self.phi = conv_2d(in_channels=self.in_channels, out_channels=self.inter_channels,
                           kernel_size=1, stride=1, padding=0)
        # as in the standard non-local block, sub-sampling pools g and phi
        if self.sub_sample:
            self.g = torch.nn.Sequential(self.g, max_pool_layer)
            self.phi = torch.nn.Sequential(self.phi, max_pool_layer)
<reponame>chrisdjscott/Atoman
# -*- coding: utf-8 -*-
"""
Analysis is performed using an *Analysis pipeline*, found on the *Analysis toolbar* on the left of the application (see
right). Multiple pipelines can be configured at once; a pipeline is viewed in a renderer window.
An individual pipeline takes a reference and an input system as its input and contains one or more filter/calculator
lists. These lists operate independently of one another and calculate properties or filter the input system. Available
filters/calculators are shown below:
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import sys
import glob
import math
import logging
import functools
import uuid
from PySide2 import QtGui, QtCore, QtWidgets
import vtk
import numpy as np
from ..visutils.utilities import iconPath
from . import filterList
from . import picker
from .dialogs import infoDialogs
from . import utils
from ..rendering import highlight
from .dialogs import simpleDialogs
import six
from six.moves import range
################################################################################
class PipelineForm(QtWidgets.QWidget):
def __init__(self, parent, mainWindow, width, pipelineIndex, pipelineString):
super(PipelineForm, self).__init__(parent)
self.mainToolbar = parent
self.mainWindow = mainWindow
self.toolbarWidth = width
self.pipelineIndex = pipelineIndex
self.pipelineString = pipelineString
self.systemsDialog = mainWindow.systemsDialog
self.logger = logging.getLogger(__name__)
self.rendererWindows = self.mainWindow.rendererWindows
self.pickerContextMenuID = uuid.uuid4()
self.pickerContextMenu = QtWidgets.QMenu(self)
self.pickerContextMenu.aboutToHide.connect(self.hidePickerMenuHighlight)
self.filterListCount = 0
self.filterLists = []
self.onScreenInfo = {}
self.onScreenInfoActors = vtk.vtkActor2DCollection()
self.visAtomsList = []
self.refState = None
self.inputState = None
self.extension = None
self.inputStackIndex = None
self.filename = None
self.currentRunID = None
self.abspath = None
self.PBC = None
self.linkedLattice = None
self.fromSFTP = None
self.scalarBarAdded = False
# layout
filterTabLayout = QtWidgets.QVBoxLayout(self)
filterTabLayout.setContentsMargins(0, 0, 0, 0)
filterTabLayout.setSpacing(0)
filterTabLayout.setAlignment(QtCore.Qt.AlignTop)
# row
row = QtWidgets.QWidget()
rowLayout = QtWidgets.QHBoxLayout(row)
rowLayout.setAlignment(QtCore.Qt.AlignHCenter)
rowLayout.setContentsMargins(0, 0, 0, 0)
rowLayout.setSpacing(0)
label = QtWidgets.QLabel("<b>Pipeline %d settings</b>" % pipelineIndex)
rowLayout.addWidget(label)
filterTabLayout.addWidget(row)
# row
row = QtWidgets.QWidget()
rowLayout = QtWidgets.QHBoxLayout(row)
rowLayout.setAlignment(QtCore.Qt.AlignTop)
rowLayout.setContentsMargins(0, 0, 0, 0)
rowLayout.setSpacing(0)
# reference selector
self.refCombo = QtWidgets.QComboBox()
self.refCombo.setFixedWidth(220)
self.refCombo.setToolTip("Select the reference system for this pipeline")
self.refCombo.currentIndexChanged.connect(self.refChanged)
# add to row
rowLayout.addWidget(QtWidgets.QLabel("Reference:"))
rowLayout.addWidget(self.refCombo)
filterTabLayout.addWidget(row)
# row
row = QtWidgets.QWidget()
rowLayout = QtWidgets.QHBoxLayout(row)
rowLayout.setAlignment(QtCore.Qt.AlignTop)
rowLayout.setContentsMargins(0, 0, 0, 0)
rowLayout.setSpacing(0)
# reference selector
self.inputCombo = QtWidgets.QComboBox()
self.inputCombo.setFixedWidth(220)
self.inputCombo.setToolTip("Select the input system for this pipeline")
self.inputCombo.currentIndexChanged.connect(self.inputChanged)
# add to row
rowLayout.addWidget(QtWidgets.QLabel("Input:"))
rowLayout.addWidget(self.inputCombo)
filterTabLayout.addWidget(row)
row = QtWidgets.QHBoxLayout()
row.setAlignment(QtCore.Qt.AlignHCenter)
row.addWidget(QtWidgets.QLabel("<b>Property/filter lists:</b>"))
filterTabLayout.addLayout(row)
# row
row = QtWidgets.QWidget()
rowLayout = QtWidgets.QHBoxLayout(row)
rowLayout.setAlignment(QtCore.Qt.AlignTop)
rowLayout.setContentsMargins(0, 0, 0, 0)
rowLayout.setSpacing(0)
# buttons for new/trash filter list
runAll = QtWidgets.QPushButton(QtGui.QIcon(iconPath('oxygen/view-refresh.png')), 'Apply lists')
runAll.setStatusTip("Apply all property/filter lists")
runAll.setToolTip("Apply all property/filter lists")
runAll.clicked.connect(self.runAllFilterLists)
add = QtWidgets.QPushButton(QtGui.QIcon(iconPath('oxygen/tab-new-background.png')), 'New list')
add.setToolTip("New property/filter list")
add.setStatusTip("New property/filter list")
add.clicked.connect(self.addFilterList)
clear = QtWidgets.QPushButton(QtGui.QIcon(iconPath('oxygen/tab-close-other.png')), 'Clear lists')
clear.setStatusTip("Clear all property/filter lists")
clear.setToolTip("Clear all property/filter lists")
clear.clicked.connect(self.clearAllFilterLists)
rowLayout.addWidget(add)
rowLayout.addWidget(clear)
rowLayout.addWidget(runAll)
filterTabLayout.addWidget(row)
# add tab bar for filter lists
self.filterTabBar = QtWidgets.QTabWidget(self)
self.filterTabBar.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.filterTabBar.currentChanged[int].connect(self.filterTabBarChanged)
self.filterTabBar.setTabsClosable(True)
self.filterTabBar.tabCloseRequested.connect(self.tabCloseRequested)
filterTabLayout.addWidget(self.filterTabBar)
# add a filter list
self.addFilterList()
# add pbc options
group = QtWidgets.QGroupBox("Periodic boundaries")
group.setAlignment(QtCore.Qt.AlignHCenter)
groupLayout = QtWidgets.QVBoxLayout(group)
groupLayout.setSpacing(0)
groupLayout.setContentsMargins(0, 0, 0, 0)
# add PBC check boxes
self.PBCXCheckBox = QtWidgets.QCheckBox("x")
self.PBCXCheckBox.setChecked(QtCore.Qt.Checked)
self.PBCYCheckBox = QtWidgets.QCheckBox("y")
self.PBCYCheckBox.setChecked(QtCore.Qt.Checked)
self.PBCZCheckBox = QtWidgets.QCheckBox("z")
self.PBCZCheckBox.setChecked(QtCore.Qt.Checked)
self.PBCXCheckBox.stateChanged[int].connect(self.PBCXChanged)
self.PBCYCheckBox.stateChanged[int].connect(self.PBCYChanged)
self.PBCZCheckBox.stateChanged[int].connect(self.PBCZChanged)
row = QtWidgets.QWidget(self)
rowLayout = QtWidgets.QHBoxLayout(row)
rowLayout.setAlignment(QtCore.Qt.AlignHCenter)
rowLayout.addWidget(self.PBCXCheckBox)
rowLayout.addWidget(self.PBCYCheckBox)
rowLayout.addWidget(self.PBCZCheckBox)
groupLayout.addWidget(row)
# add shift cell and replicate cell buttons
self.replicateCellButton = QtWidgets.QPushButton("Replicate cell")
self.replicateCellButton.clicked.connect(self.replicateCell)
self.replicateCellButton.setToolTip("Replicate in periodic directions")
self.shiftCellButton = QtWidgets.QPushButton("Shift cell")
self.shiftCellButton.clicked.connect(self.shiftCell)
self.shiftCellButton.setToolTip("Shift cell in periodic directions")
hbox = QtWidgets.QHBoxLayout()
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addStretch(1)
hbox.addWidget(self.shiftCellButton)
hbox.addWidget(self.replicateCellButton)
hbox.addStretch(1)
groupLayout.addLayout(hbox)
# add shift atom button
row = QtWidgets.QWidget(self)
rowLayout = QtWidgets.QHBoxLayout(row)
rowLayout.setAlignment(QtCore.Qt.AlignHCenter)
self.shiftAtomButton = QtWidgets.QPushButton("Shift atoms")
self.shiftAtomButton.clicked.connect(self.shiftAtom)
self.shiftAtomButton.setToolTip("Shift an atom (or set of atoms) in periodic directions")
rowLayout.addWidget(self.shiftAtomButton)
groupLayout.addWidget(row)
filterTabLayout.addWidget(group)
# add systems to combos
for fn in self.systemsDialog.getDisplayNames():
self.refCombo.addItem(fn)
for fn in self.systemsDialog.getDisplayNames():
self.inputCombo.addItem(fn)
# refresh if ref already loaded
if self.mainWindow.refLoaded:
self.refreshAllFilters()
def shiftAtom(self):
"""
Shift atom
"""
# lattice
lattice = self.inputState
# show dialog
dlg = simpleDialogs.ShiftAtomDialog(-1, self.PBC, lattice.cellDims, lattice.NAtoms, parent=self)
status = dlg.exec_()
if status == QtWidgets.QDialog.Accepted:
# amount
shift = np.empty(3, np.float64)
shift[0] = dlg.shiftXSpin.value()
shift[1] = dlg.shiftYSpin.value()
shift[2] = dlg.shiftZSpin.value()
# atomIDstring
atomIDstring = dlg.lineEdit.text()
# parse atomIDstring
array = [val for val in atomIDstring.split(",") if val]
num = len(array)
rangeArray = np.empty((num, 2), np.int32)
for i, item in enumerate(array):
if "-" in item:
values = [val for val in item.split("-") if val]
minval = int(values[0])
if len(values) == 1:
maxval = minval
else:
maxval = int(values[1])
else:
minval = maxval = int(item)
rangeArray[i][0] = minval
rangeArray[i][1] = maxval
# loop over atoms
            if (shift[0] or shift[1] or shift[2]) and (num > 0):
self.logger.debug("Shifting atom: x = %f; y = %f; z = %f", shift[0], shift[1], shift[2])
# set override cursor
                QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
try:
# shift atoms
for i in range(num):
for k in range(rangeArray[i][1]-rangeArray[i][0]+1):
i3 = 3 * (rangeArray[i][0]+k-1)
for j in range(3):
lattice.pos[i3 + j] += shift[j]
# wrap atoms back into periodic cell
lattice.wrapAtoms()
finally:
                    QtWidgets.QApplication.restoreOverrideCursor()
# run post ref render of Renderer (redraws cell)
for rw in self.rendererWindows:
if rw.currentPipelineIndex == self.pipelineIndex:
rw.renderer.postRefRender()
rw.textSelector.refresh()
# run post input loaded method
self.postInputLoaded()
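        # NOTE: the atom ID string parsed above is a comma-separated list of
        # one-based IDs and/or inclusive ranges, e.g. "1-5,7,10-12" selects
        # atoms 1..5, 7 and 10..12 (illustrative input).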
def shiftCell(self):
"""
Shift cell
"""
# lattice
lattice = self.inputState
# show dialog
dlg = simpleDialogs.ShiftCellDialog(self.PBC, lattice.cellDims, parent=self)
status = dlg.exec_()
if status == QtWidgets.QDialog.Accepted:
# amount
shift = np.empty(3, np.float64)
shift[0] = dlg.shiftXSpin.value()
shift[1] = dlg.shiftYSpin.value()
shift[2] = dlg.shiftZSpin.value()
# loop over atoms
if shift[0] or shift[1] or shift[2]:
self.logger.debug("Shifting cell: x = %f; y = %f; z = %f", shift[0], shift[1], shift[2])
# progress update interval
progressInterval = int(lattice.NAtoms / 10)
if progressInterval < 50:
progressInterval = 50
elif progressInterval > 500:
progressInterval = 500
# set override cursor
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
try:
# add progress dialog
self.mainWindow.updateProgress(0, lattice.NAtoms, "Shifting cell")
# loop over atoms
for i in range(lattice.NAtoms):
i3 = 3 * i
for j in range(3):
lattice.pos[i3 + j] += shift[j]
# progress
if i % progressInterval == 0:
self.mainWindow.updateProgress(i, lattice.NAtoms, "Shifting cell")
# wrap atoms back into periodic cell
lattice.wrapAtoms()
finally:
self.mainWindow.hideProgressBar()
QtWidgets.QApplication.restoreOverrideCursor()
# run post ref render of Renderer (redraws cell)
for rw in self.rendererWindows:
if rw.currentPipelineIndex == self.pipelineIndex:
rw.renderer.postRefRender()
rw.textSelector.refresh()
# run post input loaded method
self.postInputLoaded()
def replicateCell(self):
"""
Replicate cell
"""
self.logger.warning("'Replicate cell' is an experimental feature!")
dlg = simpleDialogs.ReplicateCellDialog(self.PBC, parent=self)
status = dlg.exec_()
if status == QtWidgets.QDialog.Accepted:
repDirs = np.zeros(3, np.int32)
replicate = False
numx = dlg.replicateInXSpin.value()
if numx:
repDirs[0] = numx
replicate = True
numy = dlg.replicateInYSpin.value()
if numy:
repDirs[1] = numy
replicate = True
numz = dlg.replicateInZSpin.value()
if numz:
repDirs[2] = numz
replicate = True
if replicate:
self.logger.warning("Replicating cell: this will modify the current input state everywhere")
self.logger.debug("Replicating cell: %r", repDirs)
# TODO: write in C
lattice = self.inputState
newpos = np.empty(3, np.float64)
cellDims = lattice.cellDims
# calculate final number of atoms
numfin = lattice.NAtoms
for i in range(3):
numfin += numfin * repDirs[i]
numadd = numfin - lattice.NAtoms
self.logger.debug("Replicating cell: adding %d atoms", numadd)
# progress update interval
progressInterval = int(numadd / 10)
if progressInterval < 50:
progressInterval = 50
elif progressInterval > 500:
progressInterval = 500
# set override cursor
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
try:
# add progress dialog
self.mainWindow.updateProgress(0, numadd, "Replicating cell")
# loop over directions
count = 0
for i in range(3):
self.logger.debug("Replicating along axis %d: %d times", i, repDirs[i])
# store num atoms at beginning of this direction
NAtoms = lattice.NAtoms
# loop over number of replications in this direction
for j in range(repDirs[i]):
# loop over atoms
for k in range(NAtoms):
# attributes to copy to new atom
sym = lattice.atomSym(k)
q = lattice.charge[k]
scalarVals = {}
for name, scalarsArray in six.iteritems(lattice.scalarsDict):
scalarVals[name] = scalarsArray[k]
vectorVals = {}
for name, vectorsArray in six.iteritems(lattice.vectorsDict):
vectorVals[name] = vectorsArray[k]
# new position
pos = lattice.atomPos(k)
newpos[:] = pos[:]
newpos[i] += (j + 1) * cellDims[i]
# add atom
lattice.addAtom(sym, newpos, q, scalarVals=scalarVals, vectorVals=vectorVals)
# progress
count += 1
if count % progressInterval == 0:
self.mainWindow.updateProgress(count, numadd, "Replicating cell")
# change cell dimension
lattice.cellDims[i] += cellDims[i] * repDirs[i]
self.logger.debug("New cellDims along axis %d: %f", i, lattice.cellDims[i])
finally:
self.mainWindow.hideProgressBar()
QtWidgets.QApplication.restoreOverrideCursor()
# run post ref render of Renderer (redraws cell)
for rw in self.rendererWindows:
if rw.currentPipelineIndex == self.pipelineIndex:
rw.renderer.postRefRender()
rw.textSelector.refresh()
# run post input loaded method
self.postInputLoaded()
def PBCXChanged(self, val):
"""
PBC changed.
"""
if self.PBCXCheckBox.isChecked():
self.PBC[0] = 1
else:
self.PBC[0] = 0
def PBCYChanged(self, val):
' + str(j)
if len(sources.getSources(self.Message,self.manager_sourcelist,self.manager_network_name_nxt.get(),'network')) == 0 or self.Networks_UpdateSource():
sourcedict = {'network':self.manager_network_name_nxt.get(),'networkid':sources.getSource(self.Message,self.manager_networklist,self.manager_network_name_nxt.get())['id'],'name':nname,'protocol':'LOCAL','host':None,'username':None,'password':<PASSWORD>,'path':None,'filenameformat':None}
#add metadata
self.manager_sourcelist.append(sourcedict)
self.manager_source_name_nxt.set(nname)
if len(sources.getSources(self.Message,self.manager_sourcelist,self.manager_network_name_nxt.get(),'network')) > 1:
if self.Networks_UpdateSource():
self.Networks_LoadSource()
else:
self.manager_source_name_pre.set(nname)
self.manager_source_name.set(nname)
self.Networks_LoadSource()
def Networks_DuplicateSource(self):
if self.Networks_UpdateSource():
sourcedict = deepcopy(self.manager_source)
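# build a unique default name of the form 'New camera N' (existing names are
# compared case-insensitively after validateName normalisation)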
nname = sources.listSources(self.Message,self.manager_sourcelist,self.manager_network_name_pre.get())
j = 1
for n,v in enumerate(nname):
nname[n] = parsers.validateName(v).lower()
while parsers.validateName('New camera ' + str(j)).lower() in nname:
j += 1
nname = 'New camera ' + str(j)
sourcedict.update({'name':nname})
self.manager_sourcelist.append(sourcedict)
self.manager_source_name_nxt.set(nname)
self.Networks_UpdateSource()
self.Networks_LoadSource()
def Networks_RemoveNetwork(self):
self.manager_networklist.remove(sources.getSource(self.Message,self.manager_networklist,self.manager_network_name_nxt.get()))
self.manager_sourcelist = self.Networks_RenameNetworkSources(self.manager_sourcelist,self.manager_network['name'],None)
if len(self.manager_networklist) > 0:
self.manager_network_name.set(self.manager_networklist[0]['name'])
self.manager_network_name_pre.set(self.manager_networklist[0]['name'])
self.manager_network_name_nxt.set(self.manager_networklist[0]['name'])
self.Networks_LoadNetwork()
else:
self.Networks_AddNetwork()
def Networks_RenameNetworkSources(self,sourcelist,oldname,newname): #or remove (newname=None)
removelist = []
if newname is None:
for s,source in enumerate(sourcelist):
if source['network'] == oldname:
removelist.append(source)
for source in removelist:
sourcelist.remove(source)
else:
for s,source in enumerate(sourcelist):
if source['network'] == oldname:
sourcelist[s].update({'network':newname})
return sourcelist
def Networks_RemoveSource(self):
self.manager_sourcelist.remove(sources.getSource(self.Message,sources.getSources(self.Message,self.manager_sourcelist,self.manager_source_name_nxt.get()),self.manager_network_name.get(),'network'))
try:
self.manager_source_name_nxt.set(sources.getSource(self.Message,self.manager_sourcelist,self.manager_network_name.get(),'network')['name'])
self.manager_source_name_pre.set(self.manager_source_name_nxt.get())
self.manager_source_name.set(self.manager_source_name_nxt.get())
self.Networks_LoadSource()
except Exception:  # no source left in this network; fall back to creating a new one
self.Networks_AddSource()
def Networks_BrowseCNIF(self):
if self.manager_network_protocol.get() == 'LOCAL':
self.file_opt = options = {}
options['defaultextension'] = '.tsvx'
options['filetypes'] = [ ('Extended tab separated value files', '.tsvx'),('all files', '.*')]
options['title'] = 'Select CNIF...'
ans = tkFileDialog.askopenfilename(**self.file_opt)
if ans != '' and ans != '.' and ans != ():
ans = os.path.normpath(ans)
self.manager_network_file.set(ans)
else:
tkMessageBox.showwarning('Browse CNIF','The CNIF can only be browsed if the protocol is LOCAL (i.e. the file is on the local computer).')
def Networks_BrowseImages(self):
if self.manager_source_protocol.get() == 'LOCAL':
self.file_opt = options = {}
options['title'] = 'Select the directory to the images...'
ans = tkFileDialog.askdirectory(**self.file_opt)
if ans != '' and ans != '.' and ans != ():
ans = os.path.normpath(ans)
self.manager_source_path.set(ans)
else:
tkMessageBox.showwarning('Browse directory to the images','The directory of the images can only be browsed if the protocol is LOCAL (i.e. the images are on the local computer).')
def Networks_ReadCNIF(self):
self.manager_network['file'] = self.manager_network_file.get()
if self.manager_network['file'] is None:# or not os.path.exists(self.manager_network['file']):
tkMessageBox.showwarning('Fail', self.manager_network['name'] + ' CNIF cannot be found or is not set up. Check the network parameters.')
return False
if self.Networks_UpdateNetwork():
f = fetchers.fetchFile(self,self.Message,TmpDir, self.manager_network['localfile'], self.manager_network['protocol'],self.manager_network['host'], self.manager_network['username'], self.manager_network['password'], self.manager_network['file'], self.proxy, self.connection)
if f is False:
tkMessageBox.showwarning('Fail','Cannot fetch or download the CNIF. Check the network parameters. If the protocol is FTP, HTTP or HTTPS, check the proxy settings and the internet connection.')
return False
else:
n = sources.readTSVx(os.path.join(TmpDir,f))
if not self.manager_network['name'] in sources.listNetworks(self.Message,self.manager_networklist):
new_net = deepcopy(self.manager_network)
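# give the new network the smallest positive integer id not already in use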
i = 1
ids = []
for network in self.manager_networklist:
ids.append(network['id'])
while len(ids) > 0 and str(i) in ids:
i += 1
new_net.update({'id':str(i)})
self.manager_networklist.append(new_net)
self.Message.set(self.manager_network['name']+' added to the camera networks.')
for source in n:
if source['name'] in sources.listSources(self.Message,self.manager_sourcelist,self.manager_network['name']):
self.manager_sourcelist.remove(sources.getSource(self.Message,sources.getSources(self.Message,self.manager_sourcelist,self.manager_network['name'],'network'),source['name']))
self.Message.set(source['name'] + ' is removed from the camera network '+self.manager_network['name'])
source.update({'networkid':sources.getSource(self.Message,self.manager_networklist,self.manager_network['name'])['id'],'network':self.manager_network['name']})
self.manager_sourcelist.append(source)
self.Message.set(source['name']+' is added to the camera network: ' + self.manager_network['name'])
self.Networks_LoadNetwork()
tkMessageBox.showwarning('Read CNIF',str(len(n)) + ' cameras were found in the CNIF and added to or replaced in the camera network '+self.manager_network['name']+'.')
else:
return False
def Networks_Network_Discard(self):
if tkMessageBox.askyesno('Discard changes','Changes in all camera networks will be discarded. Are you sure?'):
self.manager_networklist = self.networklist[:]
self.manager_sourcelist = self.sourcelist[:]
self.manager_network_name.set(self.manager_networklist[0]['name'])
self.manager_network_name_nxt.set(self.manager_networklist[0]['name'])
self.manager_network_name_pre.set(self.manager_networklist[0]['name'])
self.Networks_LoadNetwork()
self.manager_network_window.grab_set()
self.manager_network_window.lift()
def Networks_Network_Save(self):
if self.Networks_UpdateNetwork():
if tkMessageBox.askyesno('Save changes','Changes will be permanent. Are you sure?'):
dictlist = deepcopy(self.manager_networklist[:])
for d in range(len(dictlist)):
if isinstance(dictlist[d]['password'],str) and dictlist[d]['password'] == '*'+validateName(dictlist[d]['protocol']+dictlist[d]['host']).lower()+'*password*':
dictlist[d]['password'] = '*'
if isinstance(dictlist[d]['username'],str) and dictlist[d]['username'] == '*'+validateName(dictlist[d]['protocol']+dictlist[d]['host']).lower()+'*username*':
dictlist[d]['username'] = '*'
if dictlist[d]['file'] is None:# or (dictlist[d]['protocol'] == 'LOCAL' and not os.path.exists(dictlist[d]['file'])):
tkMessageBox.showwarning('Fail', dictlist[d]['name'] + ' CNIF cannot be found or is not set up. Check the network parameters.')
return False
self.networklist = deepcopy(self.manager_networklist[:])
sourcelist_pre = deepcopy(self.sourcelist)
self.sourcelist = deepcopy(self.manager_sourcelist[:])
if self.NetworkNameVariable.get() not in sources.listNetworks(self.Message,self.networklist):
self.NetworkNameVariable.set(self.networklist[0]['name'])
sources.writeTSVx(NetworklistFile,dictlist)
self.manager_network_window.destroy()
self.lift()
self.makeDirStorage()
self.migrateStorage(self.imagespath.get(),sourcelist_pre,self.imagespath.get(),self.sourcelist)
if self.ActiveMenu.get() == "Camera":
self.Menu_Main_Camera()
def Networks_Source_Discard(self):
if tkMessageBox.askyesno('Discard changes','Changes in all cameras will be discarded. Are you sure?'):
self.manager_sourcelist = self.sourcelist[:]
self.manager_source_name.set(sources.getSource(self.Message,self.manager_sourcelist,self.manager_network_name.get(),'network')['name'])
self.manager_source_name_nxt.set(sources.getSource(self.Message,self.manager_sourcelist,self.manager_network_name.get(),'network')['name'])
self.manager_source_name_pre.set(sources.getSource(self.Message,self.manager_sourcelist,self.manager_network_name.get(),'network')['name'])
self.Networks_LoadSource()
self.manager_source_window.grab_set()
self.manager_source_window.lift()
def Networks_Source_Save(self):
if self.Networks_UpdateSource():
dictlist = deepcopy(sources.getSources(self.Message,self.manager_sourcelist,self.manager_network_name.get(),'network'))
for d in range(len(dictlist)):
del dictlist[d]['networkid']
if isinstance(dictlist[d]['password'],str) and dictlist[d]['password'] == '*'+validateName(dictlist[d]['protocol']+dictlist[d]['host']).lower()+'*password*':
dictlist[d]['password'] = '*'
if isinstance(dictlist[d]['username'],str) and dictlist[d]['username'] == '*'+validateName(dictlist[d]['protocol']+dictlist[d]['host']).lower()+'*username*':
dictlist[d]['username'] = '*'
if self.manager_network_protocol.get() == 'LOCAL':
tkMessageBox.showinfo('Save changes','The program will now export the CNIF. Select the location where you want to keep it. The CNIF should not be removed before the camera network is removed from the camera manager.')
else:
tkMessageBox.showinfo('Save changes','The program will now export the CNIF. Upload it to the host \''+self.manager_network_host.get()+'\' under the directory \''+os.path.split(self.manager_network_file.get())[0]+'\' with the name \''+os.path.split(self.manager_network_file.get())[1]+'\'. Notice that for HTTP connections, it might take some time until the updated file is readable.')
self.file_opt = options = {}
options['defaultextension'] = '.tsvx'
options['filetypes'] = [ ('Extended tab separated value files', '.tsvx'),('all files', '.*')]
options['title'] = 'Set filename to export CNIF to...'
ans = tkFileDialog.asksaveasfilename(**self.file_opt)
if ans != '' and ans != '.' and ans != ():
ans = os.path.normpath(ans)
sources.writeTSVx(ans,dictlist)
if self.manager_network_protocol.get() == 'LOCAL':
self.manager_network_file.set(ans)
self.manager_source_window.destroy()
self.manager_network_window.grab_set()
self.manager_network_window.lift()
def Networks_AddOnlineCNIF(self):
self.manager_network_addonline_window = Tkinter.Toplevel(self,padx=10,pady=10)
self.manager_network_addonline_window.grab_set()
self.manager_network_addonline_window.wm_title('Add camera network')
self.manager_network_addonline_window.columnconfigure(1,minsize=self.MenuX)
self.manager_network_nname = Tkinter.StringVar()
self.manager_network_link = Tkinter.StringVar()
self.manager_network_user = Tkinter.StringVar()
self.manager_network_pass = Tkinter.StringVar()
r = 0
Tkinter.Label(self.manager_network_addonline_window,anchor='w',text='Camera network name:').grid(sticky='w'+'e',row=r,column=1,columnspan=2)
r += 1
Tkinter.Button(self.manager_network_addonline_window,text='?',width=1,command=lambda: tkMessageBox.showinfo('Network Manager','A name for the camera network to be added. The name should be different from the ones that already exist.')).grid(sticky='w'+'e',row=r,column=2)
Tkinter.Entry(self.manager_network_addonline_window,textvariable=self.manager_network_nname).grid(sticky='w'+'e',row=r,column=1)
r += 1
Tkinter.Label(self.manager_network_addonline_window,anchor='w',text='Link to CNIF:').grid(sticky='w'+'e',row=r,column=1,columnspan=2)
r += 1
Tkinter.Button(self.manager_network_addonline_window,text='?',width=1,command=lambda: tkMessageBox.showinfo('Network Manager','Hyperlink to the CNIF (e.g. http://johnsnetwork.com/network/cnif.tsvx , ftp://myftpserver.com/mycams/cnif.tsvx , http://192.168.23.5/cnif.tsvx)')).grid(sticky='w'+'e',row=r,column=2)
Tkinter.Entry(self.manager_network_addonline_window,textvariable=self.manager_network_link).grid(sticky='w'+'e',row=r,column=1)
r += 1
Tkinter.Label(self.manager_network_addonline_window,anchor='w',text='Username for host:').grid(sticky='w'+'e',row=r,column=1,columnspan=2)
r += 1
Tkinter.Button(self.manager_network_addonline_window,text='?',width=1,command=lambda: tkMessageBox.showinfo('Network Manager','Username for the host that bears CNIF, if applicable.')).grid(sticky='w'+'e',row=r,column=2)
Tkinter.Entry(self.manager_network_addonline_window,textvariable=self.manager_network_user).grid(sticky='w'+'e',row=r,column=1)
r += 1
Tkinter.Label(self.manager_network_addonline_window,anchor='w',text='Password for host:').grid(sticky='w'+'e',row=r,column=1,columnspan=2)
r += 1
Tkinter.Button(self.manager_network_addonline_window,text='?',width=1,command=lambda: tkMessageBox.showinfo('Network Manager','Password for the username for the host that bears the CNIF, if applicable.\nIf \'*\' is used, the program will ask for the password each time it tries to connect to the host. Prefer \'*\' for security, because this information is otherwise saved in a file on your local disk.')).grid(sticky='w'+'e',row=r,column=2)
Tkinter.Entry(self.manager_network_addonline_window,textvariable=self.manager_network_pass).grid(sticky='w'+'e',row=r,column=1)
r += 1
Tkinter.Button(self.manager_network_addonline_window,text='Fetch CNIF and add the network...',width=50,command=self.Networks_AddOnlineCNIF_ReadCNIF).grid(sticky='w'+'e',row=r,column=1,columnspan=2)
self.centerWindow(self.manager_network_addonline_window)
def Networks_AddOnlineCNIF_ReadCNIF(self):
if 'http://' not in self.manager_network_link.get() and 'https://' not in self.manager_network_link.get() and 'ftp://' not in self.manager_network_link.get():
tkMessageBox.showwarning('Incorrect link','Link is incorrect. Click ? for help.')
return False
elif len(self.manager_network_link.get().split('/'))<3 or '.' not in self.manager_network_link.get().split('/')[2]:
tkMessageBox.showwarning('Incorrect link','Link is incorrect. Click ? for help.')
return False
self.Networks_NetworkManager()
self.manager_network_window.geometry("0x0")
self.manager_network_addonline_window.lift()
self.Networks_AddNetwork()
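# derive the protocol, the host (third '/'-separated token of the link) and the
# remaining path, which becomes the CNIF file name on that host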
if 'http://' in self.manager_network_link.get() or 'https://' in self.manager_network_link.get():
self.manager_network_host.set(self.manager_network_link.get().split('/')[2])
self.manager_network_file.set(self.manager_network_link.get().replace(self.manager_network_link.get().split('/')[0]+'//'+self.manager_network_link.get().split('/')[2]+'/',''))
self.manager_network_protocol.set('HTTP')
if 'ftp://' in self.manager_network_link.get():
self.manager_network_host.set(self.manager_network_link.get().split('/')[2])
self.manager_network_file.set(self.manager_network_link.get().replace(self.manager_network_link.get().split('/')[0]+'//'+self.manager_network_link.get().split('/')[2],''))
self.manager_network_protocol.set('FTP')
self.manager_network_name.set(self.manager_network_nname.get())
self.manager_network_username.set(self.manager_network_user.get())
self.manager_network_password.set(self.manager_network_pass.get())
if self.Networks_CheckNetwork():
if self.Networks_ReadCNIF() is not False:
self.Networks_Network_Save()
self.manager_network_window.destroy()
self.manager_network_addonline_window.destroy()
else:
self.Networks_RemoveNetwork()
self.manager_network_window.destroy()
self.manager_network_addonline_window.lift()
self.manager_network_addonline_window.grab_set()
def Networks_Wizard(self):
tkMessageBox.showinfo('Single directory wizard','This wizard helps you add a directory of camera images on your computer to FMIPROT, or remove one you have added before.')
self.wizard = Tkinter.Toplevel(self,padx=10,pady=10)
var = Tkinter.IntVar()
self.wizard.grab_set()
self.wizard.wm_title('Single directory wizard')
Tkinter.Button(self.wizard ,text='I want to add a directory',command=lambda : var.set(1)).grid(sticky='w'+'e',row=1,column=1,columnspan=1)
Tkinter.Button(self.wizard ,text='I want to remove a directory',command=lambda : var.set(2)).grid(sticky='w'+'e',row=2,column=1,columnspan=1)
var.trace_variable('w',self.Networks_Wizard_destroy)
self.centerWindow(self.wizard)
self.wizard.wait_window()
if var.get() == 1: #add directory
protocol = 'LOCAL'
tkMessageBox.showinfo('Single directory wizard','Please locate the directory that contains the images in the next dialog.')
file_opt = options = {}
options['title'] = '(Choose) Custom directory'
datetimelist = []
while datetimelist == []:
filepath = str(os.path.normpath(tkFileDialog.askdirectory(**file_opt)))
if filepath == '.':
tkMessageBox.showinfo('Single directory wizard','You have cancelled the wizard.')
return False
else:
self.wizard = Tkinter.Toplevel(self,padx=10,pady=10)
var = Tkinter.StringVar()
self.wizard.grab_set()
self.wizard.wm_title('Single directory wizard')
Tkinter.Label(self.wizard ,anchor='w',wraplength=500,text='Enter the file name convention of the image files. The file name convention is how the files are named according to the time of the image.\nFor example, if the file name convention is \'researchsite_1_north_%Y_%m_%d_%H:%M:%S.jpg\' and an image is named \'researchsite_1_north_2016_09_24_18:27:05.jpg\', then the time the image was taken is 24 September 2016 18:27:05. Do not forget to include the extension (e.g. \'.jpg\', \'.png\').\nFor the meanings of the time directives (e.g. %Y, %m) refer to the user manual or https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .').grid(sticky='w',row=1,column=1,columnspan=1)
Tkinter.Entry(self.wizard ,textvariable=var).grid(sticky='w'+'e',row=2,column=1,columnspan=1)
Tkinter.Button(self.wizard ,text='OK',command=self.Networks_Wizard_destroy).grid(sticky='w'+'e',row=4,column=1,columnspan=1)
self.centerWindow(self.wizard)
self.wizard.wait_window()
filenameformat = str(var.get())
imglist = os.listdir(filepath)
for i, img in enumerate(imglist):
try:
datetimelist.append(parsers.strptime2(str(img),filenameformat)[0])
except:
pass
if len(datetimelist) == 0:
tkMessageBox.showinfo('Single directory wizard','The wizard cannot find any images that fit the file name convention in the directory. Select the directory and input the file name convention again to continue.')
continue
else:
tkMessageBox.showinfo('Single directory wizard',str(len(datetimelist)) + ' images were found in the directory, from '+ str(min(datetimelist))+' to '+ str(max(datetimelist))+'.')
var3 = Tkinter.BooleanVar()
var3.set(False)
while not var3.get():
self.wizard = Tkinter.Toplevel(self,padx=10,pady=10)
var1 = Tkinter.StringVar()
var2 = Tkinter.StringVar()
self.wizard.grab_set()
self.wizard.wm_title('Single directory wizard')
Tkinter.Label(self.wizard ,anchor='w',wraplength=500,text='Enter a camera network name and camera name for the images.').grid(sticky='w',row=1,column=1,columnspan=1)
Tkinter.Label(self.wizard ,wraplength=500,text='Network name').grid(sticky='w',row=2,column=1,columnspan=1)
Tkinter.Entry(self.wizard ,textvariable=var1).grid(sticky='w'+'e',row=3,column=1,columnspan=1)
Tkinter.Label(self.wizard ,anchor='w',wraplength=500,text='Camera name').grid(sticky='w',row=4,column=1,columnspan=1)
Tkinter.Entry(self.wizard ,textvariable=var2).grid(sticky='w'+'e',row=5,column=1,columnspan=1)
Tkinter.Button(self.wizard ,text='OK',command=self.Networks_Wizard_destroy).grid(sticky='w'+'e',row=6,column=1,columnspan=1)
Tkinter.Button(self.wizard ,text='Cancel',command=lambda : var3.set(True)).grid(sticky='w'+'e',row=7,column=1,columnspan=1)
self.centerWindow(self.wizard)
self.wizard.wait_window()
nets = sources.listNetworks(self.Message,self.networklist)
nets_ = nets[:]
for n,net in enumerate(nets):
nets[n] = parsers.validateName(net).lower()
if parsers.validateName(var1.get()).lower() in nets:
if sources.getSource(self.Message,self.networklist,nets_[nets.index(parsers.validateName(var1.get()).lower())])['protocol'] == 'LOCAL':
sours = sources.listSources(self.Message,self.sourcelist,nets_[nets.index(parsers.validateName(var1.get()).lower())])
sours_ = sours[:]
for s, sour in enumerate(sours):
sours[s] = parsers.validateName(sour).lower()
if parsers.validateName(var2.get()).lower() in sours:
tkMessageBox.showwarning('Single directory wizard','The network name you have entered already exists or is too similar to '+nets_[nets.index(parsers.validateName(var1.get()).lower())]+', and the camera name you have entered already exists or is too similar to '+sours_[sours.index(parsers.validateName(var2.get()).lower())]+'. Please change either the network name or the camera name.')
else:
if tkMessageBox.askyesno('Single directory wizard','The network name you have entered already exists or is too similar to '+nets_[nets.index(parsers.validateName(var1.get()).lower())]+'. Do you want to add the images as a camera to that network?'):
#add source to network
sourcedict = {'network':nets_[nets.index(parsers.validateName(var1.get()).lower())],'networkid':sources.getSource(self.Message,self.networklist,nets_[nets.index(parsers.validateName(var1.get()).lower())])['id'],'name':var2.get(),'protocol':'LOCAL','host':None,'username':None,'password':<PASSWORD>,'path':filepath,'filenameformat':filenameformat}
self.sourcelist.append(sourcedict)
parsers.writeTSVx(sources.getSource(self.Message,self.networklist,nets_[nets.index(parsers.validateName(var1.get()).lower())])['file'],sources.getSources(self.Message,self.sourcelist,sourcedict['network'],'network'))
tkMessageBox.showinfo('Single directory wizard','The camera is added to the network.')
break
else:
tkMessageBox.showwarning('Single directory wizard','Please enter a different camera network name.')
else:
tkMessageBox.showwarning('Single directory wizard','The network name you have entered already exists or is too similar to '+nets_[nets.index(parsers.validateName(var1.get()).lower())]+', but the CNIF of this network is not stored on this computer, so FMIPROT cannot add the directory to that network. Please enter a different camera network name.')
else:
#add network and source
i = 1
ids = []
for network in self.networklist:
ids.append(network['id'])
while len(ids) > 0 and str(i) in ids:
i += 1
networkdict = {'id':str(i),'name':var1.get(),'protocol':'LOCAL','host':None,'username':None,'password':<PASSWORD>,'file':os.path.join(SourceDir,parsers.validateName(var1.get()).lower()+'.tsvx'),'localfile':parsers.validateName(var1.get()).lower()+'.tsvx'}
self.networklist.append(networkdict)
sourcedict = {'network':var1.get(),'networkid':sources.getSource(self.Message,self.networklist,var1.get())['id'],'name':var2.get(),'protocol':'LOCAL','host':None,'username':None,'password':<PASSWORD>,'path':filepath,'filenameformat':filenameformat}
self.sourcelist.append(sourcedict)
parsers.writeTSVx(NetworklistFile,self.networklist)
parsers.writeTSVx(networkdict['file'],[sourcedict])
tkMessageBox.showinfo('Single directory wizard','The camera network is created and the camera is added to the network.')
break
if var.get() == 2: #remove directory
removelist = []
removelist_names = []
for network in self.networklist:
if network['protocol'] == 'LOCAL':
for source in sources.getSources(self.Message,self.sourcelist,network['name'],'network'):
if source['protocol'] == 'LOCAL':
removelist.append([network['name'],source['name']])
removelist_names.append(network['name']+' - '+ source['name'])
if len(removelist) == 0:
tkMessageBox.showinfo('Single directory wizard','There is no single-directory-type camera or camera network to be removed.')
else:
self.wizard = Tkinter.Toplevel(self,padx=10,pady=10)
var = Tkinter.StringVar()
var.set(removelist_names[0])
self.wizard.grab_set()
self.wizard.wm_title('Single directory wizard')
Tkinter.Label(self.wizard ,anchor='w',wraplength=500,text='Choose a camera to remove:').grid(sticky='w'+'e',row=1,column=1,columnspan=1)
Tkinter.OptionMenu(self.wizard ,var,*removelist_names).grid(sticky='w'+'e',row=2,column=1,columnspan=1)
Tkinter.Button(self.wizard ,text='Remove',command=self.Networks_Wizard_destroy).grid(sticky='w'+'e',row=3,column=1,columnspan=1)
Tkinter.Button(self.wizard ,text='Cancel',command=lambda : var.set('')).grid(sticky='w'+'e',row=4,column=1,columnspan=1)
self.centerWindow(self.wizard)
self.wizard.wait_window()
if var.get() != '':
rem = removelist[removelist_names.index(var.get())]
if len(sources.getSources(self.Message,self.sourcelist,rem[0],'network')) == 1:
self.networklist.remove(sources.getSource(self.Message,self.networklist,rem[0]))
self.sourcelist.remove(sources.getSource(self.Message,sources.getSources(self.Message,self.sourcelist,rem[0],'network'),rem[1]))
else:
self.sourcelist.remove(sources.getSource(self.Message,sources.getSources(self.Message,self.sourcelist,rem[0],'network'),rem[1]))
parsers.writeTSVx(sources.getSource(self.Message,self.networklist,rem[0])['file'],sources.getSources(self.Message,self.sourcelist,rem[0],'network'))
parsers.writeTSVx(NetworklistFile,self.networklist)
tkMessageBox.showinfo('Single directory wizard',var.get() + ' removed.')
def Networks_Wizard_destroy(self,*args):
self.wizard.destroy()
self.lift()
def Networks_Import(self):
return False
def Networks_Export(self):
return False
def Networks_ProxyManager(self):
self.manager_proxy_window = Tkinter.Toplevel(self,padx=10,pady=10)
self.manager_proxy_window.grab_set()
self.manager_proxy_window.wm_title('Camera Network Proxy Manager')
self.manager_proxy_window.columnconfigure(1, minsize=20)
self.manager_proxy_window.columnconfigure(2, minsize=40)
self.manager_proxy_window.columnconfigure(3, minsize=20)
self.manager_proxy_window.columnconfigure(4, minsize=20)
self.manager_proxy_window.columnconfigure(5, minsize=100)
self.manager_proxy_window.columnconfigure(6, minsize=100)
self.manager_proxy_window.columnconfigure(7,
di = {
'абрикос': 30,
'абрикосы (консервированные в сиропе)': 60,
'авокадо': 10,
'айва': 35,
'амарант (семена)': 35,
'амарант воздушный (аналог попкорна)': 70,
'амилоза': 48,
'ананас': 60,
'ананас (консервированные в сиропе)': 65,
'ананасовый сок без сахара': 50,
'апельсин': 35,
'апельсиновый сок': 45,
'арахис': 15,
'арахисовая паста (без сахара)': 20,
'арахисовое масло (паста без сахара)': 40,
'арбуз': 75,
'артишок': 20,
'ацерола (барбадосская вишня)': 20,
'баклажан': 20,
'бамбука ростки': 20,
'банан': 60,
'банан десертный (зеленый)': 45,
'банан десертный (спелый)': 60,
'банан платана (сырой)': 45,
'банан платана (только в приготовленном виде)': 70,
'батат (сладкий картофель)': 65,
'белок одного яйца': 48,
'белый сахар': 70,
'белый хлеб': 100,
'бисквит': 70,
'бискотти (сухое печенье)': 70,
'блинчики': 95,
'блины из гречневой муки': 50,
'бобы соевые': 15,
'бобы соевые, консервированные': 22,
'бобы, фава (сырая)': 40,
'бриошь (булочка)': 70,
'брокколи': 15,
'брюква': 99,
'бублики, баранки': 70,
'булгур': 55,
'булгур цельнозерновой (приготовленный)': 45,
'булочки для гамбургеров': 85,
'быстрорастворимая овсяная каша': 66,
'варенье': 65,
'варенья и джемы': 65,
'вафли сладкие': 75,
'вермишель из твердых сортов пшеницы': 35,
'вермишель соевая': 30,
'виноград': 45,
'виноградный сок': 55,
'вишня': 25,
'гамбургер': 103,
'глюкоза': 100,
'глюкоза (декстроза)': 100,
'голубика': 25,
'горох сухой': 25,
'горошек зеленый': 35,
'горошек зеленый (консервированный)': 45,
'горошек стручковой': 15,
'горчица': 55,
'горчица, горчица дижонская': 35,
'гранат': 35,
'грейпфрут': 30,
'грейпфрутовый сок (без сахара)': 45,
'греча': 40,
'гречка (коричневая, с обжаркой)': 60,
'грибы': 15,
'груша': 30,
'груша дынная (пепино)': 40,
'джем': 65,
'джем (без сахара)': 30,
'джем (без сахара, подслащенный виноградным соком)': 45,
'джем (без сахара, подслащенный фруктовым соком)': 30,
'джем без сахара (montignac®)': 20,
'джем стандартный с сахаром': 55,
'дижонская горчица': 35,
'дрожжи пекарные': 35,
'дрожжи пивные': 35,
'дыня': 60,
'ежевика': 30,
'желе из айвы (без сахара)': 40,
'желе из айвы (с сахаром)': 65,
'желтая чечевица': 30,
'желток одного яйца': 50,
'зеленая гречка (без предварительной обжарки)': 50,
'зеленая и красная чечевица': 25,
'зеленая фасоль': 30,
'зеленая чечевица': 25,
'зеленый горошек': 35,
'зерновые зародыши': 15,
'злаки цельнозерновые (без сахара)': 45,
'золотистая фасоль': 25,
'изюм': 65,
'имбирь': 15,
'инжир': 35,
'инжир сушеный': 40,
'йогурт натуральный': 35,
'йогурт сладкий': 52,
'йогурт соевый (ароматизированный)': 35,
'йогурт соевый (натуральный)': 20,
'кабачок': 15,
'какао-порошок (без сахара)': 20,
'какао-порошок (с добавлением сахара)': 60,
'камут цельнозерновой': 40,
'капеллини (вид пасты, тоньше спагетти)': 45,
'капуста белокочанная': 15,
'капуста брюссельская': 15,
'капуста квашеная': 15,
'капуста цветная': 15,
'картофель вареный': 85,
'картофель вареный в мундире': 65,
'картофель жареный': 95,
'картофель отварной без кожицы': 70,
'картофель печеный': 95,
'картофельная запеканка': 95,
'картофельное пюре': 83,
'картофельное пюре - порошок': 90,
'картофельные хлопья (быстрого приготовления)': 90,
'картофельные чипсы': 70,
'кассуле (фр. блюдо из фасоли и мяса)': 35,
'каша гречневая': 40,
'каша из кукурузной муки (мамалыга)': 70,
'каша из овсяных хлопьев': 60,
'каша овсяная (дроблёная), блюда из овсянки': 60,
'каша пшенная': 70,
'каши быстрого приготовления': 85,
'каштан': 60,
'квиноа': 35,
'кетчуп': 55,
'кефир нежирный': 25,
'кешью': 15,
'киви': 50,
'киноа': 50,
'клубника': 25,
'клубника, земляника': 25,
'клюква': 47,
'клюквенный сок (без сахара)': 50,
'кокос': 45,
'компот (без сахара)': 34,
'консервированные абрикосы': 91,
'консервированные овощи': 65,
'консервированные персики': 55,
'консервированный ананас': 65,
'консервированный зеленый горошек': 45,
'коричневая чечевица': 30,
'коричневый сахар': 70,
'корнишон': 15,
'красная смородина': 25,
'крахмал из подземных побегов': 85,
'крахмал картофельный': 95,
'крахмал кукурузный': 85,
'крахмал модифицированный': 100,
'креветки': 0,
'крекер': 80,
'круассан': 70,
'крупа гречневая': 40,
'крупа ячневая': 25,
'крыжовник': 25,
'кукуруза индийская': 35,
'кукуруза консервированная': 65,
'кукуруза свежая': 35,
'кукурузные хлопья': 85,
'кунжут': 35,
'кунжутная паста, тахини': 40,
'кунжутное семя': 35,
'курага': 40,
'кускус': 65,
'кускус цельнозерновой': 50,
'лазанья (из пшеницы мягких сортов)': 75,
'лазанья (из пшеницы твердых сортов)': 60,
'лактоза': 40,
'лапша из мягких сортов пшеницы': 70,
'лапша рисовая': 65,
'лепешки пресные': 69,
'лимон': 20,
'листовой салат': 9,
'личи': 50,
'лук-порей': 15,
'лук-шалот': 15,
'льняное семя': 35,
'люпин': 15,
'маис': 35,
'майонез (промышленный с сахаром)': 60,
'макароны (из твердых сортов пшеницы)': 50,
'макароны с сыром': 64,
'макароны, сваренные «al dente»': 40,
'маковое семя': 35,
'малина': 25,
'мальтодекстрин': 95,
'манго': 50,
'мандарин': 30,
'мандарин, клементин': 30,
'маниока (горькая, сладкая)': 55,
'манка': 70,
'манка цельнозерновая': 50,
'манка/крупа из твердых сортов пшеницы': 60,
'маракуйя': 30,
'маранта (арроу-рут)': 85,
'маргарин': 55,
'мармелад': 65,
'мармелад (без сахара montignac®)': 30,
'мармелад с сахаром': 65,
'маца (из белой муки)': 70,
'маца (цельнозерновая мука)': 40,
'мед': 90,
'мёд': 90,
'мед гречишный': 60,
'мёд гречишный': 60,
'миндаль': 15,
'миндальная паста (без сахара)': 35,
'миндальное молоко': 30,
'модифицированный крахмал': 100,
'молоко кокосовое': 40,
'молоко коровье': 30,
'молоко овсяное (сырое)': 30,
'молоко рисовое': 85,
'молоко сгущеное с сахаром': 80,
'молоко соевое': 30,
'молоко сухое': 30,
'молоко шоколадное': 34,
'молочно-шоколадные сухие напитки (ovomaltine, nesquik)': 60,
'морковный сок (без сахара)': 40,
'морковь': 30,
'морковь (вареная или тушеная)': 85,
'мороженное на фруктозе': 35,
'мороженое': 60,
'мороженое из соевого молока': 35,
'морошка': 25,
'морская капуста': 22,
'мука айвовая': 40,
'мука гречневая': 40,
'мука грубого помола': 60,
'мука из камута (цельнозерновая)': 45,
'мука из квиноа': 40,
'мука из полбы (цельнозерновая)': 45,
'мука каштановая': 65,
'мука кукурузная': 70,
'мука нутовая': 35,
'мука обдирная пшеничная (цельнозерновая)': 65,
'мука пшеничная очищенная в/с': 85,
'мука ржаная цельнозерновая': 45,
'мука рисовая': 95,
'мука соевая': 25,
'мушмула, слива японская': 55,
'мюсли (без сахара)': 50,
'мюсли (с сахаром, медом)': 65,
'мюсли montignac®': 45,
'мюсли с орехами и изюмом': 80,
'мюсли с сахаром': 65,
'мякоть пальмы': 20,
'мясо с бобами': 35,
'нектарин': 35,
'несладкие вафли': 75,
'несладкий поп-корн': 85,
'нут': 35,
'овощи консервированные (заводские)': 65,
'овсяные хлопья (без кулинарной обработки)': 40,
'овсяный (устричный) корень': 30,
'овёс (крупа)': 40,
'огурец': 15,
'огурчики-пикули маринованные': 15,
'оладьи из пшеничной муки': 62,
'оливки': 15,
'омлет': 49,
'опунция': 35,
'орех грецкий': 15,
'орех лесной (фундук)': 15,
'орехи кедровые': 15,
'орехи кешью': 15,
'орехи фисташки': 15,
'отруби': 15,
'папайя': 55,
'паста nulella®': 55,
'паста из лесных орехов (без сахара)': 25,
'паста из неочищенного миндаля (без сахара)': 25,
'паста из очищенного миндаля (без сахара)': 35,
'паста из цельнозерновой муки': 50,
'паста из цельнозерновой муки, приготовленная al dente': 40,
'пастернак': 85,
'патока': 70,
'пельмени': 70,
'перец сладкий': 15,
'перец чили': 15,
'перловая каша на воде': 35,
'перловая каша на молоке': 65,
'перловая крупа': 30,
'персик': 35,
'персик (консервированный в сиропе)': 55,
'песочное печенье': 55,
'песто': 15,
'печенье масляное (мука, масло, сахар)': 55,
'печенье овсяное': 55,
'печенье песочное (из муки, масла, сахара)': 55,
'печенье песочное (цельнозерновая мука, без сахара)': 40,
'печенье сдобное': 55,
'пиво': 110,
'пицца': 60,
'плод хлебного дерева': 65,
'подсолнечник (семена)': 35,
'полба': 40,
'полба (из рафинированной муки)': 65,
'полба (цельнозерновая)': 45,
'полента кукурузная крупа': 70,
'пончики': 75,
'попкорн': 85,
'попкорн несладкий': 85,
'промышленный майонез': 60,
'пророщенные зерна пшеницы': 63,
'пророщенные злаки (пшеницы, сои...)': 15,
'просо': 70,
'пшеничная мука': 69,
'пшено': 71,
'равиоли (из мягких сортов пшеницы)': 70,
'равиоли (твердые сорта пшеницы)': 60,
'раки отварные': 5,
'ракообразные (омар, краб, лангуст)': 5,
'рататуй': 20,
'ревень': 15,
'редис': 15,
'репа': 30,
'репчатый лук': 15,
'хлеб ржаной': 65,
'ризотто': 70,
'рис басмати': 50,
'рис басмати неочищенный': 45,
'рис белый': 70,
'рис белый клейкий': 90,
'рис быстрого приготовления': 90,
'рис воздушный (аналог попкорна)': 85,
'рис дикий (черный)': 35,
'рис длиннозерновой': 60,
'рис жасмин': 60,
'рис камаргский': 60,
'рис коричневый (бурый)': 50,
'рис красный': 55,
'рисовая каша на молоке': 75,
'рисовые галеты': 85,
'рисовый пудинг': 85,
'рожкового дерева порошок (загуститель)': 15,
'салат ("айсберг", листовой, руккола и т.п.)': 15,
'сахар белый': 70,
'сахар бурый': 70,
'сахар коричневый': 70,
'свекла': 30,
'свекла (вареная или тушеная)': 65,
'свекла листовая': 15,
'сдобные булочки': 95,
'сельдерей': 15,
'сельдерей корневой (сырой)': 35,
'семена чиа, льняное семя': 10,
'сидр сухой': 40,
'сироп агавы': 15,
'сироп глюкозы': 100,
'сироп кленовый': 65,
'сироп кукурузный': 115,
'сироп пшеничный': 100,
'сироп рисовый': 100,
'сироп цикория': 55,
'скорцонера (козелец)': 30,
'сладкая выпечка (вафли, пончики)': 75,
'сладкая газировка («кока-кола», «пепси-кола» и подобные)': 70,
'сладкая консервированная кукуруза': 57,
'сладкие газированные напитки (pepsi, coca-cola)': 70,
'сладкий пончик': 76,
'слива': 35,
'смесь злаков для завтрака (kellogg)': 70,
'смесь злаков с отрубями марки all bran™ (kellogg)': 50,
'смесь очищенных злаков с сахаром': 70,
'сметана
<filename>tests/examples/minlplib/camshape400.py
# NLP written by GAMS Convert at 04/21/18 13:51:13
#
# Equation counts
#     Total        E        G        L        N        X        C        B
#       801      400        0      401        0        0        0        0
#
# Variable counts
#                  x        b        i      s1s      s2s       sc       si
#     Total     cont   binary  integer     sos1     sos2    scont     sint
#       800      800        0        0        0        0        0        0
# FX       0        0        0        0        0        0        0        0
#
# Nonzero counts
#     Total    const       NL      DLL
#      2797     1598     1199        0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
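# (note) generated variable block: one Var per profile sample point; in the
# MINLPLib "camshape" model these are, presumably, the sampled cam radii.
# All share bounds (1, 2) and a midpoint initial value of 1.5, except x1
# just below, whose tight upper bound pins it to ~1.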
m.x1 = Var(within=Reals,bounds=(1,1.00000982052922),initialize=1.00000982052922)
m.x2 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x3 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x4 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x5 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x6 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x7 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x8 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x9 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x10 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x11 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x12 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x13 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x14 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x15 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x16 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x17 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x18 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x19 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x20 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x21 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x22 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x23 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x24 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x25 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x26 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x27 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x28 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x29 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x30 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x31 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x32 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x33 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x34 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x35 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x36 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x37 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x38 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x39 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x40 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x41 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x42 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x43 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x44 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x45 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x46 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x47 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x48 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x49 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x50 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x51 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x52 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x53 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x54 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x55 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x56 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x57 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x58 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x59 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x60 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x61 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x62 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x63 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x64 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x65 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x66 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x67 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x68 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x69 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x70 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x71 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x72 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x73 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x74 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x75 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x76 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x77 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x78 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x79 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x80 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x81 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x82 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x83 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x84 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x85 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x86 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x87 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x88 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x89 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x90 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x91 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x92 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x93 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x94 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x95 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x96 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x97 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x98 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x99 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x100 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x101 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x102 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x103 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x104 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x105 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x106 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x107 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x108 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x109 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x110 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x111 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x112 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x113 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x114 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x115 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x116 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x117 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x118 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x119 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x120 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x121 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x122 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x123 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x124 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x125 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x126 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x127 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x128 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x129 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x130 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x131 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x132 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x133 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x134 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x135 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x136 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x137 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x138 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x139 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x140 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x141 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x142 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x143 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x144 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x145 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x146 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x147 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x148 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x149 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x150 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x151 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x152 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x153 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x154 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x155 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x156 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x157 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x158 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x159 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x160 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x161 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x162 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x163 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x164 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x165 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x166 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x167 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x168 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x169 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x170 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x171 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x172 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x173 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x174 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x175 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x176 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x177 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x178 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x179 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x180 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x181 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x182 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x183 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x184 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x185 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x186 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x187 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x188 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x189 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x190 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x191 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x192 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x193 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x194 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x195 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x196 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x197 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x198 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x199 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x200 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x201 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x202 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x203 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x204 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x205 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x206 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x207 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x208 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x209 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x210 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x211 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x212 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x213 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x214 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x215 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x216 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x217 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x218 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x219 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x220 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x221 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x222 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x223 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x224 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x225 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x226 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x227 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x228 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x229 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x230 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x231 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x232 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x233 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x234 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x235 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x236 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x237 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x238 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x239 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x240 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x241 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x242 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x243 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x244 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x245 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x246 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x247 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x248 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x249 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x250 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x251 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x252 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x253 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x254 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x255 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x256 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x257 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x258 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x259 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x260 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x261 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x262 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x263 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x264 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x265 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x266 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x267 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x268 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x269 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x270 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x271 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x272 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x273 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x274 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x275 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x276 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x277 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x278 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x279 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x280 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x281 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x282 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x283 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x284 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x285 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x286 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x287 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x288 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x289 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x290 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x291 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x292 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x293 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x294 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x295 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x296 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x297 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x298 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x299 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x300 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x301 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x302 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x303 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x304 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x305 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x306 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x307 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x308 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x309 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x310 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x311 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x312 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x313 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x314 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x315 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x316 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x317 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x318 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x319 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x320 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x321 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x322 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x323 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x324 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x325 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x326 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x327 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x328 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x329 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x330 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x331 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x332 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x333 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x334 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x335 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x336 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x337 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x338 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x339 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x340 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x341 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x342 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x343 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x344 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x345 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x346 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x347 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x348 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x349 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x350 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x351 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x352 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x353 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x354 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x355 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x356 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x357 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x358 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x359 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x360 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x361 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x362 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x363 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x364 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x365 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x366 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x367 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x368 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x369 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x370 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x371 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x372 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x373 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x374 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x375 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x376 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x377 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x378 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x379 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x380 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x381 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x382 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x383 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x384 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x385 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x386 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x387 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x388 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x389 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x390 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x391 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x392 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x393 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x394 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x395 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x396 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x397 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x398 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x399 = Var(within=Reals,bounds=(1,2),initialize=1.5)
m.x400 = Var(within=Reals,bounds=(1.99529936261308,2),initialize=1.99529936261308)
m.x401 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x402 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x403 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x404 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x405 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x406 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x407 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x408 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x409 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x410 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x411 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x412 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x413 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x414 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x415 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x416 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x417 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x418 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x419 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x420 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x421 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x422 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x423 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x424 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x425 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x426 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x427 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x428 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x429 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x430 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x431 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x432 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x433 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x434 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x435 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x436 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x437 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x438 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x439 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x440 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x441 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x442 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x443 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x444 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x445 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x446 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x447 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x448 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x449 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x450 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x451 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x452 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x453 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x454 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x455 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x456 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x457 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x458 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x459 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x460 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x461 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x462 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x463 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x464 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x465 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x466 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x467 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x468 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x469 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x470 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x471 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x472 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x473 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x474 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x475 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x476 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x477 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x478 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x479 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x480 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x481 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x482 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x483 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x484 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x485 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x486 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x487 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x488 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x489 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x490 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x491 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x492 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x493 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x494 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x495 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x496 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x497 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x498 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x499 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x500 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x501 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x502 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x503 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x504 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x505 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x506 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x507 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x508 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x509 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x510 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x511 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x512 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x513 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x514 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x515 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x516 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x517 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x518 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x519 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x520 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x521 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x522 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x523 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x524 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x525 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x526 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x527 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x528 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x529 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x530 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x531 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x532 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x533 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x534 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x535 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x536 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x537 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x538 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x539 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x540 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x541 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x542 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x543 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x544 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x545 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x546 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x547 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x548 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x549 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x550 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x551 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x552 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x553 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x554 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x555 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x556 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x557 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x558 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x559 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x560 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x561 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x562 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x563 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x564 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x565 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x566 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x567 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x568 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x569 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x570 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x571 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x572 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x573 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x574 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x575 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x576 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x577 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x578 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x579 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x580 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x581 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x582 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x583 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x584 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x585 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x586 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x587 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x588 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x589 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x590 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x591 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x592 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x593 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x594 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x595 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
m.x596 = Var(within=Reals,bounds=(-0.0047006373869174,0.0047006373869174),initialize=0)
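# The repetitive declarations above could equivalently be generated in a loop
# (shown here as a comment so the explicit declarations remain authoritative;
# x400 and x401 have their own bounds and are declared individually above):
# for i in range(205, 400):
#     setattr(m, 'x%d' % i, Var(within=Reals, bounds=(1, 2), initialize=1.5))
# for i in range(402, 597):
#     setattr(m, 'x%d' % i,
#             Var(within=Reals, bounds=(-0.0047006373869174, 0.0047006373869174), initialize=0))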
<gh_stars>10-100
import openmmtools.cache as cache
import os
import copy
from perses.dispersed.utils import *
from openmmtools.states import ThermodynamicState, CompoundThermodynamicState, SamplerState
import numpy as np
import mdtraj as md
import simtk.unit as unit
import logging
import time
from collections import namedtuple
from perses.annihilation.lambda_protocol import LambdaProtocol
from perses.annihilation.lambda_protocol import RelativeAlchemicalState
from perses.dispersed import *
import random
import pymbar
from perses.dispersed.parallel import Parallelism
from openmmtools import utils
# Instantiate logger
logging.basicConfig(level = logging.NOTSET)
_logger = logging.getLogger("sMC")
_logger.setLevel(logging.INFO)
cache.global_context_cache.platform = configure_platform(utils.get_fastest_platform().getName())
EquilibriumFEPTask = namedtuple('EquilibriumInput', ['sampler_state', 'inputs', 'outputs'])
DISTRIBUTED_ERROR_TOLERANCE = 1e-4
class SequentialMonteCarlo():
"""
This class represents an sMC particle that runs a nonequilibrium switching protocol.
It is a batteries-included engine for conducting sequential Monte Carlo sampling.
WARNING: take care when writing trajectory files, as saving positions to memory is costly. Either do not write the configuration or save sparse positions.
"""
supported_resampling_methods = {'multinomial': multinomial_resample}
supported_observables = {'ESS': ESS, 'CESS': CESS}
def __init__(self,
factory,
lambda_protocol = 'default',
temperature = 300 * unit.kelvin,
trajectory_directory = 'test',
trajectory_prefix = 'out',
atom_selection = 'not water',
timestep = 1 * unit.femtoseconds,
collision_rate = 1 / unit.picoseconds,
eq_splitting_string = 'V R O R V',
neq_splitting_string = 'V R O R V',
ncmc_save_interval = None,
measure_shadow_work = False,
neq_integrator = 'langevin',
compute_endstate_correction = True,
external_parallelism = None,
internal_parallelism = {'library': ('dask', 'LSF'),
'num_processes': 2}
):
"""
Parameters
----------
factory : perses.annihilation.relative.HybridTopologyFactory - compatible object
lambda_protocol : str, default 'default'
the flavor of scalar lambda protocol used to control electrostatic, steric, and valence lambdas
temperature : float unit.Quantity
Temperature at which to perform the simulation, default 300K
trajectory_directory : str, default 'test'
Where to write out trajectories resulting from the calculation. If None, no writing is done.
trajectory_prefix : str, default 'out'
    What prefix to use for this calculation's trajectory files. If None, no writing is done.
atom_selection : str, default 'not water'
MDTraj selection syntax for which atomic coordinates to save in the trajectories. Default strips
all water.
timestep : float unit.Quantity, default 1 * units.femtoseconds
the timestep for running MD
collision_rate : float unit.Quantity, default 1 / unit.picoseconds
the collision rate for running MD
eq_splitting_string : str, default 'V R O R V'
The integrator splitting to use for equilibrium simulation
neq_splitting_string : str, default 'V R O R V'
The integrator splitting to use for nonequilibrium switching simulation
ncmc_save_interval : int, default None
interval with which to write ncmc trajectory. If None, trajectory will not be saved.
We assert that n_lambdas % ncmc_save_interval == 0; otherwise, the protocol will not be complete
measure_shadow_work : bool, default False
whether to measure the shadow work of the integrator.
WARNING : this is not currently supported
neq_integrator : str, default 'langevin'
which integrator to use
compute_endstate_correction : bool, default True
whether to compute the importance weight to the alchemical endstates
external_parallelism : dict('parallelism': perses.dispersed.parallel.Parallelism, 'available_workers': list(str)), default None
an external parallelism dictionary;
external_parallelism is used if the entire SequentialMonteCarlo class is allocated workers by an external client (i.e.
there exists a Parallelism.client object that is allocating distributed workers to several SequentialMonteCarlo classes simultaneously)
internal_parallelism : dict, default {'library': ('dask', 'LSF'), 'num_processes': 2}
dictionary of parameters used to instantiate a client and run parallel computation internally; internal parallelization is the default.
If None, external worker arguments have to be specified; otherwise, no parallel computation will be conducted and annealing will be run locally.
internal_parallelism is used when the SequentialMonteCarlo class is allowed to create its own Parallelism.client object to allocate workers on a
cluster.
"""
_logger.info(f"Initializing SequentialMonteCarlo")
#pull necessary attributes from factory
self.factory = factory
#context cache
self.context_cache = cache.global_context_cache
#use default protocol
self.lambda_protocol = lambda_protocol
#handle both eq and neq parameters
self.temperature = temperature
self.timestep = timestep
self.collision_rate = collision_rate
self.measure_shadow_work = measure_shadow_work
self.neq_integrator = neq_integrator
if measure_shadow_work:
raise Exception(f"measure_shadow_work is not currently supported. Aborting!")
#handle equilibrium parameters
self.eq_splitting_string = eq_splitting_string
#handle storage and names
self.trajectory_directory = trajectory_directory
self.trajectory_prefix = trajectory_prefix
self.atom_selection = atom_selection
#handle neq parameters
self.neq_splitting_string = neq_splitting_string
self.ncmc_save_interval = ncmc_save_interval
#lambda states:
self.lambda_endstates = {'forward': [0.0,1.0], 'reverse': [1.0, 0.0]}
#instantiate trajectory filenames
if self.trajectory_directory and self.trajectory_prefix:
self.write_traj = True
self.eq_trajectory_filename = {lambda_state: os.path.join(os.getcwd(), self.trajectory_directory, f"{self.trajectory_prefix}.eq.lambda_{lambda_state}.h5") for lambda_state in self.lambda_endstates['forward']}
self.neq_traj_filename = {direct: os.path.join(os.getcwd(), self.trajectory_directory, f"{self.trajectory_prefix}.neq.lambda_{direct}") for direct in self.lambda_endstates.keys()}
self.topology = self.factory.hybrid_topology
else:
self.write_traj = False
self.eq_trajectory_filename = {0: None, 1: None}
self.neq_traj_filename = {'forward': None, 'reverse': None}
self.topology = None
self.atom_selection_string = atom_selection
# subset the topology appropriately:
if self.atom_selection_string is not None:
atom_selection_indices = self.factory.hybrid_topology.select(self.atom_selection_string)
self.atom_selection_indices = atom_selection_indices
else:
self.atom_selection_indices = None
# instantiating equilibrium file/rp collection dicts
self._eq_dict = {0: [], 1: [], '0_decorrelated': None, '1_decorrelated': None, '0_reduced_potentials': [], '1_reduced_potentials': []}
self._eq_files_dict = {0: [], 1: []}
self._eq_timers = {0: [], 1: []}
self._neq_timers = {'forward': [], 'reverse': []}
#instantiate nonequilibrium work dicts: keys indicate the equilibrium thermodynamic state FROM which the neq switching is conducted (as opposed to TO)
self.cumulative_work = {'forward': [], 'reverse': []}
self.incremental_work = copy.deepcopy(self.cumulative_work)
self.shadow_work = copy.deepcopy(self.cumulative_work)
self.nonequilibrium_timers = copy.deepcopy(self.cumulative_work)
self.total_jobs = 0
#self.failures = copy.deepcopy(self.cumulative_work)
self.dg_EXP = copy.deepcopy(self.cumulative_work)
self.dg_BAR = None
#instantiate thermodynamic state
lambda_alchemical_state = RelativeAlchemicalState.from_system(self.factory.hybrid_system)
lambda_alchemical_state.set_alchemical_parameters(0.0, LambdaProtocol(functions = self.lambda_protocol))
self.thermodynamic_state = CompoundThermodynamicState(ThermodynamicState(self.factory.hybrid_system, temperature = self.temperature),composable_states = [lambda_alchemical_state])
# set the SamplerState for the lambda 0 and 1 equilibrium simulations
sampler_state = SamplerState(self.factory.hybrid_positions,
box_vectors=self.factory.hybrid_system.getDefaultPeriodicBoxVectors())
self.sampler_states = {0: copy.deepcopy(sampler_state), 1: copy.deepcopy(sampler_state)}
#endstate corrections?
self.compute_endstate_correction = compute_endstate_correction
#implement the appropriate parallelism
self.implement_parallelism(external_parallelism = external_parallelism,
internal_parallelism = internal_parallelism)
def implement_parallelism(self, external_parallelism, internal_parallelism):
"""
Function to implement the appropriate parallelism given input arguments.
This is exposed as a method in case the class already exists and the user wants to change the parallelism scheme.
Parameters
----------
external_parallelism : dict('parallelism': perses.dispersed.parallel.Parallelism, 'available_workers': list(str)), default None
an external parallelism dictionary
internal_parallelism : dict, default {'library': ('dask', 'LSF'), 'num_processes': 2}
dictionary of parameters used to instantiate a client and run parallel computation internally; internal parallelization is the default.
If None, external worker arguments have to be specified; otherwise, no parallel computation will be conducted and annealing will be run locally.
"""
#parallelism implementables
if external_parallelism is not None and internal_parallelism is not None:
raise Exception(f"external parallelism were given, but an internal parallelization scheme was also specified. Aborting!")
if external_parallelism is not None:
self.external_parallelism, self.internal_parallelism = True, False
self.parallelism, self.workers = external_parallelism['parallelism'], external_parallelism['workers']
self.parallelism_parameters = None
assert self.parallelism.client is not None, f"the external parallelism class does not yet have an activated client."
elif internal_parallelism is not None:
self.external_parallelism, self.internal_parallelism = False, True
self.parallelism, self.workers = Parallelism(), internal_parallelism['num_processes']
self.parallelism_parameters = internal_parallelism
else:
_logger.warning(f"both internal and external parallelisms are unspecified. Defaulting to not_parallel.")
self.external_parallelism, self.internal_parallelism = False, True
self.parallelism_parameters = {'library': None, 'num_processes': None}
self.parallelism, self.workers = Parallelism(), 0
def _activate_annealing_workers(self):
"""
wrapper to distribute workers and create appropriate worker attributes for annealing
"""
_logger.debug(f"activating annealing workers...")
if self.internal_parallelism:
_logger.debug(f"found internal parallelism; activating client with the following parallelism parameters: {self.parallelism_parameters}")
#we have to activate the client
self.parallelism.activate_client(library = self.parallelism_parameters['library'],
num_processes = self.parallelism_parameters['num_processes'])
workers = list(self.parallelism.workers.values())
elif self.external_parallelism:
#the client is already active
workers = self.parallelism_parameters['available_workers']
else:
raise Exception(f"either internal or external parallelism must be True.")
#now client.run to broadcast the vars
broadcast_remote_worker = 'remote' if self.parallelism.client is not None else self
addresses = self.parallelism.run_all(func = activate_LocallyOptimalAnnealing, #func
arguments = (copy.deepcopy(self.thermodynamic_state), #arg: thermodynamic state
broadcast_remote_worker, #arg: remote worker
self.lambda_protocol, #arg: lambda protocol
self.timestep, #arg: timestep
self.collision_rate, #arg: collision_rate
self.temperature, #arg: temperature
self.neq_splitting_string, #arg: neq_splitting string
self.ncmc_save_interval, #arg: ncmc_save_interval
self.topology, #arg: topology
self.atom_selection_indices, #arg: subset atoms
self.measure_shadow_work, #arg: measure_shadow_work
self.neq_integrator, #arg: integrator,
self.compute_endstate_correction #arg: compute_endstate_correction
),
workers = workers) #workers
def _deactivate_annealing_workers(self):
"""
wrapper to deactivate workers and delete appropriate worker attributes for annealing
"""
if self.internal_parallelism:
_logger.debug(f"\t\tfound internal parallelism; deactivating client.")
#we have to deactivate the client
if self.parallelism.client is None:
#then we are running local annealing
deactivate_worker_attributes(remote_worker = self)
self.parallelism.deactivate_client()
elif self.external_parallelism:
#the client is already active; we don't have the authority to deactivate
workers = self.parallelism_parameters['available_workers']
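# A hypothetical construction sketch (not from the original source; `factory` is
# assumed to be a HybridTopologyFactory-compatible object as documented above):
# smc = SequentialMonteCarlo(factory,
#                            trajectory_directory=None,  # disables trajectory writing
#                            external_parallelism=None,
#                            internal_parallelism=None)  # warns, then anneals locally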
'TaskExecutionId': 'string',
'Status': 'PENDING'|'IN_PROGRESS'|'SUCCESS'|'FAILED'|'TIMED_OUT'|'CANCELLING'|'CANCELLED'|'SKIPPED_OVERLAPPING',
'StatusDetails': 'string',
'StartTime': datetime(2015, 1, 1),
'EndTime': datetime(2015, 1, 1),
'TaskArn': 'string',
'TaskType': 'RUN_COMMAND'|'AUTOMATION'|'STEP_FUNCTIONS'|'LAMBDA'
},
],
}
**Response Structure**
- *(dict) --*
- **WindowExecutionTaskIdentities** *(list) --*
Information about the task executions.
- *(dict) --*
Information about a task execution performed as part of a Maintenance Window execution.
- **WindowExecutionId** *(string) --*
The ID of the Maintenance Window execution that ran the task.
- **TaskExecutionId** *(string) --*
The ID of the specific task execution in the Maintenance Window execution.
- **Status** *(string) --*
The status of the task execution.
- **StatusDetails** *(string) --*
The details explaining the status of the task execution. Only available for certain status values.
- **StartTime** *(datetime) --*
The time the task execution started.
- **EndTime** *(datetime) --*
The time the task execution finished.
- **TaskArn** *(string) --*
The ARN of the task that ran.
- **TaskType** *(string) --*
The type of task that ran.
:type WindowExecutionId: string
:param WindowExecutionId: **[REQUIRED]**
The ID of the Maintenance Window execution whose task executions should be retrieved.
:type Filters: list
:param Filters:
Optional filters used to scope down the returned tasks. The supported filter key is STATUS with the corresponding values PENDING, IN_PROGRESS, SUCCESS, FAILED, TIMED_OUT, CANCELLING, and CANCELLED.
- *(dict) --*
Filter used in the request. Supported filter keys are Name and Enabled.
- **Key** *(string) --*
The name of the filter.
- **Values** *(list) --*
The filter values.
- *(string) --*
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class DescribeMaintenanceWindowExecutions(Paginator):
def paginate(self, WindowId: str, Filters: List = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`SSM.Client.describe_maintenance_window_executions`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeMaintenanceWindowExecutions>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
WindowId='string',
Filters=[
{
'Key': 'string',
'Values': [
'string',
]
},
],
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'WindowExecutions': [
{
'WindowId': 'string',
'WindowExecutionId': 'string',
'Status': 'PENDING'|'IN_PROGRESS'|'SUCCESS'|'FAILED'|'TIMED_OUT'|'CANCELLING'|'CANCELLED'|'SKIPPED_OVERLAPPING',
'StatusDetails': 'string',
'StartTime': datetime(2015, 1, 1),
'EndTime': datetime(2015, 1, 1)
},
],
}
**Response Structure**
- *(dict) --*
- **WindowExecutions** *(list) --*
Information about the Maintenance Window executions.
- *(dict) --*
Describes the information about an execution of a Maintenance Window.
- **WindowId** *(string) --*
The ID of the Maintenance Window.
- **WindowExecutionId** *(string) --*
The ID of the Maintenance Window execution.
- **Status** *(string) --*
The status of the execution.
- **StatusDetails** *(string) --*
The details explaining the Status. Only available for certain status values.
- **StartTime** *(datetime) --*
The time the execution started.
- **EndTime** *(datetime) --*
The time the execution finished.
:type WindowId: string
:param WindowId: **[REQUIRED]**
The ID of the Maintenance Window whose executions should be retrieved.
:type Filters: list
:param Filters:
Each entry in the array is a structure containing:
Key (string, between 1 and 128 characters)
Values (array of strings, each string is between 1 and 256 characters)
The supported Keys are ExecutedBefore and ExecutedAfter with the value being a date/time string such as 2016-11-04T05:00:00Z.
- *(dict) --*
Filter used in the request. Supported filter keys are Name and Enabled.
- **Key** *(string) --*
The name of the filter.
- **Values** *(list) --*
The filter values.
- *(string) --*
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
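# A usage sketch for the paginator above (assumes boto3 is installed and AWS
# credentials are configured; the window ID is a hypothetical placeholder).
if __name__ == '__main__':
    import boto3
    ssm = boto3.client('ssm')
    paginator = ssm.get_paginator('describe_maintenance_window_executions')
    pages = paginator.paginate(WindowId='mw-0c50858d01EXAMPLE',
                               PaginationConfig={'PageSize': 10})
    for page in pages:
        for execution in page['WindowExecutions']:
            print(execution['WindowExecutionId'], execution['Status'])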
class DescribeMaintenanceWindowSchedule(Paginator):
def paginate(self, WindowId: str = None, Targets: List = None, ResourceType: str = None, Filters: List = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`SSM.Client.describe_maintenance_window_schedule`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeMaintenanceWindowSchedule>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
WindowId='string',
Targets=[
{
'Key': 'string',
'Values': [
'string',
]
},
],
ResourceType='INSTANCE',
Filters=[
{
'Key': 'string',
'Values': [
'string',
]
},
],
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'ScheduledWindowExecutions': [
{
'WindowId': 'string',
'Name': 'string',
'ExecutionTime': 'string'
},
],
}
**Response Structure**
- *(dict) --*
- **ScheduledWindowExecutions** *(list) --*
Information about Maintenance Window executions scheduled for the specified time range.
- *(dict) --*
Information about a scheduled execution for a Maintenance Window.
- **WindowId** *(string) --*
The ID of the Maintenance Window to be run.
- **Name** *(string) --*
The name of the Maintenance Window to be run.
- **ExecutionTime** *(string) --*
The time, in ISO-8601 Extended format, that the Maintenance Window is scheduled to be run.
:type WindowId: string
:param WindowId:
The ID of the Maintenance Window to retrieve information about.
:type Targets: list
:param Targets:
The instance ID or key/value pair to retrieve information about.
- *(dict) --*
An array of search criteria that targets instances using a Key,Value combination that you specify. ``Targets`` is required if you don\'t provide one or more instance IDs in the call.
- **Key** *(string) --*
User-defined criteria for sending commands that target instances that meet the criteria. ``Key`` can be ``tag:<Amazon EC2 tag>`` or ``InstanceIds`` . For more information about how to send commands that target instances using ``Key,Value`` parameters, see `Using Targets and Rate Controls to Send Commands to a Fleet <https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html#send-commands-targeting>`__ in the *AWS Systems Manager User Guide* .
- **Values** *(list) --*
User-defined criteria that maps to ``Key`` . For example, if you specified ``tag:ServerRole`` , you could specify ``value:WebServer`` to run a command on instances that include Amazon EC2 tags of ``ServerRole,WebServer`` . For more information about how to send commands that target instances using ``Key,Value`` parameters, see `Using Targets and Rate Controls to Send Commands to a Fleet <https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html>`__ in the *AWS Systems Manager User Guide* .
- *(string) --*
:type ResourceType: string
:param ResourceType:
The type of resource you want to retrieve information about. For example, \"INSTANCE\".
:type Filters: list
:param Filters:
Filters used to limit the range of results. For example, you can limit Maintenance Window executions to only those scheduled before or after a certain date and time.
- *(dict) --*
Defines a filter used in Patch Manager APIs.
- **Key** *(string) --*
The key for the filter.
- **Values** *(list) --*
The value for the filter.
- *(string) --*
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class DescribeMaintenanceWindowTargets(Paginator):
def paginate(self, WindowId: str, Filters: List = None, PaginationConfig: Dict = None) -> Dict:
    """
    Creates an iterator that will paginate through responses from :py:meth:`SSM.Client.describe_maintenance_window_targets`.
    """
    pass
version, use_downloads=False)
return None, None, None, None # Cannot find the correct version!
else:
return results
def delete(self, project, version=None):
"""Delete all of the downloaded and installed files for the given project and version.
Args:
project (str): Project name.
version (str)[None]: Project version to delete.
"""
self.delete_downloaded(project, version)
self.delete_installed(project, version)
def delete_downloaded(self, name, version=None, download_dir=None):
"""Delete all of the downloaded files for the given project and version.
Args:
name (str/module): Package name.
version (str)[None]: Project version to delete.
download_dir (str)['.']: Download directory.
"""
if not isinstance(name, str):
try:
name = name.__module__
except (AttributeError, Exception):
name = str(name)
# Delete all of the downloaded files
for filename in self.get_downloaded_versions(name, download_dir=download_dir).values():
try:
if version is None or self.get_name_version(filename)[1] == version:
os.remove(filename)
except (OSError, Exception):
pass
def delete_installed(self, name, version=None):
"""Delete all of the installed files for the given project and version.
Args:
name (str/module): Package name.
version (str)[None]: Project version to delete.
"""
if not isinstance(name, str):
with contextlib.suppress(AttributeError, Exception):
if version is None:
version = name.__import_version__
try:
name = name.__module__
except (AttributeError, Exception):
try:
name = name.__name__
except (AttributeError, Exception):
name = str(name)
# Get the installed directory
imp_path = self.make_import_path(name, version or '')
if version is None:
imp_path = os.path.dirname(imp_path)
# Remove from sys.modules
try:
del sys.modules[self.make_import_name(name, version or '')]
except (AttributeError, KeyError, Exception):
pass
# Remove the library from the installed modules (This object and sys)
try:
if version is not None:
versions = [version]
else:
versions = list(self.modules[name].keys())
# Remove versions and modules
for ver in versions:
with contextlib.suppress(KeyError, AttributeError, Exception):
module = self.modules[name].pop(ver, None)
with contextlib.suppress(KeyError, AttributeError, NameError, Exception):
del sys.modules[module.__name__]
with contextlib.suppress(KeyError, AttributeError, NameError, Exception):
del sys.modules[module.__package__]
with contextlib.suppress(KeyError, AttributeError, NameError, Exception):
del sys.modules[module.__import_name__]
with contextlib.suppress(KeyError, AttributeError, NameError, Exception):
del module
except (KeyError, ValueError, TypeError, Exception):
pass
# Always delete installed
try:
shutil.rmtree(imp_path, ignore_errors=True)
except (OSError, Exception):
# This may not be successful with C extensions.
# When process is closed and new process tries to delete it should be successful.
pass
delete_module = delete_installed
def uri_exists(self, index_url=None, timeout=None, **kwargs):
"""Return if the given URL/URI exists."""
return uri_exists(index_url or self.index_url, timeout=timeout, **kwargs)
def install_downloaded(self, package, version=None):
"""Install a downloaded package.
Args:
package (str): Name of the package/library you want to install (Example: "requests").
version (str)[None]: Version number to find and download.
"""
try:
main_module = self.import_module(package, version)
if main_module is None:
self.delete_installed(package, version)
return False
return True
except (ImportError, Exception):
self.delete_installed(package, version)
return False
def download(self, package, version=None, download_dir=None, index_url=None, extensions='.whl',
min_version=None, exclude=None):
"""Download a package version to the download directory and return the file path that was saved.
Args:
package (str): Name of the package/library you want to get the versions for (Example: "requests").
version (str)[None]: Version number to find and download.
download_dir (str)['.']: Download directory.
index_url (str) ['https://pypi.org/simple/']: Simple url to get the package and its versions from.
extensions (list/str) [None]: List of allowed extensions (Example: [".whl", ".tar.gz"]).
min_version (str)[None]: Minimum version to allow.
exclude (list)[None]: List of versions that are excluded.
Returns:
filename (str)[None]: Filename of the downloaded package.
"""
try:
index_url = index_url or self.index_url
download_dir = download_dir or self.download_dir
if not os.path.exists(download_dir):
os.makedirs(download_dir)
return HttpListVersions.download(package, version=version, download_dir=download_dir, index_url=index_url,
extensions=extensions, min_version=min_version, exclude=exclude)
except Exception as err:
self.error(err)
return None
def get_versions(self, package, index_url=None, min_version=None, exclude=None):
"""Return a series of package versions.
Args:
package (str): Name of the package/library you want to get the versions for (Example: "requests").
index_url (str) ['https://pypi.org/simple/']: Simple url to get the package and its versions from.
min_version (str)[None]: Minimum version to allow.
exclude (list)[None]: List of versions that are excluded.
Returns:
data (OrderedDict): Dictionary of {(package name, version): href}
"""
try:
index_url = index_url or self.index_url
return HttpListVersions.get_versions(package, index_url=index_url, min_version=min_version, exclude=exclude)
except Exception as err:
self.error(err)
return {}
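# Hypothetical usage of the two methods above (the variable names are
# illustrative; `importer` is an instance of this class):
# versions = importer.get_versions('requests')            # {(name, version): href}
# filename = importer.download('requests', version='2.31.0')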
def error(self, error):
"""Handle an import error."""
raise error
def install(self, path, name=None, version=None, import_chain=None, extra_install_args=None):
"""Install the package with the name and version.
Args:
path (str): Path to install.
name (str)[None]: Name of the package/module.
version (str)[None]: Version of the package/module.
import_chain (str)[None]: Import chain ("custom.run_custom" to just import the function).
extra_install_args (list)[None]: Extra arguments passed through to the underlying install call.
Returns:
module (ModuleType)[None]: Module that was imported by the name or import_chain.
"""
# Get a valid name and version
if name is None or version is None:
n, v = self.get_name_version(path)
if name is None:
name = n
if version is None:
version = v
# Get the import path (install destination)
imp_path = self.make_import_path(name, version)
# Try to install the package
install_kwargs = {
'pip': self.pip,
'wait_func': self.wait_func,
'reset_modules': self.reset_modules,
'install_dependencies': self.install_dependencies,
'extra_install_args': extra_install_args,
}
install_lib(path, imp_path, **install_kwargs)
# Try to import the installed module
return self.import_path(imp_path, name, version, import_chain)
def import_module(self, name, version=None, import_chain=None):
"""Import the given module or package."""
# Check if valid path
orig_name = name
orig_version = version
name, version, import_name, path = self.find_module(orig_name, version=version)
if path is None:
# Check if given was full path
version = orig_version
if os.path.exists(orig_name):
path = orig_name
name, v = self.get_name_version(path)
if version is None:
version = v
else:
# Invalid path/name given!
self.error(ModuleNotFoundError(orig_name))
return
# Set version
if not version:
version = '0.0.0'
# Check if install_dir
if self.install_dir is None:
self.init()
# Check if import name is available
imp_path = self.make_import_path(name, version)
module = self.import_path(imp_path, name, version, import_chain)
if module is not None:
return module
# Try to install and import
try:
return self.install(path, name, version, import_chain)
except Exception as err:
self.error(err)
def import_path(self, imp_path, name=None, version=None, import_chain=None,
reset_modules=None, clean_modules=None, contained_modules=None):
"""Import the given path.
Args:
imp_path (str): Path to import (path to file/folder).
name (str)[None]: Name of the package to import.
version (str)[None]: Version of the package to import. This makes the import name "name_version" if you
want to use the normal python import after this.
import_chain (str)[None]: Import chain ("custom.run_custom" to just import the function).
reset_modules (bool)[None]: If True reset sys.modules back to the original sys.modules.
clean_modules (bool)[None]: If True reset sys.modules before the context block is run.
contained_modules (dict)[None]: If given and reset_modules save all imported modules to this dictionary.
Returns:
module (types.ModuleType/function/object): Object that was imported.
"""
if reset_modules is None:
reset_modules = self.reset_modules
if clean_modules is None:
clean_modules = self.clean_modules
if contained_modules is None:
contained_modules = self.contained_modules
# Get name and version
if name is None or version is None:
n, v = self.get_name_version(imp_path)
if name is None:
name = n
if version is None:
version = v
# Check if import_chain exists
if import_chain is None:
import_chain = name
if import_chain in self.modules:
modules = self.modules[import_chain]
if version in modules:
return modules[version]
# Import the path
try:
module = import_module(imp_path, import_chain, reset_modules=reset_modules, clean_modules=clean_modules,
contained_modules=contained_modules)
except (ImportError, Exception) as err:
module = None
self.error(err)
if module is not None:
try:
# Save in sys.modules with version
import_name = self.make_import_name(import_chain, version)
try:
module.__import_name__ = import_name
except (AttributeError, Exception):
pass
if not self.reset_modules:
self.rename_module(name, import_name)
else:
self.add_module(import_name, module)
# Save module to my modules
try:
self.modules[import_chain][version] = module
except (KeyError, Exception):
self.modules[import_chain] = {}
self.modules[import_chain][version] = module
# Save the import version
try:
module.__import_version__ = version
except (AttributeError, Exception):
pass
except (ValueError, TypeError, ImportError, Exception) as err:
self.error(err)
return module
def cleanup(self):
"""Properly close the tempfile directory."""
try:
self.remove_path(self.install_dir, delete_path=True)
except (OSError, Exception):
pass
return self
close = cleanup
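# A hypothetical end-to-end sketch (the class name `VersionImporter` is an
# assumption for illustration; only the method names come from the class above):
# imp = VersionImporter()
# module = imp.import_module('requests', version='2.31.0')  # downloads, installs, imports
# imp.delete('requests', '2.31.0')                          # remove downloaded + installed files
# imp.cleanup()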
# ===== Make the module callable =====
# https://stackoverflow.com/a/48100440/1965288 # https://stackoverflow.com/questions/1060796/callable-modules
MY_MODULE = sys.modules[__name__]
class LibImportModule(MY_MODULE.__class__):
MAIN_VERSION_IMPORTER = None
def __call__(self, imp_path, name=None, version=None, import_chain=None,
reset_modules=None, clean_modules=None, contained_modules=None):
"""Import the given path.
Args:
imp_path (str): Path to import (path to file/folder).
name (str)[None]: Name of the package to import.
version (str)[None]: Version of the package to import. This makes the import name "name_version" if you
want to use the normal python import after this.
import_chain (str)[None]: Import chain ("custom.run_custom" to just import the function).
reset_modules (bool)[None]: If True reset sys.modules back to the original sys.modules.
clean_modules (bool)[None]: If True reset sys.modules before the context block is run.
contained_modules (dict)[None]: If given and reset_modules save all imported modules to this dictionary.
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2019 Apple Inc. All Rights Reserved.
#
'''Capsule in PyTorch
TBD
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from .bilinear_sparse_routing import BilinearSparseRouting, BilinearRouting, DynamicBilinearRouting
from .linformer import LinformerProjectionKernel, BilinearProjectionWithEmbeddings
#### Simple Backbone ####
class simple_backbone(nn.Module):
def __init__(self, cl_input_channels,cl_num_filters,cl_filter_size,
cl_stride,cl_padding):
super(simple_backbone, self).__init__()
self.pre_caps = nn.Sequential(
nn.Conv2d(in_channels=cl_input_channels,
out_channels=cl_num_filters,
kernel_size=cl_filter_size,
stride=cl_stride,
padding=cl_padding),
nn.ReLU(),
)
def forward(self, x):
out = self.pre_caps(x) # x is an image
return out
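# Shape sketch (example values assumed): simple_backbone(3, 128, 3, 2, 1) maps a
# (B, 3, 32, 32) batch to (B, 128, 16, 16), since a 3x3 conv with stride 2 and
# padding 1 gives floor((32 + 2*1 - 3) / 2) + 1 = 16 along each spatial dim.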
#### ResNet Backbone ####
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class resnet_backbone_cifar(nn.Module):
def __init__(self, cl_input_channels, cl_num_filters,
cl_stride):
super(resnet_backbone_cifar, self).__init__()
self.in_planes = 64
def _make_layer(block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
self.pre_caps = nn.Sequential(
nn.Conv2d(in_channels=cl_input_channels,
out_channels=64,
kernel_size=3,
stride=1,
padding=1,
bias=False),
nn.BatchNorm2d(64),
nn.ReLU(),
_make_layer(block=BasicBlock, planes=64, num_blocks=3, stride=1), # num_blocks=2 or 3
_make_layer(block=BasicBlock, planes=cl_num_filters, num_blocks=4, stride=cl_stride), # num_blocks=2 or 4
)
def forward(self, x):
out = self.pre_caps(x) # x is an image
return out
#Imagenet backbone
class resnet_backbone_imagenet(nn.Module):
def __init__(self, cl_input_channels, cl_num_filters,
cl_stride):
super(resnet_backbone_imagenet, self).__init__()
self.in_planes = 64
def _make_layer(block, planes, num_blocks, stride):
# strides = [stride] + [1]*(num_blocks-1)
strides = [stride]*3 + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
self.pre_caps = nn.Sequential(
nn.Conv2d(in_channels=cl_input_channels,
out_channels=64,
kernel_size=3,
stride=1,
padding=1,
bias=False),
nn.BatchNorm2d(64),
nn.ReLU(),
_make_layer(block=BasicBlock, planes=64, num_blocks=3, stride=1), # num_blocks=2 or 3
# _make_layer(block=BasicBlock, planes=128, num_blocks=4, stride=cl_stride), # num_blocks=2 or 4
_make_layer(block=BasicBlock, planes=cl_num_filters, num_blocks=4, stride=cl_stride), # num_blocks=2 or 4
# _make_layer(block=BasicBlock, planes=512, num_blocks=2, stride=cl_stride), # num_blocks=2 or 4
)
def forward(self, x):
out = self.pre_caps(x) # x is an image
# print("Resnet backbone shape: ", out.shape)
return out
###
# Explained einsum
'''
https://stackoverflow.com/questions/26089893/understanding-numpys-einsum
torch.einsum('i,ij->i', A, B)
1. A has one axis; we've labelled it i. And B has two axes;
we've labelled axis 0 as i and axis 1 as j.
2. By repeating the label i in both input arrays, we are telling
einsum that these two axes should be multiplied together.
In other words, we're multiplying array A with each column of array B,
just like A[:, np.newaxis] * B does.
3. Notice that j does not appear as a label in our desired output;
we've just used i (we want to end up with a 1D array).
By omitting the label, we're telling einsum to sum along this axis.
In other words, we're summing the rows of the products, just like .sum(axis=1) does.
'''
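# A quick numeric check of the walkthrough above:
#   A = torch.tensor([1., 2.])                  # axis i, shape (2,)
#   B = torch.tensor([[10., 20.], [30., 40.]])  # axes i and j, shape (2, 2)
#   torch.einsum('i,ij->i', A, B)               # tensor([ 30., 140.])
#   (A[:, None] * B).sum(dim=1)                 # same: multiply, then sum over j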
#### Capsule Layer ####
class CapsuleFC(nn.Module):
r"""Applies as a capsule fully-connected layer.
TBD
"""
'''
Same as CapsuleConv
except that kernel size=1 everywhere.
'''
def __init__(self, in_n_capsules, in_d_capsules, out_n_capsules, out_d_capsules, matrix_pose, dp):
super(CapsuleFC, self).__init__()
self.in_n_capsules = in_n_capsules
self.in_d_capsules = in_d_capsules
self.out_n_capsules = out_n_capsules
self.out_d_capsules = out_d_capsules
self.matrix_pose = matrix_pose
# Matrix form of Hinton's poses
if matrix_pose:
self.sqrt_d = int(np.sqrt(self.in_d_capsules))
self.weight_init_const = np.sqrt(out_n_capsules/(self.sqrt_d*in_n_capsules))
self.w = nn.Parameter(self.weight_init_const* \
torch.randn(in_n_capsules, self.sqrt_d, self.sqrt_d, out_n_capsules))
# Vector form of Hinton's poses
else:
self.weight_init_const = np.sqrt(out_n_capsules/(in_d_capsules*in_n_capsules))
self.w = nn.Parameter(self.weight_init_const* \
torch.randn(in_n_capsules, in_d_capsules, out_n_capsules, out_d_capsules))
self.dropout_rate = dp
self.nonlinear_act = nn.LayerNorm(out_d_capsules)
self.drop = nn.Dropout(self.dropout_rate)
self.scale = 1. / (out_d_capsules ** 0.5)
def extra_repr(self):
return 'in_n_capsules={}, in_d_capsules={}, out_n_capsules={}, out_d_capsules={}, matrix_pose={}, \
weight_init_const={}, dropout_rate={}'.format(
self.in_n_capsules, self.in_d_capsules, self.out_n_capsules, self.out_d_capsules, self.matrix_pose,
self.weight_init_const, self.dropout_rate
)
def forward(self, input, num_iter, next_capsule_value=None):
# b: batch size
# n: num of capsules in current layer
# a: dim of capsules in current layer
# m: num of capsules in next layer
# d: dim of capsules in next layer
if len(input.shape) == 5:
input = input.permute(0, 4, 1, 2, 3)
input = input.contiguous().view(input.shape[0], input.shape[1], -1)
input = input.permute(0,2,1)
if self.matrix_pose:
w = self.w # nxdm
_input = input.view(input.shape[0], input.shape[1], self.sqrt_d, self.sqrt_d) # bnax
else:
w = self.w
if next_capsule_value is None:
# next_capsule_value is None on the 1st iteration
# query key == r_{i,j} (routing probabilities)
query_key = torch.zeros(self.in_n_capsules, self.out_n_capsules).type_as(input)
query_key = F.softmax(query_key, dim=1)
if self.matrix_pose:
# Einsum: computing multilinear expressions (i.e. sums of products) using the Einstein summation convention.
next_capsule_value = torch.einsum('nm, bnax, nxdm->bmad', query_key, _input, w)
else:
next_capsule_value = torch.einsum('nm, bna, namd->bmd', query_key, input, w)
else:
if self.matrix_pose:
next_capsule_value = next_capsule_value.view(next_capsule_value.shape[0],
next_capsule_value.shape[1], self.sqrt_d, self.sqrt_d)
# _query_key == agreement vector ( a_{i,j})
_query_key = torch.einsum('bnax, nxdm, bmad->bnm', _input, w, next_capsule_value)
else:
_query_key = torch.einsum('bna, namd, bmd->bnm', input, w, next_capsule_value)
# New routing probabilities
_query_key.mul_(self.scale)
query_key = F.softmax(_query_key, dim=2)
query_key = query_key / (torch.sum(query_key, dim=2, keepdim=True) + 1e-10)
if self.matrix_pose:
# Use new routing values, to update state of parent capsule
next_capsule_value = torch.einsum('bnm, bnax, nxdm->bmad', query_key, _input,
w)
else:
next_capsule_value = torch.einsum('bnm, bna, namd->bmd', query_key, input,
w)
# Apply dropout
next_capsule_value = self.drop(next_capsule_value)
if not next_capsule_value.shape[-1] == 1:
if self.matrix_pose:
next_capsule_value = next_capsule_value.view(next_capsule_value.shape[0],
next_capsule_value.shape[1], self.out_d_capsules)
# Apply layer Norm
next_capsule_value = self.nonlinear_act(next_capsule_value)
else:
next_capsule_value = self.nonlinear_act(next_capsule_value)
return next_capsule_value
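# Routing usage sketch (shapes assumed for illustration; matrix_pose=True needs
# in_d_capsules to be a perfect square, here 16 -> 4x4 pose matrices):
#   fc = CapsuleFC(in_n_capsules=32, in_d_capsules=16,
#                  out_n_capsules=10, out_d_capsules=16, matrix_pose=True, dp=0.0)
#   x = torch.randn(8, 32, 16)                   # (b, n, a)
#   v = fc(x, num_iter=0)                        # 1st pass: uniform routing
#   v = fc(x, num_iter=1, next_capsule_value=v)  # refine via agreement scores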
#
class CapsuleCONV(nn.Module):
r"""Applies as a capsule convolutional layer.
TBD
"""
def __init__(self, in_n_capsules, in_d_capsules, out_n_capsules, out_d_capsules,
kernel_size, stride, matrix_pose, dp, coordinate_add=False):
super(CapsuleCONV, self).__init__()
self.in_n_capsules = in_n_capsules
self.in_d_capsules = in_d_capsules
self.out_n_capsules = out_n_capsules
self.out_d_capsules = out_d_capsules
self.kernel_size = kernel_size
self.stride = stride
self.matrix_pose = matrix_pose
self.coordinate_add = coordinate_add
if matrix_pose:
self.sqrt_d = int(np.sqrt(self.in_d_capsules))
self.weight_init_const = np.sqrt(out_n_capsules/(self.sqrt_d*in_n_capsules*kernel_size*kernel_size))
self.w = nn.Parameter(self.weight_init_const*torch.randn(kernel_size, kernel_size,
in_n_capsules, self.sqrt_d, self.sqrt_d, out_n_capsules))
else:
self.weight_init_const = np.sqrt(out_n_capsules/(in_d_capsules*in_n_capsules*kernel_size*kernel_size))
self.w = nn.Parameter(self.weight_init_const*torch.randn(kernel_size, kernel_size,
in_n_capsules, in_d_capsules, out_n_capsules,
out_d_capsules))
self.nonlinear_act = nn.LayerNorm(out_d_capsules)
self.dropout_rate = dp
self.drop = nn.Dropout(self.dropout_rate)
self.scale = 1. / (out_d_capsules ** 0.5)
def extra_repr(self):
return 'in_n_capsules={}, in_d_capsules={}, out_n_capsules={}, out_d_capsules={}, \
kernel_size={}, stride={}, coordinate_add={}, matrix_pose={}, weight_init_const={}, \
dropout_rate={}'.format(
self.in_n_capsules, self.in_d_capsules, self.out_n_capsules, self.out_d_capsules,
self.kernel_size, self.stride, self.coordinate_add, self.matrix_pose, self.weight_init_const,
self.dropout_rate
)
def input_expansion(self, input):
# input has size [batch x num_of_capsule x height x width x capsule_dimension]
# unfold(dimension, size, step) -> Tensor: extracts sliding local blocks along the given dim
# extracts kernel patches over complete height and width
unfolded_input = input.unfold(2,size=self.kernel_size,step=self.stride).unfold(3,size=self.kernel_size,step=self.stride)
unfolded_input = unfolded_input.permute([0,1,5,6,2,3,4])
# output has size [batch x num_of_capsule x kernel_size x kernel_size x h_out x w_out x capsule_dimension]
return unfolded_input
def forward(self, input, num_iter, next_capsule_value=None):
# k,l: kernel size
# h,w: output width and length
# b: batch size
# n: num of capsules in current layer
# a: dim of capsules in current layer
# m: num of capsules in next layer
# d: dim of capsules in next layer
# This converts (b,32,14,14,16) --> (b,32,3,3,7,7,16) (3X3 patches, 7 in number along both height and width)
inputs = self.input_expansion(input)
# print("Expansion: ",input.shape, inputs.shape)
if self.matrix_pose:
# W is pose of capsules of layer L
# Input is capsule of layer L (p_{L})
w = self.w # klnxdm
# Converts (b,32,3,3,7,7,16) --> (b,32,3,3,7,7,4,4)
_inputs = inputs.view(inputs.shape[0], inputs.shape[1], inputs.shape[2], inputs.shape[3],\
inputs.shape[4], inputs.shape[5], self.sqrt_d, self.sqrt_d) # bnklmhax
# print(_inputs.shape)
else:
w = self.w
if next_capsule_value is None:
# Routing probabilities in 1st iteration
query_key = torch.zeros(self.in_n_capsules, self.kernel_size, self.kernel_size,
self.out_n_capsules).type_as(inputs) # nklm
query_key = F.softmax(query_key, dim=3) # softmax on output number of capsules
# print("Query :",query_key.shape)
# print("w :",w.shape)
# print("input :",_inputs.shape)
if self.matrix_pose:
# a,x are sqrt_d if matrix pose and not vector pose
# This performs convolution as well and attention both
# next capsule shape is (b,32,7,7,16) just like original input
'''
for all b:
for all m:
for all h:
for all w:
for all a:
for all d:
for all n: (summing over n)
"""
Tasks for managing tasks, targets and scanners in GVM.
"""
import configparser
import logging
import uuid
from fps.client import GMPClient
from fps.utils import (export_results, get_hosts, get_key_by_value,
reset_discovery_attribute, update_discovered_hosts,
update_host_attribute)
config = configparser.ConfigParser()
config.read('config.ini')
def create_comment(key, values):
"""Creates comments to be injected in `get_` functions filters."""
return '' if values is None else ' '.join([f'comment="{key}:{value}"' for value in values])
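# e.g. create_comment('state', ['unassigned', 'scanned'])
# -> 'comment="state:unassigned" comment="state:scanned"'
# while create_comment('state', None) -> ''.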
def scan_or_discovery(task_name):
"""Determines if a task was created for discovery or scan."""
return 'discovery' if task_name == config['DISCOVERY']['task_name'] else 'scan'
def create_tasks(
client: GMPClient, number_of_tasks, config_name,
target_name, scanner_name, preferences, state='initialised'):
"""
Creates a set of tasks.
:param client: GMP client.
:param number_of_tasks: number of tasks to create.
:param config_name: tasks' configuration.
:param target_name: tasks' target.
:param scanner_name: tasks' scanner.
:param preferences: tasks' preferences.
:param state: tasks' state.
:return: list of names of created tasks.
"""
name = uuid.uuid4().hex
required_kwargs = {}
required_kwargs['config_name'] = config_name
required_kwargs['target_name'] = target_name
required_kwargs['scanner_name'] = scanner_name
required_kwargs['preferences'] = preferences
tasks = []
for idx in range(number_of_tasks):
result = client.create_task(name=f'{name}_{idx}', state=state, **required_kwargs)
tasks.append(f'{name}_{idx}')
logging.info('Create task %s: %s', f'{name}_{idx}', result.get('status_text'))
return tasks
def create_targets(
client: GMPClient, num_items_per_target, hosts, port_list_name, state='unassigned'):
"""
Creates a set of targets.
:param client: GMP client.
:param num_items_per_target: number of hosts per target.
:param hosts: hosts in the targets.
:param port_list_name: targets' port list.
:param state: targets' state.
:return: list of names of created targets.
"""
name = uuid.uuid4().hex
targets = []
for idx, sub_hosts in enumerate(
[hosts[i:i + num_items_per_target] for i in range(0, len(hosts), num_items_per_target)]):
result = client.create_target(
name=f'{name}_{idx}', hosts=sub_hosts, port_list_name=port_list_name,
state=state)
targets.append(f'{name}_{idx}')
logging.info(
'Create target %s (%s hosts): %s',
f'{name}_{idx}', len(sub_hosts), result.get('status_text'))
return targets
def delete_tasks(
client: GMPClient, task_name=None, states=['obsolete'], ultimate=False):
"""
Deletes a set of tasks.
:param task_name: name of the task to delete.
:param states: the tasks in `states` will be deleted.
:param ultimate: move to trash or delete permanently.
:return: list of deleted tasks.
"""
state_filter = create_comment('state', states)
_filter = f'rows=-1 {state_filter}' if task_name is None else f'rows=-1 name={task_name} and {state_filter}'
deleted_tasks = []
for task in client.get_tasks(filter=_filter):
_task_name = task.xpath('name/text()')[0]
result = client.delete_task(name=_task_name, ultimate=ultimate)
logging.info('Deleted task %s: %s', _task_name, result.get('status_text'))
deleted_tasks.append(_task_name)
return deleted_tasks
def delete_targets(client: GMPClient, target_name=None, states=['scanned'], ultimate=False):
"""
Deletes a set of targets.
:param target_name: name of the target to delete.
:param states: the targets in `states` will be deleted.
:param ultimate: move to trash or delete permanently.
"""
state_filter = create_comment('state', states)
_filter = f'rows=-1 {state_filter}' if target_name is None else f'rows=-1 name={target_name} and {state_filter}'
deleted_targets = []
for target in client.get_targets(filter=_filter):
_target_name = target.xpath('name/text()')[0]
if target.xpath('in_use/text()')[0] == "0":
result = client.delete_target(name=_target_name, ultimate=ultimate)
logging.info('Deleted target %s: %s', _target_name, result.get('status_text'))
else:
logging.info('Target %s is in use', _target_name)
deleted_targets.append(_target_name)
return deleted_targets
def create_scanners(
client: GMPClient, num_scanners, scanner_name_prefix,
scanner_host_prefix, scanner_service, credential):
"""
Creates a set of scanners.
:param num_scanners: number of the scanners to create.
:param scanner_name_prefix: name prefix of the scanners to create.
:param scanner_host_prefix: scanner host prefix of the scanners to create
:param scanner_service: scanner service name; this is particular to StatefulSet pods.
:param credential: credential to be used by the scanner to connect to GVMd.
"""
for i in range(num_scanners):
host = f'{scanner_host_prefix}{i}' if scanner_service == '' \
else f'{scanner_host_prefix}{i}.{scanner_service}'
result = client.create_scanner(
name=f'{scanner_name_prefix}{i}',
host=host,
credential=credential)
logging.info('Create scanner %s: %s', f'{scanner_name_prefix}{i}', result.get('status_text'))
def delete_scanners(
client: GMPClient, num_scanners, scanner_name_prefix, ultimate=True):
"""
Deletes a set of scanners.
:param num_scanners: number of scanners to delete.
:param scanner_name_prefix: used to form the scanner name.
:param ultimate: move to trash or delete permanently.
"""
for i in range(num_scanners):
result = client.delete_scanner(name=f'{scanner_name_prefix}{i}', ultimate=ultimate)
logging.info('Delete scanner %s: %s', f'{scanner_name_prefix}{i}', result.get('status_text'))
def assign_targets(
client: GMPClient, target_name=None, task_name=None,
target_states=['unassigned'], task_states=['initialised'],
next_target_state='assigned', next_task_state='has_target'):
"""
Assign targets to tasks.
:param target_name: name of target to assign.
:param task_name: name of task to accept targets.
:param target_states: targets in `target_states` will be assigned.
:param task_states: tasks in `task_states` can accept targets.
:param next_target_state: Next target state.
:param next_task_state: Next task state.
"""
target_state_filter = create_comment('state', target_states)
task_state_filter = create_comment('state', task_states)
target_filter = f'rows=-1 {target_state_filter}' \
if target_name is None else f'rows=-1 name={target_name} and {target_state_filter}'
task_filter = f'rows=-1 {task_state_filter}' \
if task_name is None else f'rows=-1 name={task_name} and {task_state_filter}'
tasks = client.get_tasks(filter=task_filter)
for target in client.get_targets(filter=target_filter):
target_name = target.xpath('name/text()')[0]
available_task = tasks[0] if len(tasks) > 0 else None
if available_task is None:
break
client.update_task_target(
name=available_task.xpath('name/text()')[0], target_name=target_name)
client.update_task_state(name=available_task.xpath('name/text()')[0], state=next_task_state)
client.update_target_state(name=target_name, state=next_target_state)
tasks.remove(available_task)
logging.info('Target %s assigned to task %s', target_name, available_task.xpath('name/text()')[0])
def assign_tasks(
client: GMPClient, task_name=None, scanner_name=None,
task_states=['has_target'], next_task_state='has_scanner'):
"""
Assign tasks to scanners.
Only scanners with zero active tasks will accept new tasks.
:param task_name: name of task to assign to scanner.
:param scanner_name: scanners with name `scanner_name` will be added to scanners' filter.
:param task_states: tasks in `task_states` will be assigned to scanners.
:param next_task_state: Next task state.
:return: None; tasks for which no scanner is available are skipped and logged.
"""
active_tasks_per_scanner_dict = active_tasks_per_scanner(client, scanner_name)
state_filter = create_comment('state', task_states)
_filter = f'rows=-1 {state_filter}' if task_name is None else f'rows=-1 name={task_name} and {state_filter}'
for task in client.get_tasks(filter=_filter):
task_name = task.xpath('name/text()')[0]
scanner_name = get_key_by_value(active_tasks_per_scanner_dict, 0)
if scanner_name is None:
logging.info('Cannot assign the task %s as no scanner is available', task_name)
continue
active_tasks_per_scanner_dict[scanner_name] += 1
logging.info('Task %s will run on the scanner %s', task_name, scanner_name)
client.update_task_scanner(name=task_name, scanner_name=scanner_name)
client.update_task_state(name=task_name, state=next_task_state)
def start_tasks(
client: GMPClient, task_name=None,
states=['has_scanner'], next_task_state='started'):
"""
Starts a set of tasks.
:param task_name: name of the task to start.
:param states: the tasks in `states` will be started.
:param next_task_state: if task is started, assign it `next_task_state`.
"""
state_filter = create_comment('state', states)
_filter = f'rows=-1 {state_filter}' if task_name is None else f'rows=-1 name={task_name} and {state_filter}'
for task in client.get_tasks(filter=_filter):
task_name = task.xpath('name/text()')[0]
result = client.start_task(name=task_name)
logging.info('Starting task %s: %s', task_name, result.get('status_text'))
client.update_task_state(name=task_name, state=next_task_state)
def get_results(
client: GMPClient, task_name=None, task_states=['finished'],
next_task_state='obsolete', next_target_state='scanned'):
"""
Returns results in the last reports generated for each task in a task set.
:param task_name: name of the task from which to get the scanned hosts.
:param task_states: get results from the tasks whose state is in `task_states`.
:param next_task_state: next state of the task.
:param next_target_state: next state of the target.
:return: result objects.
"""
results = []
state_filter = create_comment('state', task_states)
_filter = f'rows=-1 {state_filter}' if task_name is None else f'rows=-1 name={task_name} and {state_filter}'
for task in client.get_tasks(filter=_filter):
task_name = task.xpath('name/text()')[0]
task_target = task.xpath('target/name/text()')[0]
report_id = task.xpath('last_report/report')[0].get('id')
report = client.get_report(report_id=report_id, details=1, ignore_pagination=1)
task_results = report[0].xpath('report/results/result')
logging.info(f'{len(task_results)} results returned by task {task_name}')
results.extend(task_results)
client.update_task_state(name=task_name, state=next_task_state)
client.update_target_state(name=task_target, state=next_target_state)
return results
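# A typical end-to-end cycle built from the functions above (sketch: the GMPClient
# construction and the concrete names are assumed, not defined in this module):
#   client = GMPClient(...)
#   create_targets(client, 16, hosts, port_list_name='All TCP')
#   create_tasks(client, 4, 'Full and fast', 'placeholder_target', 'scanner0', {})
#   assign_targets(client)          # unassigned targets -> initialised tasks
#   assign_tasks(client)            # has_target tasks -> idle scanners
#   start_tasks(client)             # has_scanner -> started
#   check_task_completion(client)   # started -> finished / failed / stopped
#   results = get_results(client)   # finished -> obsolete, targets -> scanned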
def active_tasks_per_scanner(
client: GMPClient, scanner_name=None,
scanner_name_prefix=config['SCAN']['scanner_name_prefix'],
num_scanners=config.getint('SCAN', 'num_scanners')):
"""
Returns a dictionary containing number of active tasks per scanner.
`active` tasks are those in status `Requested` or `Running` or `Stop Requested`.
:param scanner_name: if provided only consider scanner with `scanner_name`.
:param scanner_name_prefix: check scanners with passed prefix.
:param num_scanners: number of scanners to check.
:return: dictionary.
"""
# It has turned out that get_scanners(details=True) is very time consuming.
# So falling back into doing it the classical way.
tasks_per_scanner_dict = {}
# Initialise tasks_per_scanner_dict
# Assuming scanners with `scanner_name` or `{scanner_name_prefix}{i}` exist.
if scanner_name is None:
for i in range(num_scanners):
tasks_per_scanner_dict[f'{scanner_name_prefix}{i}'] = 0
else:
tasks_per_scanner_dict[scanner_name] = 0
for task in client.get_tasks(filter='status="Requested" status="Running" status="Stop Requested"'):
if task.xpath('scanner/name/text()')[0] in tasks_per_scanner_dict:
tasks_per_scanner_dict[task.xpath('scanner/name/text()')[0]] += 1
return tasks_per_scanner_dict
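# e.g. with scanner_name_prefix='gvm-scanner-' and num_scanners=3 this returns
# something like {'gvm-scanner-0': 1, 'gvm-scanner-1': 0, 'gvm-scanner-2': 2};
# assign_tasks() above then picks a key whose active-task count is 0.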
def check_task_completion(
client: GMPClient, task_name=None, states=['started'],
task_finished_state='finished', task_failed_state='failed', task_stopped_state='stopped'):
"""
Checks tasks completion status.
:param task_name: name of the task to check its status.
:param states: check tasks with a state in states.
:param task_finished_state: state of finished tasks.
:param task_failed_state: state of failed tasks.
:param task_stopped_state: state of stopped tasks.
"""
state_filter = create_comment('state', states)
_filter = f'rows=-1 {state_filter}' if task_name is None else f'rows=-1 name={task_name} and {state_filter}'
for task in client.get_tasks(filter=_filter):
task_name = task.xpath('name/text()')[0]
task_status = task.xpath('status/text()')[0]
task_progress = task.xpath('progress/text()')[0]
last_report_severity = error = state = None
if task_status == 'Done':
last_report_severity = task.xpath('last_report/report/severity/text()')[0]
last_report_id = task.xpath('last_report/report')[0].get('id')
if float(last_report_severity) >= 0.0 or float(last_report_severity) in [-1, -99.0]:
state = task_finished_state
else:
state = task_failed_state
last_report = client.get_report(report_id=last_report_id, details=True)
error = last_report[0].xpath('report/errors/error/description/text()')[0]
elif task_status == 'Stopped':
state = task_stopped_state
if state is not None:
client.update_task_state(name=task_name, state=state)
status_msg = (
f'Task: {task_name} | Status: {task_status} |
{p3} )
) ; (
{router_to_router_pol}
))
+
((
( {p4} )
+
( {p6} )
+
( {p7} )
) ; (
{l2}
))
+
( {p5} )
+
( {p8} )
""".format(
l2=l2,
p1=p1,
p2=p2,
p3=p3,
p4=p4,
p5=p5,
p6=p6,
p7=p7,
p8=p8,
router_to_router_pol=spec2.router_to_router_pol))
spec3 = Spec3(
'\n+ '.join(preface),
'\n\n+\n\n'.join(topo),
'\n\n+\n\n'.join(pol),
spec2.router_to_router_topo,
spec2.router_to_router_pol,
spec2.hosts)
if self.args.export_hsa:
out = self.args.export_hsa
out.write('### SPEC 3 ###\n')
out.write('--- NETKAT SWITCH FUNCTION ---\n')
out.write('%s\n+\n%s\n' % (spec3.local_pols, spec3.router_to_router_pol))
out.write('\n')
out.write('--- TOPOLOGY ---\n')
# export each LAN graph plus the router fabric exactly once
for g in [lan.g for lan in self.lans] + [self.routers]:
export_topology_of_networkx(g, out)
out.write('\n')
out.write('--- HOSTS ---\n')
for lan in self.lans:
for h in lan.hosts:
out.write('HOST %d, %s\n' % (h.mac, h.ip))
out.write('\n')
out.write('--- ACL (BLACKLIST IP PAIRS) ---\n')
for (src, dst) in self.acl_pairs:
out.write('(%s, %s)\n' % (src.ip, dst.ip))
out.write('\n')
return spec3
def export_cocoon(self):
out = self.args.export_cocoon
# Make a new graph that connects the zones to the router fabric.
g = self.routers
for lan in self.lans:
g = copy_compose(lan.g, g)
for n in lan.g:
for neighbor in lan.g.node[n]['ports']:
g.node[n]['ports'][neighbor] = lan.g.node[n]['ports'][neighbor]
# FUNCTION cHost
out.write('function cHost(hid_t hid): bool =\n')
out.write(' or\n'.join([" hid == 64'd%d" % h.mac for lan in self.lans for h in lan.hosts]))
out.write('\n')
# FUNCTION cVlan
out.write('function cVlan(vid_t vid): bool =\n')
out.write(' or\n'.join([" vid == 12'd%d" % lan.vlan for lan in self.lans]))
out.write('\n')
# FUNCTION vidRouterMAC
vid_map = ["vid == 12'd%d: 48'h%x;" % (lan.vlan, lan.router)
for lan in self.lans]
vid_map = '\n '.join(vid_map)
out.write('''
function vidRouterMAC(vid_t vid): MAC =
case {{
{vid_map}
default: 48'h0;
}}
'''.format(vid_map = vid_map))
# FUNCTION ip2vid
ip_map = ["ip == 32'h%x: 12'd%d;" % (int_of_ip(h.ip), h.vlan) for lan in self.lans for h in lan.hosts]
ip_map = '\n '.join(ip_map)
out.write('''
function ip2vlan(IP4 ip): vid_t =
case {{
{ip_map}
default: 12'd0;
}}
'''.format(ip_map = ip_map))
# FUNCTION hid2ip
m = ["hid == 64'd%d: 32'h%x;" % (h.mac, int_of_ip(h.ip)) for lan in self.lans for h in lan.hosts]
m = '\n '.join(m)
out.write('''
function hid2ip(hid_t hid): IP4 =
case {{
{m}
default: 32'd0;
}}
'''.format(m = m))
# FUNCTION ip2hid
m = ["ip == 32'h%x: 64'd%d;" % (int_of_ip(h.ip), h.mac) for lan in self.lans for h in lan.hosts]
m = '\n '.join(m)
out.write('''
function ip2hid(IP4 ip): hid_t =
case {{
{m}
default: 64'd0;
}}
'''.format(m = m))
# FUNCTION acl
m = ["(ip.src == 32'h%x and ip.dst == 32'h%x)" % (
int_of_ip(src.ip), int_of_ip(dst.ip))
for (src, dst) in self.acl_pairs]
m = ' or \n '.join(m)
out.write('''
function acl(vid_t srcvlan, vid_t dstvlan, ip4_t ip): bool =
{m}
'''.format(m = m))
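# The emitted Cocoon text looks like this (illustrative addresses only):
#   function acl(vid_t srcvlan, vid_t dstvlan, ip4_t ip): bool =
#       (ip.src == 32'hc0a80105 and ip.dst == 32'hc0a80207) or
#       (ip.src == 32'hc0a80208 and ip.dst == 32'hc0a80106)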
# FUNCTION aclSrc, aclDst (derived)
out.write('''
function aclSrc(vid_t srcvlan, vid_t dstvlan, ip4_t ip): bool = acl(srcvlan, dstvlan, ip)
function aclDst(vid_t srcvlan, vid_t dstvlan, ip4_t ip): bool = true
''')
# FUNCTION cZone
m = ["zid == 32'd%d" % lan.vlan for lan in self.lans] + ["zid == 32'd0"]
m = ' or \n '.join(m)
out.write('''
function cZone(zid_t zid): bool =
{m}
'''.format(m = m))
# FUNCTION cRouter
m = ["rid == 64'd%d" % lan.router for lan in self.lans]
m = ' or \n '.join(m)
out.write('''
function cRouter(hid_t rid): bool =
{m}
'''.format(m = m))
# FUNCTION portConnected
out.write('function portConnected(pid_t pid): bool = true (* assume all ports are connected *)\n')
# FUNCTION routerPortZone
zone_router_ports = []
for lan in self.lans:
zone_router_ports += ["pid == pid_t{64'd%d, 16'd%d}: 32'd%d;" % (
lan.router, port, lan.vlan)
for port in lan.g.node[lan.router]['ports'].values()]
zone_router_ports = '\n '.join(zone_router_ports)
out.write('''
function routerPortZone(pid_t pid): zid_t =
case {{
{zone_router_ports}
default: 32'd0;
}}
'''.format( zone_router_ports = zone_router_ports ))
# FUNCTION pid2mac
hosts = ["pid == pid_t{64'd%d, 16'd1}: 48'h%x;" % ( h.mac, h.mac)
for lan in self.lans for h in lan.hosts]
hosts = '\n '.join(hosts)
routers = ["pid == pid_t{64'd%d, 16'd%d}: 48'h%x;" % (lan.router, port, lan.router)
for lan in self.lans for port in g.node[lan.router]['ports'].values()]
routers = '\n '.join(routers)
out.write('''
function pid2mac(pid_t pid): MAC =
case {{
{hosts}
{routers}
default: 48'h0;
}}
'''.format(hosts=hosts, routers=routers))
# FUNCTION mac2pid
# Note the following assumptions on gateway routers:
# - all gateway routers have exactly two ports
# - port 1 connects to the gateway's zone
# - the other port is > 1 and connects to the router-to-router fabric
# (i.e. zone 0)
# NB: turns out this only needs to be defined for gateway router ports
# on the interior router fabric.
routers = []
for lan in self.lans:
assert(len(self.routers.node[lan.router]['ports']) == 1)
routers.append("mac == 48'h%x: pid_t{64'd%d, 16'd%d};" % (
lan.router, lan.router, self.routers.node[lan.router]['ports'].values()[0]))
routers = '\n '.join(routers)
out.write('''
function mac2pid(MAC mac): pid_t =
case {{
{routers}
default: pid_t{{64'd0, 16'd0}};
}}
'''.format(routers=routers))
# FUNCTION l3NextHop
m = []
for src_lan in self.lans:
for dst_lan in self.lans:
if src_lan == dst_lan:
continue
m.append("rid == 64'd%d and vid == 12'd%d: nexthop_t{48'h%x, 16'd%d};" % (
src_lan.router,
dst_lan.vlan,
dst_lan.router,
self.routers.node[src_lan.router]['ports'].values()[0] ))
m = '\n '.join(m)
out.write('''
function l3NextHop(hid_t rid, vid_t vid): nexthop_t =
case {{
{m}
default: nexthop_t{{48'h0, 16'd0}};
}}
'''.format(m=m))
# FUNCTION cSwitch
local_switches = ["sid == 64'd%d" % n
for lan in self.lans
for n in lan.g
if lan.g.node[n]['type'] == 'switch']
router_switches = ["sid == 64'd%d" % n
for n in self.routers
if self.routers.node[n]['type'] == 'switch']
routers = ["sid == 64'd%d" % lan.router for lan in self.lans]
switches = ' or\n '.join(local_switches + router_switches + routers)
out.write('''
function cSwitch(hid_t sid): bool =
{switches}
'''.format( switches=switches ))
# FUNCTION link
local_links = ["pid == pid_t{64'd%d, 16'd%d}: pid_t{64'd%d, 16'd%d};" % (
n, lan.g.node[n]['ports'][neighbor], neighbor, lan.g.node[neighbor]['ports'][n])
for lan in self.lans for n in lan.g for neighbor in lan.g.node[n]['ports']]
local_links = '\n '.join(local_links)
router_links = ["pid == pid_t{64'd%d, 16'd%d}: pid_t{64'd%d, 16'd%d};" % (
n, self.routers.node[n]['ports'][neighbor], neighbor, self.routers.node[neighbor]['ports'][n])
for n in self.routers for neighbor in self.routers.node[n]['ports']]
router_links = '\n '.join(router_links)
out.write('''
function link(pid_t pid): pid_t =
case {{
{local_links}
{router_links}
default: pid_t{{64'd0, 16'd0}};
}}
'''.format( local_links = local_links
, router_links = router_links ))
# FUNCTION l2distance
path_lengths = networkx.shortest_path_length(g)
max_shortest_path = max([path_lengths[src][dst] for src in g for dst in g])
max_shortest_zone_path = max([path_lengths[lan.router][h.mac] for lan in self.lans for h in lan.hosts])
assert(max_shortest_path > max_shortest_zone_path)
distances, out_ports = cocoon_of_networkx(g)
# l2distance(sw, vid, dst) cases:
# From the whiteboard:
#
# if sw in vid zone and dst in vid zone:
# shortest path routing to dst.
# if sw is not core switch and not in vid zone and dst not in vid zone:
# route to gateway.
# if sw in core and dst not special case:
# route to gateway (vid)
# if sw in core and dst is special case:
# route to dst
# if sw in core and dst is gateway router:
# route to gateway (dst)
#
# Reworked:
#
# sw in vid zone and dst is host in vid zone
# sw in vid zone and dst is host not in vid zone
# sw in vid zone (not core) and dst is router: forward to router
# sw in core and dst is host in vid zone (i.e. not special)
# sw in core and dst is host not in vid zone (i.e. special)
# sw in core and dst is router: forward to router
# sw not in vid zone and dst is host in vid zone
# sw not in vid zone and dst is host not in vid zone: drop
# sw not in vid zone (but also not core) and dst is router
#
# Idea: use fall-through nature of 'case' statements to prioritize
# as follows.
#
# 1. Do destination L2 routing for all special hosts (i.e. not in
# their home zone) for in-VLAN traffic.
# 2. For zone switches, send non-zone traffic to local gateway.
# 3. For zone switches (and routers), send local (in-zone) traffic
# to local host.
# 4. For zone switches, send router-destined traffic to local gateway.
# 5. For core switches (and routers), send vlan traffic to vlan
# gateway.
# 6. For core switches (and routers), send router-destined traffic
# to router.
use_optimized = True
if use_optimized:
# 1. Do destination | |
<reponame>networmix/NetSim
# pylint: disable=protected-access,invalid-name
import pprint
from netsim.simulator.core.simcore import SimTime
from netsim.simulator.netsim_base import (
PacketInterfaceTx,
PacketQueue,
PacketSink,
PacketSource,
PacketSize,
)
from netsim.simulator.netsim_simulator import NetSim
def test_packet_source_1():
sim = NetSim()
def arrival_gen() -> SimTime:
yield 0
while True:
yield 1
def size_gen() -> PacketSize:
while True:
yield 1000
source = PacketSource(sim.ctx, arrival_gen(), size_gen(), initial_delay=0)
sim.run(until_time=1)
assert sim.ctx.now == 1
assert sim.event_counter == 3 # initial_delay, arrival, stop_sim
pprint.pprint(source.stat.cur_stat_frame.todict())
assert source.stat.cur_stat_frame.todict() == {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_latency_at_arrival": 0,
"avg_latency_at_departure": 0.0,
"avg_latency_at_drop": 0,
"avg_receive_rate_bps": 0.0,
"avg_receive_rate_pps": 0.0,
"avg_send_rate_bps": 8000.0,
"avg_send_rate_pps": 1.0,
"duration": 1,
"last_state_change_timestamp": 0,
"timestamp": 1,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_received_bytes": 0,
"total_received_pkts": 0,
"total_sent_bytes": 1000,
"total_sent_pkts": 1,
}
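# Sanity check on the frame above: rates are derived from the byte/packet
# counters, e.g. avg_send_rate_bps = total_sent_bytes * 8 / duration
# = 1000 * 8 / 1 = 8000.0 and avg_send_rate_pps = 1 / 1 = 1.0.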
def test_packet_source_2():
sim = NetSim()
def arrival_gen() -> SimTime:
yield 0
while True:
yield 1
def size_gen() -> PacketSize:
while True:
yield 1000
source = PacketSource(sim.ctx, arrival_gen(), size_gen(), initial_delay=0)
sim.run(until_time=3)
assert sim.ctx.now == 3
assert sim.event_counter == 5 # initial_delay, arrival, arrival, arrival, stop_sim
pprint.pprint(source.stat.todict())
assert source.stat.todict() == {
"cur_interval_duration": 3,
"cur_stat_frame": {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_latency_at_arrival": 0,
"avg_latency_at_departure": 0.0,
"avg_latency_at_drop": 0,
"avg_receive_rate_bps": 0.0,
"avg_receive_rate_pps": 0.0,
"avg_send_rate_bps": 8000.0,
"avg_send_rate_pps": 1.0,
"duration": 3,
"last_state_change_timestamp": 2,
"timestamp": 3,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_received_bytes": 0,
"total_received_pkts": 0,
"total_sent_bytes": 3000,
"total_sent_pkts": 3,
},
"cur_timestamp": 3,
"last_state_change_timestamp": 2,
"prev_stat_frame": {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_latency_at_arrival": 0,
"avg_latency_at_departure": 0.0,
"avg_latency_at_drop": 0,
"avg_receive_rate_bps": 0.0,
"avg_receive_rate_pps": 0.0,
"avg_send_rate_bps": 12000.0,
"avg_send_rate_pps": 1.5,
"duration": 2,
"last_state_change_timestamp": 2,
"timestamp": 2,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_received_bytes": 0,
"total_received_pkts": 0,
"total_sent_bytes": 3000,
"total_sent_pkts": 3,
},
"prev_timestamp": 2,
"start_interval_timestamp": 0,
}
def test_packet_sink_1():
sim = NetSim()
def arrival_gen() -> SimTime:
yield 0
while True:
yield 1
def size_gen() -> PacketSize:
while True:
yield 1
source = PacketSource(sim.ctx, arrival_gen(), size_gen())
sink = PacketSink(sim.ctx)
source.subscribe(sink)
sim.run(until_time=10)
assert sim.ctx.now == 10
assert sim.event_counter == 12
pprint.pprint(sink.stat.cur_stat_frame.todict())
assert source.stat.cur_stat_frame.todict() == {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_latency_at_arrival": 0,
"avg_latency_at_departure": 0.0,
"avg_latency_at_drop": 0,
"avg_receive_rate_bps": 0.0,
"avg_receive_rate_pps": 0.0,
"avg_send_rate_bps": 8.0,
"avg_send_rate_pps": 1.0,
"duration": 10,
"last_state_change_timestamp": 9,
"timestamp": 10,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_received_bytes": 0,
"total_received_pkts": 0,
"total_sent_bytes": 10,
"total_sent_pkts": 10,
}
assert sink.stat.cur_stat_frame.todict() == {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_latency_at_arrival": 0.0,
"avg_latency_at_departure": 0,
"avg_latency_at_drop": 0,
"avg_receive_rate_bps": 8.0,
"avg_receive_rate_pps": 1.0,
"avg_send_rate_bps": 0.0,
"avg_send_rate_pps": 0.0,
"duration": 10,
"last_state_change_timestamp": 9,
"timestamp": 10,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_received_bytes": 10,
"total_received_pkts": 10,
"total_sent_bytes": 0,
"total_sent_pkts": 0,
}
def test_packet_queue_1():
sim = NetSim()
def arrival_gen() -> SimTime:
yield 0
while True:
yield 1
def size_gen() -> PacketSize:
while True:
yield 1000
source = PacketSource(sim.ctx, arrival_gen(), size_gen())
queue = PacketQueue(sim.ctx)
source.subscribe(queue)
sim.run(until_time=10)
assert sim.ctx.now == 10
assert sim.event_counter == 32
pprint.pprint(queue.stat.todict())
assert queue.stat.todict() == {
"cur_interval_duration": 10,
"cur_stat_frame": {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_get_rate_pps": 1.0,
"avg_latency_at_arrival": 0.0,
"avg_latency_at_departure": 0.0,
"avg_latency_at_drop": 0,
"avg_put_rate_pps": 1.0,
"avg_queue_len": 0.0,
"avg_receive_rate_bps": 8000.0,
"avg_receive_rate_pps": 1.0,
"avg_send_rate_bps": 8000.0,
"avg_send_rate_pps": 1.0,
"avg_wait_time": 0.0,
"cur_queue_len": 0,
"duration": 10,
"integral_queue_sum": 0,
"integral_wait_time_sum": 0,
"last_state_change_timestamp": 9,
"max_queue_len": 0,
"max_wait_time": 0,
"timestamp": 10,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_get_bytes": 10000,
"total_get_pkts": 10,
"total_put_bytes": 10000,
"total_put_pkts": 10,
"total_received_bytes": 10000,
"total_received_pkts": 10,
"total_sent_bytes": 10000,
"total_sent_pkts": 10,
},
"cur_timestamp": 10,
"last_state_change_timestamp": 9,
"prev_stat_frame": {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_get_rate_pps": 1.1111111111111112,
"avg_latency_at_arrival": 0.0,
"avg_latency_at_departure": 0.0,
"avg_latency_at_drop": 0,
"avg_put_rate_pps": 1.1111111111111112,
"avg_queue_len": 0.0,
"avg_receive_rate_bps": 8888.888888888889,
"avg_receive_rate_pps": 1.1111111111111112,
"avg_send_rate_bps": 8888.888888888889,
"avg_send_rate_pps": 1.1111111111111112,
"avg_wait_time": 0.0,
"cur_queue_len": 0,
"duration": 9,
"integral_queue_sum": 0,
"integral_wait_time_sum": 0,
"last_state_change_timestamp": 9,
"max_queue_len": 0,
"max_wait_time": 0,
"timestamp": 9,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_get_bytes": 10000,
"total_get_pkts": 10,
"total_put_bytes": 10000,
"total_put_pkts": 10,
"total_received_bytes": 10000,
"total_received_pkts": 10,
"total_sent_bytes": 10000,
"total_sent_pkts": 10,
},
"prev_timestamp": 9,
"start_interval_timestamp": 0,
}
def test_packet_queue_admission_taildrop_2():
sim = NetSim()
ADMISSION_PARAMS = {
"admission_policy": "taildrop",
}
MAX_QUEUE_LEN = 10
def arrival_gen() -> SimTime:
yield 0
while True:
for idx in range(0, 10):
yield 1 / 2**idx
def size_gen() -> PacketSize:
while True:
yield 1000
def service_gen() -> SimTime:
while True:
yield 0.25
source = PacketSource(sim.ctx, arrival_gen(), size_gen())
queue = PacketQueue(
sim.ctx,
queue_len_limit=MAX_QUEUE_LEN,
admission_params=ADMISSION_PARAMS,
service_func=service_gen(),
)
source.subscribe(queue)
sim.run(until_time=10)
assert sim.ctx.now == 10
pprint.pprint(queue.stat.todict())
assert queue.stat.todict() == {
"cur_interval_duration": 10.0,
"cur_stat_frame": {
"avg_drop_rate_bps": 4000.0,
"avg_drop_rate_pps": 0.5,
"avg_get_rate_pps": 3.6,
"avg_latency_at_arrival": 0.0,
"avg_latency_at_departure": 1.2932477678571428,
"avg_latency_at_drop": 0.0,
"avg_put_rate_pps": 4.6,
"avg_queue_len": 4.4390625,
"avg_receive_rate_bps": 40800.0,
"avg_receive_rate_pps": 5.1,
"avg_send_rate_bps": 28000.0,
"avg_send_rate_pps": 3.5,
"avg_wait_time": 1.0639105902777777,
"cur_queue_len": 10,
"duration": 10.0,
"integral_queue_sum": 44.390625,
"integral_wait_time_sum": 38.30078125,
"last_state_change_timestamp": 9.990234375,
"max_queue_len": 10,
"max_wait_time": 2.2578125,
"timestamp": 10,
"total_dropped_bytes": 5000,
"total_dropped_pkts": 5,
"total_get_bytes": 36000,
"total_get_pkts": 36,
"total_put_bytes": 46000,
"total_put_pkts": 46,
"total_received_bytes": 51000,
"total_received_pkts": 51,
"total_sent_bytes": 35000,
"total_sent_pkts": 35,
},
"cur_timestamp": 10,
"last_state_change_timestamp": 9.990234375,
"prev_stat_frame": {
"avg_drop_rate_bps": 4003.9100684261975,
"avg_drop_rate_pps": 0.5004887585532747,
"avg_get_rate_pps": 3.6035190615835777,
"avg_latency_at_arrival": 0.0,
"avg_latency_at_departure": 1.2932477678571428,
"avg_latency_at_drop": 0.0,
"avg_put_rate_pps": 4.604496578690127,
"avg_queue_len": 4.433626588465298,
"avg_receive_rate_bps": 40839.882697947214,
"avg_receive_rate_pps": 5.104985337243402,
"avg_send_rate_bps": 28027.370478983383,
"avg_send_rate_pps": 3.5034213098729228,
"avg_wait_time": 1.0639105902777777,
"cur_queue_len": 10,
"duration": 9.990234375,
"integral_queue_sum": 44.29296875,
"integral_wait_time_sum": 38.30078125,
"last_state_change_timestamp": 9.990234375,
"max_queue_len": 10,
"max_wait_time": 2.2578125,
"timestamp": 9.990234375,
"total_dropped_bytes": 5000,
"total_dropped_pkts": 5,
"total_get_bytes": 36000,
"total_get_pkts": 36,
"total_put_bytes": 46000,
"total_put_pkts": 46,
"total_received_bytes": 51000,
"total_received_pkts": 51,
"total_sent_bytes": 35000,
"total_sent_pkts": 35,
},
"prev_timestamp": 9.990234375,
"start_interval_timestamp": 0,
}
def test_packet_queue_admission_red_1():
sim = NetSim()
ADMISSION_PARAMS = {
"admission_policy": "red",
"wq": 0.5,
"minth": 3,
"maxth": 10,
"maxp": 0.99,
"s": 0.1,
}
MAX_QUEUE_LEN = 10
def arrival_gen() -> SimTime:
yield 0
while True:
for idx in range(0, 10):
yield 1 / 2**idx
def size_gen() -> PacketSize:
while True:
yield 1000
def service_gen() -> SimTime:
while True:
yield 0.25
source = PacketSource(sim.ctx, arrival_gen(), size_gen())
queue = PacketQueue(
sim.ctx,
queue_len_limit=MAX_QUEUE_LEN,
admission_params=ADMISSION_PARAMS,
service_func=service_gen(),
)
source.subscribe(queue)
sim.run(until_time=10)
assert sim.ctx.now == 10
pprint.pprint(queue.stat.todict())
print(queue._queue._queue)
assert queue.stat.todict() == {
"cur_interval_duration": 10.0,
"cur_stat_frame": {
"avg_drop_rate_bps": 7200.0,
"avg_drop_rate_pps": 0.9,
"avg_get_rate_pps": 3.6,
"avg_latency_at_arrival": 0.0,
"avg_latency_at_departure": 0.9225446428571429,
"avg_latency_at_drop": 0.0,
"avg_put_rate_pps": 4.2,
"avg_queue_len": 2.4330078125,
"avg_receive_rate_bps": 40800.0,
"avg_receive_rate_pps": 5.1,
"avg_send_rate_bps": 28000.0,
"avg_send_rate_pps": 3.5,
"avg_wait_time": 0.6610243055555556,
"cur_queue_len": 6,
"duration": 10.0,
"integral_queue_sum": 24.330078125,
"integral_wait_time_sum": 23.796875,
"last_state_change_timestamp": 9.990234375,
"max_queue_len": 7,
"max_wait_time": 1.5078125,
"timestamp": 10,
"total_dropped_bytes": 9000,
"total_dropped_pkts": 9,
"total_get_bytes": 36000,
"total_get_pkts": 36,
"total_put_bytes": 42000,
"total_put_pkts": 42,
"total_received_bytes": 51000,
"total_received_pkts": 51,
"total_sent_bytes": 35000,
"total_sent_pkts": 35,
},
"cur_timestamp": 10,
"last_state_change_timestamp": 9.990234375,
"prev_stat_frame": {
"avg_drop_rate_bps": 7207.038123167155,
"avg_drop_rate_pps": 0.9008797653958944,
"avg_get_rate_pps": 3.6035190615835777,
"avg_latency_at_arrival": 0.0,
"avg_latency_at_departure": 0.9225446428571429,
"avg_latency_at_drop": 0.0,
"avg_put_rate_pps": 4.204105571847507,
"avg_queue_len": 2.429521016617791,
"avg_receive_rate_bps": 40839.882697947214,
"avg_receive_rate_pps": 5.104985337243402,
"avg_send_rate_bps": 28027.370478983383,
"avg_send_rate_pps": 3.5034213098729228,
"avg_wait_time": 0.6610243055555556,
"cur_queue_len": 6,
"duration": 9.990234375,
"integral_queue_sum": 24.271484375,
"integral_wait_time_sum": 23.796875,
"last_state_change_timestamp": 9.990234375,
"max_queue_len": 7,
"max_wait_time": 1.5078125,
"timestamp": 9.990234375,
"total_dropped_bytes": 9000,
"total_dropped_pkts": 9,
"total_get_bytes": 36000,
"total_get_pkts": 36,
"total_put_bytes": 42000,
"total_put_pkts": 42,
"total_received_bytes": 51000,
"total_received_pkts": 51,
"total_sent_bytes": 35000,
"total_sent_pkts": 35,
},
"prev_timestamp": 9.990234375,
"start_interval_timestamp": 0,
}
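# Versus the taildrop run above with the same source: RED drops earlier and more
# often (9 vs 5 packets) but keeps the backlog smaller (max_queue_len 7 vs 10)
# and cuts the average wait from ~1.06 s to ~0.66 s, trading early loss for
# lower queueing latency.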
def test_packet_interface_tx_1():
sim = NetSim()
def arrival_gen() -> SimTime:
while True:
yield 1
def size_gen() -> PacketSize:
while True:
yield 1500
source = PacketSource(sim.ctx, arrival_gen(), size_gen())
interface_tx = PacketInterfaceTx(sim.ctx, bw=64000)
source.subscribe(interface_tx)
sim.run(until_time=10)
assert sim.ctx.now == 10
assert sim.event_counter == 38
pprint.pprint(interface_tx.stat.todict())
assert interface_tx.stat.todict() == {
"cur_interval_duration": 10.0,
"cur_stat_frame": {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_get_rate_pps": 0.9,
"avg_latency_at_arrival": 0.0,
"avg_latency_at_departure": 0.1875,
"avg_latency_at_drop": 0,
"avg_put_rate_pps": 0.9,
"avg_queue_len": 0.0,
"avg_receive_rate_bps": 10800.0,
"avg_receive_rate_pps": 0.9,
"avg_send_rate_bps": 10800.0,
"avg_send_rate_pps": 0.9,
"avg_wait_time": 0.0,
"cur_queue_len": 0,
"duration": 10.0,
"integral_queue_sum": 0.0,
"integral_wait_time_sum": 0,
"last_state_change_timestamp": 9.1875,
"max_queue_len": 0,
"max_wait_time": 0,
"timestamp": 10,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_get_bytes": 13500,
"total_get_pkts": 9,
"total_put_bytes": 13500,
"total_put_pkts": 9,
"total_received_bytes": 13500,
"total_received_pkts": 9,
"total_sent_bytes": 13500,
"total_sent_pkts": 9,
},
"cur_timestamp": 10,
"last_state_change_timestamp": 9.1875,
"prev_stat_frame": {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_get_rate_pps": 0.9795918367346939,
"avg_latency_at_arrival": 0.0,
"avg_latency_at_departure": 0.1875,
"avg_latency_at_drop": 0,
"avg_put_rate_pps": 0.9795918367346939,
"avg_queue_len": 0.0,
"avg_receive_rate_bps": 11755.102040816326,
"avg_receive_rate_pps": 0.9795918367346939,
"avg_send_rate_bps": 11755.102040816326,
"avg_send_rate_pps": 0.9795918367346939,
"avg_wait_time": 0.0,
"cur_queue_len": 0,
"duration": 9.1875,
"integral_queue_sum": 0.0,
"integral_wait_time_sum": 0,
"last_state_change_timestamp": 9.1875,
"max_queue_len": 0,
"max_wait_time": 0,
"timestamp": 9.1875,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_get_bytes": 13500,
"total_get_pkts": 9,
"total_put_bytes": 13500,
"total_put_pkts": 9,
"total_received_bytes": 13500,
"total_received_pkts": 9,
"total_sent_bytes": 13500,
"total_sent_pkts": 9,
},
"prev_timestamp": 9.1875,
"start_interval_timestamp": 0,
}
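# Serialization-delay check: a 1500-byte packet is 12000 bits, and at bw=64000
# bps it takes 12000 / 64000 = 0.1875 s to send, which matches the
# avg_latency_at_departure value above.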
def test_packet_interface_tx_2():
sim = NetSim()
def arrival_gen() -> SimTime:
while True:
yield 1
def size_gen() -> PacketSize:
while True:
yield 1500
source = PacketSource(sim.ctx, arrival_gen(), size_gen())
interface_tx = PacketInterfaceTx(sim.ctx, bw=9600, queue_len_limit=2)
source.subscribe(interface_tx)
sim.run(until_time=10)
assert sim.ctx.now == 10
assert sim.event_counter == 35
pprint.pprint(interface_tx.stat.todict())
assert interface_tx.stat.todict() == {
"cur_interval_duration": 10.0,
"cur_stat_frame": {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_get_rate_pps": 0.8,
"avg_latency_at_arrival": 0.0,
"avg_latency_at_departure": 2.0,
"avg_latency_at_drop": 0,
"avg_put_rate_pps": 0.9,
"avg_queue_len": 0.8,
"avg_receive_rate_bps": 10800.0,
"avg_receive_rate_pps": 0.9,
"avg_send_rate_bps": 8400.0,
"avg_send_rate_pps": 0.7,
"avg_wait_time": 0.875,
"cur_queue_len": 1,
"duration": 10.0,
"integral_queue_sum": 8.0,
"integral_wait_time_sum": 7.0,
"last_state_change_timestamp": 9.75,
"max_queue_len": 2,
"max_wait_time": 1.75,
"timestamp": 10,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_get_bytes": 12000,
"total_get_pkts": 8,
"total_put_bytes": 13500,
"total_put_pkts": 9,
"total_received_bytes": 13500,
"total_received_pkts": 9,
"total_sent_bytes": 10500,
"total_sent_pkts": 7,
},
"cur_timestamp": 10,
"last_state_change_timestamp": | |
<filename>auth_api/api/api_users/endpoint_usr_register.py<gh_stars>1-10
# -*- encoding: utf-8 -*-
"""
endpoint_usr_register.py
"""
from auth_api.api import *
log.debug(">>> api_users ... creating api endpoints for USER_REGISTER")
from . import api, document_type
### create namespace
ns = Namespace('register', description='Users : register related endpoints')
### import models
from auth_api._models.models_user import *
model_register_user = NewUser(ns).model
model_user = User_infos(ns)
model_register_user_out = model_user.model_complete_out
model_user_complete_in = model_user.model_complete_in
model_user_access = model_user.model_access
model_user_login_out = model_user.model_login_out
models = {
"model_doc_in" : model_user_complete_in ,
"model_doc_out" : model_register_user_out
}
### CREATE DEFAULT USRs FROM config_default_docs
### import default documents
from auth_api.config_default_docs import default_system_user_list
for dft_usr in default_system_user_list :
log.debug ("dft_usr : \n{}".format(pformat(dft_usr)))
Query_db_insert(
ns,
models,
document_type,
dft_usr,
value_to_check = dft_usr["auth"]["role"],
field_to_check = "auth.role",
user_role = "system"
)
### + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ###
### ROUTES
### + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ###
### cf : response codes : https://restfulapi.net/http-status-codes/
@ns.doc(security='apikey')
@ns.route('')
class Register(Resource):
@ns.doc('usr_register')
@ns.doc(security='apikey')
@ns.expect(model_register_user, validate=True)
@anonymous_required
# @ns.marshal_with(model_register_user_out, envelope="new_user", code=201)
def post(self):
"""
Create / register a new user
>
--- needs : an anonymous access_token, an email, a name, a surname and a password
>>> returns : msg, access_token + refresh_token for the not-yet-confirmed email, and the user's marshalled data
"""
print()
print("-+- "*40)
log.debug( "ROUTE class : %s", self.__class__.__name__ )
log.debug ("payload : \n{}".format(pformat(ns.payload)))
### antispam/ghost field to filter out spams and robots
antispam_check = True
if app.config["ANTISPAM_MODE"] == "yes" :
payload_antispam = ns.payload["antispam"]
antispam_check = payload_antispam == app.config["ANTISPAM_VALUE"]
### retrieve infos from form
if app.config["RSA_MODE"] == "yes" :
payload_email_encrypted = ns.payload["email_encrypt"]
log.debug("payload_email_encrypted : \n%s", payload_email_encrypted )
payload_email = email_decoded = RSAdecrypt(payload_email_encrypted)
log.debug("email_decoded : %s", email_decoded )
payload_pwd_encrypted = ns.payload["pwd_encrypt"]
log.debug("payload_pwd_encrypted : \n%s", payload_pwd_encrypted )
payload_pwd = password_decoded = RSAdecrypt(payload_pwd_encrypted)
log.debug("password_decoded : %s", password_decoded )
else :
payload_email= ns.payload["email"]
log.debug("payload_email : \n%s", payload_email )
payload_pwd = ns.payload["pwd"]
log.debug("payload_pwd : \n%s", payload_pwd )
### chek if user already exists in db
existing_user = mongo_users.find_one({"infos.email" : payload_email})
log.debug("existing_user : %s ", pformat(existing_user))
if antispam_check and existing_user is None and payload_pwd not in bad_passwords and payload_email != "anonymous" :
### create hashpassword
hashpass = generate_password_hash(payload_pwd, method='sha256')
log.debug("hashpass : %s", hashpass)
user_data = ns.payload.get('u_data', {})
### create user dict from form's data
new_user_infos = {
"infos" : ns.payload,
"u_data" : user_data,
# "auth" : ns.payload
"log" : { "created_at" : datetime.utcnow() },
"profile" : { "lang" : ns.payload["lang"]}
}
new_user = marshal( new_user_infos , model_user_complete_in )
new_user["auth"]["pwd"] = <PASSWORD>
new_user["infos"]["email"] = payload_email
new_user["infos"]["open_level_edit"] = "private"
new_user["infos"]["open_level_show"] = "commons"
new_user["specs"]["doc_type"] = "usr"
new_user["team"] = []
### agreement to terms and conditions
new_user["infos"]["agreement"] = ns.payload["agreement"]
### temporary save new user in db
_id = mongo_users.insert( new_user )
log.info("new user is being created : \n%s", pformat(new_user))
log.info("_id : \n%s", pformat(_id))
### add _id to data
new_user["_id"] = str(_id) # str(user_created["_id"])
### create access tokens
log.debug("... create_access_token")
access_token = create_access_token( identity=new_user )
### create refresh tokens
log.debug("... refresh_token")
### just create a temporary refresh token once / so it could be blacklisted
expires = app.config["JWT_CONFIRM_EMAIL_REFRESH_TOKEN_EXPIRES"] # timedelta(days=7)
refresh_token = create_refresh_token( identity=new_user, expires_delta=expires )
### add confirm_email to claims for access_token_confirm_email
new_user["confirm_email"] = True
access_token_confirm_email = create_access_token( identity=new_user, expires_delta=expires )
log.debug("access_token_confirm_email : \n %s", access_token_confirm_email )
tokens = {
'access_token' : access_token,
'refresh_token' : refresh_token,
# 'rsa_token' : public_key_str,
# 'access_token_confirm_email' : access_token_confirm_email
}
if app.config["RSA_MODE"]=="yes" :
tokens["rsa_token"] = public_key_str
log.info("tokens : \n %s", pformat(tokens))
### update new user in db
# user_created = mongo_users.find_one({"infos.email" : payload_email})
user_created = mongo_users.find_one({"_id" : _id})
user_created["log"]["created_by"] = _id
user_created["auth"]["refr_tok"] = refresh_token
mongo_users.save(user_created)
log.info("new user is updated with its tokens : \n%s", pformat(new_user))
### marshall output
new_user_out = marshal( new_user, model_register_user_out )
message = "new user has been created but no confirmation link has been sent"
### send a confirmation email if RUN_MODE is not 'default'
if app.config["RUN_MODE"] in ["prod", "dev_email", "preprod"] :
try :
# create url for confirmation to send in the mail
confirm_url = app.config["DOMAIN_NAME"] + api.url_for(Confirm_email, token=access_token_confirm_email, external=True)
log.info("confirm_url : \n %s", confirm_url)
# generate html body of the email
html = render_template('emails/confirm_email.html', confirm_url=confirm_url)
# send the mail
send_email( "Confirm your email", payload_email, template=html )
message = "new user has been created and a confirmation link has been sent, you have {} days to confirm your email, otherwise this account will be erased...".format(expires)
except Exception :
message = "new user has been created but error occured while sending confirmation link to the email"
return {
"msg" : message,
"expires" : str(expires),
"tokens" : tokens,
"_id" : str(user_created["_id"]),
"infos" : user_created["infos"],
"data" : new_user_out,
}, 200
else :
if existing_user :
return {
"msg" : "email '{}' is already taken ".format(payload_email)
}, 401
if antispam_check == False :
return {
"msg" : "aaaah you're a spam !!! "
}, 406
else :
return {
"msg" : "error and checkmate... "
}, 404
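### A hypothetical client-side sketch of the registration flow above
### (endpoint path, base URL and payload keys assumed for illustration,
### not taken from this module's route definitions):
###
###   import requests
###   resp = requests.post(API_BASE + "/users/register", json={
###       "email": "jane@example.com", "pwd": "s3cret",
###       "lang": "en", "agreement": True,
###   })
###   tokens = resp.json()["tokens"]   # access_token + refresh_token
###   # the emailed confirmation link then hits:
###   #   GET /users/confirm_email?token=<access_token_confirm_email>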
@ns.doc(security='apikey')
@ns.route("/confirm_email")
@ns.response(404, 'error in the redirection to confirm email')
@ns.param('token', 'The token sent by email when registering to confirm your email')
class Confirm_email(Resource):
# The config query parameter where the JWT is looked for is `token`;
# the default query parameter where the JWT is looked for is `jwt`,
# and can be changed with the JWT_QUERY_STRING_NAME option.
# Making a request to this endpoint would look like:
# /confirm?token=<REFRESH_TOKEN>
@ns.doc('confirm_email')
# @jwt_required
# @jwt_refresh_token_required ### verify refresh_token from request args or header
@confirm_email_required
def get(self):
"""
URL to confirm the email sent once registered or when changing email
>
--- needs : access_token_confirm_email as URL argument like :
'.../api/users/register/confirm?token=<access_token_confirm_email>'
>>> returns : msg, access_token, refresh_tokens
"""
print()
print("-+- "*40)
log.debug( "ROUTE class : %s", self.__class__.__name__ )
user_identity = get_jwt_identity()
log.debug( "user_identity : \n %s", user_identity )
### check client identity and claims
claims = get_jwt_claims()
log.debug("claims : \n %s", pformat(claims) )
### !!! only works with access_token
# claims = get_jwt_claims()
# log.debug(" claims : \n %s ", pformat(claims) )
### retrieve and read token to get user's email
# user_oid = claims["_id"]
### find user created in db
user_to_confirm = mongo_users.find_one({"_id" : ObjectId(user_identity)})
# user_to_confirm = mongo_users.find_one({"infos.email" : user_identity })
if user_to_confirm :
### marshal user infos to create token
user_light = marshal( user_to_confirm , model_user_login_out)
user_light["_id"] = str(user_to_confirm["_id"])
log.debug( "user_light : \n %s", pformat(user_light) )
### create a new access token
access_token = create_access_token(identity=user_light)
### check if user is already confirmed
is_confirmed = user_to_confirm["auth"]["conf_usr"]
is_blacklisted = user_to_confirm["auth"]["is_blacklisted"]
### user is not confirmed yet
if is_confirmed == False and is_blacklisted == False :
### renew the existing refresh token as a more valid token
refresh_token = create_refresh_token(identity=user_light)
### confirm user's email and create a real refresh_token
user_to_confirm["auth"]["refr_tok"] = refresh_token
user_to_confirm["auth"]["role"] = "registred"
user_to_confirm["auth"]["conf_usr"] = True
### register as admin if user is the first to be created and confirmed in collection
count_users = mongo_users.count()
if count_users == 1 :
user_to_confirm["auth"]["role"] = "admin"
### update modification in user data
user_to_confirm = create_modif_log(doc=user_to_confirm, action="confirm_email" )
### save data
mongo_users.save(user_to_confirm)
### store tokens
tokens = {
'access_token' : access_token,
'refresh_token' : refresh_token,
# 'rsa_token' : public_key_str,
}
if app.config["RSA_MODE"]=="yes" :
tokens["rsa_token"] = public_key_str
log.info("tokens : \n%s", pformat(tokens))
return {
"msg" : "identity '{}' confirmed, new refresh token created...".format(user_identity),
"tokens" : tokens,
"redirection_link" : app.config["REDIRECTION_FRONT"]
}, 200
### user is already confirmed
else :
### retrieve the existing refresh_token
refresh_token = user_to_confirm["auth"]["refr_tok"]
### store tokens
tokens = {
'access_token' : access_token,
'refresh_token' : refresh_token,
# 'rsa_token' : public_key_str,
subnet_address: Network address of the subnet that is the container
of this address.
subnet_cidr: CIDR of the subnet that is the container of this
address.
subnet_id: Subnet ID that is the container of this address.
tenant: The Cloud API Tenant object.
vm_availability_zone: Availability zone of the VM.
vm_comment: VM comment.
vm_creation_time: Date/time the VM was first created as NIOS object.
vm_hostname: Host part of the FQDN of the address attached to the
primary interface.
vm_id: The UUID of the Virtual Machine.
vm_kernel_id: Kernel ID of the VM that this address is associated
with.
vm_last_update_time: Last time the VM was updated.
vm_name: The name of the Virtual Machine.
vm_network_count: Count of networks containing all the addresses of
the VM.
vm_operating_system: Operating system that the VM is running.
vm_type: Type of the VM this address is associated with.
vm_vpc_address: Network address of the VPC of the VM that this
address is associated with.
vm_vpc_cidr: CIDR of the VPC of the VM that this address is
associated with.
vm_vpc_id: Identifier of the VPC where the VM is defined.
vm_vpc_name: Name of the VPC where the VM is defined.
vm_vpc_ref: Reference to the VPC where the VM is defined.
"""
_infoblox_type = 'grid:cloudapi:vmaddress'
_fields = ['address', 'address_type', 'associated_ip',
'associated_object_types', 'associated_objects', 'cloud_info',
'dns_names', 'elastic_address', 'interface_name', 'is_ipv4',
'mac_address', 'ms_ad_user_data', 'network', 'network_view',
'port_id', 'private_address', 'private_hostname',
'public_address', 'public_hostname', 'subnet_address',
'subnet_cidr', 'subnet_id', 'tenant', 'vm_availability_zone',
'vm_comment', 'vm_creation_time', 'vm_hostname', 'vm_id',
'vm_kernel_id', 'vm_last_update_time', 'vm_name',
'vm_network_count', 'vm_operating_system', 'vm_type',
'vm_vpc_address', 'vm_vpc_cidr', 'vm_vpc_id', 'vm_vpc_name',
'vm_vpc_ref']
_search_for_update_fields = ['address', 'vm_name']
_updateable_search_fields = []
_all_searchable_fields = ['address', 'mac_address', 'vm_id', 'vm_name']
_return_fields = ['address', 'is_ipv4', 'network_view', 'port_id',
'vm_name']
_remap = {}
_shadow_fields = ['_ref']
class GridDashboard(InfobloxObject):
""" GridDashboard: Grid Dashboard object.
Corresponds to WAPI object 'grid:dashboard'
The Grid Dashboard object provides a configuration interface for
threshold values that are used to warn about critical ATP, RPZ and
Analytics events. These threshold values are used to calculate the
security status for ATP, RPZ, and Analytics.
Fields:
analytics_tunneling_event_critical_threshold: The Grid Dashboard
critical threshold for Analytics tunneling events.
analytics_tunneling_event_warning_threshold: The Grid Dashboard
warning threshold for Analytics tunneling events.
atp_critical_event_critical_threshold: The Grid Dashboard critical
threshold for ATP critical events.
atp_critical_event_warning_threshold: The Grid Dashboard warning
threshold for ATP critical events.
atp_major_event_critical_threshold: The Grid Dashboard critical
threshold for ATP major events.
atp_major_event_warning_threshold: The Grid Dashboard warning
threshold for ATP major events.
atp_warning_event_critical_threshold: The Grid Dashboard critical
threshold for ATP warning events.
atp_warning_event_warning_threshold: The Grid Dashboard warning
threshold for ATP warning events.
rpz_blocked_hit_critical_threshold: The critical threshold value for
blocked RPZ hits in the Grid dashboard.
rpz_blocked_hit_warning_threshold: The warning threshold value for
blocked RPZ hits in the Grid dashboard.
rpz_passthru_event_critical_threshold: The Grid Dashboard critical
threshold for RPZ passthru events.
rpz_passthru_event_warning_threshold: The Grid Dashboard warning
threshold for RPZ passthru events.
rpz_substituted_hit_critical_threshold: The critical threshold value
for substituted RPZ hits in the Grid dashboard.
rpz_substituted_hit_warning_threshold: The warning threshold value
for substituted RPZ hits in the Grid dashboard.
"""
_infoblox_type = 'grid:dashboard'
_fields = ['analytics_tunneling_event_critical_threshold',
'analytics_tunneling_event_warning_threshold',
'atp_critical_event_critical_threshold',
'atp_critical_event_warning_threshold',
'atp_major_event_critical_threshold',
'atp_major_event_warning_threshold',
'atp_warning_event_critical_threshold',
'atp_warning_event_warning_threshold',
'rpz_blocked_hit_critical_threshold',
'rpz_blocked_hit_warning_threshold',
'rpz_passthru_event_critical_threshold',
'rpz_passthru_event_warning_threshold',
'rpz_substituted_hit_critical_threshold',
'rpz_substituted_hit_warning_threshold']
_search_for_update_fields = []
_updateable_search_fields = []
_all_searchable_fields = []
_return_fields = ['analytics_tunneling_event_critical_threshold',
'analytics_tunneling_event_warning_threshold',
'atp_critical_event_critical_threshold',
'atp_critical_event_warning_threshold',
'atp_major_event_critical_threshold',
'atp_major_event_warning_threshold',
'atp_warning_event_critical_threshold',
'atp_warning_event_warning_threshold',
'rpz_blocked_hit_critical_threshold',
'rpz_blocked_hit_warning_threshold',
'rpz_passthru_event_critical_threshold',
'rpz_passthru_event_warning_threshold',
'rpz_substituted_hit_critical_threshold',
'rpz_substituted_hit_warning_threshold']
_remap = {}
_shadow_fields = ['_ref']
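# A hedged usage sketch for reading these thresholds with the infoblox-client
# connector (connection parameters invented for illustration):
#
#   from infoblox_client import connector
#   conn = connector.Connector({'host': 'gm.example.com',
#                               'username': 'admin', 'password': 'secret'})
#   dashboard = GridDashboard.search(conn)
#   print(dashboard.atp_critical_event_critical_threshold)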
class GridDhcpproperties(InfobloxObject):
""" GridDhcpproperties: Grid DHCP properties object.
Corresponds to WAPI object 'grid:dhcpproperties'
This object represents a subset of the Infoblox Grid DHCP
properties.
Fields:
authority: The Grid-level authority flag. This flag specifies
whether a DHCP server is authoritative for a domain.
bootfile: The name of a file that DHCP clients need to boot. Some
DHCP clients use BOOTP (bootstrap protocol) or include the boot
file name option in their DHCPREQUEST messages.
bootserver: The name of the server on which a boot file is stored.
capture_hostname: The Grid-level capture hostname flag. Set this
flag to capture the hostname and lease time when assigning a
fixed address.
ddns_domainname: The member DDNS domain name value.
ddns_generate_hostname: Determines if the ability of a DHCP server
to generate a host name and update DNS with this host name when
it receives a DHCP REQUEST message that does not include a host
name is enabled or not.
ddns_retry_interval: Determines the retry interval when the DHCP
server makes repeated attempts to send DDNS updates to a DNS
server.
ddns_server_always_updates: Determines that only the DHCP server is
allowed to update DNS, regardless of the requests from the DHCP
clients.
ddns_ttl: The DDNS TTL (Dynamic DNS Time To Live) value specifies
the number of seconds an IP address for the name is cached.
ddns_update_fixed_addresses: Determines if the Grid DHCP server's
ability to update the A and PTR records with a fixed address is
enabled or not.
ddns_use_option81: Determines if support for option 81 is enabled or
not.
deny_bootp: Determines if deny BOOTP is enabled or not.
disable_all_nac_filters: If set to True, NAC filters will be
disabled on the Infoblox Grid.
dns_update_style: The update style for dynamic DNS updates.
email_list: The Grid-level email_list value. Specify an e-mail
address to which you want the Infoblox appliance to send e-mail
notifications when the DHCP address usage for the grid crosses a
threshold. You can create a list of several e-mail addresses.
enable_ddns: Determines if the member DHCP server's ability to send
DDNS updates is enabled or not.
enable_dhcp_thresholds: Represents the watermarks above or below
which address usage in a network is unexpected and might warrant
your attention.
enable_email_warnings: Determines if e-mail warnings are enabled or
disabled. When DHCP threshold is enabled and DHCP address usage
crosses a watermark threshold, the appliance sends an e-mail
notification to an administrator.
enable_fingerprint: Determines if the fingerprint feature is enabled
or not. If you enable this feature, the server will match a
fingerprint for incoming lease requests.
enable_gss_tsig: Determines whether all appliances are enabled to
receive GSS-TSIG authenticated updates from DHCP clients.
enable_hostname_rewrite: Determines if the Grid-level host name
rewrite feature is enabled or not.
enable_leasequery: Determines if lease query is allowed or not.
enable_roaming_hosts: Determines if DHCP servers in a Grid support
roaming hosts or not.
enable_snmp_warnings: Determines if the SNMP warnings on Grid-level
are enabled or not. When DHCP threshold is enabled and DHCP
address usage crosses a watermark threshold, the appliance sends
an SNMP trap to the trap receiver that you defined
at the Grid member level.
format_log_option_82: The format option for Option 82 logging.
grid: Determines the Grid that serves DHCP. This specifies a group
of Infoblox appliances that are connected together to provide a
single point of device administration and service configuration
in a secure, highly available environment.
gss_tsig_keys: The list of GSS-TSIG keys for a Grid DHCP object.
high_water_mark: Determines the high watermark value of a Grid DHCP
server. If the percentage of allocated addresses exceeds this
watermark, the appliance makes a syslog entry and sends an
e-mail notification (if enabled). Specifies the percentage of
allocated addresses. The range is from 1 to 100.
high_water_mark_reset: Determines the high watermark reset value of
a member DHCP server. If the percentage of allocated addresses
drops below this value, a corresponding SNMP trap is reset.
Specifies the percentage of allocated addresses. The range is
from 1 to 100. The high watermark reset value must be lower than
the high watermark value.
hostname_rewrite_policy: The name of the default hostname rewrite
policy, which is also in the protocol_hostname_rewrite_policies
array.
ignore_dhcp_option_list_request: Determines if the ignore DHCP
option list request flag of a Grid DHCP is enabled or not. If
this flag is set to true all available DHCP options will be
returned to the client.
ignore_id: Indicates whether the appliance will ignore DHCP client
IDs or MAC addresses. Valid values are "NONE", "CLIENT", or
"MACADDR". The default is "NONE".
ignore_mac_addresses: A list of MAC addresses the appliance will
ignore.
immediate_fa_configuration: Determines if the fixed address
configuration takes effect immediately without DHCP service
restart or not.
'''serialize/deserialize almost any kind of python object'''
# TODO:
# memoryview -- not possible? .tolist or .tobytes will return the data, but i haven't found a way to get the object that it references
# bytearray -- use str() to get the data
# operator.methodcaller -- can be done by using an object with __getattr__ for the name, and grabbing the method's *args,**kwds for the default args. hopefully doing this doesn't influence state...
# TODO: add a decorator that can transform anything into an object that will pass an instance of self
# to serialization service
import sys
if sys.version_info.major < 3:
import __builtin__
else:
import builtins as __builtin__
__all__ = ['caller','pack','unpack','loads','dumps']
VERSION = '0.7'
## FIXME: none of these are enabled due to their hackiness, search for XXX
# attribute[ignore=list of fu type names] -- ignore serializing/deserializing these types
# attribute[globals=dict] -- use the provided dict as the globals for deserialized objects
# attribute[exclude=list of var names] -- ignore serializing/deserializing these specific names
# attribute[local=list of module names] -- use the local versions of these modules
# attribute[recurse={type name : [list of types]}] -- only recurse into these types from this type
# attribute[norecurse={type name : [list of types]}] -- don't recurse into these types from this type
########
class package:
'''
This class is responsible for exposing the interface used to marshal/unmarshal
an object. The reason for the class is to close around the internals of this
module hiding the functionality that is used for serialization. The only
interfaces that are exposed are the pack() and unpack() classmethods.
'''
@classmethod
def pack(cls, object, **attributes):
'''convert any python object into a packable format'''
st = cls.stash()
id = st.store(object,**attributes)
return VERSION,id,st.packed()
@classmethod
def unpack(cls, data, **attributes):
'''unpack data into a real python object'''
ver,id,data = data
assert ver == VERSION,'fu.package.unpack : invalid version %s != %s'%(ver,VERSION)
st = cls.stash()
st.unpack(data)
return st.fetch(id,**attributes)
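# A minimal round-trip sketch of the two classmethods above (illustrative
# only; the packed value is the (VERSION, root-id, stash-data) tuple):
#
#   packed = package.pack({'key': [1, 2, 3]})
#   clone = package.unpack(packed)
#   assert clone == {'key': [1, 2, 3]}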
### stuff that's hidden within this namespace
class cache(object):
'''
This class is used to handle the registration of the different serializers
and deserializers for a python type/constant. The registration of the
different implementations is done via decorator at which point one can
use the .by*() classmethods to identify the handler for their type or
instance.
'''
class registration:
id,const,type = {},{},{}
@staticmethod
def hash(data):
agg = 5381
for item in iter(data):
agg = (((agg<<5) + agg) ^ ord(item)) & 0xffffffff
return agg
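# (This is the classic djb2 string hash in its xor variant, masked to 32
# bits; it turns a definition's __name__ into the stable integer id used
# as the registration key below.)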
## registration of a cls into cache
@classmethod
def register(cls, definition):
id = cls.registration.hash(definition.__name__)
if id in cls.registration.id:
raise KeyError("Duplicate id %x in cache"% id)
cls.registration.id[id] = definition
definition.id = id
return definition
@classmethod
def register_type(cls, definition):
'''registers the definition with the specified builtin type'''
type = definition.getclass()
if type in cls.registration.type:
raise KeyError("Duplicate type %r in cache"% type)
definition = cls.register(definition)
cls.registration.type[type] = definition
return definition
@classmethod
def register_const(cls, definition):
const = definition.getclass()
if const in cls.registration.const:
raise KeyError("Duplicate constant %r in cache"% const)
definition = cls.register(definition)
cls.registration.const[const] = definition
return definition
## determining a registered cls from various types
@classmethod
def byid(cls, id):
'''search through registration.id for a definition'''
return cls.registration.id[id]
@classmethod
def byclass(cls, type):
'''search through registration.type for a definition'''
return cls.registration.type[type]
@classmethod
def byconst(cls, const):
'''search through registration.const for a definition'''
result = cls.registration.const[const]
if result.getclass() is not const:
raise KeyError(const)
return result
@classmethod
def byinstance(cls, instance):
'''iterate through all registered definitions to determine which one can work for serialization/deserialization'''
global package,object_,module_
type, object, module = __builtin__.type, __builtin__.object, __builtin__.__class__
t = type(instance)
# any constant
try:
return package.cache.byconst(instance)
except (KeyError, TypeError):
pass
# special types
if t is module and instance is not module:
# XXX: implement binary modules
if hasattr(instance, '__file__'):
if instance.__file__.endswith('.pyd'):
raise NotImplementedError('Binary modules are un-supported')
return module_
return module_local
# by type
try:
return package.cache.byclass(t)
except (KeyError, TypeError):
pass
# builtins for known-modules that can be copied from
if t == builtin_.getclass():
if instance.__module__ is None:
return partial # XXX
raise KeyError(instance, 'Unable to determine module name from builtin method')
return builtin_
# catch-all object
if hasattr(instance, '__dict__') or hasattr(instance, '__slots__'): # is this an okay assumption?
return object_
# FIXME: if it follows the pickle protocol..
if hasattr(instance, '__getstate__'):
raise NotImplementedError('Pickle protocol for type %r is unimplemented'% instance)
pickle.loads(pickle.dumps(instance))
return partial
raise KeyError(instance)
class stash(__builtin__.object):
'''
This class is used to recursively serialize/deserialize an instance or
type. It is temporarily constructed and will use the cache to identify
how to serialize/deserialize the data that is passed to it. Once all
the references are processed, a tuple of the objects and constants is
then returned. This can then be re-packed into a bytestream which can
then be transported wherever the user needs it.
'''
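# A hedged sketch of the store/packed/unpack/fetch cycle described above
# (assumes the list/dict serializers defined elsewhere in this module):
#
#   st = package.stash()
#   ident = st.store([1, {'a': 2}])      # returns the root object's id
#   payload = st.packed()                # the (cons_data, inst_data) tuple
#   st2 = package.stash(); st2.unpack(payload)
#   clone = st2.fetch(ident)             # rebuilt [1, {'a': 2}]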
def __init__(self):
# cache for .fetch
self.fetch_cache = {}
self.store_cache = __builtin__.set()
# caches for .store
self.cons_data = {}
self.inst_data = {}
@staticmethod
def clsbyid(item): return package.cache.byid(item)
@staticmethod
def clsbyinstance(item): return package.cache.byinstance(item)
def __repr__(self):
cons = [(k, (self.clsbyid(clsid).__name__, v)) for k, (clsid,v) in self.cons_data.items()]
inst = [(k, (self.clsbyid(clsid).__name__, v)) for k, (clsid,v) in self.inst_data.items()]
return "<class '%s'> %s"%(self.__class__.__name__, repr(__builtin__.dict(cons)))
## serializing/deserializing entire state
def packed(self):
return self.cons_data,self.inst_data
def unpack(self, data):
cons,inst = data
self.cons_data.clear()
self.inst_data.clear()
self.cons_data.update(cons)
self.inst_data.update(inst)
return True
## packing/unpacking of id's
def pack_references(self, data, **attributes):
'''converts object data into reference id's'''
if data.__class__ is __builtin__.tuple:
return __builtin__.tuple(self.store(item, **attributes) for item in data)
elif data.__class__ is __builtin__.dict:
return { self.store(k, **attributes) : self.store(v, **attributes) for k, v in data.items() }
elif data.__class__ is __builtin__.list:
# a list contains multiple packed objects
return [self.pack_references(item, **attributes) for item in data]
return data
def unpack_references(self, data, **attributes):
'''converts packed references into objects'''
if data.__class__ is __builtin__.tuple:
return __builtin__.tuple(self.fetch(item, **attributes) for item in data)
elif data.__class__ is __builtin__.dict:
return { self.fetch(k, **attributes) : self.fetch(v, **attributes) for k, v in data.items() }
elif data.__class__ is __builtin__.list:
return [self.unpack_references(item, **attributes) for item in data]
return data
def identify(self, object):
return id(object)
# unique id generator for .identify if id is not guaranteed to be unique (python 2.6?)
#if not hasattr(self, '__identity'):
# self.__identity = []
#if object in self.__identity:
# return self.__identity.index(object)
#self.__identity.append(object)
#return self.identify(object)
def __getitem__(self, name):
return self.identify(name)
### stashing/fetching of objects
def store(self, object, **attributes):
identity = self.identify(object)
if identity in self.store_cache:
return identity
cls = self.clsbyinstance(object)
if False: # XXX: if we want to make the module and name part of the protocol. (for assistance with attributes)
# get naming info
modulename,name = getattr(object,'__module__',None),getattr(object,'__name__',None)
fullname = ('%s.%s'% (modulename,name)) if modulename else name
# attribute[ignore=list of types,exclude=list of names]
if (cls.__name__ in __builtin__.set(attributes.get('ignore',()))) or \
(fullname in __builtin__.set(attributes.get('exclude',()))):
cls = partial
# attribute[local=list of names]
if name in __builtin__.set(attributes.get('local',())):
cls = module
# store constructor info
data = cls.p_constructor(object,**attributes)
self.store_cache.add(identity)
data = self.pack_references(data,**attributes)
self.cons_data[identity] = cls.id,data
# self.cons_data[identity] = cls.id,(modulename,name),data # XXX: for attributes by name
# recurse into instance data
data = cls.p_instance(object,**attributes)
data = self.pack_references(data,**attributes)
self.inst_data[identity] = cls.id,data
return identity
def fetch(self, identity, **attributes):
if identity in self.fetch_cache:
return self.fetch_cache[identity]
# unpack constructor
# _,(modulename,name),data = self.cons_data[identity] # XXX: for attributes by name
_, data = self.cons_data[identity]
cls, data = self.clsbyid(_), self.unpack_references(data, **attributes)
if False: # XXX: attributes
# naming info
fullname = ('%s.%s'% (modulename,name)) if modulename else name
# attribute[ignore=list of types,exclude=list of names]
if (cls.__name__ in __builtin__.set(attributes.get('ignore',()))) or \
(fullname in __builtin__.set(attributes.get('exclude',()))):
cls = partial
instance = partial.new()
self.fetch_cache[identity] = instance
return instance
# attribute[local=list of names]
if name in __builtin__.set(attributes.get('local',())):
cls = module
# create an instance of packed object
instance = cls.u_constructor(data, **attributes)
self.fetch_cache[identity] = instance
# update instance with packed attributes
_,data = self.inst_data[identity]
cls, data = self.clsbyid(_), self.unpack_references(data, **attributes)
_ = cls.u_instance(instance, data, **attributes)
assert instance is _, '%s.fetch(%d) : constructed instance is different from updated instance'% (__builtin__.object.__repr__(self), identity)
return instance
class __type__(__builtin__.object):
'''
This base class is used to help register an instance of a type. Once
identifying the type of an instance, the class will be responsible for
returning any attributes that are necessary to re-construct or
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
import msrest.serialization
from ._time_series_insights_client_enums import *
class AccessPolicyCreateOrUpdateParameters(msrest.serialization.Model):
"""AccessPolicyCreateOrUpdateParameters.
:param principal_object_id: The objectId of the principal in Azure Active Directory.
:type principal_object_id: str
:param description: A description of the access policy.
:type description: str
:param roles: The list of roles the principal is assigned on the environment.
:type roles: list[str or ~azure.mgmt.timeseriesinsights.models.AccessPolicyRole]
"""
_attribute_map = {
'principal_object_id': {'key': 'properties.principalObjectId', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'roles': {'key': 'properties.roles', 'type': '[str]'},
}
def __init__(
self,
*,
principal_object_id: Optional[str] = None,
description: Optional[str] = None,
roles: Optional[List[Union[str, "AccessPolicyRole"]]] = None,
**kwargs
):
super(AccessPolicyCreateOrUpdateParameters, self).__init__(**kwargs)
self.principal_object_id = principal_object_id
self.description = description
self.roles = roles
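# A hypothetical construction sketch (the role string is assumed to be a
# valid AccessPolicyRole value; the ids are invented for illustration):
#
#   params = AccessPolicyCreateOrUpdateParameters(
#       principal_object_id="00000000-0000-0000-0000-000000000000",
#       description="Read-only analysts",
#       roles=["Reader"],
#   )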
class AccessPolicyListResponse(msrest.serialization.Model):
"""The response of the List access policies operation.
:param value: Result of the List access policies operation.
:type value: list[~azure.mgmt.timeseriesinsights.models.AccessPolicyResource]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[AccessPolicyResource]'},
}
def __init__(
self,
*,
value: Optional[List["AccessPolicyResource"]] = None,
**kwargs
):
super(AccessPolicyListResponse, self).__init__(**kwargs)
self.value = value
class Resource(msrest.serialization.Model):
"""Time Series Insights resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class AccessPolicyResource(Resource):
"""An access policy is used to grant users and applications access to the environment. Roles are assigned to service principals in Azure Active Directory. These roles define the actions the principal can perform through the Time Series Insights data plane APIs.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param principal_object_id: The objectId of the principal in Azure Active Directory.
:type principal_object_id: str
:param description: A description of the access policy.
:type description: str
:param roles: The list of roles the principal is assigned on the environment.
:type roles: list[str or ~azure.mgmt.timeseriesinsights.models.AccessPolicyRole]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'principal_object_id': {'key': 'properties.principalObjectId', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'roles': {'key': 'properties.roles', 'type': '[str]'},
}
def __init__(
self,
*,
principal_object_id: Optional[str] = None,
description: Optional[str] = None,
roles: Optional[List[Union[str, "AccessPolicyRole"]]] = None,
**kwargs
):
super(AccessPolicyResource, self).__init__(**kwargs)
self.principal_object_id = principal_object_id
self.description = description
self.roles = roles
class AccessPolicyUpdateParameters(msrest.serialization.Model):
"""AccessPolicyUpdateParameters.
:param description: A description of the access policy.
:type description: str
:param roles: The list of roles the principal is assigned on the environment.
:type roles: list[str or ~azure.mgmt.timeseriesinsights.models.AccessPolicyRole]
"""
_attribute_map = {
'description': {'key': 'properties.description', 'type': 'str'},
'roles': {'key': 'properties.roles', 'type': '[str]'},
}
def __init__(
self,
*,
description: Optional[str] = None,
roles: Optional[List[Union[str, "AccessPolicyRole"]]] = None,
**kwargs
):
super(AccessPolicyUpdateParameters, self).__init__(**kwargs)
self.description = description
self.roles = roles
class ResourceProperties(msrest.serialization.Model):
"""Properties that are common to all tracked resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provisioning_state: Provisioning state of the resource. Possible values include:
"Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
:ivar creation_time: The time the resource was created.
:vartype creation_time: ~datetime.datetime
"""
_validation = {
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(ResourceProperties, self).__init__(**kwargs)
self.provisioning_state = None
self.creation_time = None
class EventSourceCommonProperties(ResourceProperties):
"""Properties of the event source.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provisioning_state: Provisioning state of the resource. Possible values include:
"Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
:ivar creation_time: The time the resource was created.
:vartype creation_time: ~datetime.datetime
:param timestamp_property_name: The event property that will be used as the event source's
timestamp. If a value isn't specified for timestampPropertyName, or if null or empty-string is
specified, the event creation time will be used.
:type timestamp_property_name: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'timestamp_property_name': {'key': 'timestampPropertyName', 'type': 'str'},
}
def __init__(
self,
*,
timestamp_property_name: Optional[str] = None,
**kwargs
):
super(EventSourceCommonProperties, self).__init__(**kwargs)
self.timestamp_property_name = timestamp_property_name
class AzureEventSourceProperties(EventSourceCommonProperties):
"""Properties of an event source that reads events from an event broker in Azure.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar provisioning_state: Provisioning state of the resource. Possible values include:
"Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
:ivar creation_time: The time the resource was created.
:vartype creation_time: ~datetime.datetime
:param timestamp_property_name: The event property that will be used as the event source's
timestamp. If a value isn't specified for timestampPropertyName, or if null or empty-string is
specified, the event creation time will be used.
:type timestamp_property_name: str
:param event_source_resource_id: Required. The resource id of the event source in Azure
Resource Manager.
:type event_source_resource_id: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
'event_source_resource_id': {'required': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'timestamp_property_name': {'key': 'timestampPropertyName', 'type': 'str'},
'event_source_resource_id': {'key': 'eventSourceResourceId', 'type': 'str'},
}
def __init__(
self,
*,
event_source_resource_id: str,
timestamp_property_name: Optional[str] = None,
**kwargs
):
super(AzureEventSourceProperties, self).__init__(timestamp_property_name=timestamp_property_name, **kwargs)
self.event_source_resource_id = event_source_resource_id
class CloudErrorBody(msrest.serialization.Model):
"""Describes a particular API error with an error code and a message.
:param code: An error code that describes the error condition more precisely than an HTTP
status code. Can be used to programmatically handle specific error cases.
:type code: str
:param message: A message that describes the error in detail and provides debugging
information.
:type message: str
:param target: The target of the particular error (for example, the name of the property in
error).
:type target: str
:param details: Contains nested errors that are related to this error.
:type details: list[~azure.mgmt.timeseriesinsights.models.CloudErrorBody]
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[CloudErrorBody]'},
}
def __init__(
self,
*,
code: Optional[str] = None,
message: Optional[str] = None,
target: Optional[str] = None,
details: Optional[List["CloudErrorBody"]] = None,
**kwargs
):
super(CloudErrorBody, self).__init__(**kwargs)
self.code = code
self.message = message
self.target = target
self.details = details
class CreateOrUpdateTrackedResourceProperties(msrest.serialization.Model):
"""Properties required to create any resource tracked by Azure Resource Manager.
All required parameters must be populated in order to send to Azure.
:param location: Required. The location of the resource.
:type location: str
:param tags: A set of tags. Key-value pairs of additional properties for the resource.
:type tags: dict[str, str]
"""
_validation = {
'location': {'required': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(CreateOrUpdateTrackedResourceProperties, self).__init__(**kwargs)
self.location = location
self.tags = tags
class EnvironmentCreateOrUpdateParameters(CreateOrUpdateTrackedResourceProperties):
"""Parameters supplied to the CreateOrUpdate Environment operation.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: Gen1EnvironmentCreateOrUpdateParameters, Gen2EnvironmentCreateOrUpdateParameters.
All required parameters must be populated in order to send to Azure.
:param location: Required. The location of the resource.
:type location: str
:param tags: A set of tags. Key-value pairs of additional properties for the resource.
and isinstance(self.payload, IPv6ExtHdrHopByHop):
nh = self.payload.nh
if self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt):
foundhao = None
for o in self.payload.options:
if isinstance(o, HAO):
foundhao = o
if foundhao:
nh = self.payload.nh # XXX what if another extension follows ?
ss = foundhao.hoa
if conf.checkIPsrc and conf.checkIPaddr:
sd = inet_pton(socket.AF_INET6, sd)
ss = inet_pton(socket.AF_INET6, self.src)
return struct.pack("B",nh)+self.payload.hashret()
else:
return struct.pack("B", nh)+self.payload.hashret()
def answers(self, other):
if not isinstance(other, IPv6): # self is reply, other is request
return False
if conf.checkIPaddr:
ss = inet_pton(socket.AF_INET6, self.src)
sd = inet_pton(socket.AF_INET6, self.dst)
os = inet_pton(socket.AF_INET6, other.src)
od = inet_pton(socket.AF_INET6, other.dst)
# request was sent to a multicast address (other.dst)
# Check reply destination addr matches request source addr (i.e
# sd == os) except when reply is multicasted too
# XXX test mcast scope matching ?
if in6_ismaddr(other.dst):
if in6_ismaddr(self.dst):
if ((od == sd) or
(in6_isaddrllallnodes(self.dst) and in6_isaddrllallservers(other.dst))):
return self.payload.answers(other.payload)
return False
if (os == sd):
return self.payload.answers(other.payload)
return False
elif (sd != os): # or ss != od): <- removed for ICMP errors
return False
if self.nh == 58 and isinstance(self.payload, _ICMPv6) and self.payload.type < 128:
# ICMPv6 Error message -> generated by IPv6 packet
# Note : at the moment, we jump the ICMPv6 specific class
# to call answers() method of erroneous packet (over
# initial packet). There can be cases where an ICMPv6 error
# class could implement a specific answers method that perform
# a specific task. Currently, don't see any use ...
return self.payload.payload.answers(other)
elif other.nh == 0 and isinstance(other.payload, IPv6ExtHdrHopByHop):
return self.payload.answers(other.payload.payload)
elif other.nh == 44 and isinstance(other.payload, IPv6ExtHdrFragment):
return self.payload.answers(other.payload.payload)
elif other.nh == 43 and isinstance(other.payload, IPv6ExtHdrRouting):
return self.payload.answers(other.payload.payload) # Buggy if self.payload is an IPv6ExtHdrRouting
elif other.nh == 60 and isinstance(other.payload, IPv6ExtHdrDestOpt):
return self.payload.payload.answers(other.payload.payload)
elif self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt): # BU in reply to BRR, for instance
return self.payload.payload.answers(other.payload)
else:
if (self.nh != other.nh):
return False
return self.payload.answers(other.payload)
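# Hedged illustration of the request/reply matching above (addresses and
# upper layer invented for the example):
#
#   req = IPv6(src="2001:db8::1", dst="2001:db8::2")/ICMPv6EchoRequest()
#   rep = IPv6(src="2001:db8::2", dst="2001:db8::1")/ICMPv6EchoReply()
#   rep.answers(req)   # True: reply src matches request dst, payloads match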
conf.neighbor.register_l3(Ether, IPv6, lambda l2,l3: getmacbyip6(l3.dst))
class IPerror6(IPv6):
name = "IPv6 in ICMPv6"
def answers(self, other):
if not isinstance(other, IPv6):
return False
sd = inet_pton(socket.AF_INET6, self.dst)
ss = inet_pton(socket.AF_INET6, self.src)
od = inet_pton(socket.AF_INET6, other.dst)
os = inet_pton(socket.AF_INET6, other.src)
# Make sure that the ICMPv6 error is related to the packet scapy sent
if isinstance(self.underlayer, _ICMPv6) and self.underlayer.type < 128:
# find upper layer for self (possible citation)
selfup = self.payload
while selfup is not None and isinstance(selfup, _IPv6ExtHdr):
selfup = selfup.payload
# find upper layer for other (initial packet). Also look for RH
otherup = other.payload
request_has_rh = False
while otherup is not None and isinstance(otherup, _IPv6ExtHdr):
if isinstance(otherup, IPv6ExtHdrRouting):
request_has_rh = True
otherup = otherup.payload
if ((ss == os and sd == od) or # <- Basic case
(ss == os and request_has_rh)): # <- Request has a RH :
# don't check dst address
# Let's deal with possible MSS Clamping
if (isinstance(selfup, TCP) and
isinstance(otherup, TCP) and
selfup.options != otherup.options): # seems clamped
# Save fields modified by MSS clamping
old_otherup_opts = otherup.options
old_otherup_cksum = otherup.chksum
old_otherup_dataofs = otherup.dataofs
old_selfup_opts = selfup.options
old_selfup_cksum = selfup.chksum
old_selfup_dataofs = selfup.dataofs
# Nullify them
otherup.options = []
otherup.chksum = 0
otherup.dataofs = 0
selfup.options = []
selfup.chksum = 0
selfup.dataofs = 0
# Test it and save result
s1 = str(selfup)
s2 = str(otherup)
l = min(len(s1), len(s2))
res = s1[:l] == s2[:l]
# recall saved values
otherup.options = old_otherup_opts
otherup.chksum = old_otherup_cksum
otherup.dataofs = old_otherup_dataofs
selfup.options = old_selfup_opts
selfup.chksum = old_selfup_cksum
selfup.dataofs = old_selfup_dataofs
return res
s1 = str(selfup)
s2 = str(otherup)
l = min(len(s1), len(s2))
return s1[:l] == s2[:l]
return False
def mysummary(self):
return Packet.mysummary(self)
#############################################################################
#############################################################################
### Upper Layer Checksum computation ###
#############################################################################
#############################################################################
class PseudoIPv6(Packet): # IPv6 Pseudo-header for checksum computation
name = "Pseudo IPv6 Header"
fields_desc = [ IP6Field("src", "::"),
IP6Field("dst", "::"),
ShortField("uplen", None),
BitField("zero", 0, 24),
ByteField("nh", 0) ]
def in6_chksum(nh, u, p):
"""
Performs IPv6 Upper Layer checksum computation. Provided parameters are:
- 'nh' : value of upper layer protocol
- 'u' : upper layer instance (TCP, UDP, ICMPv6*, ...). Instance must be
provided with all under layers (IPv6 and all extension headers,
for example)
- 'p' : the payload of the upper layer provided as a string
The function operates by filling a pseudo header class instance (PseudoIPv6)
with
- the Next Header value
- the address of the _final_ destination (if some Routing Header with a
non-zero segleft field is present in underlayer classes, the last address is used.)
- the address of _real_ source (basically the source address of an
IPv6 class instance available in the underlayer or the source address
in HAO option if some Destination Option header found in underlayer
includes this option).
- the length is the length of provided payload string ('p')
"""
ph6 = PseudoIPv6()
ph6.nh = nh
rthdr = 0
hahdr = 0
final_dest_addr_found = 0
while u != None and not isinstance(u, IPv6):
if (isinstance(u, IPv6ExtHdrRouting) and
u.segleft != 0 and len(u.addresses) != 0 and
final_dest_addr_found == 0):
rthdr = u.addresses[-1]
final_dest_addr_found = 1
elif (isinstance(u, IPv6ExtHdrDestOpt) and (len(u.options) == 1) and
isinstance(u.options[0], HAO)):
hahdr = u.options[0].hoa
u = u.underlayer
if u is None:
warning("No IPv6 underlayer to compute checksum. Leaving null.")
return 0
if hahdr:
ph6.src = hahdr
else:
ph6.src = u.src
if rthdr:
ph6.dst = rthdr
else:
ph6.dst = u.dst
ph6.uplen = len(p)
ph6s = str(ph6)
return checksum(ph6s+p)
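# A hedged usage sketch (layers as defined elsewhere in this module; 17 is
# the UDP next-header value, and the built UDP layer carries its underlayers):
#
#   pkt = IPv6(src="2001:db8::1", dst="2001:db8::2")/UDP()/"payload"
#   csum = in6_chksum(17, pkt[UDP], str(pkt[UDP]))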
#############################################################################
#############################################################################
### Extension Headers ###
#############################################################################
#############################################################################
# Inherited by all extension header classes
class _IPv6ExtHdr(_IPv6GuessPayload, Packet):
name = 'Abstract IPV6 Option Header'
aliastypes = [IPv6, IPerror6] # TODO ...
#################### IPv6 options for Extension Headers #####################
_hbhopts = { 0x00: "Pad1",
0x01: "PadN",
0x04: "Tunnel Encapsulation Limit",
0x05: "Router Alert",
0x06: "Quick-Start",
0xc2: "Jumbo Payload",
0xc9: "Home Address Option" }
class _OTypeField(ByteEnumField):
"""
Modified ByteEnumField that displays information regarding the IPv6 option
based on its option type value (what should be done by nodes that process
the option if they do not understand it ...)
It is used by Jumbo, Pad1, PadN, RouterAlert, HAO options
"""
pol = {0x00: "00: skip",
0x40: "01: discard",
0x80: "10: discard+ICMP",
0xC0: "11: discard+ICMP not mcast"}
enroutechange = {0x00: "0: Don't change en-route",
0x20: "1: May change en-route" }
def i2repr(self, pkt, x):
s = self.i2s.get(x, repr(x))
polstr = self.pol[(x & 0xC0)]
enroutechangestr = self.enroutechange[(x & 0x20)]
return "%s [%s, %s]" % (s, polstr, enroutechangestr)
class HBHOptUnknown(Packet): # IPv6 Hop-By-Hop Option
name = "Scapy6 Unknown Option"
fields_desc = [_OTypeField("otype", 0x01, _hbhopts),
FieldLenField("optlen", None, length_of="optdata", fmt="B"),
StrLenField("optdata", "",
length_from = lambda pkt: pkt.optlen) ]
def alignment_delta(self, curpos): # By default, no alignment requirement
"""
As specified in section 4.2 of RFC 2460, every option has
an alignment requirement usually expressed as xn+y, meaning
the Option Type must appear at an integer multiple of x octets
from the start of the header, plus y octets.
This function is given the current position from the
start of the header and returns the required padding length.
"""
return 0
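# Worked example: the Home Address option has an 8n+6 requirement (RFC 6275),
# so at curpos == 2 its alignment_delta would return (6 - 2) % 8 == 4 padding
# octets; HBHOptUnknown's default above requires none.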
class Pad1(Packet): # IPv6 Hop-By-Hop Option
name = "Pad1"
fields_desc = [ _OTypeField("otype", 0x00, _hbhopts) ]
def alignment_delta(self, curpos): # No alignment requirement
return 0
class PadN(Packet): # IPv6 Hop-By-Hop Option
name = "PadN"
fields_desc = [_OTypeField("otype", 0x01, _hbhopts),
FieldLenField("optlen", None, length_of="optdata", fmt="B"),
StrLenField("optdata", "",
length_from = lambda pkt: pkt.optlen)]
def alignment_delta(self, curpos): # No alignment requirement
return 0
class RouterAlert(Packet): # RFC 2711 - IPv6 Hop-By-Hop Option
name = "Router Alert"
fields_desc = [_OTypeField("otype", 0x05, _hbhopts),
ByteField("optlen", 2),
ShortEnumField("value", None,
{ 0: "Datagram contains a MLD message",
1: "Datagram contains RSVP message",
2: "Datagram contains an Active Network message" }) ]
# TODO : Check IANA has not defined new values for value field of RouterAlertOption
# TODO : now that we have that option, we should do something in
# filename: tests/test_customers.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import json
import unittest
from six.moves.urllib.parse import parse_qs, urlparse
from optimove.client import Client
from optimove.constants import DEFAULT_URL
import responses
from tests.constants import HEADERS
from tests.helpers import login_callback, token_required
"""Callbacks"""
@token_required
def get_customers_by_action_callback(request):
params = parse_qs(urlparse(request.url).query)
if params['RecipientGroupID'][0] == '1' and params['ActionID'][0] == '2' and params['Date'][0] == '2015-06-24':
if 'CustomerAttributes' in params and 'CustomerAttributesDelimiter' in params:
if params['CustomerAttributes'][0] == 'Alias;Country' and params['CustomerAttributesDelimiter'][0] == ',':
resp_body = [
{'CustomerID': '231342', 'CustomerAttributes': ['BuddyZZ', 'UK']},
{'CustomerID': '943157', 'CustomerAttributes': ['Pax65', 'DE']}
]
else:
return 404, HEADERS['text'], 'Not Found'
else:
resp_body = [
{'CustomerID': '231342'},
{'CustomerID': '943157'}
]
return 200, HEADERS['json'], json.dumps(resp_body)
else:
return 404, HEADERS['text'], 'Not Found'
@token_required
def get_customer_actions_by_target_group_callback(request):
params = parse_qs(urlparse(request.url).query)
if params['TargetGroupID'][0] == '2' and params['Date'][0] == '2015-12-24':
if 'CustomerAttributes' in params and 'CustomerAttributesDelimiter' in params:
if params['CustomerAttributes'][0] == 'Alias;Country' and params['CustomerAttributesDelimiter'][0] == ',':
resp_body = [
{'CustomerID': 'A1342', 'ActionID': 49, 'ChannelID': 6, 'CustomerAttributes': ['BuddyZZ', 'UK']},
{'CustomerID': 'G4650', 'ActionID': 49, 'ChannelID': 6, 'CustomerAttributes': ['Mighty6', 'ES']}
]
else:
return 404, HEADERS['text'], 'Not Found'
else:
resp_body = [
{'CustomerID': 'A1342', 'ActionID': 49, 'ChannelID': 6},
{'CustomerID': 'G4650', 'ActionID': 49, 'ChannelID': 6}
]
return 200, HEADERS['json'], json.dumps(resp_body)
else:
return 404, HEADERS['text'], 'Not Found'
@token_required
def get_customer_one_time_actions_by_date_callback(request):
params = parse_qs(urlparse(request.url).query)
if params['Date'][0] == '2015-06-24':
if 'CustomerAttributes' in params and 'CustomerAttributesDelimiter' in params:
if params['CustomerAttributes'][0] == 'Alias;Country' and params['CustomerAttributesDelimiter'][0] == ',':
resp_body = [
{'CustomerID': '8D871', 'ActionID': 19, 'ChannelID': 3, 'CustomerAttributes': ['Yo999', 'UA']},
{'CustomerID': '8U76T', 'ActionID': 19, 'ChannelID': 3, 'CustomerAttributes': ['Neto2', 'TR']}
]
else:
return 404, HEADERS['text'], 'Not Found'
else:
resp_body = [
{'CustomerID': '8D871', 'ActionID': 19, 'ChannelID': 3},
{'CustomerID': '8U76T', 'ActionID': 19, 'ChannelID': 3}
]
return 200, HEADERS['json'], json.dumps(resp_body)
else:
return 404, HEADERS['text'], 'Not Found'
@token_required
def get_target_group_changers_callback(request):
params = parse_qs(urlparse(request.url).query)
if params['StartDate'][0] == '2015-09-01' and params['EndDate'][0] == '2015-09-30':
if 'CustomerAttributes' in params and 'CustomerAttributesDelimiter' in params:
if params['CustomerAttributes'][0] == 'Alias;Country' and params['CustomerAttributesDelimiter'][0] == ',':
resp_body = [
{'CustomerID': '231342', 'InitialTargetGroupID': 4, 'FinalTargetGroupID': 12,
'CustomerAttributes': ['BuddyZZ', 'UK']},
{'CustomerID': '931342', 'InitialTargetGroupID': -1, 'FinalTargetGroupID': 8,
'CustomerAttributes': ['Pax65', 'DE']}
]
else:
return 404, HEADERS['text'], 'Not Found'
else:
resp_body = [
{'CustomerID': '231342', 'InitialTargetGroupID': 4, 'FinalTargetGroupID': 12},
{'CustomerID': '931342', 'InitialTargetGroupID': -1, 'FinalTargetGroupID': 8}
]
return 200, HEADERS['json'], json.dumps(resp_body)
else:
return 404, HEADERS['text'], 'Not Found'
@token_required
def get_customer_attribute_changers_callback(request):
params = parse_qs(urlparse(request.url).query)
if params['StartDate'][0] == '2015-01-30' and params['EndDate'][0] == '2015-01-31' and \
params['ChangedCustomerAttributes'][0] == 'OptimailUnsubscribed':
if 'CustomerAttributes' in params and 'CustomerAttributesDelimiter' in params:
if params['CustomerAttributes'][0] == 'Alias;Country' and params['CustomerAttributesDelimiter'][0] == ',':
resp_body = [
{'CustomerID': '231342', 'InitialCustomerAttributes': 'NULL',
'FinalCustomerAttributes': 'SuperBrand', 'CustomerAttributes': ['BuddyZZ', 'UK']},
{'CustomerID': '231343', 'InitialCustomerAttributes': 'SuperBrand',
'FinalCustomerAttributes': 'Super Brand, Mega Brand', 'CustomerAttributes': ['Pax65', 'DE']}
]
else:
return 404, HEADERS['text'], 'Not Found'
else:
resp_body = [
{'CustomerID': '231342', 'InitialCustomerAttributes': 'NULL',
'FinalCustomerAttributes': 'SuperBrand'},
{'CustomerID': '231343', 'InitialCustomerAttributes': 'SuperBrand',
'FinalCustomerAttributes': 'Super Brand, Mega Brand'}
]
return 200, HEADERS['json'], json.dumps(resp_body)
else:
return 404, HEADERS['text'], 'Not Found'
@token_required
def get_customer_future_values_callback(request):
params = parse_qs(urlparse(request.url).query)
if 'LifecycleStageID' in params and 'CustomerAttributes' not in params and 'CustomerAttributeValue' not in params:
if params['LifecycleStageID'][0] == '6':
resp_body = [
{'CustomerID': '631942', 'FutureValue': 342.65},
{'CustomerID': '257938', 'FutureValue': 102.33}
]
return 200, HEADERS['json'], json.dumps(resp_body)
else:
return 404, HEADERS['text'], 'Not Found'
elif 'LifecycleStageID' not in params and 'CustomerAttributes' in params and 'CustomerAttributeValue' in params:
if params['CustomerAttributes'][0] == 'Country' and params['CustomerAttributeValue'][0] == 'Australia':
resp_body = [
{'CustomerID': '631942', 'FutureValue': 342.65},
{'CustomerID': '257938', 'FutureValue': 102.33}
]
return 200, HEADERS['json'], json.dumps(resp_body)
else:
return 404, HEADERS['text'], 'Not Found'
else:
return 404, HEADERS['text'], 'Not Found'
@token_required
def get_customer_last_action_executed_callback(request):
params = parse_qs(urlparse(request.url).query)
if params['CustomerID'][0] == '2872732':
resp_body = {
'ActionID': 428,
'Date': '2014-12-24',
'Duration': 7,
'TargetGroupID': 15
}
return 200, HEADERS['json'], json.dumps(resp_body)
else:
return 404, HEADERS['text'], 'Not Found'
@token_required
def get_customer_action_details_by_date_callback(request):
params = parse_qs(urlparse(request.url).query)
if params['Date'][0] == '2014-12-10':
resp_body = [
{'CustomerID': '231342', 'RecipientGroupID': 1, 'ActionID': 42, 'ChannelID': 10},
{'CustomerID': '940023', 'RecipientGroupID': 2, 'ActionID': 42, 'ChannelID': 10}
]
return 200, HEADERS['json'], json.dumps(resp_body)
else:
return 404, HEADERS['text'], 'Not Found'
@token_required
def get_customers_action_ended_by_date_callback(request):
params = parse_qs(urlparse(request.url).query)
if params['Date'][0] == '2014-12-10':
resp_body = [
{'CustomerID': '231342', 'ActionID': 428, 'ChannelID': 4, 'Date': '2014-12-03',
'Duration': 7, 'TargetGroupID': 15},
{'CustomerID': '981002', 'ActionID': 22, 'ChannelID': 9, 'Date': '2014-12-05',
'Duration': 5, 'TargetGroupID': 34}
]
return 200, HEADERS['json'], json.dumps(resp_body)
else:
return 404, HEADERS['text'], 'Not Found'
@token_required
def get_customer_send_details_by_campaign_callback(request):
params = parse_qs(urlparse(request.url).query)
if params['CampaignID'][0] == '65874':
if 'IncludeTemplateIDs' in params:
if params['IncludeTemplateIDs'][0] == 'True':
resp_body = [
{'CustomerID': '231342', 'ChannelID': 4, 'ScheduledTime': '2015-12-30 10:30:00',
'SendID': 'HG65D', 'TemplateID': 12},
{'CustomerID': '917251', 'ChannelID': 4, 'ScheduledTime': '2015-12-30 11:45:00',
'SendID': 'HG65E', 'TemplateID': 7}
]
return 200, HEADERS['json'], json.dumps(resp_body)
resp_body = [
{'CustomerID': '231342', 'ChannelID': 4, 'ScheduledTime': '2015-12-30 10:30:00', 'SendID': 'HG65D'},
{'CustomerID': '917251', 'ChannelID': 4, 'ScheduledTime': '2015-12-30 11:45:00', 'SendID': 'HG65E'}
]
return 200, HEADERS['json'], json.dumps(resp_body)
else:
return 404, HEADERS['text'], 'Not Found'
@token_required
def get_customer_send_details_by_channel_callback(request):
params = parse_qs(urlparse(request.url).query)
if params['ChannelID'][0] == '5' and params['CampaignID'][0] == '65874':
if 'CustomerAttributes' in params and 'CustomerAttributesDelimiter' in params:
if params['CustomerAttributes'][0] == 'Email;Country' and params['CustomerAttributesDelimiter'][0] == ',':
resp_body = [
{'CustomerID': '96134', 'TemplateID': 14, 'ScheduledTime': '2016-08-30 10:00:00',
'CustomerAttributes': ['<EMAIL>', 'US']},
{'CustomerID': '13482', 'TemplateID': 14, 'ScheduledTime': '2016-08-30 10:00:00',
'CustomerAttributes': ['<EMAIL>', 'UK']}
]
else:
return 404, HEADERS['text'], 'Not Found'
else:
resp_body = [
{'CustomerID': '96134', 'TemplateID': 14, 'ScheduledTime': '2016-08-30 10:00:00'},
{'CustomerID': '13482', 'TemplateID': 14, 'ScheduledTime': '2016-08-30 10:00:00'}
]
return 200, HEADERS['json'], json.dumps(resp_body)
else:
return 404, HEADERS['text'], 'Not Found'
@token_required
def get_currently_targeted_customers_callback(request):
resp_body = [
{'CustomerID': '231342', 'CampaignID': 428, 'ActionID': 4, 'StartDate': '2015-12-30', 'EndDate': '2016-01-02'},
{'CustomerID': '745611', 'CampaignID': 370, 'ActionID': 18, 'StartDate': '2015-12-30', 'EndDate': '2016-01-03'}
]
return 200, HEADERS['json'], json.dumps(resp_body)
@token_required
def get_canceled_campaign_customers_callback(request):
params = parse_qs(urlparse(request.url).query)
if params['CampaignID'][0] == '6574':
resp_body = [
{'CustomerID': '231342', 'ActionID': 4, 'PromoCode': 'A7Bonus'},
{'CustomerID': '463516', 'ActionID': 4, 'PromoCode': 'A7Bonus'}
]
return 200, HEADERS['json'], json.dumps(resp_body)
else:
return 404, HEADERS['text'], 'Not Found'
"""Tests"""
class TestCustomers(unittest.TestCase):
@responses.activate
def test_get_customers_by_action(self):
responses.add_callback(
responses.POST,
DEFAULT_URL + '/current/general/login',
callback=login_callback,
content_type='application/json'
)
responses.add_callback(
responses.GET,
DEFAULT_URL + '/current/customers/GetCustomersByAction',
callback=get_customers_by_action_callback,
content_type='application/json'
)
client = Client('username', 'password')
data = client.customers.get_customers_by_action(1, 2, '2015-06-24')
self.assertEqual(data, ['231342', '943157'])
@responses.activate
def test_get_customers_by_action_with_empty_date(self):
responses.add_callback(
responses.POST,
DEFAULT_URL + '/current/general/login',
callback=login_callback,
content_type='application/json'
)
responses.add_callback(
responses.GET,
DEFAULT_URL + '/current/customers/GetCustomersByAction',
callback=get_customers_by_action_callback,
content_type='application/json'
)
client = Client('username', 'password')
self.assertRaises(Exception, client.customers.get_customers_by_action, 1, 2, None)
@responses.activate
def test_get_customers_by_action_with_wrong_date(self):
responses.add_callback(
responses.POST,
DEFAULT_URL + '/current/general/login',
callback=login_callback,
content_type='application/json'
)
responses.add_callback(
responses.GET,
DEFAULT_URL + '/current/customers/GetCustomersByAction',
callback=get_customers_by_action_callback,
content_type='application/json'
)
client = Client('username', 'password')
data = client.customers.get_customers_by_action(1, 2, '3015-06-24')
self.assertFalse(data)
@responses.activate
def test_get_customers_by_action_with_attributes(self):
responses.add_callback(
responses.POST,
DEFAULT_URL + '/current/general/login',
callback=login_callback,
content_type='application/json'
)
responses.add_callback(
responses.GET,
DEFAULT_URL + '/current/customers/GetCustomersByAction',
callback=get_customers_by_action_callback,
content_type='application/json'
)
client = Client('username', 'password')
data = client.customers.get_customers_by_action(1, 2, '2015-06-24', ['Alias', 'Country'], ',')
self.assertEqual(data, [
{
'customer_id': '231342',
'attributes': {
'Alias': 'BuddyZZ',
'Country': 'UK'
}
},
{
'customer_id': '943157',
'attributes': {
'Alias': 'Pax65',
'Country': 'DE'
}
}
])
@responses.activate
def test_get_customers_by_action_with_wrong_delimiter(self):
responses.add_callback(
responses.POST,
DEFAULT_URL + '/current/general/login',
callback=login_callback,
content_type='application/json'
)
responses.add_callback(
responses.GET,
DEFAULT_URL + '/current/customers/GetCustomersByAction',
callback=get_customers_by_action_callback,
content_type='application/json'
)
client = Client('username', 'password')
self.assertRaises(Exception, client.customers.get_customers_by_action, 1, 2, '2015-06-24',
['Alias', 'Country'], '/')
@responses.activate
def test_get_customer_actions_by_target_group(self):
responses.add_callback(
responses.POST,
DEFAULT_URL + '/current/general/login',
callback=login_callback,
content_type='application/json'
)
responses.add_callback(
responses.GET,
DEFAULT_URL + '/current/customers/GetCustomerActionsByTargetGroup',
callback=get_customer_actions_by_target_group_callback,
content_type='application/json'
)
client = Client('username', 'password')
data = client.customers.get_customer_actions_by_target_group(2, '2015-12-24')
self.assertEqual(data, [
{
'customer_id': 'A1342',
'action_id': 49,
'channel_id': 6
},
{
'customer_id': 'G4650',
'action_id': 49,
'channel_id': 6
}
])
@responses.activate
def test_get_customer_actions_by_target_group_with_empty_date(self):
responses.add_callback(
responses.POST,
DEFAULT_URL + '/current/general/login',
callback=login_callback,
content_type='application/json'
)
responses.add_callback(
responses.GET,
DEFAULT_URL + '/current/customers/GetCustomerActionsByTargetGroup',
callback=get_customer_actions_by_target_group_callback,
content_type='application/json'
)
client = Client('username', 'password')
self.assertRaises(Exception, client.customers.get_customer_actions_by_target_group, 2, None)
@responses.activate
def test_get_customer_actions_by_target_group_with_wrong_date(self):
responses.add_callback(
responses.POST,
DEFAULT_URL + '/current/general/login',
callback=login_callback,
content_type='application/json'
)
responses.add_callback(
responses.GET,
DEFAULT_URL + '/current/customers/GetCustomerActionsByTargetGroup',
callback=get_customer_actions_by_target_group_callback,
content_type='application/json'
)
client = Client('username', 'password')
data = client.customers.get_customer_actions_by_target_group(2, '3015-12-24')
self.assertFalse(data)
@responses.activate
def test_get_customer_actions_by_target_group_with_attributes(self):
responses.add_callback(
responses.POST,
DEFAULT_URL + '/current/general/login',
callback=login_callback,
content_type='application/json'
)
responses.add_callback(
responses.GET,
DEFAULT_URL + '/current/customers/GetCustomerActionsByTargetGroup',
callback=get_customer_actions_by_target_group_callback,
content_type='application/json'
)
client = Client('username', 'password')
data = client.customers.get_customer_actions_by_target_group(2, '2015-12-24',
include_control_group=True,
attributes=['Alias', 'Country'],
delimiter=',')
self.assertEqual(data, [
{
'customer_id': 'A1342',
'action_id': 49,
'channel_id': 6,
'attributes': {
'Alias': 'BuddyZZ',
'Country': 'UK'
}
},
{
'customer_id': 'G4650',
'action_id': 49,
'channel_id': 6,
'attributes': {
'Alias': 'Mighty6',
'Country': 'ES'
}
}
])
@responses.activate
def test_get_customer_actions_by_target_group_with_wrong_delimiter(self):
responses.add_callback(
responses.POST,
DEFAULT_URL + '/current/general/login',
callback=login_callback,
content_type='application/json'
)
responses.add_callback(
responses.GET,
DEFAULT_URL + '/current/customers/GetCustomerActionsByTargetGroup',
callback=get_customer_actions_by_target_group_callback,
content_type='application/json'
)
client = Client('username', 'password')
self.assertRaises(Exception, client.customers.get_customer_actions_by_target_group, 2, '2015-12-24',
True, ['Alias', 'Country'], '/')
@responses.activate
def test_get_customer_one_time_actions_by_date(self):
responses.add_callback(
responses.POST,
DEFAULT_URL + '/current/general/login',
callback=login_callback,
content_type='application/json'
)
responses.add_callback(
responses.GET,
DEFAULT_URL + '/current/customers/GetCustomerOneTimeActionsByDate',
callback=get_customer_one_time_actions_by_date_callback,
content_type='application/json'
)
client = Client('username', 'password')
data = client.customers.get_customer_one_time_actions_by_date('2015-06-24')
self.assertEqual(data, [
{
'customer_id': '8D871',
'action_id': 19,
'channel_id': 3
| |
"iso2": "NG",
"admin_name": "Borno",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Augie",
"lat": "12.8903",
"lng": "4.5996",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kebbi",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Sarkin Pawa",
"lat": "10.0209",
"lng": "7.1124",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Niger",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Kujama",
"lat": "10.4577",
"lng": "7.6381",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kaduna",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Keana",
"lat": "8.1472",
"lng": "8.7960",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Nasarawa",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Mabudi",
"lat": "8.7304",
"lng": "9.7851",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Plateau",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Nafada",
"lat": "11.0960",
"lng": "11.3326",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Gombe",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Ningi",
"lat": "11.0784",
"lng": "9.5689",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Bauchi",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Ajaka",
"lat": "7.1742",
"lng": "6.8253",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kogi",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Paiko",
"lat": "9.4369",
"lng": "6.6336",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Niger",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Dekina",
"lat": "7.6897",
"lng": "7.0438",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kogi",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Oguma",
"lat": "7.8963",
"lng": "7.0623",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kogi",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Lemu",
"lat": "9.3964",
"lng": "6.0279",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Niger",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Agaie",
"lat": "9.0085",
"lng": "6.3182",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Niger",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Katcha",
"lat": "8.7608",
"lng": "6.3120",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Niger",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Isa",
"lat": "13.2007",
"lng": "6.4049",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Sokoto",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Gbajimba",
"lat": "7.8204",
"lng": "8.8595",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Benue",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Jada",
"lat": "8.7568",
"lng": "12.1554",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Adamawa",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Mayo-Belwa",
"lat": "9.0542",
"lng": "12.0579",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Adamawa",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Gombi",
"lat": "10.1676",
"lng": "12.7368",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Adamawa",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Sakaba",
"lat": "11.0651",
"lng": "5.5961",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kebbi",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Aliade",
"lat": "7.2963",
"lng": "8.4828",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Benue",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Askira",
"lat": "10.6509",
"lng": "12.9088",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Borno",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Ago-Amodu",
"lat": "8.6231",
"lng": "3.6142",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Oyo",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Saki",
"lat": "8.6676",
"lng": "3.3939",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Oyo",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Fika",
"lat": "11.2867",
"lng": "11.3077",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Yobe",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Kamba",
"lat": "11.8517",
"lng": "3.6548",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kebbi",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Mokwa",
"lat": "9.2948",
"lng": "5.0541",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Niger",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Pantisawa",
"lat": "8.9450",
"lng": "11.5118",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Taraba",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Aboh",
"lat": "5.5478",
"lng": "6.5259",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Delta",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Takum",
"lat": "7.2553",
"lng": "9.9855",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Taraba",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Okada",
"lat": "6.7349",
"lng": "5.3945",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Edo",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Bukkuyum",
"lat": "12.1372",
"lng": "5.4682",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Zamfara",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Gummi",
"lat": "12.1448",
"lng": "5.1178",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Zamfara",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Boje",
"lat": "6.2843",
"lng": "8.9206",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "<NAME>",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Damagum",
"lat": "11.6782",
"lng": "11.3352",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Yobe",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Gubio",
"lat": "12.4975",
"lng": "12.7809",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Borno",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Wara",
"lat": "10.2288",
"lng": "4.6236",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kebbi",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Jakusko",
"lat": "12.3709",
"lng": "10.7737",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Yobe",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Mahuta",
"lat": "11.5534",
"lng": "4.9814",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kebbi",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Abejukolo",
"lat": "7.8681",
"lng": "7.5091",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kogi",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Hong",
"lat": "10.2330",
"lng": "12.9281",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Adamawa",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Wamba",
"lat": "8.9415",
"lng": "8.6032",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Nasarawa",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Azare",
"lat": "10.5258",
"lng": "12.2911",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Borno",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Garaku",
"lat": "8.8472",
"lng": "8.1302",
"country": "Nigeria",
"iso2": "NG", | |
required and takes effect only when ListenerSync is set to off.
:param pulumi.Input[str] sticky_session_type: Mode for handling the cookie. If `sticky_session` is "on", it is mandatory. Otherwise, it will be ignored. Valid values are `insert` and `server`. `insert` means it is inserted from Server Load Balancer; `server` means the Server Load Balancer learns from the backend server.
:param pulumi.Input[int] unhealthy_threshold: Threshold determining the result of the health check is fail. It is required when `health_check` is on. Valid value range: [1-10]. Default to 3.
:param pulumi.Input[str] url: URL of the forwarding rule. It must be 2-80 characters in length. Only letters a-z, numbers 0-9,
and the characters '-' '/' '?' '%' '#' and '&' are allowed. URLs must start with the character '/', but cannot be '/' alone.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RuleArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A forwarding rule is configured on an `HTTP`/`HTTPS` listener and is used to forward requests to the backend servers in one specified virtual backend server group.
You can add forwarding rules to a listener to forward requests based on the domain names or the URL in the request.
> **NOTE:** One virtual backend server group can be attached to multiple forwarding rules.
> **NOTE:** At least one "Domain" or "Url" must be specified when creating a new rule.
> **NOTE:** A rule with the same 'Domain' and 'Url' cannot be created more than once within one listener.
> **NOTE:** Rules can only be created on an `HTTP` or `HTTPS` listener.
> **NOTE:** Only a rule's virtual server group can be modified.
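## Example Usage
A minimal sketch (the domain, URL, port and the referenced load balancer and
server group resources are illustrative, not taken from this module):
```python
import pulumi_alicloud as alicloud

# `load_balancer` and `server_group` are assumed to be resources created elsewhere.
rule = alicloud.slb.Rule("example",
    load_balancer_id=load_balancer.id,
    frontend_port=80,
    domain="www.test.com",
    url="/image",
    server_group_id=server_group.id)
```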
## Import
Load balancer forwarding rule can be imported using the id, e.g.
```sh
$ pulumi import alicloud:slb/rule:Rule example rule-abc123456
```
:param str resource_name: The name of the resource.
:param RuleArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RuleArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cookie: Optional[pulumi.Input[str]] = None,
cookie_timeout: Optional[pulumi.Input[int]] = None,
delete_protection_validation: Optional[pulumi.Input[bool]] = None,
domain: Optional[pulumi.Input[str]] = None,
frontend_port: Optional[pulumi.Input[int]] = None,
health_check: Optional[pulumi.Input[str]] = None,
health_check_connect_port: Optional[pulumi.Input[int]] = None,
health_check_domain: Optional[pulumi.Input[str]] = None,
health_check_http_code: Optional[pulumi.Input[str]] = None,
health_check_interval: Optional[pulumi.Input[int]] = None,
health_check_timeout: Optional[pulumi.Input[int]] = None,
health_check_uri: Optional[pulumi.Input[str]] = None,
healthy_threshold: Optional[pulumi.Input[int]] = None,
listener_sync: Optional[pulumi.Input[str]] = None,
load_balancer_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
scheduler: Optional[pulumi.Input[str]] = None,
server_group_id: Optional[pulumi.Input[str]] = None,
sticky_session: Optional[pulumi.Input[str]] = None,
sticky_session_type: Optional[pulumi.Input[str]] = None,
unhealthy_threshold: Optional[pulumi.Input[int]] = None,
url: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RuleArgs.__new__(RuleArgs)
__props__.__dict__["cookie"] = cookie
__props__.__dict__["cookie_timeout"] = cookie_timeout
__props__.__dict__["delete_protection_validation"] = delete_protection_validation
__props__.__dict__["domain"] = domain
if frontend_port is None and not opts.urn:
raise TypeError("Missing required property 'frontend_port'")
__props__.__dict__["frontend_port"] = frontend_port
__props__.__dict__["health_check"] = health_check
__props__.__dict__["health_check_connect_port"] = health_check_connect_port
__props__.__dict__["health_check_domain"] = health_check_domain
__props__.__dict__["health_check_http_code"] = health_check_http_code
__props__.__dict__["health_check_interval"] = health_check_interval
__props__.__dict__["health_check_timeout"] = health_check_timeout
__props__.__dict__["health_check_uri"] = health_check_uri
__props__.__dict__["healthy_threshold"] = healthy_threshold
__props__.__dict__["listener_sync"] = listener_sync
if load_balancer_id is None and not opts.urn:
raise TypeError("Missing required property 'load_balancer_id'")
__props__.__dict__["load_balancer_id"] = load_balancer_id
__props__.__dict__["name"] = name
__props__.__dict__["scheduler"] = scheduler
if server_group_id is None and not opts.urn:
raise TypeError("Missing required property 'server_group_id'")
__props__.__dict__["server_group_id"] = server_group_id
__props__.__dict__["sticky_session"] = sticky_session
__props__.__dict__["sticky_session_type"] = sticky_session_type
__props__.__dict__["unhealthy_threshold"] = unhealthy_threshold
__props__.__dict__["url"] = url
super(Rule, __self__).__init__(
'alicloud:slb/rule:Rule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
cookie: Optional[pulumi.Input[str]] = None,
cookie_timeout: Optional[pulumi.Input[int]] = None,
delete_protection_validation: Optional[pulumi.Input[bool]] = None,
domain: Optional[pulumi.Input[str]] = None,
frontend_port: Optional[pulumi.Input[int]] = None,
health_check: Optional[pulumi.Input[str]] = None,
health_check_connect_port: Optional[pulumi.Input[int]] = None,
health_check_domain: Optional[pulumi.Input[str]] = None,
health_check_http_code: Optional[pulumi.Input[str]] = None,
health_check_interval: Optional[pulumi.Input[int]] = None,
health_check_timeout: Optional[pulumi.Input[int]] = None,
health_check_uri: Optional[pulumi.Input[str]] = None,
healthy_threshold: Optional[pulumi.Input[int]] = None,
listener_sync: Optional[pulumi.Input[str]] = None,
load_balancer_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
scheduler: Optional[pulumi.Input[str]] = None,
server_group_id: Optional[pulumi.Input[str]] = None,
sticky_session: Optional[pulumi.Input[str]] = None,
sticky_session_type: Optional[pulumi.Input[str]] = None,
unhealthy_threshold: Optional[pulumi.Input[int]] = None,
url: Optional[pulumi.Input[str]] = None) -> 'Rule':
"""
Get an existing Rule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
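A minimal sketch of the lookup (the rule id is illustrative):
```python
rule = alicloud.slb.Rule.get("existing", id="rule-abc123456")
```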
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cookie: The cookie configured on the server. It is mandatory when `sticky_session` is "on" and `sticky_session_type` is "server". Otherwise, it will be ignored. Valid value: a string compliant with RFC 2965, 1-200 characters long. It may only contain ASCII letters and digits; commas, semicolons and spaces are not allowed, and it cannot start with $.
:param pulumi.Input[int] cookie_timeout: Cookie timeout. It is mandatory when `sticky_session` is "on" and `sticky_session_type` is "insert". Otherwise, it will be ignored. Valid value range: [1-86400] in seconds.
:param pulumi.Input[bool] delete_protection_validation: Checking DeleteProtection of SLB instance before deleting. If true, this resource will not be deleted when its SLB instance has DeleteProtection enabled. Default to false.
:param pulumi.Input[str] domain: Domain name of the forwarding rule. It can contain letters a-z, numbers 0-9, hyphens (-), and periods (.),
and wildcard characters. The following two domain name formats are supported:
- Standard domain name: www.test.com
- Wildcard domain name: *.test.com. The wildcard (*) must be the first character, in the format (*.)
:param pulumi.Input[int] frontend_port: The listener frontend port which is used to launch the new forwarding rule. Valid range: [1-65535].
:param pulumi.Input[str] health_check: Whether to enable health check. Valid values are `on` and `off`. TCP and UDP listeners' HealthCheck is always on, so it will be ignored when launching a TCP or UDP listener. This parameter is required and takes effect only when ListenerSync is set to off.
:param pulumi.Input[int] health_check_connect_port: Port used for health check. Valid value range: [1-65535]. Default to "None", which means the backend server port is used.
:param pulumi.Input[str] health_check_domain: Domain name used for health check. When it is used to launch a TCP listener, `health_check_type` must be "http". Its length is limited to 1-80 and only characters such as letters, digits, '-' and '.' are allowed. When it is not set or empty, Server Load Balancer uses the private network IP address of each backend server as the Domain used for health check.
:param pulumi.Input[str] health_check_http_code: Regular health check HTTP status code. Multiple codes are segmented by ','. It is required when `health_check` is on. Default to `http_2xx`. Valid values are: `http_2xx`, `http_3xx`, `http_4xx` and `http_5xx`.
:param pulumi.Input[int] health_check_interval: Time interval of health checks. It is required when `health_check` is on. Valid value range: [1-50] in seconds. Default to 2.
:param pulumi.Input[int] health_check_timeout: Maximum timeout of each health check response. It is required when `health_check` is on. Valid value range: [1-300] in seconds. Default to 5. Note: If `health_check_timeout` < `health_check_interval`, it will be replaced by `health_check_interval`.
:param pulumi.Input[str] health_check_uri: URI used for health check. When it is used to launch a TCP listener, `health_check_type` must be "http". Its length is limited to 1-80 and it must start with /. Only characters such as letters, digits, '-', '/', '.', '%', '?', '#' and '&' are allowed.
:param pulumi.Input[int] healthy_threshold: Threshold determining the result of the health check is success. It is required when `health_check` is on. Valid value range: [1-10]. Default to 3.
:param pulumi.Input[str] listener_sync: Indicates whether a forwarding rule inherits the settings of health check, session persistence, and scheduling algorithm from a listener. Default to on.
:param pulumi.Input[str] load_balancer_id: The Load Balancer ID which is used to launch the | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""
FIXME:
sometimes you have to chown -R user:user ~/.theano or run with sudo the
first time after reboot, otherwise you get errors
CommandLineHelp:
python -m wbia_cnn --tf netrun <networkmodel>
--dataset, --ds = <dstag>:<subtag>
dstag is the main dataset name (eg PZ_MTEST), subtag are parameters to
modify (max_examples=3)
--weights, -w = \|new\|<checkpoint_tag>\|<dstag>:<checkpoint_tag> (default: <checkpoint_tag>)
new will initialize clean weights.
a checkpoint tag will try to match a saved model state in the history.
can load weights from an external dataset.
<checkpoint_tag> defaults to current
--arch, -a = <archtag>
model architecture tag (eg siaml2_128, siam2stream, viewpoint)
--device = <processor>
sets theano device flag to a processor like gpu0, gpu1, or cpu0
"""
import logging
from wbia_cnn import models
from wbia_cnn import ingest_data
from wbia_cnn import experiments
import utool as ut
import sys
print, rrr, profile = ut.inject2(__name__)
logger = logging.getLogger()
# This is more of a history tag
CHECKPOINT_TAG_ALIAS = {
'current': None,
'': None,
}
# second level of alias indirection
# This is more of a dataset tag
DS_TAG_ALIAS2 = {
'flankhack': "dict(acfg_name='ctrl:pername=None,excluderef=False,contributor_contains=FlankHack', colorspace='gray', db='PZ_Master1')",
'pzmtest-bgr': "PZ_MTEST;dict(colorspace='bgr', controlled=True, max_examples=None, num_top=None)", # NOQA
'pzmtest': "PZ_MTEST;dict(colorspace='gray', controlled=True, max_examples=None, num_top=None)", # NOQA
'gz-gray': "GZ_ALL;dict(colorspace='gray', controlled=False, max_examples=None, num_top=None)", # NOQA
'liberty': "liberty;dict(detector='dog', pairs=250000)",
'combo': 'combo_vdsujffw',
'timectrl_pzmaster1': "PZ_Master1;dict(acfg_name='timectrl', colorspace='gray', min_featweight=0.8)", # NOQA
'pzm2': "PZ_Master1;dict(acfg_name='timectrl:pername=None', colorspace='gray', min_featweight=0.8)", # NOQA
'pzm3': "PZ_Master1;dict(acfg_name=None, colorspace='gray', controlled=True, min_featweight=0.8)",
#'pzm3' : "PZ_Master1;dict(acfg_name='default:is_known=True,qmin_pername=2,view=primary,species=primary,minqual=ok', colorspace='gray', min_featweight=0.8)", # NOQA
'pzm4': "PZ_Master1;dict(acfg_name='default:is_known=True,qmin_pername=2,view=primary,species=primary,minqual=ok', colorspace='gray', min_featweight=0.8)",
}
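# Illustrative resolution (hypothetical command-line value): an alias passed
# via --ds expands through DS_TAG_ALIAS2 before being handed to ingest_data,
# e.g. DS_TAG_ALIAS2.get('pzmtest', 'pzmtest') returns
# "PZ_MTEST;dict(colorspace='gray', controlled=True, max_examples=None, num_top=None)";
# unknown tags fall through unchanged.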
def netrun():
r"""
CommandLine:
# --- UTILITY
python -m wbia_cnn --tf get_juction_dpath --show
# --- DATASET BUILDING ---
# Build Dataset Aliases
python -m wbia_cnn --tf netrun --db PZ_MTEST --acfg ctrl --ensuredata --show
python -m wbia_cnn --tf netrun --db PZ_Master1 --acfg timectrl --ensuredata
python -m wbia_cnn --tf netrun --db PZ_Master1 --acfg timectrl:pername=None --ensuredata
python -m wbia_cnn --tf netrun --db PZ_Master1 --acfg timectrl:pername=None --ensuredata
python -m wbia_cnn --tf netrun --db mnist --ensuredata --show
python -m wbia_cnn --tf netrun --db mnist --ensuredata --show --datatype=category
python -m wbia_cnn --tf netrun --db mnist --ensuredata --show --datatype=siam-patch
python -m wbia_cnn --tf netrun --db PZ_Master1 --acfg ctrl:pername=None,excluderef=False,contributor_contains=FlankHack --ensuredata --show --datatype=siam-part
# Parts based datasets
python -m wbia_cnn --tf netrun --db PZ_MTEST --acfg ctrl --datatype=siam-part --ensuredata --show
# Patch based dataset (big one)
python -m wbia_cnn --tf netrun --db PZ_Master1 --acfg default:is_known=True,qmin_pername=2,view=primary,species=primary,minqual=ok --ensuredata --show --vtd
python -m wbia_cnn --tf netrun --ds pzm4 --weights=new --arch=siaml2_128 --train --monitor
python -m wbia_cnn --tf netrun --ds pzm4 --arch=siaml2_128 --test
python -m wbia_cnn --tf netrun --ds pzm4 --arch=siaml2_128 --veryverbose --no-flask
# --- TRAINING ---
python -m wbia_cnn --tf netrun --db PZ_Master1 --acfg default:is_known=True,qmin_pername=2,view=primary,species=primary,minqual=ok --weights=new --arch=siaml2_128 --train --monitor
python -m wbia_cnn --tf netrun --ds timectrl_pzmaster1 --acfg ctrl:pername=None,excluderef=False,contributor_contains=FlankHack --train --weights=new --arch=siaml2_128 --monitor # NOQA
python -m wbia_cnn --tf netrun --ds timectrl_pzmaster1 --acfg ctrl:pername=None,excluderef=False --train --weights=new --arch=siaml2_128 --monitor # NOQA
python -m wbia_cnn --tf netrun --ds pzmtest --weights=new --arch=siaml2_128 --train --monitor --DEBUG_AUGMENTATION
python -m wbia_cnn --tf netrun --ds pzmtest --weights=new --arch=siaml2_128 --train --monitor
python -m wbia_cnn --tf netrun --ds flankhack --weights=new --arch=siaml2_partmatch --train --monitor --learning_rate=.00001
python -m wbia_cnn --tf netrun --ds flankhack --weights=new --arch=siam_deepfaceish --train --monitor --learning_rate=.00001
# Different ways to train mnist
python -m wbia_cnn --tf netrun --db mnist --weights=new --arch=mnist_siaml2 --train --monitor --datatype=siam-patch
python -m wbia_cnn --tf netrun --db mnist --weights=new --arch=mnist-category --train --monitor --datatype=category
# --- INITIALIZED-TRAINING ---
python -m wbia_cnn --tf netrun --ds pzmtest --arch=siaml2_128 --weights=gz-gray:current --train --monitor
# --- TESTING ---
python -m wbia_cnn --tf netrun --db liberty --weights=liberty:current --arch=siaml2_128 --test
python -m wbia_cnn --tf netrun --db PZ_Master0 --weights=combo:current --arch=siaml2_128 --testall
Example:
>>> # DISABLE_DOCTEST
>>> from wbia_cnn.netrun import * # NOQA
>>> netrun()
>>> ut.show_if_requested()
"""
ut.colorprint('[netrun] NET RUN', 'red')
requests, hyperparams, tags = parse_args()
ds_tag = tags['ds_tag']
datatype = tags['datatype']
extern_ds_tag = tags['extern_ds_tag']
arch_tag = tags['arch_tag']
checkpoint_tag = tags['checkpoint_tag']
# ----------------------------
# Choose the main dataset
ut.colorprint('[netrun] Ensuring Dataset', 'yellow')
dataset = ingest_data.grab_dataset(ds_tag, datatype)
if extern_ds_tag is not None:
extern_dpath = ingest_data.get_extern_training_dpath(extern_ds_tag)
else:
extern_dpath = None
logger.info('dataset.training_dpath = %r' % (dataset.training_dpath,))
logger.info('Dataset Alias Key: %r' % (dataset.alias_key,))
logger.info(
'Current Dataset Tag: %r'
% (ut.invert_dict(DS_TAG_ALIAS2).get(dataset.alias_key, None),)
)
if requests['ensuredata']:
# Print alias key that maps to this particular dataset
if ut.show_was_requested():
interact_ = dataset.interact() # NOQA
return
logger.info('...exiting')
sys.exit(1)
# ----------------------------
# Choose model architecture
# TODO: data will need to return info about number of labels in viewpoint models
# Specify model architecture
ut.colorprint('[netrun] Architecture Specification', 'yellow')
if arch_tag == 'siam2stream':
model = models.SiameseCenterSurroundModel(
data_shape=dataset.data_shape,
training_dpath=dataset.training_dpath,
**hyperparams
)
elif arch_tag.startswith('siam'):
model = models.SiameseL2(
data_shape=dataset.data_shape,
arch_tag=arch_tag,
training_dpath=dataset.training_dpath,
**hyperparams
)
elif arch_tag == 'mnist-category':
model = models.MNISTModel(
data_shape=dataset.data_shape,
output_dims=dataset.output_dims,
arch_tag=arch_tag,
training_dpath=dataset.training_dpath,
**hyperparams
)
pass
else:
raise ValueError('Unknown arch_tag=%r' % (arch_tag,))
ut.colorprint('[netrun] Initialize architecture', 'yellow')
model.init_arch()
# ----------------------------
# Choose weight initialization
ut.colorprint('[netrun] Setting weights', 'yellow')
if checkpoint_tag == 'new':
ut.colorprint('[netrun] * Initializing new weights', 'lightgray')
model.reinit_weights()
else:
checkpoint_tag = model.resolve_fuzzy_checkpoint_pattern(
checkpoint_tag, extern_dpath
)
ut.colorprint(
'[netrun] * Resolving weights checkpoint_tag=%r' % (checkpoint_tag,),
'lightgray',
)
if extern_dpath is not None:
model.load_extern_weights(dpath=extern_dpath, checkpoint_tag=checkpoint_tag)
elif model.has_saved_state(checkpoint_tag=checkpoint_tag):
model.load_model_state(checkpoint_tag=checkpoint_tag)
else:
model_state_fpath = model.get_model_state_fpath(checkpoint_tag=checkpoint_tag)
logger.info('model_state_fpath = %r' % (model_state_fpath,))
ut.checkpath(model_state_fpath, verbose=True)
logger.info(
'Known checkpoints are: ' + ut.repr3(model.list_saved_checkpoints())
)
raise ValueError(
('Unresolved weight init: ' 'checkpoint_tag=%r, extern_ds_tag=%r')
% (
checkpoint_tag,
extern_ds_tag,
)
)
# logger.info('Model State:')
# logger.info(model.get_state_str())
# ----------------------------
if not model.is_train_state_initialized():
ut.colorprint('[netrun] Need to initialize training state', 'yellow')
X_train, y_train = dataset.subset('train')
model.ensure_data_params(X_train, y_train)
# Run Actions
if requests['train']:
ut.colorprint('[netrun] Training Requested', 'yellow')
# parse training arguments
config = ut.argparse_dict(
dict(
era_size=15,
max_epochs=1200,
rate_decay=0.8,
)
)
model.monitor_config.update(**config)
X_train, y_train = dataset.subset('train')
X_valid, y_valid = dataset.subset('valid')
model.fit(X_train, y_train, X_valid=X_valid, y_valid=y_valid)
elif requests['test']:
# assert model.best_results['epoch'] is not None
ut.colorprint('[netrun] Test Requested', 'yellow')
if requests['testall']:
ut.colorprint('[netrun] * Testing on all data', 'lightgray')
X_test, y_test = dataset.subset('all')
flat_metadata = dataset.subset_metadata('all')
else:
ut.colorprint('[netrun] * Testing on test subset', 'lightgray')
X_test, y_test = dataset.subset('test')
flat_metadata = dataset.subset_metadata('test')
data, labels = X_test, y_test
dataname = dataset.alias_key
experiments.test_siamese_performance(model, data, labels, flat_metadata, dataname)
else:
if not ut.get_argflag('--cmd'):
raise ValueError('nothing here. need to train or test')
if requests['publish']:
ut.colorprint('[netrun] Publish Requested', 'yellow')
publish_dpath = ut.truepath('~/Dropbox/IBEIS')
published_model_state = ut.unixjoin(
publish_dpath, model.arch_tag + '_model_state.pkl'
)
ut.copy(model.get_model_state_fpath(), published_model_state)
ut.view_directory(publish_dpath)
logger.info(
'You need to get the dropbox link and '
'register it into the appropriate file'
)
# pip install dropbox
# https://www.dropbox.com/developers/core/start/python
# import dropbox # need oauth
# client.share('/myfile.txt', short_url=False)
# https://wildbookiarepository.azureedge.net/models/siaml2_128_model_state.pkl
if ut.get_argflag('--cmd'):
ut.embed()
def parse_args():
ds_default = None
arch_default = 'siaml2_128'
weights_tag_default = None
# Test values
if False:
ds_default = 'liberty'
weights_tag_default = 'current'
assert ut.inIPython()
# Parse commandline args
ds_tag = ut.get_argval(('--dataset', '--ds'), type_=str, default=ds_default)
datatype = ut.get_argval(('--datatype', '--dt'), type_=str, default='siam-patch')
arch_tag = ut.get_argval(('--arch', '-a'), default=arch_default)
weights_tag = ut.get_argval(
('--weights', '-w'), type_=str, default=weights_tag_default
)
# Incorporate new config stuff?
# NEW = False
# if NEW:
# default_dstag_cfg = {
# 'ds': 'PZ_MTEST',
# 'mode': 'patches',
# 'arch': arch_default
# }
# named_defaults_dict = {
# '': default_dstag_cfg
# }
# ut.parse_argv_cfg('dstag', named_defaults_dict=named_defaults_dict)
hyperparams = ut.argparse_dict(
{
#'batch_size': 128,
'batch_size': 256,
#'learning_rate': .0005,
'learning_rate': 0.1,
'momentum': 0.9,
#'weight_decay': 0.0005,
'weight_decay': 0.0001,
},
alias_dict={
'weight_decay': ['decay'],
'learning_rate': ['learn_rate'],
},
)
requests = ut.argparse_dict(
{
'train': False,
'test': False,
'testall': False,
'publish': False,
'ensuredata': False,
}
)
requests['test'] = requests['test'] or requests['testall']
# breakup weights tag into extern_ds and checkpoint
if weights_tag is not None and ':' in weights_tag:
extern_ds_tag, checkpoint_tag = weights_tag.split(':')
else:
extern_ds_tag = None
checkpoint_tag = weights_tag
# resolve aliases
ds_tag = DS_TAG_ALIAS2.get(ds_tag, ds_tag)
extern_ds_tag = DS_TAG_ALIAS2.get(extern_ds_tag, extern_ds_tag)
checkpoint_tag = CHECKPOINT_TAG_ALIAS.get(checkpoint_tag, checkpoint_tag)
tags = {
'ds_tag': ds_tag,
'extern_ds_tag': extern_ds_tag,
'checkpoint_tag': checkpoint_tag,
'arch_tag': arch_tag,
'datatype': datatype,
}
ut.colorprint('[netrun] * ds_tag=%r' % (ds_tag,), 'lightgray')
ut.colorprint('[netrun] * arch_tag=%r' % (arch_tag,), 'lightgray')
ut.colorprint('[netrun] * extern_ds_tag=%r' % (extern_ds_tag,), 'lightgray')
ut.colorprint('[netrun] * checkpoint_tag=%r' % (checkpoint_tag,), 'lightgray')
return requests, hyperparams, tags
def merge_ds_tags(ds_alias_list):
r"""
CommandLine:
python -m wbia_cnn --tf merge_ds_tags --alias-list gz-gray girm pzmtest nnp
TODO:
http://stackoverflow.com/questions/18492273/combining-hdf5-files
Example:
>>> # DISABLE_DOCTEST
>>> from wbia_cnn.netrun import * # NOQA
>>> ds_alias_list = ut.get_argval('--alias-list', type_=list, default=[])
>>> result = merge_ds_tags(ds_alias_list)
>>> print(result)
"""
ds_tag_list = [DS_TAG_ALIAS2.get(ds_tag, ds_tag) for ds_tag in ds_alias_list]
dataset_list = [ingest_data.grab_siam_dataset(ds_tag) for ds_tag in ds_tag_list]
merged_dataset = ingest_data.merge_datasets(dataset_list)
logger.info(merged_dataset.alias_key)
return merged_dataset
if | |
"""
Mohr Circle - explains how Mohr's Circle can be used to identify different stresses
"""
###################################
# Imports
###################################
# general imports
from math import pi, sin, cos, atan
import yaml
# bokeh imports
from bokeh.io import curdoc
from bokeh.plotting import figure
from bokeh.layouts import column, row, layout
from bokeh.models import ColumnDataSource, Slider, Arrow, OpenHead, NormalHead, Button
from bokeh.models.markers import Square, Circle
from bokeh.models.glyphs import Wedge, Rect
from bokeh.models.layouts import Spacer
# internal imports
from MC_figure_sources import fig1, fig2, fig3
from MC_helper_functions import (
calculate_radius_and_center,
clear_arrow_source, clear_rect_source,
add_layouts_from_list, add_glyphs_from_list
)
from MC_constants import (
initial_MohrNx, initial_MohrNxz, initial_MohrNz, initial_MohrP_Angle,
initial_Neta, initial_Nzeta, initial_Nzetaeta,
initial_centreX, initial_radius, initial_rleft_z,
c_blue, c_orange, c_green
)
# latex integration
from os.path import dirname, join, split, abspath
import sys, inspect
currentdir = dirname(abspath(inspect.getfile(inspect.currentframe())))
parentdir = join(dirname(currentdir), "shared/")
sys.path.insert(0,parentdir)
from latex_support import LatexDiv, LatexLabel, LatexLabelSet, LatexSlider, LatexLegend
###################################
# DataSources
###################################
std_lang = 'en'
flags = ColumnDataSource(data=dict(show=['off'], lang=[std_lang]))
strings = yaml.safe_load(open('Mohr_Circle/static/strings.json', encoding='utf-8'))
figure_texts = ColumnDataSource(data=dict(text=["\\text{Normal Stresses}\\ \\sigma_x, \\sigma_z",
"\\text{Shear Stresses}\\ \\tau_{xz}",
"\\text{Normal Stresses}\\ \\sigma_{\overline{x}}, \\sigma_{\overline{z}}",
"\\text{Shear Stresses}\\ \\tau_{\overline{xz}}",
"Stress State A",
"Mohr's Circle",
"Stress State B",
]))
### Initial Values
radius = initial_radius
centreX = initial_centreX
Neta = initial_Neta
Nzeta = initial_Nzeta
Nzetaeta = initial_Nzetaeta
rleft_x = centreX-radius
rleft_z = initial_rleft_z
# global variables
global_vars = dict(MohrNx=initial_MohrNx, MohrNz=initial_MohrNz, MohrNxz=initial_MohrNxz,
MohrP_Angle=0, MohrNzeta_zero_angles=[], MohrNeta_zero_angles=[],
alpha=0, MohrChangeShow=-1)
### Initializing variables
f1 = fig1()
f2 = fig2()
f3 = fig3()
###################################
# Callback Functions
###################################
def reset():
Normal_X_slider.disabled = False
Normal_Z_slider.disabled = False
Tangential_XZ_slider.disabled = False
Plane_Angle_slider.disabled = True
draw_button.disabled = False
show_button.disabled = True
global_vars["MohrChangeShow"] = -1
global_vars["alpha"] = 0
global_vars["MohrP_Angle"] = initial_MohrP_Angle
global_vars["MohrNx"] = initial_MohrNx
global_vars["MohrNz"] = initial_MohrNz
global_vars["MohrNxz"] = initial_MohrNxz
radius = initial_radius
centreX = initial_centreX
### Calculations
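# Mohr's circle geometry (standard result; the imported helper is expected
# to compute these): center = (sigma_x + sigma_z)/2 on the normal-stress
# axis, radius = sqrt(((sigma_x - sigma_z)/2)**2 + tau_xz**2).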
[radius, centreX, _] = calculate_radius_and_center(global_vars)
Normal_X_slider.value = 0
Normal_Z_slider.value = 0
Tangential_XZ_slider.value = 0
Plane_Angle_slider.value = 0
### Figure 1, Reset values for arrows:
sources_to_reset = [f1.NxP_arrow_source, f1.NxN_arrow_source, f1.NzP_arrow_source, f1.NzN_arrow_source,
f1.Nxz1_arrow_source, f1.Nxz2_arrow_source, f1.Nxz3_arrow_source, f1.Nxz4_arrow_source]
clear_arrow_source(sources_to_reset)
## Figure 1, Reset Rectangles:
sources_to_reset = [f1.NxP_rect_source, f1.NxN_rect_source, f1.NzP_rect_source, f1.NzN_rect_source,
f1.Nxz1_rect_source, f1.Nxz2_rect_source, f1.Nxz3_rect_source, f1.Nxz4_rect_source]
clear_rect_source(sources_to_reset)
### Figure 2, Reset Circle:
f2.reset_circle(centreX, radius, glMohrFigure2_angle_label)
## Figure 3, Reset arrows:
sources_to_reset = [f3.NzetaP_arrow_source, f3.NzetaN_arrow_source, f3.NetaP_arrow_source, f3.NetaN_arrow_source,
f3.Nzetaeta1_arrow_source, f3.Nzetaeta2_arrow_source, f3.Nzetaeta3_arrow_source, f3.Nzetaeta4_arrow_source]
clear_arrow_source(sources_to_reset)
## Figure 3, Reset rectangles:
sources_to_reset = [f3.NzetaP_rect_source, f3.NzetaN_rect_source, f3.NetaP_rect_source, f3.NetaN_rect_source,
f3.Nzetaeta1_rect_source, f3.Nzetaeta2_rect_source, f3.Nzetaeta3_rect_source, f3.Nzetaeta4_rect_source]
clear_rect_source(sources_to_reset)
## Figure 3, Reset rotating plane and axis:
f3.reset_rotating_plane()
def show():
MohrNx = global_vars["MohrNx"]
MohrNz = global_vars["MohrNz"]
MohrNxz = global_vars["MohrNxz"]
MohrChangeShow = global_vars["MohrChangeShow"]
if MohrChangeShow == 1:
[radius, centreX, rleft_x] = calculate_radius_and_center(global_vars)
rright_x = centreX+radius
## Print Labels for principal stress and direction
alpha = 180*atan(MohrNxz/(MohrNz+(-rleft_x+0.00001)))/(pi)
alpha = int(alpha+0.5)
f2.Show_Label_source.data = dict(x=[rleft_x,rright_x,centreX],
y=[0,0,0],
names=['\\sigma_{II}','\\sigma_{I}','\\sigma_{M}'])
f2.Wedge_source.data = dict(x=[rleft_x], y=[0],radius=[radius/2], sA=[atan(MohrNxz/(MohrNz+(-rleft_x+0.00001)))], eA=[0])
glMohrFigure2_angle_label.text = '\\alpha_0=' + str(alpha)
global_vars["MohrChangeShow"] = MohrChangeShow*-1
elif MohrChangeShow == -1:
f2.Wedge_source.data = dict(x=[], y=[],radius=[], sA=[], eA=[])
f2.Show_Label_source.data = dict(x=[], y=[], names =[])
glMohrFigure2_angle_label.text = ''
global_vars["MohrChangeShow"] = MohrChangeShow*-1
def draw():
MohrNx = global_vars["MohrNx"]
MohrNz = global_vars["MohrNz"]
MohrNxz = global_vars["MohrNxz"]
MohrP_Angle = global_vars["MohrP_Angle"]
Normal_X_slider.disabled = True
Normal_Z_slider.disabled = True
Tangential_XZ_slider.disabled = True
Plane_Angle_slider.disabled = False
show_button.disabled = False
draw_button.disabled = True
global_vars["MohrChangeShow"] = 1
## Calculations
[radius, centreX, _] = calculate_radius_and_center(global_vars)
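# Plane-stress transformation (standard Mohr's circle relations, matching
# the two lines below, with phi = MohrP_Angle the cut-plane angle):
#   sigma_eta    = (sx + sz)/2 - (sx - sz)/2 * cos(2*phi) - txz * sin(2*phi)
#   tau_zeta_eta = -(sx - sz)/2 * sin(2*phi) + txz * cos(2*phi)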
Neta = float(((MohrNx+MohrNz)/2)-(((MohrNx-MohrNz)/2)*cos(2*MohrP_Angle))-MohrNxz*sin(2*MohrP_Angle))
Nzetaeta = float((-(((MohrNx-MohrNz)/2)*sin(2*MohrP_Angle)))+MohrNxz*cos(2*MohrP_Angle))
## Calculate Angle for which Nzeta or Neta will be zero (sign-change-method):
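# Sample the transformed stresses at one-degree steps over [-180, 180);
# a sign change between two consecutive samples brackets an angle at
# which the stress component crosses zero.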
NZeta_List0 = [181]*360
NZeta_List1 = [181]*360
global_vars["MohrNzeta_zero_angles"] = [181]*360
Neta_List0 = [181]*360
Neta_List1 = [181]*360
global_vars["MohrNeta_zero_angles"] = [181]*360
## Nzeta:
for n in range(-180,180):
NZeta_List0[n+180] = float(((MohrNx+MohrNz)/2)+(((MohrNx-MohrNz)/2)*cos(2*-n*pi/180))+MohrNxz*sin(2*-n*pi/180))
NZeta_List1[n+180] = n
count = 0
for m in range(-180,179):
if NZeta_List0[m+180]*NZeta_List0[m+181]<0:
global_vars["MohrNzeta_zero_angles"][count]=NZeta_List1[m+180]
count = count+1
## Neta:
for n in range(-180,180):
Neta_List0[n+180] = float(((MohrNx+MohrNz)/2)-(((MohrNx-MohrNz)/2)*cos(2*-n*pi/180))-MohrNxz*sin(2*-n*pi/180))
Neta_List1[n+180] = n
count = 0
for m in range(-180,179):
if Neta_List0[m+180]*Neta_List0[m+181]<0:
global_vars["MohrNeta_zero_angles"][count]=Neta_List1[m+180]
count = count+1
##Figure 1, Draw MohrNx and keep it until reset() ist called:
if(MohrNx*0.75<0):
f1.NxP_arrow_source.data = dict(xS=[12.5-MohrNx*0.75], xE=[12.5], yS=[0], yE=[0], lW = [2])
f1.NxN_arrow_source.data = dict(xS=[-12.5+MohrNx*0.75], xE=[-12.5], yS=[0], yE=[0], lW = [2])
f1.NxP_rect_source.data = dict(x=[(25-MohrNx*0.75)/2], y=[0], w=[MohrNx*0.75-1.5], h = [13], angle=[0])
f1.NxN_rect_source.data = dict(x=[(-25+MohrNx*0.75)/2], y=[0], w=[MohrNx*0.75-1.5], h = [13], angle=[0])
elif(MohrNx*0.75==0):
clear_arrow_source( [f1.NxP_arrow_source, f1.NxN_arrow_source] )
clear_rect_source ( [f1.NxP_rect_source, f1.NxN_rect_source ] )
else:
f1.NxP_arrow_source.data = dict(xS=[12.5], xE=[12.5+MohrNx*0.75], yS=[0], yE=[0], lW = [2])
f1.NxN_arrow_source.data = dict(xS=[-12.5], xE=[-12.5-MohrNx*0.75], yS=[0], yE=[0], lW = [2])
f1.NxP_rect_source.data = dict(x=[(25+MohrNx*0.75)/2], y=[0], w=[MohrNx*0.75+1.5], h = [13], angle=[0])
f1.NxN_rect_source.data = dict(x=[(-25-MohrNx*0.75)/2], y=[0], w=[MohrNx*0.75+1.5], h = [13], angle=[0])
##Figure 1, Draw MohrNz and keep it until reset() ist called:
new = MohrNz
new = new*0.75
if(new<0):
f1.NzP_arrow_source.data = dict(xS=[0], xE=[0], yS=[12.5-new], yE=[12.5], lW = [2])
f1.NzN_arrow_source.data = dict(xS=[0], xE=[0], yS=[-12.5+new], yE=[-12.5], lW = [2])
f1.NzP_rect_source.data = dict(x=[0], y=[(25-new)/2], w=[13], h = [new-1.5], angle=[0])
f1.NzN_rect_source.data = dict(x=[0], y=[(-25+new)/2], w=[13], h = [new-1.5], angle=[0])
elif (new==0):
clear_arrow_source( [f1.NzP_arrow_source, f1.NzN_arrow_source] )
clear_rect_source ( [f1.NzP_rect_source, f1.NzN_rect_source ] )
else:
f1.NzP_arrow_source.data = dict(xS=[0], xE=[0], yS=[12.5], yE=[12.5+new], lW = [2])
f1.NzN_arrow_source.data = dict(xS=[0], xE=[0], yS=[-12.5], yE=[-12.5-new], lW = [2])
f1.NzP_rect_source.data = dict(x=[0], y=[(25+new)/2], w=[13], h = [new+1.5], angle=[0])
f1.NzN_rect_source.data = dict(x=[0], y=[(-25-new)/2], w=[13], h = [new+1.5], angle=[0])
new = MohrNxz
new=new*0.75
if(new==0):
clear_arrow_source( [f1.Nxz1_arrow_source, f1.Nxz2_arrow_source, f1.Nxz3_arrow_source, f1.Nxz4_arrow_source] )
clear_rect_source ( [f1.Nxz1_rect_source, f1.Nxz2_rect_source, f1.Nxz3_rect_source, f1.Nxz4_rect_source ] )
else:
f1.Nxz1_arrow_source.data = dict(xS=[9], xE=[9], yS=[0-(new/2)], yE=[0+(new/2)], lW = [2])
f1.Nxz2_arrow_source.data = dict(xS=[-9], xE=[-9], yS=[0+(new/2)], yE=[0-(new/2)], lW = [2])
f1.Nxz3_arrow_source.data = dict(xS=[-new/2], xE=[new/2], yS=[9], yE=[9], lW = [2])
f1.Nxz4_arrow_source.data = dict(xS=[(new/2)], xE=[-(new/2)], yS=[-9], yE=[-9], lW = [2])
f1.Nxz1_rect_source.data = dict(x=[9], y=[0], w=[0.3*new+0.5], h=[13], angle=[0])
f1.Nxz2_rect_source.data = dict(x=[-9], y=[0], w=[0.3*new+0.5], h=[13], angle=[0])
f1.Nxz3_rect_source.data = dict(x=[0], y=[9], w=[13], h=[0.3*new+0.5], angle=[0])
f1.Nxz4_rect_source.data = dict(x=[0], y=[-9], w=[13], h=[0.3*new+0.5], angle=[0])
## Figure 2, draw Mohr-Circle:
f2.Mohr_Circle_source.data = dict(x=[centreX], y=[0], radius=[radius])
f2.Wedge_source.data = dict(x=[], y=[],radius=[], sA=[], eA=[])
f2.Newplane_line_source.data = dict(x=[rleft_x,Neta,Neta], y=[rleft_z,Nzetaeta,0])
f2.OriginalPlane_line_source.data = dict(x=[rleft_x,MohrNz,MohrNz], y=[rleft_z,MohrNxz,0])
f2.Show_Label_source.data = dict(x=[], y=[], names =[])
## Figure 3, initializing:
f3.Rotating_Plane_source.data = dict(x=[0], y=[0],angle =[-MohrP_Angle],size = [75])
f3.ChangeRotatingPlane_Forces(global_vars)
f2.ChangeMohrCircle(global_vars)
f3.move_labels(MohrP_Angle)
def NormalForceX_init(attr,old,new):
## Figure 1, Present the normal forces while the Draw button hasn't been activated yet:
global_vars["MohrNx"] = new
f1.plot_normal_forces_x(new)
def NormalForceZ_init(attr,old,new):
## Figure 1, Present the Normal Forces while draw() hasn't been called yet:
global_vars["MohrNz"] = new
f1.plot_normal_forces_z(new)
def TangentialXZ_init(attr,old,new):
## Figure 1, Present the Shear Forces while draw() hasn't yet been called:
global_vars["MohrNxz"] = new
f1.plot_shear_forces(global_vars["MohrNxz"])
def changePlaneAngle(attr,old,new):
MohrNx = global_vars["MohrNx"]
MohrNz = global_vars["MohrNz"]
MohrNxz = global_vars["MohrNxz"]
global_vars["alpha"] = new
MohrP_Angle = -new*(pi/180)
## Paint Rotating Plane red if angle=alpha_0
[radius, centreX, rleft_x] = calculate_radius_and_center(global_vars)
alpha_0 = 180*atan(MohrNxz/(MohrNz+(-rleft_x+0.00001)))/(pi)
alpha_0 = int(alpha_0+0.5)
alpharepetitions = [-90, -180, 0, 90, 180]
for n in alpharepetitions:
if new == alpha_0+n:
f3.Rotating_Plane_red_source.data = dict(x=[0], y=[0], angle =[-MohrP_Angle], size = [75])
f3.Rotating_Plane_source.data = dict(x=[], y=[], angle =[], size = [] )
break
else:
f3.Rotating_Plane_source.data = dict(x=[0], y=[0], angle =[-MohrP_Angle], size = [75])
f3.Rotating_Plane_red_source.data = dict(x=[], y=[], angle =[], size = [] )
# Figure 3, Rotate Axis:
MohrP_Angle = -MohrP_Angle
f3.Rotating_Axis_X_source.stream(dict(xS=[0], yS=[0], xE=[25*cos(MohrP_Angle)], yE=[25*sin(MohrP_Angle) ]),rollover=1)
f3.Rotating_Axis_Y_source.stream(dict(xS=[0], yS=[0], xE=[-25*sin(-MohrP_Angle)], yE=[-25*cos(-MohrP_Angle)]),rollover=1)
global_vars["MohrP_Angle"] = -MohrP_Angle # /output
f2.ChangeMohrCircle(global_vars)
f3.move_labels(-MohrP_Angle)
f3.ChangeRotatingPlane_Forces(global_vars)
###################################
# Change language
###################################
def update_figures():
legend1.items = [(figure_texts.data['text'][0], [dummy_normal_1]),
(figure_texts.data['text'][1], [dummy_shear_1 ])]
legend3.items = [(figure_texts.data['text'][2], [dummy_normal_3]),
(figure_texts.data['text'][3], [dummy_shear_3 ])]
figure1.title.text = figure_texts.data['text'][4]
figure2.title.text = figure_texts.data['text'][5]
figure3.title.text = figure_texts.data['text'][6]
def changeLanguage():
[lang] = flags.data["lang"]
if lang == "en":
setDocumentLanguage('de')
elif lang == "de":
setDocumentLanguage('en')
update_figures()
def setDocumentLanguage(lang):
flags.patch( {'lang':[(0,lang)]} )
for s in strings:
if 'checkFlag' in strings[s]:
flag = flags.data[strings[s]['checkFlag']][0]
exec( (s + '=\"' + strings[s][flag][lang] + '\"').encode(encoding='utf-8') )
elif 'isCode' in strings[s] and strings[s]['isCode']:
exec( (s + '=' + strings[s][lang]).encode(encoding='utf-8') )
else:
exec( (s + '=\"' + strings[s][lang] + '\"').encode(encoding='utf-8') )
###################################
# Figures
###################################
### Figure 1, Define Geometry:
NxP_arrow_glyph = Arrow(end=OpenHead(line_color=c_orange,line_width= 2, size=5),
x_start='xS', y_start='yS', x_end='xE', y_end='yE',line_width= "lW", source=f1.NxP_arrow_source,line_color=c_orange)
NxN_arrow_glyph = Arrow(end=OpenHead(line_color=c_orange,line_width= 2, size=5),
x_start='xS', y_start='yS', x_end='xE', y_end='yE',line_width= "lW", source=f1.NxN_arrow_source,line_color=c_orange)
NzP_arrow_glyph = Arrow(end=OpenHead(line_color=c_orange,line_width= 2, size=5),
x_start='xS', y_start='yS', x_end='xE', y_end='yE',line_width= "lW", source=f1.NzP_arrow_source,line_color=c_orange)
NzN_arrow_glyph = Arrow(end=OpenHead(line_color=c_orange,line_width= 2, size=5),
x_start='xS', y_start='yS', x_end='xE', y_end='yE',line_width= "lW", source=f1.NzN_arrow_source,line_color=c_orange)
Nxz1_arrow_glyph = Arrow(end=OpenHead(line_color=c_blue,line_width= 2, size=5),
x_start='xS', y_start='yS', x_end='xE', y_end='yE',line_width= "lW", source=f1.Nxz1_arrow_source,line_color=c_blue)
Nxz2_arrow_glyph = Arrow(end=OpenHead(line_color=c_blue,line_width= 2, size=5),
x_start='xS', y_start='yS', x_end='xE', y_end='yE',line_width= "lW", source=f1.Nxz2_arrow_source,line_color=c_blue)
Nxz3_arrow_glyph = Arrow(end=OpenHead(line_color=c_blue,line_width= 2, size=5),
x_start='xS', y_start='yS', x_end='xE', y_end='yE',line_width= "lW", source=f1.Nxz3_arrow_source,line_color=c_blue)
Nxz4_arrow_glyph = Arrow(end=OpenHead(line_color=c_blue,line_width= 2, size=5),
x_start='xS', y_start='yS', x_end='xE', y_end='yE',line_width= "lW", source=f1.Nxz4_arrow_source,line_color=c_blue)
### Figure 1, Rectangle glyphs:
NNP_rect_glphys = Rect(x="x", y="y", width="w", height="h", angle="angle", fill_color=c_orange, fill_alpha=0.5)
Nxz_rect_glyphs = Rect(x="x", y="y", width="w", height="h", angle="angle", fill_color=c_blue, fill_alpha=0.5)
### Figure 1, Define Figure and add Geometry:
figure1 = figure(title=figure_texts.data['text'][4], tools="save", x_range=(-30,30), y_range=(-30,30),width=400,height=400)
figure1.square([0], [0], size=75, color="black", alpha=0.5)
figure1.add_layout(Arrow(end=NormalHead(fill_color="black", size=15),
x_start=0, y_start=0, x_end=25, y_end=0))
figure1.add_layout(Arrow(end=NormalHead(fill_color="black", size=15),
x_start=0, y_start=0, x_end=0, y_end=-25))
glyphs_to_add = [NxP_arrow_glyph, NxN_arrow_glyph, NzP_arrow_glyph, NzN_arrow_glyph, Nxz1_arrow_glyph, Nxz2_arrow_glyph, Nxz3_arrow_glyph, Nxz4_arrow_glyph]
add_layouts_from_list(figure1, glyphs_to_add)
glyphs_to_add | |
<reponame>tnnfnc/skipkey<gh_stars>0
'''Crypto facade...'''
import cryptography.hazmat.primitives.keywrap as keywrap
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives import padding
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.hazmat.primitives.kdf.hkdf import HKDF
import os
import json
import gzip as gzip
import base64
RANDOM_BYTES = 32
def _(x):
"""Translation mask
"""
return x
def init_symbols():
"""Return a tuple of symbols:
! # $ % & ( ) , + * - = . : ; ? @ [ ] ^ _
"""
# '"', no
# "'", no
# '/', no
# '<', no
# '>', no
# '\', no
# "`", maybe
# '~', maybe
return ('!', '#', '$', '%', '&', '(', ')', ',', '+', '*', '-',
'=', '.', ':', ';', '?', '@', '[', ']', '^', '_')
# Digit
def init_numbers():
"""Return a tuple of numbers digit from exadecimal
ranges [0x30-0x39]."""
return ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
# Upper
def init_letters():
"""Return a tuple of lowercase and uppercase letters
from exadecimal ranges [0x41-0x5a], [0x61-0x7a]."""
return ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',
'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',
's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I',
'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R',
'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z')
SYMBOLS = init_symbols()
NUMBERS = init_numbers()
LETTERS = init_letters()
def init_crypto_args(**kwargs):
"""Return a dictionary of default cryptographic parameters.
Existing keys are updated from optional kwargs.
"""
d = {'algorithm': 'AES',
'mode': 'CBC',
'keysize': 256,
'pbkdf': 'PBKDF2HMAC',
'hash': 'SHA256',
'length': int(256/8),
'iterations': 100,
'salt': str(base64.b64encode(os.urandom(RANDOM_BYTES)), encoding='utf-8')}
return {**d, **kwargs}
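# Usage sketch (illustrative values): override only what differs from the
# defaults; extra keys are simply merged into the returned dict.
#   args = init_crypto_args(iterations=200000)
#   args['iterations']  ->  200000
#   args['algorithm']   ->  'AES'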
def cipher_algorithms():
'''Advanced Encryption Standard is a block cipher standardized by NIST;
it is fast and cryptographically strong. Camellia is a block cipher
approved for use by CRYPTREC and ISO/IEC. It is considered to have
comparable security and performance to AES but is not as widely studied
or deployed.
CAST5 (also known as CAST-128) is a block cipher approved for use in
the Canadian government by the Communications Security Establishment.
SEED is a block cipher developed by the Korea Information Security
Agency (KISA).
It is defined in RFC 4269 and is used broadly throughout South Korean
industry.'''
return {'AES': _('Advanced Encryption Standard'),
'Camellia': _('Camellia is a block cipher approved for use by CRYPTREC and ISO/IEC.'),
'CAST5': _('CAST5 (also known as CAST-128) is a block cipher approved for use in the Canadian government.'),
'SEED': _('SEED is a block cipher developed by the Korea Information Security Agency (KISA).')
}
def key_derivators():
return {'PBKDF2HMAC': _('Password Based Key Derivation Function 2, is typically used for deriving a cryptographic key from a password.'),
'HKDF': _('HMAC-based Extract-and-Expand Key Derivation Function, is suitable for deriving keys of a fixed size.'),
}
class PatternException(Exception):
pass
class Schema():
"""[summary]
Raises:
PatternException: [description]
Returns:
[type]: [description]
"""
SEP = ','
    def __init__(self, schema):
        """Build a Schema from a dict, a list/tuple, or a pattern string.

        Args:
            schema (dict | list | tuple | str): schema fields, or an
                already encoded pattern string.

        Raises:
            PatternException: if the schema does not encode to a valid pattern.
        """
        # Start from a valid default pattern so the field setters below can
        # rewrite single fields without touching an uninitialised pattern.
        self._pattern = 'True,0,True,0,0,""'
        if schema:
            if isinstance(schema, dict):
                self.auto = schema.get('auto', '')
                self.lenght = schema.get('lenght', '')
                self.letters = schema.get('letters', '')
                self.numbers = schema.get('numbers', '')
                self.symbols = schema.get('symbols', '')
                self.gliphs = schema.get('gliphs', '')
            elif isinstance(schema, (list, tuple)):
                self.auto = schema[0]
                self.lenght = schema[1]
                self.letters = schema[2]
                self.numbers = schema[3]
                self.symbols = schema[4]
                self.gliphs = schema[5]
            elif isinstance(schema, str):
                self._pattern = schema
        if not self.check(self._pattern):
            raise PatternException(f'Schema not valid: {schema}')
@staticmethod
def check(pattern):
if pattern:
split = pattern.split(Schema.SEP)
try:
if str(split[0]) in ('True', 'False') and\
str(split[2]) in ('True', 'False') and\
int(split[1]) > -1 and int(split[3]) > -1 and\
int(split[4]) > -1 and isinstance(split[5], str):
return True
return False
except Exception:
return False
return False
@property
def auto(self):
return self._pattern.split(self.SEP)[0]
@property
def lenght(self):
return self._pattern.split(self.SEP)[1]
@property
def letters(self):
return self._pattern.split(self.SEP)[2]
@property
def numbers(self):
return self._pattern.split(self.SEP)[3]
@property
def symbols(self):
return self._pattern.split(self.SEP)[4]
@property
def gliphs(self):
return self._pattern.split(self.SEP)[5]
@auto.setter
def auto(self, value: bool):
self._encode(value, 1)
@lenght.setter
def lenght(self, value: int):
self._encode(value, 2)
@letters.setter
def letters(self, value: bool):
self._encode(value, 3)
@numbers.setter
def numbers(self, value: int):
self._encode(value, 4)
@symbols.setter
def symbols(self, value: int):
self._encode(value, 5)
@gliphs.setter
def gliphs(self, value: str):
self._encode(value, 6)
def _encode(self, value, maxsplit):
split = self._pattern.split(self.SEP, maxsplit)
split[maxsplit - 1] = str(value)
self._pattern = self.SEP.join(split)
@property
def schema(self):
return self._pattern
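# Illustrative round-trip sketch (not part of the original module); field
# names keep the module's own spellings ('lenght', 'gliphs').
if __name__ == '__main__':
    s1 = Schema({'auto': True, 'lenght': 12, 'letters': True,
                 'numbers': 2, 'symbols': 1, 'gliphs': 'x'})
    s2 = Schema(s1.schema)        # rebuild from the encoded pattern string
    assert s1.schema == s2.schema
    print(s2.lenght, s2.numbers)  # field accessors return strings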
class Pattern():
"""Define the pattern that a token must be compliant to.
"""
def __init__(self, letters, numbers, symbols, length, *args, **kwargs):
self.numbers = self._convert(numbers)
self.symbols = self._convert(symbols)
self.letters = self._convert(letters)
self._length = self._convert(length)
self._gliphs = []
if self.letters > 0:
self._gliphs.extend(LETTERS)
if self.numbers > 0:
self._gliphs.extend(NUMBERS)
if self.symbols > 0:
self._gliphs.extend(SYMBOLS)
if self.numbers + self.letters + self.symbols == 0:
            raise ValueError(_('Inconsistent pattern: at least one glyph class (letters, numbers or symbols) must be allowed'))
    def _convert(self, s):
        if s is None or s == '' or str(s) == 'False':
            s = 0
        elif str(s) == 'True':
            s = 1
        return int(s)
@property
def gliphs(self):
"""Return the list of allowed textual symbols."""
return self._gliphs
@property
def length(self):
"""Lenght"""
return self._length
def check(self, text):
"""Check the text contains at least as many allowed
textual symbols as set."""
lt = s = n = 0
result = False
for g in text:
if g in LETTERS and lt < self.letters:
lt += 1
if g in NUMBERS and n < self.numbers:
n += 1
if g in SYMBOLS and s < self.symbols:
s += 1
if n == self.numbers and s == self.symbols and lt == self.letters:
result = True
break
return result
def token(self, key):
"""Transform a bytes sequence to a token compliant to the pattern."""
        if len(key) < 1:
            raise PatternException(_('Key must be at least 1 byte long'))
n = int(key.hex(), base=16)
d = len(self.gliphs)
p = []
p.append(self.gliphs[n % d])
while len(p) < self.length:
n = int(n/d) if int(n/d) > 0 else n
if n < d:
p.append(self.gliphs[n])
break
else:
p.append(self.gliphs[n % d])
if len(p) != self.length:
raise PatternException()
return ''.join(p)
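# Hedged usage sketch: derive a deterministic token from raw key bytes. The
# 16-byte key is a fixed example value; note that token() does not enforce
# the per-class counts, so check() may legitimately return False.
if __name__ == '__main__':
    pattern = Pattern(letters=True, numbers=2, symbols=1, length=10)
    token = pattern.token(bytes(range(1, 17)))
    print(token, pattern.check(token))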
class KeyWrapper():
''' Wrap the secret key.
Keep the wrapped secret and the wrapping key separated.'''
    def wrap(self, secret):
        '''Wrap the secret key, store the wrapped value internally,
        and return the freshly generated wrapping key.'''
        _wrapkey = os.urandom(RANDOM_BYTES)
self._wrappedkey = keywrap.aes_key_wrap_with_padding(
_wrapkey, secret, default_backend())
return _wrapkey
def unwrap(self, wrapkey):
'''Return the secret key'''
secret = keywrap.aes_key_unwrap_with_padding(
wrapkey, self._wrappedkey, default_backend())
return secret
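# Minimal round-trip sketch for KeyWrapper; the secret is a throwaway
# example key. As the class docstring says, keep the returned wrapping key
# and the stored wrapped secret in separate places.
if __name__ == '__main__':
    kw = KeyWrapper()
    secret = os.urandom(32)
    wrapkey = kw.wrap(secret)
    assert kw.unwrap(wrapkey) == secret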
class CipherFachade():
def __init__(self, encoding='utf-8'):
self.encoding = encoding
self.backend = default_backend()
@staticmethod
def parse(obj):
"""Parse a JSON string containing an encrypted content to a dictionary format.
Args:
obj (str): JSON string containing an encrypted content.
Returns:
dict: a dictionary containing an encrypted content.
"""
data = json.loads(obj)
cryptopars = init_crypto_args(**data)
return cryptopars
    def _algorithm(self, secret, name='AES'):
        if not name:
            return algorithms.AES(secret)
        name = name.upper()
        if name == 'CAMELLIA':
            return algorithms.Camellia(secret)
        elif name == 'CAST5':
            return algorithms.CAST5(secret)
        elif name == 'SEED':
            return algorithms.SEED(secret)
        # 'AES' and any unrecognised name fall back to AES
        return algorithms.AES(secret)
def encrypt(self, obj, cryptod, secret):
"""Encrypt a python object to a dictionary format.
Args:
obj (Object): the content to be encrypted.
        cryptod (dict): cryptographic arguments.
secret (bytes): secret key.
Raises:
ValueError: generic encryption error.
Returns:
dict: encrypted content dictionary.
"""
# Items to json string
try:
# Compress content
data = gzip.compress(
bytes(json.dumps(obj), encoding=self.encoding))
            # Format content in a crypto dictionary
algorithm = self._algorithm(
secret=secret, name=cryptod['algorithm'])
iv = os.urandom(int(algorithm.block_size/8))
cryptod['iv'] = str(base64.b64encode(iv), encoding=self.encoding)
# Encrypt content
cipher = Cipher(algorithm, modes.CBC(iv), backend=self.backend)
encryptor = cipher.encryptor()
# Pad
padder = padding.PKCS7(cipher.algorithm.block_size).padder()
data = padder.update(data) + padder.finalize()
data = encryptor.update(data) + encryptor.finalize()
# Base64 encoded
data = base64.b64encode(data)
cryptod['mode'] = modes.CBC.name
cryptod['keysize'] = cipher.algorithm.key_size
cryptod['ciphervalue'] = str(data, encoding=self.encoding)
cipher = None
return cryptod
except ValueError as ve:
raise ValueError('Encrypting failure!') from ve
def decrypt(self, cryptod, secret):
"""Decrypt the content from a dictionary format to a JSON string.
Args:
cryptod (dict): encrypted content
        secret (bytes): secret key
Raises:
ValueError: Generic decryption error.
Returns:
str: a JSON string.
"""
try:
# From json to python crypto dict
data = base64.b64decode(
bytes(cryptod['ciphervalue'], encoding=self.encoding))
# Decrypt
iv = base64.b64decode(bytes(cryptod['iv'], encoding=self.encoding))
"""
Ver 1.6
"""
from telegram import InlineKeyboardMarkup, InlineKeyboardButton, ParseMode
from telegram.ext import Updater, CommandHandler, ConversationHandler, CallbackQueryHandler
from sendMenu import getMenuURL
from databasefn import Database
from buildMenu import build_menu
import os
import logging
import datetime
import schedule
import time
import threading
# ===== LOGGER =====
logging.basicConfig(
format='%(asctime)s.%(msecs)03d - %(name)s - %(levelname)s - %(message)s',
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO)
logger = logging.getLogger(__name__)
def error(update, context):
"""Log Errors caused by Updates."""
logger.warning('Update "%s" caused error "%s"', update, context.error)
# ===== EMOJIS =====
WHALE = u"\U0001F40B"
THERMOMETER = u"\U0001F321"
FLEXED_BICEPS = u"\U0001F4AA\U0001F3FB"
CAMERA = u"\U0001F4F8"
BUTTON = u"\U0001F518"
ROBOT = u"\U0001F916"
QUEUE = u"\U0001F46B"
EAT = u"\U0001F37D"
BURGER = u"\U0001f354"
HAPPY = u"\U0001F970"
BOO = u"\U0001F92C"
RUN = u"\U0001F3C3\U0001F3FB"
LIGHTNING = u"\U000026A1"
INFO = u"\U00002139"
FIRE = u"\U0001f525"
# ===== CONVERSATION STATES =====
# Set up states in the conversation
(AFTER_START, AFTER_HELP, CONFIRM_ENTRY) = range(3) # CONFIRM_EXIT
# ===== DATABASE =====
db = Database()
def purge_db():
logger.info("NOTE: DB HAS BEEN PURGED - DH has closed.")
db.purge()
return
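# The `schedule`, `time` and `threading` imports above suggest the purge runs
# on a timer. The wiring below is an assumed sketch, not the original code;
# the 21:00 closing time is a hypothetical value.
def _run_purge_scheduler():
    schedule.every().day.at("21:00").do(purge_db)  # assumed DH closing time
    while True:
        schedule.run_pending()
        time.sleep(60)
# threading.Thread(target=_run_purge_scheduler, daemon=True).start()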
# ===== HELP TEXT =====
HELP_TEXT = """\n<b>DINING HALL CROWD REGULATION</b>
<b>Commands on this bot:</b>
/start : To start or restart the bot
/status : Check the current status of DH only
/foodtoday : Get the menu for DH today
/foodtmr : Get the menu for DH tomorrow
<b>Buttons and what they mean:</b>\n""" + \
BUTTON + "<i>Enter:</i> Click this button only if you are about to enter the dining hall.\n" + \
BUTTON + "<i>Leave:</i> Click this button if you are currently leaving the dining hall.\n" + \
BUTTON + "<i>Dine In:</i> To indicate if you are eating inside the dining hall. Do try to finish your food within 20-25 mins!\n" + \
BUTTON + "<i>Takeaway:</i> To indicate that you are taking away food and not staying to eat inside the dining hall." + \
"\n\n<b>Feedbacks / Wish to contribute?</b>" + \
"\nContacts: @haveaqiupill, @PakornUe, @TeaR_RS, @Cpf05"
DINE_IN_OVERFLOW_MESSAGE = "Number of dine-in users has reached the warning threshold (45)"
TAKEAWAY_OVERFLOW_MESSAGE = "Number of takeaway users has reached the warning threshold (12)"
DINE_IN_OVERFLOW_RESOLVED_MESSAGE = "Number of dine-in users has dropped below the warning threshold (45)"
TAKEAWAY_OVERFLOW_RESOLVED_MESSAGE = "Number of takeaway users has dropped below the warning threshold (12)"
def notify_admin(message, context):
context.bot.send_message(text = message, chat_id = os.environ['REPORT_GROUP_ID'], parse_mode = ParseMode.HTML)
# ===== START =====
def start(update, context):
reply_text = "Hello! You are currently being served by the RC4 Dining Hall Regulation Bot. " + ROBOT + "\n\n"
# Get current status from DB
DINE_IN_COUNT, TAKEAWAY_COUNT = db.getCount()
TOTAL_COUNT = int(DINE_IN_COUNT) + int(TAKEAWAY_COUNT)
timeNow = datetime.datetime.now()
STATUS_TEXT = "<b>Current Status of DH:</b>\n"
    # check for overload: 50 or more people in DH
if TOTAL_COUNT >= 50:
STATUS_TEXT += FIRE + " <b>Crowd level is currently HIGH, please wait before coming to the dining hall.</b>\n\n"
STATUS_TEXT += "Total number of people in Dining Hall: <b>{}</b>".format(str(TOTAL_COUNT))
STATUS_TEXT += "\n" + EAT + " Dining In: <b>{}</b>".format(str(DINE_IN_COUNT))
STATUS_TEXT += "\n" + BURGER + " Taking Away: <b>{}</b>".format(str(TAKEAWAY_COUNT))
STATUS_TEXT += "\n<i>Accurate as of: {}</i>".format(timeNow.strftime("%d/%m/%Y %H:%M:%S"))
reply_text += STATUS_TEXT
reply_text += "\n\n**************************************\n"
reply_text += "\nHey there! Thanks for using the bot! Do you wish to dine-in or takeaway?\n\n" \
+ BUTTON + "Press <i>Dine-In</i> to eat inside the dining hall (limit of 25 mins)\n\n" \
+ BUTTON + "Press <i>Takeaway</i> to takeaway food with your own container (limit of 7 mins)\n\n" \
+ BUTTON + "Press <i>Refresh</i> to get the latest crowd level!\n\n" \
+ BUTTON + "Press <i>Help</i> if you need further assistance or to find more information :)" \
takeawayText = BURGER + " Takeaway"
dineInText = EAT + " Dine-In"
helpText = INFO + " Help"
refreshText = LIGHTNING + " Refresh"
    button_list = [InlineKeyboardButton(text=takeawayText, callback_data='INTENT_0'),
                   InlineKeyboardButton(text=dineInText, callback_data='INTENT_1'),
                   InlineKeyboardButton(text=helpText, callback_data='HELP'),
                   InlineKeyboardButton(text=refreshText, callback_data='REFRESH')]
menu = build_menu(button_list, n_cols=2, header_buttons=None, footer_buttons=None)
# create a jobqueue
jobq = context.job_queue
# split into 2 modes of entry for this state - can be command or callbackquery data
try: # for command entry
user = update.message.from_user
chatid = update.message.chat_id
        # get the user's status from PostgreSQL; a user who has already indicated cannot press /start again
userIn = db.checkUser(str(user.id))
if userIn:
warnText = "<b>You have already indicated earlier.</b> You can't enter the DH twice!\n\nTo leave the dining hall, press the leave button in any previous message (or reminder message) I have sent you!\n\n"
warnText += STATUS_TEXT
context.bot.send_message(text=warnText,
chat_id=user.id,
parse_mode=ParseMode.HTML)
return ConversationHandler.END # end convo if user pressed start but is in DH
else:
# if new start, send a new message
context.bot.send_message(text=reply_text,
chat_id=chatid,
parse_mode=ParseMode.HTML,
reply_markup=InlineKeyboardMarkup(menu))
            # job queue for temperature-taking reminders; if a job already exists, remove it first, then create a new one (per the telegram API)
if 'morningReminder' in context.chat_data:
old_job = context.chat_data['morningReminder']
old_job.schedule_removal()
if 'eveningReminder' in context.chat_data:
old_job = context.chat_data['eveningReminder']
old_job.schedule_removal()
morningReminder = jobq.run_daily(callback_reminder, datetime.time(8, 00, 00), context=chatid) # reminder at 8am
context.chat_data['morningReminder'] = morningReminder
eveningReminder = jobq.run_daily(callback_reminder, datetime.time(17, 30, 00), context=chatid) # reminder at 530pm
context.chat_data['eveningReminder'] = eveningReminder
except AttributeError: # for backs and refreshes
query = update.callback_query
user = query.from_user
chatid = query.message.chat_id
# if existing user, edit message
context.bot.editMessageText(text=reply_text, # same reply text
chat_id=chatid,
message_id=query.message.message_id, # to edit the prev message sent by bot
reply_markup=InlineKeyboardMarkup(menu),
parse_mode=ParseMode.HTML)
log_text = "User " + str(user.id) + " has started using bot."
logger.info(log_text)
return AFTER_START
# ===== STATUS =====
def status(update, context):
user = update.message.from_user
chatid = update.message.chat_id
# Get current status from DB
DINE_IN_COUNT, TAKEAWAY_COUNT = db.getCount()
TOTAL_COUNT = int(DINE_IN_COUNT) + int(TAKEAWAY_COUNT)
timeNow = datetime.datetime.now()
STATUS_TEXT = "<b>Current Status of DH:</b>\n"
    # check for overload: 50 or more people in DH
if TOTAL_COUNT >= 50:
STATUS_TEXT += FIRE + " <b>Crowd level is currently HIGH, please wait before coming to the dining hall.</b>\n\n"
STATUS_TEXT += "Total number of people in Dining Hall: <b>{}</b>".format(str(TOTAL_COUNT))
STATUS_TEXT += "\n" + EAT + " Dining In: <b>{}</b>".format(str(DINE_IN_COUNT))
STATUS_TEXT += "\n" + BURGER + " Taking Away: <b>{}</b>".format(str(TAKEAWAY_COUNT))
STATUS_TEXT += "\n<i>Accurate as of: {}</i>".format(timeNow.strftime("%d/%m/%Y %H:%M:%S"))
context.bot.send_message(text=STATUS_TEXT,
chat_id=chatid,
parse_mode=ParseMode.HTML)
return
# ===== HELP =====
def send_help(update, context):
query = update.callback_query
user = query.from_user
chatid = query.message.chat_id
log_text = "User " + str(user.id) + " is now seeking help."
logger.info(log_text)
reply_text = HELP_TEXT
button_list = [InlineKeyboardButton(text='Back', callback_data='BACKTOSTART')]
menu = build_menu(button_list, n_cols=1, header_buttons=None, footer_buttons=None)
context.bot.editMessageText(text=reply_text,
chat_id=chatid,
message_id=query.message.message_id,
reply_markup=InlineKeyboardMarkup(menu),
parse_mode=ParseMode.HTML)
return AFTER_HELP
# ===== INDICATE INTENTION =====
def indicate_intention(update, context):
query = update.callback_query
user = query.from_user
chatid = query.message.chat_id
    # get the user's status from PostgreSQL; a user who has already indicated cannot press start again
userIn = db.checkUser(str(user.id))
if userIn:
warnText = "<b>You have already indicated earlier.</b> You can't enter the DH twice!\n\nTo check the status of the | |
data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data["location_time"] = str(datetime.now())
data["device_id"] = "mh2000"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_with_invalid_location_time(self):
data = self.minimum_valid_data.copy()
data["location_time"] = "invalid_format"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
data["location_time"] = "12:Dec:2018:20:45:2331"
data["device_id"] = "mh2000"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_mahindra_gps_device_with_valid_longitude(self):
data = self.minimum_valid_data.copy()
data["longitude"] = 1232333.1231
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data["longitude"] = 1.1234567891
data["device_id"] = "mh2000"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data["longitude"] = None
data["device_id"] = "mh2001"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_with_invalid_longitude(self):
data = self.minimum_valid_data.copy()
data["longitude"] = "invalid"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
data["longitude"] = 1.12345678911
data["device_id"] = "mh2000"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_mahindra_gps_device_with_valid_latitude(self):
data = self.minimum_valid_data.copy()
data["latitude"] = 1232333.1231
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data["latitude"] = 1.1234567891
data["device_id"] = "mh2000"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data["latitude"] = None
data["device_id"] = "mh2001"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_with_invalid_latitude(self):
data = self.minimum_valid_data.copy()
data["latitude"] = "invalid"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
data["latitude"] = 1.12345678911
data["device_id"] = "mh2000"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_mahindra_gps_device_with_valid_address(self):
data = self.minimum_valid_data.copy()
data["address"] = "valid_address"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data["address"] = generate_random_string(299)
data["device_id"] = "mh2000"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data["address"] = generate_random_string(300)
data["device_id"] = "mh2001"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data["driver_name"] = None
data["device_id"] = "mh2002"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_with_invalid_address(self):
data = self.minimum_valid_data.copy()
data["address"] = generate_random_string(301)
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_mahindra_gps_device_with_valid_status(self):
data = self.minimum_valid_data.copy()
data["status"] = "valid_address"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data["status"] = generate_random_string(299)
data["device_id"] = "mh2000"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data["status"] = generate_random_string(300)
data["device_id"] = "mh2001"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data["status"] = None
data["device_id"] = "mh2002"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_with_invalid_status(self):
data = self.minimum_valid_data.copy()
data["status"] = generate_random_string(301)
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_mahindra_gps_device_with_valid_inactive_sms_sent_at(self):
data = self.minimum_valid_data.copy()
data["inactive_sms_sent_at"] = datetime.now()
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data["inactive_sms_sent_at"] = str(datetime.now())
data["device_id"] = "mh2000"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data["inactive_sms_sent_at"] = None
data["device_id"] = "mh2001"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_with_invalid_inactive_sms_sent_at(self):
data = self.minimum_valid_data.copy()
data["inactive_sms_sent_at"] = "invalid_format"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
data["inactive_sms_sent_at"] = "12-Dec-2018"
data["device_id"] = "mh2000"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
data["inactive_sms_sent_at"] = "12-12-2018:20:20:330"
data["device_id"] = "mh2001"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_mahindra_gps_device_with_valid_driver(self):
data = self.minimum_valid_data.copy()
data["driver"] = self.driver.id
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data["driver"] = None
data["device_id"] = "mh2001"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_with_invalid_driver(self):
data = self.minimum_valid_data.copy()
data["driver"] = "invalid"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
data["driver"] = -12
data["device_id"] = "mh2001"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
data["driver"] = 0
data["device_id"] = "mh2002"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
data["driver"] = 12345678
data["device_id"] = "mh2003"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
data["driver"] = datetime.now()
data["device_id"] = "mh2004"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
data["driver"] = 1.32
data["device_id"] = "mh2005"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_mahindra_gps_device_with_valid_number_verified(self):
data = self.minimum_valid_data.copy()
data["number_verified"] = True
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data["number_verified"] = True
data["device_id"] = "mh2000"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data["number_verified"] = 0
data["device_id"] = "mh2001"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data["number_verified"] = 1
data["device_id"] = "mh2002"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_with_invalid_number_verified(self):
data = self.minimum_valid_data.copy()
data["number_verified"] = "invalid"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
data["number_verified"] = -12
data["device_id"] = "mh2000"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
data["number_verified"] = None
data["device_id"] = "mh2002"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_mahindra_gps_device_with_valid_is_active(self):
data = self.minimum_valid_data.copy()
data["is_active"] = True
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data["is_active"] = True
data["device_id"] = "mh2000"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data["is_active"] = 0
data["device_id"] = "mh2001"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data["is_active"] = 1
data["device_id"] = "mh2002"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_with_invalid_is_active(self):
data = self.minimum_valid_data.copy()
data["is_active"] = "invalid"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
data["is_active"] = -12
data["device_id"] = "mh2000"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
data["is_active"] = None
data["device_id"] = "mh2002"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    # Add the latitude field to the minimum required valid data
def test_create_mahindra_gps_device_with_latitude(self):
self.minimum_valid_data["latitude"] = "21.9200000763"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, self.minimum_valid_data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    # Add the vehicle_status field to the minimum required valid data
def test_create_mahindra_gps_device_with_vehicle_status(self):
self.minimum_valid_data["vehicle_status"] = "unloaded"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, self.minimum_valid_data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    # Add the is_active field to the minimum required valid data
def test_create_mahindra_gps_device_with_is_active(self):
self.minimum_valid_data["is_active"] = True
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, self.minimum_valid_data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    # Add the driver field to the minimum required valid data
def test_create_mahindra_gps_device_with_driver(self):
self.minimum_valid_data["driver"] = self.driver.id
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, self.minimum_valid_data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_with_full_valid_data(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, self.mahindra_gps_device_data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_invalid_data(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
data = self.mahindra_gps_device_data.copy()
data["location_time"] = "10-09-2015"
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
data = self.mahindra_gps_device_data.copy()
data["vehicle_status"] = "InvalidChoice"
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
data = self.mahindra_gps_device_data.copy()
data["latitude"] = "InvalidLatitude"
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
data = self.mahindra_gps_device_data.copy()
data["driver"] = -1
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
data["driver"] = "asdsad"
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
data["driver"] = self.driver.id * 100
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_all_4_function_without_token(self):
response = self.client.post(self.create_url, self.mahindra_gps_device_data, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.put(self.update_url, self.mahindra_gps_device_data, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.patch(self.partial_update_url, self.mahindra_gps_device_data,
format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.get(self.retrieve_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_all_4_functions_with_invalid_token(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token + "invalidToken")
response = self.client.post(self.create_url, self.mahindra_gps_device_data, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.put(self.update_url, self.mahindra_gps_device_data, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.patch(self.partial_update_url, self.mahindra_gps_device_data,
format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.get(self.retrieve_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_create_mahindra_gps_device_with_invalid_method(self):
data = self.minimum_valid_data.copy()
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.get(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
data["device_id"] = "mh2000"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.put(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
data["device_id"] = "mh2001"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.patch(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
    def test_update_mahindra_gps_device_with_minimum_valid_data(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.put(self.update_url, self.minimum_valid_data, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
def test_update_mahindra_gps_device_with_full_valid_data(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.put(self.update_url, self.mahindra_gps_device_data, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
def test_partial_update_mahindra_gps_device_with_valid_data(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
data = {"driver_number": "1234567890"}
response = self.client.patch(self.partial_update_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
data = {"location_time": "2017-01-28T22:22:30.792000"}
response = self.client.patch(self.partial_update_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
data = {"driver": self.driver.id}
response = self.client.patch(self.partial_update_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
def test_retrieve_existing_mahindra_gps_device(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.get(self.retrieve_url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_retrieve_non_existing_mahindra_gps_device(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
bad_retrieve_url = reverse("driver_mahindra_gps_device_retrieve",
kwargs={"pk": self.mahindra_gps_device.id * 1000})
response = self.client.get(bad_retrieve_url, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
class MahindraGPSDeviceLogTest(DriverSetup):
"""
Test cases for Mahindra GPS Device Log
"""
def setUp(self):
super().setUp()
self.mahindra_gps_device_log_data = {
"datetime": "2017-12-30T15:30:00.815272",
"vehicle_id": "cg04lx2547",
"latitude": "22.3190652778",
"longitude": "82.7222097222",
"speed": 0,
"fuel_efficiency": "0 KMPL",
"address": None,
"status": "Idle",
"driver_name": "Cg04lx2547",
"driver_number": "7766084087",
"driving_licence_number": "JH05/2008/0002195",
"vehicle_number": "cg04lx2547",
"vehicle_type": "Non Tipper",
"vehicle_status": "unloaded",
"device": self.mahindra_gps_device.id,
}
self.minimum_valid_data = {
"vehicle_id": "358511022368256|CG 07 BG 6237",
"datetime": "2017-05-06T11:36:30"
}
self.create_url = reverse("driver_mahindra_gps_device_log_create")
self.update_url = reverse("driver_mahindra_gps_device_log_update",
kwargs={"pk": self.mahindra_gps_device_log.id})
self.partial_update_url = reverse("driver_mahindra_gps_device_log_partial_update",
kwargs={"pk": self.mahindra_gps_device_log.id})
self.retrieve_url = reverse("driver_mahindra_gps_device_log_retrieve",
kwargs={"pk": self.mahindra_gps_device_log.id})
def test_create_mahindra_gps_device_log_with_minimum_valid_data(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, self.minimum_valid_data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_log_with_other_data_format(self):
pass
def test_create_mahindra_gps_device_log_with_valid_deleted(self):
data = self.minimum_valid_data.copy()
data["deleted"] = True
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data["deleted"] = False
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data["deleted"] = 0
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data["deleted"] = 1
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_log_device_with_invalid_deleted(self):
data = self.minimum_valid_data.copy()
data["deleted"] = "invalid"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
data["deleted"] = -12
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
data["deleted"] = None
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
# program for the little one to practice spelling (and Spanish!)
import cv2
import enum
import tensorflow as tf
import pandas as pd
import numpy as np
import os
from PIL import Image as im
from translate import Translator
from threading import Thread
from datetime import datetime
# only play the Spanish word once when the letter is found
found = False
numFound = 0
time_found = datetime.now()
#play welcome message
os.system('mpg123 sounds/welcome.mp3')
#video stream class for multithreading
class vStream:
    def __init__(self, src, width, height):
self._running = True
self.width=width
self.height=height
self.capture=cv2.VideoCapture(src)
self.thread=Thread(target=self.update,args=())
self.thread.daemon=True
self.thread.start()
def update(self):
while self._running:
success,self.frame=self.capture.read()
if success:
self.frame2=cv2.resize(self.frame,(self.width,self.height))
    def getFrame(self):
        # note: frame2 only exists after the first successful read
        return self.frame2
#kill the thread
def kill(self):
self.capture.release()
self._running = False
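# Hedged usage sketch (camera index 0 is an assumption): read resized frames
# from the capture thread until 'q' is pressed.
if __name__ == '__main__':
    cam = vStream(0, 640, 480)
    import time as _time
    _time.sleep(1.0)  # give the capture thread time to grab a first frame
    while True:
        cv2.imshow('preview', cam.getFrame())
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cam.kill()
    cv2.destroyAllWindows()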
# play the Spanish word when the letter is found
class spanishAudio:
isFound = False
fileName = ""
def __init__(self):
self._running = True
self.thread=Thread(target=self.update,args=())
self.thread.daemon=True
self.thread.start()
def update(self):
while self._running:
if self.isFound:
print("Found1")
cmd = 'mpg123 sounds/' + self.fileName
os.system(cmd)
self.isFound = False
def setFound(self,found, file_path):
print("Found2")
self.isFound=found
self.fileName=file_path
def kill(self):
self._running = False
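# Sketch of the intended hand-off (the mp3 name is hypothetical): the main
# loop calls setFound() and the worker thread plays the file via mpg123.
if __name__ == '__main__':
    audio = spanishAudio()
    audio.setFound(True, 'gato.mp3')  # assumed file under sounds/
    import time as _time
    _time.sleep(2.0)  # let the worker thread run the mpg123 command
    audio.kill()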
# enumeration of objects to display on the screen
class Object(enum.Enum):
cat = 1
dog = 2
cow = 3
ball = 4
duck = 5
goat = 6
#increment to the next object
def inc(self):
v = self.value + 1
#if we reached the end, start over
if v > 6:
v = 1
return Object(v)
    #return the missing letter and its position
    #since the kiddo is just learning letters, only the first letter is used,
    #but this is set up so the missing letter could be anywhere in the word
def letterPos(self):
l = 1
if self.value == 1:
#l = 1
val = "C"
if self.value == 2:
#l = 3
val = "D"
if self.value == 3:
#l = 2
val = "C"
if self.value == 4:
#l = 2
val = "B"
if self.value == 5:
#l = 4
val = "D"
if self.value == 6:
#l = 3
val = "G"
return (l,val)
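# Quick illustrative check of the enum helpers: cycle once through all the
# objects and show which letter is hidden for each word.
if __name__ == '__main__':
    obj = Object.cat
    for _ in range(len(Object)):
        pos, letter = obj.letterPos()
        print(obj.name, 'hides', letter, 'at position', pos)
        obj = obj.inc()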
# put cat letters on the screen
def drawCatText(image):
# show the letters and the one to fill in
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
image = cv2.rectangle(image, (130, 175), (245, 305), (255, 0, 0), 3)
cv2.putText(image, "A", (250, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (250, 300), (330, 300), (255, 0, 0), 4)
cv2.putText(image, "T", (350, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (350, 300), (430, 300), (255, 0, 0), 4)
return image
# put duck letters on the screen
def drawDuckText(image):
# show the letters and the one to fill in
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
image = cv2.rectangle(image, (130, 175), (245, 305), (255, 0, 0), 3)
#cv2.putText(image, "D", (150, 290),
# cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
cv2.putText(image, "U", (250, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
#image = cv2.rectangle(image, (230, 175), (345, 305), (255, 0, 0), 3)
image = cv2.line(image, (250, 300), (330, 300), (255, 0, 0), 4)
cv2.putText(image, "C", (350, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (350, 300), (430, 300), (255, 0, 0), 4)
cv2.putText(image, "K", (450, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (450, 300), (530, 300), (255, 0, 0), 4)
return image
# put goat letters on the screen
def drawGoatText(image):
# show the letters and the one to fill in
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
image = cv2.rectangle(image, (130, 175), (245, 305), (255, 0, 0), 3)
#cv2.putText(image, "G", (150, 290),
# cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
cv2.putText(image, "O", (250, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (250, 300), (330, 300), (255, 0, 0), 4)
cv2.putText(image, "A", (350, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
#image = cv2.rectangle(image, (345, 175), (435, 305), (255, 0, 0), 3)
image = cv2.line(image, (350, 300), (430, 300), (255, 0, 0), 4)
cv2.putText(image, "T", (450, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (450, 300), (530, 300), (255, 0, 0), 4)
return image
# put ball letters on the screen
def drawBallText(image):
# show the letters and the one to fill in
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
image = cv2.rectangle(image, (130, 175), (245, 305), (255, 0, 0), 3)
#cv2.putText(image, "B", (150, 290),
# cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
cv2.putText(image, "A", (250, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (250, 300), (330, 300), (255, 0, 0), 4)
cv2.putText(image, "L", (350, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (350, 300), (430, 300), (255, 0, 0), 4)
#image = cv2.rectangle(image, (430, 175), (545, 305), (255, 0, 0), 3)
cv2.putText(image, "L", (450, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (450, 300), (530, 300), (255, 0, 0), 4)
return image
# put cow letters on the screen
def drawCowText(image):
# show the letters and the one to fill in
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
image = cv2.rectangle(image, (130, 175), (245, 305), (255, 0, 0), 3)
#cv2.putText(image, "C", (150, 290),
# cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
cv2.putText(image, "O", (250, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
#image = cv2.rectangle(image, (230, 175), (345, 305), (255, 0, 0), 3)
image = cv2.line(image, (250, 300), (330, 300), (255, 0, 0), 4)
cv2.putText(image, "W", (350, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (350, 300), (430, 300), (255, 0, 0), 4)
return image
# put dog letters on the screen
def drawDogText(image):
# show the letters and the one to fill in
image = cv2.rectangle(image, (130, 175), (245, 305), (255, 0, 0), 3)
#cv2.putText(image, "D", (150, 290),
# cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
cv2.putText(image, "O", (250, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (250, 300), (330, 300), (255, 0, 0), 4)
cv2.putText(image, "G", (350, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
#image = cv2.rectangle(image, (345, 175), (440, 305), (255, 0, 0), 3)
image = cv2.line(image, (350, 300), (430, 300), (255, 0, 0), 4)
return image
#put the letters on the screen depending on which object it is
def addLetters(curObject, image):
if curObject.name == "cat":
image = drawCatText(image)
elif curObject.name == "dog":
image = drawDogText(image)
elif curObject.name == "cow":
image = drawCowText(image)
elif curObject.name == "ball":
image = drawBallText(image)
elif curObject.name == "duck":
image = drawDuckText(image)
elif curObject.name == "goat":
image = drawGoatText(image)
return image
# draw the object picture and letters to the screen
def drawScreen(filename, image, curObject):
game_pic = cv2.imread(filename, 1)
game_pic = cv2.resize(game_pic, (200, 150), interpolation=cv2.INTER_LINEAR)
added_image = cv2.addWeighted(
image[10:160, 200:400, :], 0.1, game_pic[0:150, 0:200, :], 0.9, 0)
# Change the region with the result
image[10:160, 200:400] = added_image
# add the letters for the given object to the screen
image = addLetters(curObject, image)
#draw a border around the letters
image = cv2.rectangle(image, (0, 0), (100, 480), (185, 185, 185), -1)
image = cv2.rectangle(image, (0, 325), (640, 480), (185, 185, 185), -1)
image = cv2.rectangle(image, (540, 0), (640, 480), (185, 185, 185), -1)
return image
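# Hedged sketch of how drawScreen ties together (the picture path is
# hypothetical): overlay the object picture on a camera frame and draw the
# letter slots for the current word.
if __name__ == '__main__':
    cam = vStream(0, 640, 480)  # camera index 0 is an assumption
    import time as _time
    _time.sleep(1.0)
    screen = drawScreen('pics/cat.jpg', cam.getFrame(), Object.cat)
    cv2.imshow('game', screen)
    cv2.waitKey(0)
    cam.kill()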
# get the input from the screen where the letter goes
def getLetter(image, location):
get_letter = []
#only doing the first letter, but can eventually have
#missing letter anywhere in the word
get_letter = image[180:298, 130:240]
#if location == 1:
# get_letter = image[180:298, 130:240]
#if location == 2:
# get_letter = image[180:298, 245:335]
#if location == 3:
# get_letter = image[180:298, 345:435]
#if location == 4:
# get_letter = image[180:298, 445:535]
get_letter = cv2.cvtColor(get_letter, cv2.COLOR_RGB2GRAY)
get_letter = cv2.resize(get_letter, (28, 28),
interpolation=cv2.INTER_LINEAR)
    # invert the black and white colors
img = cv2.bitwise_not(get_letter)
# turn the background black
# if the pixel value is less than 160, that means it's background,
    # so turn those pixels black
# -*- coding: utf-8 -*-
import subprocess
import sys
import json
import tempfile
import requests
import logging
import time
from datetime import datetime
from shapely.geometry import mapping
from pyramid.httpexceptions import HTTPBadRequest
from pyramid_oereb import Config
from pyramid_oereb.lib.renderer.extract.json_ import Renderer as JsonRenderer
from pyramid_oereb.lib.url import parse_url
if sys.version_info.major == 2:
import urlparse
else:
from urllib import parse as urlparse
log = logging.getLogger(__name__)
LEGEND_ELEMENT_SORT_ORDER = [
'AreaShare',
'LengthShare',
'NrOfPoints'
]
class Renderer(JsonRenderer):
def lpra_flatten(self, items):
for item in items:
self._flatten_object(item, 'Lawstatus')
self._localised_text(item, 'Lawstatus_Text')
self._flatten_object(item, 'ResponsibleOffice')
self._multilingual_text(item, 'ResponsibleOffice_Name')
self._multilingual_text_at_web(item)
self._multilingual_m_text(item, 'Text')
self._multilingual_text(item, 'Title')
self._multilingual_text(item, 'OfficialTitle')
self._multilingual_text(item, 'Abbreviation')
def __call__(self, value, system):
"""
Implements a subclass of pyramid_oereb.lib.renderer.extract.json_.Renderer to create a print result
out of a json. The json extract is reformatted to fit the structure of mapfish print.
Args:
value (tuple): A tuple containing the generated extract record and the params
dictionary.
system (dict): The available system properties.
Returns:
buffer: The pdf content as received from configured mapfish print instance url.
"""
log.debug("Parameter webservice is {}".format(value[1]))
if value[1].images:
raise HTTPBadRequest('With image is not allowed in the print')
self._request = self.get_request(system)
        # Create a lower-case GET dict to accept any upper/lower case spelling
        self._lowercase_GET_dict = dict((k.lower(), v.lower()) for k, v in self._request.GET.items())
# If a language is specified in the request, use it. Otherwise, use the language from base class
self._fallback_language = Config.get('default_language')
if 'lang' in self._lowercase_GET_dict:
self._language = self._lowercase_GET_dict.get('lang')
# Based on extract record and webservice parameter, render the extract data as JSON
extract_record = value[0]
extract_as_dict = self._render(extract_record, value[1])
feature_geometry = mapping(extract_record.real_estate.limit)
pdf_to_join = set()
self.convert_to_printable_extract(extract_as_dict, feature_geometry, pdf_to_join)
print_config = Config.get('print', {})
extract_as_dict['Display_RealEstate_SubunitOfLandRegister'] = print_config.get(
'display_real_estate_subunit_of_land_register', True
)
extract_as_dict['Display_Certification'] = print_config.get(
'display_certification', True
)
extract_as_dict['MultiPageTOC'] = print_config.get(
'multi_page_TOC', False
)
spec = {
'layout': Config.get('print', {})['template_name'],
'outputFormat': 'pdf',
'lang': self._language,
'attributes': extract_as_dict,
}
response = self.get_response(system)
if self._request.GET.get('getspec', 'no') != 'no':
response.headers['Content-Type'] = 'application/json; charset=UTF-8'
return json.dumps(spec, sort_keys=True, indent=4)
print_result = requests.post(
urlparse.urljoin(Config.get('print', {})['base_url'] + '/', 'buildreport.pdf'),
headers=Config.get('print', {})['headers'],
data=json.dumps(spec)
)
if not extract_as_dict['isReduced'] and print_result.status_code == 200:
main = tempfile.NamedTemporaryFile(suffix='.pdf')
main.write(print_result.content)
main.flush()
cmd = ['pdftk', main.name]
temp_files = [main]
for url in pdf_to_join:
result = requests.get(url)
content_type = result.headers.get('content-type')
log.debug("document url: " + url + " => content_type: " + content_type)
if content_type != 'application/pdf':
msg = "Skipped document inclusion (url: '{}') because content_type: '{}'"
                log.warning(msg.format(url, content_type))
continue
tmp_file = tempfile.NamedTemporaryFile(suffix='.pdf')
tmp_file.write(result.content)
tmp_file.flush()
temp_files.append(tmp_file)
cmd.append(tmp_file.name)
out = tempfile.NamedTemporaryFile(suffix='.pdf')
cmd += ['cat', 'output', out.name]
sys.stdout.flush()
time.sleep(0.1)
subprocess.check_call(cmd)
content = out.file.read()
else:
content = print_result.content
response.status_code = print_result.status_code
response.headers = print_result.headers
if 'Transfer-Encoding' in response.headers:
del response.headers['Transfer-Encoding']
if 'Connection' in response.headers:
del response.headers['Connection']
return content
@staticmethod
def get_wms_url_params():
"""
Returns the list of additionally configured wms_url_params.
:return: The configured wms_url_params.
:rtype: list
"""
result = {}
wms_url_params = Config.get('print', {}).get('wms_url_params', False)
if wms_url_params:
log.debug("get_wms_url_params() read configuration {}".format(wms_url_params))
if isinstance(wms_url_params, dict):
result = wms_url_params
else:
log.warning("get_wms_url_params() ignoring unaccepted configuration value {}"
.format(wms_url_params))
else:
log.info("no wms_url_params configuration detected; using default value")
result = {'TRANSPARENT': 'true'}
return result
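    # Illustrative print configuration for the lookup above (the sample key/value
    # besides 'wms_url_params' is an assumption):
    #
    #   print:
    #     wms_url_params:
    #       TRANSPARENT: 'true'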
def convert_to_printable_extract(self, extract_dict, feature_geometry, pdf_to_join):
"""
Converts an oereb extract into a form suitable for printing by mapfish print.
Args:
extract_dict: the oereb extract, will get converted by this function into a form
convenient for mapfish-print
feature_geometry: the geometry for this extract, will get added to the extract information
            pdf_to_join: a set of URLs of additional documents to append to the
                final PDF. Will get filled by this function. Used in the full
                extract only.
"""
log.debug("Starting transformation, extract_dict is {}".format(extract_dict))
log.debug("Parameter feature_geometry is {}".format(feature_geometry))
creation_date = datetime.strptime(extract_dict['CreationDate'], '%Y-%m-%dT%H:%M:%S')
extract_dict['Footer'] = ' '.join([
creation_date.strftime('%d.%m.%Y'),
creation_date.strftime('%H:%M:%S'),
extract_dict['ExtractIdentifier']
])
extract_dict['CreationDate'] = creation_date.strftime('%d.%m.%Y')
for attr_name in ['NotConcernedTheme', 'ThemeWithoutData', 'ConcernedTheme']:
for theme in extract_dict[attr_name]:
self._localised_text(theme, 'Text')
self._flatten_object(extract_dict, 'PLRCadastreAuthority')
self._flatten_object(extract_dict, 'RealEstate')
if 'Image' in extract_dict.get('RealEstate_Highlight', {}):
del extract_dict['RealEstate_Highlight']['Image']
main_page_url, main_page_params = \
parse_url(extract_dict['RealEstate_PlanForLandRegisterMainPage']['ReferenceWMS'])
base_url = urlparse.urlunsplit((main_page_url.scheme,
main_page_url.netloc,
main_page_url.path,
None,
None))
wms_url_params = self.get_wms_url_params()
main_page_basemap = {
'type': 'wms',
'styles': 'default',
'opacity': extract_dict['RealEstate_PlanForLandRegisterMainPage'].get('layerOpacity', 0.6),
'baseURL': base_url,
'layers': main_page_params['LAYERS'][0].split(','),
'imageFormat': 'image/png',
'customParams': wms_url_params,
}
extract_dict['baseLayers'] = {'layers': [main_page_basemap]}
url, params = parse_url(extract_dict['RealEstate_PlanForLandRegister']['ReferenceWMS'])
basemap = {
'type': 'wms',
'styles': 'default',
'opacity': extract_dict['RealEstate_PlanForLandRegister'].get('layerOpacity', 0.6),
'baseURL': urlparse.urlunsplit((url.scheme, url.netloc, url.path, None, None)),
'layers': params['LAYERS'][0].split(','),
'imageFormat': 'image/png',
'customParams': wms_url_params,
}
del extract_dict['RealEstate_PlanForLandRegister'] # /definitions/Map
self._multilingual_m_text(extract_dict, 'GeneralInformation')
self._multilingual_m_text(extract_dict, 'BaseData')
self._multilingual_m_text(extract_dict, 'Certification')
self._multilingual_m_text(extract_dict, 'CertificationAtWeb')
for item in extract_dict.get('Glossary', []):
self._multilingual_text(item, 'Title')
self._multilingual_text(item, 'Content')
self._multilingual_text(extract_dict, 'PLRCadastreAuthority_Name')
for restriction_on_landownership in extract_dict.get('RealEstate_RestrictionOnLandownership', []):
self._flatten_object(restriction_on_landownership, 'Lawstatus')
self._flatten_object(restriction_on_landownership, 'Theme')
self._flatten_array_object(restriction_on_landownership, 'Geometry', 'ResponsibleOffice')
self._localised_text(restriction_on_landownership, 'Theme_Text')
self._localised_text(restriction_on_landownership, 'Lawstatus_Text')
self._multilingual_m_text(restriction_on_landownership, 'Information')
self._multilingual_text(restriction_on_landownership['ResponsibleOffice'], 'Name')
restriction_on_landownership['ResponsibleOffice'] = \
[restriction_on_landownership['ResponsibleOffice']]
url, params = parse_url(restriction_on_landownership['Map']['ReferenceWMS'])
restriction_on_landownership['baseLayers'] = {
'layers': [{
'type': 'wms',
'opacity': restriction_on_landownership['Map'].get('layerOpacity', 0.6),
'styles': 'default',
'baseURL': urlparse.urlunsplit((url.scheme, url.netloc, url.path, None, None)),
'layers': params['LAYERS'][0].split(','),
'imageFormat': 'image/png',
'customParams': wms_url_params,
}, basemap]
}
restriction_on_landownership['legend'] = restriction_on_landownership['Map'].get(
'LegendAtWeb', '')
# Legend of other visible restriction objects in the topic map
restriction_on_landownership['OtherLegend'] = restriction_on_landownership['Map'].get(
'OtherLegend', [])
for legend_item in restriction_on_landownership['OtherLegend']:
self._multilingual_text(legend_item, 'LegendText')
for legend_entry in restriction_on_landownership['OtherLegend']:
for element in list(legend_entry.keys()):
if element not in ['LegendText', 'SymbolRef', 'TypeCode']:
del legend_entry[element]
del restriction_on_landownership['Map'] # /definitions/Map
for item in restriction_on_landownership.get('Geometry', []):
self._multilingual_text(item, 'ResponsibleOffice_Name')
legal_provisions = {}
laws = {}
hints = {}
if 'LegalProvisions' in restriction_on_landownership:
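                # Iterate to a fixpoint: nested References/Articles may carry further
                # documents, so keep flattening until a full pass makes no change.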
finish = False
while not finish:
finish = True
for legal_provision in restriction_on_landownership['LegalProvisions']:
if 'Base64TextAtWeb' in legal_provision:
del legal_provision['Base64TextAtWeb']
if 'Reference' in legal_provision:
for reference in legal_provision['Reference']:
self._categorize_documents(reference, legal_provisions, laws, hints)
del legal_provision['Reference']
finish = False
if 'Article' in legal_provision:
for article in legal_provision['Article']:
self._categorize_documents(article, legal_provisions, laws, hints)
del legal_provision['Article']
finish = False
self._categorize_documents(legal_provision, legal_provisions, laws, hints)
del restriction_on_landownership['LegalProvisions']
restriction_on_landownership['LegalProvisions'] = legal_provisions
restriction_on_landownership['Laws'] = laws
restriction_on_landownership['Hints'] = hints
# One restriction entry per theme
theme_restriction = {}
text_element = [
'Information', 'Lawstatus_Code', 'Lawstatus_Text', 'SymbolRef', 'TypeCode'
]
legend_element = [
'TypeCode', 'TypeCodelist', 'AreaShare', 'PartInPercent', 'LengthShare', 'NrOfPoints',
'SymbolRef', 'Information'
]
split_sub_themes = Config.get('print', {}).get('split_sub_themes', False)
for restriction_on_landownership in extract_dict.get('RealEstate_RestrictionOnLandownership', []):
theme = restriction_on_landownership['Theme_Code']
if split_sub_themes:
if 'SubTheme' in restriction_on_landownership:
theme = theme + '_' + restriction_on_landownership['SubTheme']
restriction_on_landownership['Split_SubTheme'] = True
geom_type = \
'AreaShare' if 'AreaShare' in restriction_on_landownership else \
'LengthShare' if 'LengthShare' in restriction_on_landownership else 'NrOfPoints'
if theme not in theme_restriction:
current = dict(restriction_on_landownership)
current['Geom_Type'] = geom_type
theme_restriction[theme] = current
# Legend
legend = {}
for element in legend_element:
if element in current:
legend[element] = current[element]
del current[element]
legend['Geom_Type'] = geom_type
current['Legend'] = [legend]
# Text
for element in text_element:
if element in current:
current[element] = set([current[element]])
else:
current[element] = set()
continue
current = theme_restriction[theme]
if 'Geom_Type' in current and current['Geom_Type'] != geom_type:
del current['Geom_Type']
# Legend
legend = {}
for element in legend_element:
if element in restriction_on_landownership:
legend[element] = restriction_on_landownership[element]
del restriction_on_landownership[element]
legend['Geom_Type'] = geom_type
current['Legend'].append(legend)
            # Remove from OtherLegend the elements that are already in the legend
current['OtherLegend'] = [other_legend_element
for other_legend_element in current['OtherLegend']
if other_legend_element['SymbolRef'] != legend['SymbolRef']]
            # Merge the Laws / LegalProvisions / Hints dicts of this restriction into the theme entry
for element in ['Laws', 'LegalProvisions', 'Hints']:
if current.get(element) is not None and restriction_on_landownership.get(element) is not None:
current[element].update(restriction_on_landownership[element])
elif restriction_on_landownership.get(element) is not None:
current[element] = restriction_on_landownership[element]
            # add the additional ResponsibleOffice to the theme if it does not already exist there
new_responsible_office = restriction_on_landownership['ResponsibleOffice'][0]
existing_office_names = list(map(lambda o: o['Name'], current['ResponsibleOffice']))
if new_responsible_office['Name'] not in existing_office_names:
current['ResponsibleOffice'].append(new_responsible_office)
# Text
for element in text_element:
if element in restriction_on_landownership:
current[element].add(restriction_on_landownership[element])
for restriction_on_landownership in theme_restriction.values():
for element in text_element:
restriction_on_landownership[element] = '\n'.join(restriction_on_landownership[element])
for element in ['Laws', 'LegalProvisions', 'Hints']:
values = list(restriction_on_landownership[element].values())
self.lpra_flatten(values)
restriction_on_landownership[element] = values
if element == 'LegalProvisions':
# This adds the first URL of TextAtWeb to the pdf_to_join set. At this point of the code
# there should only be one URL as the grouping takes place only after this if statement.
pdf_to_join.update([legal_provision['TextAtWeb'][0]['URL'] for legal_provision in values])
# Group legal provisions and hints which have the same title.
if (
(Config.get('print', {}).get('group_legal_provisions', False)) and
(element == 'LegalProvisions' or element == 'Hints')
):
restriction_on_landownership[element] = \
self.group_legal_provisions(restriction_on_landownership[element])
            # sort legal provisions, hints and laws
restriction_on_landownership['LegalProvisions'] = self.sort_dict_list(
restriction_on_landownership['LegalProvisions'],
self.sort_legal_provision
)
restriction_on_landownership['Laws'] = self.sort_dict_list(
restriction_on_landownership['Laws'],
self.sort_laws
)
restriction_on_landownership['Hints'] = self.sort_dict_list(
restriction_on_landownership['Hints'],
self.sort_hints
)
restrictions = list(theme_restriction.values())
for restriction in restrictions:
legends = {}
for legend in restriction['Legend']:
type_ = legend['TypeCode']
if type_ in legends:
for item in ['AreaShare', 'LengthShare', 'PartInPercent']:
if item in legend:
                            if item in legends[type_]:
                                # (reconstructed continuation; the source file is
                                # truncated here) sum the share values of legend
                                # entries sharing the same TypeCode
                                legends[type_][item] += legend[item]
                            else:
                                legends[type_][item] = legend[item]
import wx
import wx.lib.mixins.listctrl as listmix
import wx.lib.intctrl
import sys
import os
import re
import six
import time
import math
import json
import threading
import socket
import atexit
import platform
import webbrowser
from six.moves.queue import Queue, Empty
import CamServer
from roundbutton import RoundButton
from datetime import datetime, timedelta, time
now = datetime.now
import Utils
import CVUtil
from SocketListener import SocketListener
from Database import Database, DBWriter
from ScaledBitmap import ScaledBitmap
from FinishStrip import FinishStripPanel
from ManageDatabase import ManageDatabase
from PhotoDialog import PhotoDialog
from Clock import Clock
from AddPhotoHeader import AddPhotoHeader
from Version import AppVerName
from AddExifToJpeg import AddExifToJpeg
imageWidth, imageHeight = 640, 480
tdCaptureBeforeDefault = timedelta(seconds=0.5)
tdCaptureAfterDefault = timedelta(seconds=2.0)
closeFinishThreshold = 3.0/30.0
closeColors = ('E50000','D1D200','00BF00')
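# closeColors are RRGGBB hex strings (red, amber, green), presumably indicating
# how close a finish is relative to closeFinishThreshold.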
def getCloseFinishBitmaps( size=(16,16) ):
bm = []
dc = wx.MemoryDC()
for c in closeColors:
bitmap = wx.Bitmap( *size )
dc.SelectObject( bitmap )
dc.SetPen( wx.Pen(wx.Colour(0,0,0), 1) )
dc.SetBrush( wx.Brush(wx.Colour(*[int(c[i:i+2],16) for i in six.moves.range(0,6,2)]) ) )
dc.DrawRectangle( 0, 0, size[0]-1, size[1]-1 )
dc.SelectObject( wx.NullBitmap )
bm.append( bitmap )
return bm
def setFont( font, w ):
w.SetFont( font )
return w
def OpenHelp():
webbrowser.open( os.path.join(Utils.getHelpFolder(), 'QuickStart.html'), new=0, autoraise=1 )
from CalendarHeatmap import CalendarHeatmap
class DateSelectDialog( wx.Dialog ):
def __init__( self, parent, triggerDates, id=wx.ID_ANY, ):
wx.Dialog.__init__( self, parent, id, title=_("Date Select") )
sizer = wx.BoxSizer( wx.VERTICAL )
self.dateSelect = None
self.triggerDates = triggerDates
self.chm = CalendarHeatmap( self, dates=self.triggerDates )
self.chm.Bind( wx.EVT_BUTTON, self.onCHMSelect )
self.chm.Bind( wx.EVT_COMMAND_LEFT_DCLICK, self.onCHMChoose )
self.triggerDatesList = wx.ListCtrl( self, style=wx.LC_REPORT|wx.LC_SINGLE_SEL, size=(-1,230) )
self.triggerDatesList.InsertColumn( 0, 'Date' )
self.triggerDatesList.InsertColumn( 1, 'Entries', format=wx.LIST_FORMAT_CENTRE, width=wx.LIST_AUTOSIZE_USEHEADER )
for i, (d, c) in enumerate(triggerDates):
self.triggerDatesList.InsertItem( i, d.strftime('%Y-%m-%d') )
self.triggerDatesList.SetItem( i, 1, six.text_type(c) )
if self.triggerDates:
self.triggerDatesList.Select( 0 )
self.chm.SetDate( self.triggerDates[0][0] )
self.triggerDatesList.Bind( wx.EVT_LIST_ITEM_SELECTED, self.onItemSelect )
self.triggerDatesList.Bind( wx.EVT_LIST_ITEM_ACTIVATED, self.onItemActivate )
btns = self.CreateSeparatedButtonSizer( wx.OK|wx.CANCEL )
self.ok = wx.FindWindowById(wx.ID_OK, self)
self.cancel = wx.FindWindowById(wx.ID_CANCEL, self )
sizer.Add( self.chm, flag=wx.ALL, border=4 )
sizer.Add( self.triggerDatesList, flag=wx.ALL, border=4 )
sizer.Add( btns, flag=wx.EXPAND|wx.ALL, border=4 )
self.SetSizer( sizer )
wx.CallAfter( self.Fit )
def onCHMSelect( self, event ):
dSelect = event.GetDate()
for i, (d, c) in enumerate(self.triggerDates):
if d == dSelect:
self.triggerDatesList.Select( i )
break
def onCHMChoose( self, event ):
self.onCHMSelect( event )
self.dateSelect = event.GetDate()
self.EndModal( wx.ID_OK )
def onItemSelect( self, event ):
self.dateSelect = self.triggerDates[event.GetIndex()][0]
self.chm.SetDate( self.dateSelect )
def onItemActivate( self, event ):
self.onItemSelect( event )
self.EndModal( wx.ID_OK )
def GetDate( self ):
return self.dateSelect
cameraResolutionChoices = (
'320x240',
'640x480',
'800x600',
'1024x768',
'1280x720',
'1280x1024',
'1920x1080',
'1600x1200',
'MAXxMAX',
)
def pixelsFromRes( res ):
return tuple( (int(v) if v.isdigit() else 10000) for v in res.split('x') )
def getCameraResolutionChoice( resolution ):
for i, res in enumerate(cameraResolutionChoices):
if resolution == pixelsFromRes(res):
return i
return len(cameraResolutionChoices) - 1
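# e.g. pixelsFromRes('1280x720') -> (1280, 720); the non-numeric 'MAX' in
# 'MAXxMAX' maps to 10000, so it selects the largest supported resolution.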
class ConfigDialog( wx.Dialog ):
def __init__( self, parent, cameraDeviceNum=0, fps=30, cameraResolution=(imageWidth,imageHeight), id=wx.ID_ANY ):
wx.Dialog.__init__( self, parent, id, title=_('CrossMgr Video Configuration') )
fps = int( fps )
sizer = wx.BoxSizer( wx.VERTICAL )
self.title = wx.StaticText( self, label='CrossMgr Video Configuration' )
self.title.SetFont( wx.Font( (0,24), wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL ) )
self.explanation = [
'Check that the USB Webcam is plugged in.',
'Check the Camera Device (Usually 0 but could be 1, 2, etc.).',
]
pfgs = wx.FlexGridSizer( rows=0, cols=2, vgap=4, hgap=8 )
pfgs.Add( wx.StaticText(self, label='Camera Device'+':'), flag=wx.ALIGN_CENTRE_VERTICAL|wx.ALIGN_RIGHT )
self.cameraDevice = wx.Choice( self, choices=[six.text_type(i) for i in six.moves.range(8)] )
self.cameraDevice.SetSelection( cameraDeviceNum )
pfgs.Add( self.cameraDevice )
pfgs.Add( wx.StaticText(self, label='Camera Resolution'+':'), flag=wx.ALIGN_CENTRE_VERTICAL|wx.ALIGN_RIGHT )
self.cameraResolution = wx.Choice( self, choices=cameraResolutionChoices )
self.cameraResolution.SetSelection( getCameraResolutionChoice(cameraResolution) )
pfgs.Add( self.cameraResolution )
pfgs.Add( wx.StaticText(self, label='Frames per second'+':'), flag=wx.ALIGN_CENTRE_VERTICAL|wx.ALIGN_RIGHT )
self.fps = wx.lib.intctrl.IntCtrl( self, value=fps, min=10, max=1000 )
pfgs.Add( self.fps )
pfgs.AddSpacer( 1 )
pfgs.Add( wx.StaticText(self, label='\n'.join([
'Your camera may not support all resolutions.',
'Your Camera/Computer may not support the frame rate in low light.',
'Check that the "Frame Processing Time" does not exceed the "Available Time Per Frame".',
            'If so, you will have to choose a lower "Frames per second".',
])), flag=wx.RIGHT, border=4 )
sizer.Add( self.title, flag=wx.ALL, border=4 )
for i, e in enumerate(self.explanation):
sizer.Add( wx.StaticText( self, label=u'{}. {}'.format(i+1, e) ),
                flag=wx.LEFT|wx.RIGHT|(wx.TOP if i == 0 else 0)|(wx.BOTTOM if i == len(self.explanation)-1 else 0), border=4,  # -1 so the final line gets the bottom border
)
sizer.AddSpacer( 8 )
sizer.Add( pfgs, flag=wx.ALL, border=4 )
self.okBtn = wx.Button( self, wx.ID_OK )
self.cancelBtn = wx.Button( self, wx.ID_CANCEL )
self.helpBtn = wx.Button( self, wx.ID_HELP )
self.Bind( wx.EVT_BUTTON, self.onHelp, self.helpBtn )
hs = wx.BoxSizer( wx.HORIZONTAL )
hs.Add( self.okBtn, border=4, flag=wx.ALL )
self.okBtn.SetDefault()
hs.AddStretchSpacer()
hs.Add( self.helpBtn, border=4, flag=wx.ALL )
hs.Add( self.cancelBtn, border=4, flag=wx.ALL )
sizer.AddSpacer( 8 )
sizer.Add( hs, flag=wx.EXPAND )
self.SetSizerAndFit( sizer )
def GetCameraDeviceNum( self ):
return self.cameraDevice.GetSelection()
def GetCameraResolution( self ):
return pixelsFromRes(cameraResolutionChoices[self.cameraResolution.GetSelection()])
def GetFPS( self ):
return self.fps.GetValue()
def onHelp( self, event ):
OpenHelp()
snapshotEnableColour = wx.Colour(0,0,100)
snapshotDisableColour = wx.Colour(100,100,0)
autoCaptureEnableColour = wx.Colour(100,0,100)
autoCaptureDisableColour = wx.Colour(100,100,0)
captureEnableColour = wx.Colour(0,100,0)
captureDisableColour = wx.Colour(100,0,0)
def CreateCaptureButtons( parent ):
snapshot = RoundButton( parent, label="SNAPSHOT", size=(90,90) )
snapshot.SetBackgroundColour( wx.WHITE )
snapshot.SetForegroundColour( snapshotEnableColour )
snapshot.SetFontToFitLabel( wx.Font(wx.FontInfo(10).Bold()) )
snapshot.SetToolTip( _('Record a Single Frame') )
autoCapture = RoundButton( parent, label="AUTO\nCAPTURE", size=(90,90) )
autoCapture.SetBackgroundColour( wx.WHITE )
autoCapture.SetForegroundColour( autoCaptureEnableColour )
autoCapture.SetFontToFitLabel( wx.Font(wx.FontInfo(10).Bold()) )
autoCapture.SetToolTip( _('Capture Video for an Automatic Interval\nSet in "Config Auto Capture"') )
capture = RoundButton( parent, label="CAPTURE", size=(90,90) )
capture.SetBackgroundColour( wx.WHITE )
capture.SetForegroundColour( captureEnableColour )
capture.SetFontToFitLabel( wx.Font(wx.FontInfo(10).Bold()) )
capture.SetToolTip( _('Capture Video\nwhile the Button is held down') )
return snapshot, autoCapture, capture
class FocusDialog( wx.Dialog ):
def __init__( self, parent, id=wx.ID_ANY ):
wx.Dialog.__init__( self, parent, id,
style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER|wx.MAXIMIZE_BOX|wx.MINIMIZE_BOX,
title=_('CrossMgr Video Focus')
)
self.bitmapSz = None
sizer = wx.BoxSizer( wx.VERTICAL )
self.SetBackgroundColour( wx.Colour(232,232,232) )
btnSizer = wx.BoxSizer( wx.HORIZONTAL )
self.logo = Utils.GetPngBitmap('CrossMgrHeader.png')
self.title = wx.StaticText(self, label='CrossMgr Video\nFocus Window', style=wx.ALIGN_RIGHT )
self.title.SetFont( wx.Font( (0,28), wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL ) )
self.explain = wx.StaticText(self, label='Click and Drag to Zoom in Photo')
self.snapshot, self.autoCapture, self.capture = CreateCaptureButtons( self )
btnSizer.Add( wx.StaticBitmap(self, wx.ID_ANY, self.logo) )
btnSizer.Add( self.title, flag=wx.ALL, border=10 )
btnSizer.Add( self.explain, flag=wx.ALL|wx.ALIGN_CENTER_VERTICAL, border=4 )
btnSizer.AddStretchSpacer()
btnSizer.Add( self.snapshot, flag=wx.ALL, border=4 )
btnSizer.Add( self.autoCapture, flag=wx.ALL, border=4 )
btnSizer.Add( self.capture, flag=wx.ALL, border=4 )
sizer.Add( btnSizer, flag=wx.EXPAND )
self.bitmap = ScaledBitmap( self, inset=True )
sizer.Add( self.bitmap, 1, wx.EXPAND )
self.SetSizerAndFit( sizer )
def SetBitmap( self, bitmap ):
sz = bitmap.GetSize()
if sz != self.bitmapSz:
if self.bitmapSz is None:
r = wx.GetClientDisplayRect()
dWidth, dHeight = r.GetWidth(), r.GetHeight()
self.SetSize( (int(dWidth*0.85), int(dHeight*0.85)) )
self.bitmapSz = sz
self.SetTitle( u'{} {}x{}'.format( _('CrossMgr Video Focus'), *sz ) )
return self.bitmap.SetBitmap( bitmap )
class TriggerDialog( wx.Dialog ):
def __init__( self, parent, id=wx.ID_ANY ):
wx.Dialog.__init__( self, parent, id, title=_('CrossMgr Video Trigger Editor') )
self.db = None
self.triggerId = None
sizer = wx.BoxSizer( wx.VERTICAL )
gs = wx.FlexGridSizer( 2, 2, 4 )
gs.AddGrowableCol( 1 )
fieldNames = [h.replace('_', ' ').title() for h in Database.triggerEditFields]
self.editFields = []
for f in fieldNames:
gs.Add( wx.StaticText(self, label=f), flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT )
e = wx.TextCtrl(self, size=(500,-1) )
gs.Add( e )
self.editFields.append(e)
btnSizer = wx.BoxSizer( wx.HORIZONTAL )
self.ok = wx.Button( self, wx.ID_OK )
self.ok.Bind( wx.EVT_BUTTON, self.onOK )
self.cancel = wx.Button( self, wx.ID_CANCEL )
btnSizer.Add( self.ok, flag=wx.ALL, border=4 )
btnSizer.AddStretchSpacer()
btnSizer.Add( self.cancel, flag=wx.ALL|wx.ALIGN_RIGHT, border=4 )
sizer.Add( gs, flag=wx.ALL, border=4 )
sizer.Add( btnSizer, flag=wx.ALL|wx.EXPAND, border=4 )
self.SetSizerAndFit( sizer )
def set( self, db, triggerId ):
self.db = db
self.triggerId = triggerId
ef = db.getTriggerEditFields( self.triggerId )
ef = ef or ['' for f in Database.triggerEditFields]
for e, v in zip(self.editFields, ef):
e.SetValue( six.text_type(v) )
def get( self ):
values = []
for f, e in zip(Database.triggerEditFields, self.editFields):
v = e.GetValue()
if f == 'bib':
try:
v = int(v)
                except (TypeError, ValueError):
v = 99999
values.append( v )
return values
def commit( self ):
self.db.setTriggerEditFields( self.triggerId, *self.get() )
def onOK( self, event ):
self.commit()
self.EndModal( wx.ID_OK )
class AutoCaptureDialog( wx.Dialog ):
def __init__( self, parent, id=wx.ID_ANY ):
wx.Dialog.__init__( self, parent, id, title=_('CrossMgr Video Auto Capture') )
sizer = wx.BoxSizer( wx.VERTICAL )
gs = wx.FlexGridSizer( 2, 2, 4 )
gs.AddGrowableCol( 1 )
fieldNames = ['Seconds Before', 'Seconds After']
self.editFields = []
for f in fieldNames:
gs.Add( wx.StaticText(self, label=f), flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT )
e = wx.TextCtrl(self, size=(60,-1) )
gs.Add( e )
self.editFields.append(e)
btnSizer = wx.BoxSizer( wx.HORIZONTAL )
self.ok = wx.Button( self, wx.ID_OK )
self.cancel = wx.Button( self, wx.ID_CANCEL )
btnSizer.Add( self.ok, flag=wx.ALL, border=4 )
btnSizer.AddStretchSpacer()
btnSizer.Add( self.cancel, flag=wx.ALL|wx.ALIGN_RIGHT, border=4 )
sizer.Add( gs, flag=wx.ALL, border=4 )
sizer.Add( btnSizer, flag=wx.ALL|wx.EXPAND, border=4 )
self.SetSizerAndFit( sizer )
def set( self, s_before, s_after ):
self.editFields[0].SetValue( '{:.2f}'.format(s_before) )
self.editFields[1].SetValue( '{:.2f}'.format(s_after) )
def get( self ):
def fixValue( v ):
try:
return abs(float(v))
            except (TypeError, ValueError):
return None
return [fixValue(e.GetValue()) for e in self.editFields]
class AutoWidthListCtrl(wx.ListCtrl, listmix.ListCtrlAutoWidthMixin):
def __init__(self, parent, ID = wx.ID_ANY, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=0):
wx.ListCtrl.__init__(self, parent, ID, pos, size, style)
listmix.ListCtrlAutoWidthMixin.__init__(self)
class MainWin( wx.Frame ):
def __init__( self, parent, id = wx.ID_ANY, title='', size=(1000,800) ):
wx.Frame.__init__(self, parent, id, title, size=size)
self.db = Database()
self.bufferSecs = 10
self.setFPS( 30 )
self.xFinish = None
self.tFrameCount = self.tLaunch = self.tLast = now()
self.frameCount = 0
self.fpt = timedelta(seconds=0)
self.iTriggerSelect = None
self.triggerInfo = None
self.tsMax = None
self.captureTimer = wx.CallLater( 10, self.stopCapture )
self.tdCaptureBefore = tdCaptureBeforeDefault
self.tdCaptureAfter = tdCaptureAfterDefault
dataDir = Utils.getHomeDir()
configFileName = os.path.join(dataDir, 'CrossMgrVideo.cfg')
self.config = wx.Config(appName="CrossMgrVideo",
vendorName="<EMAIL>",
localFilename=configFileName
)
self.requestQ = Queue() # Select photos from photobuf.
self.dbWriterQ = Queue() # Photos waiting to be written
self.messageQ = Queue() # Collection point for all status/failure messages.
ID_MENU_RESETCAMERA = wx.NewIdRef()
ID_MENU_FOCUS = wx.NewIdRef()
ID_MENU_CONFIGAUTOCAPTURE = wx.NewIdRef()
ID_MENU_MANAGEDATABASE = wx.NewIdRef()
self.menuBar = wx.MenuBar(wx.MB_DOCKABLE)
if 'WXMAC' in wx.Platform:
self.appleMenu = self.menuBar.OSXGetAppleMenu()
self.appleMenu.SetTitle("CrossMgrVideo")
self.appleMenu.Insert(0, wx.ID_ABOUT, "&About")
self.Bind(wx.EVT_MENU, self.OnAboutBox, id=wx.ID_ABOUT)
self.editMenu = wx.Menu()
self.editMenu.Append(wx.MenuItem(self.editMenu, ID_MENU_RESETCAMERA,"R&eset Camera"))
self.editMenu.Append(wx.MenuItem(self.editMenu, ID_MENU_FOCUS,"&Focus"))
self.editMenu.Append(wx.MenuItem(self.editMenu, ID_MENU_CONFIGAUTOCAPTURE,"&Configure Autocapture"))
self.editMenu.Append(wx.MenuItem(self.editMenu, ID_MENU_MANAGEDATABASE,"&Manage Database"))
self.Bind(wx.EVT_MENU, self.resetCamera, id=ID_MENU_RESETCAMERA)
self.Bind(wx.EVT_MENU, self.onFocus, id=ID_MENU_FOCUS)
self.Bind(wx.EVT_MENU, self.autoCaptureConfig, id=ID_MENU_CONFIGAUTOCAPTURE)
self.Bind(wx.EVT_MENU, self.manageDatabase, id=ID_MENU_MANAGEDATABASE)
self.menuBar.Append(self.editMenu, "&Edit")
else:
self.fileMenu = wx.Menu()
self.fileMenu.Append(wx.MenuItem(self.fileMenu, ID_MENU_RESETCAMERA,"R&eset Camera"))
self.fileMenu.Append(wx.MenuItem(self.fileMenu, ID_MENU_FOCUS,"&Focus"))
self.fileMenu.Append(wx.MenuItem(self.fileMenu, ID_MENU_CONFIGAUTOCAPTURE,"&Configure Autocapture"))
self.fileMenu.Append(wx.MenuItem(self.fileMenu, ID_MENU_MANAGEDATABASE,"&Manage Database"))
self.fileMenu.Append(wx.ID_EXIT)
self.Bind(wx.EVT_MENU, self.resetCamera, id=ID_MENU_RESETCAMERA)
self.Bind(wx.EVT_MENU, self.onFocus, id=ID_MENU_FOCUS)
self.Bind(wx.EVT_MENU, self.autoCaptureConfig, id=ID_MENU_CONFIGAUTOCAPTURE)
self.Bind(wx.EVT_MENU, self.manageDatabase, id=ID_MENU_MANAGEDATABASE)
self.Bind(wx.EVT_MENU, self.onCloseWindow, id=wx.ID_EXIT)
self.menuBar.Append(self.fileMenu, "&File")
self.helpMenu = wx.Menu()
self.helpMenu.Insert(0, wx.ID_ABOUT, "&About")
self.helpMenu.Insert(1, wx.ID_HELP, "&Help")
self.Bind(wx.EVT_MENU, self.OnAboutBox, id=wx.ID_ABOUT)
self.Bind(wx.EVT_MENU, self.onHelp, id=wx.ID_HELP)
self.menuBar.Append(self.helpMenu, "&Help")
self.SetMenuBar(self.menuBar)
self.SetBackgroundColour( wx.Colour(232,232,232) )
self.focusDialog = FocusDialog( self )
self.photoDialog = PhotoDialog( self )
self.autoCaptureDialog = AutoCaptureDialog( self )
self.triggerDialog = TriggerDialog( self )
mainSizer = wx.BoxSizer( wx.VERTICAL )
#------------------------------------------------------------------------------------------------
headerSizer = wx.BoxSizer( wx.HORIZONTAL )
self.logo = Utils.GetPngBitmap('CrossMgrHeader.png')
headerSizer.Add( wx.StaticBitmap(self, wx.ID_ANY, self.logo) )
self.title = wx.StaticText(self, label='CrossMgr Video\nVersion {}'.format(AppVerName.split()[1]), style=wx.ALIGN_RIGHT )
self.title.SetFont( wx.Font( (0,28), wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL ) )
headerSizer.Add( self.title, flag=wx.ALL, border=10 )
clock = Clock( self, size=(90,90) )
clock.SetBackgroundColour( self.GetBackgroundColour() )
clock.Start()
headerSizer.Add( clock, flag=wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, border=4 )
#------------------------------------------------------------------------------
self.cameraDevice = wx.StaticText( self )
self.cameraResolution = wx.StaticText( self )
self.targetFPS = wx.StaticText( self, label='30 fps' )
self.actualFPS = wx.StaticText( self, label='30.0 fps' )
self.frameShape = (0,0,0)
boldFont = self.cameraDevice.GetFont()
boldFont.SetWeight( wx.BOLD )
for w in (self.cameraDevice, self.cameraResolution, self.targetFPS, self.actualFPS):
w.SetFont( boldFont )
fgs = wx.FlexGridSizer( 2, 2, 2 ) # 2 Cols
        fgs.Add(
# rampwf/hyperopt/hyperopt.py
"""Hyperparameter optiomization for ramp-kits."""
from __future__ import print_function
import re
import os
import shutil
import numpy as np
import pandas as pd
from tempfile import mkdtemp
from ..utils import (
assert_read_problem, import_file, run_submission_on_cv_fold)
HYPERPARAMS_SECTION_START = '# RAMP START HYPERPARAMETERS'
HYPERPARAMS_SECTION_END = '# RAMP END HYPERPARAMETERS'
HYPERPARAMS_REPL_REGEX = re.compile('{}.*{}'.format(
HYPERPARAMS_SECTION_START, HYPERPARAMS_SECTION_END), re.S)
class Hyperparameter(object):
"""Discrete grid hyperparameter.
Represented by a list of values, a default value, the name of the
hyperparameter (specified by the user in the workflow element), the
    name of the workflow element in which the hyperparameter appears, and an
optional prior probability vector.
Attributes:
name : string
The name of the hyperparameter variable, used in user interface,
both for specifying the grid of values and getting the report on
an experiment. Initialized to '' then set in set_names, to the
name the user chose for the variable in the workflow element.
workflow_element_name : string
The name of the workflow element in which the hyperparameter is
used. Initialized to '' then set in set_names.
dtype : string
The dtype of the hyperparameter.
default_index: int
The index in values of the current value of the hyperparameter.
values: numpy array of any dtype
The list of hyperparameter values.
prior: numpy array of float
A list of reals that the hyperopt can use as a prior probability
over values. Positivity and summing to one are not checked,
hyperparameter optimizers should do that when using the list
"""
def __init__(self, dtype, default=None, values=None, prior=None):
self.name = ''
self.workflow_element_name = ''
self.dtype = dtype
if default is None and values is None:
raise ValueError('Either default or values must be defined.')
if values is None:
self.values = np.array([default], dtype=self.dtype)
else:
if len(values) < 1:
raise ValueError(
                    'values must contain at least one element.')
self.values = np.array(values, dtype=self.dtype)
if default is None:
self.default_index = 0
else:
if default not in self.values:
raise ValueError('Default must be among values.')
else:
self.set_default(default)
if prior is None:
self.prior = np.array([1. / self.n_values] * self.n_values)
else:
if len(prior) != len(values):
raise ValueError(
'len(values) == {} != {} == len(prior)'.format(
len(values), len(prior)))
self.prior = prior
@property
def n_values(self):
"""The number of hyperparameter values.
Return:
n_values : int
The number of hyperparameter values len(values)
"""
return len(self.values)
@property
def default(self):
"""The current value of the hyperparameter.
Return:
default : any dtype
The current value of the hyperparameter values[default_index].
"""
return self.values[self.default_index]
@property
def default_repr(self):
"""The string representation of the default value.
It can be used to output the default value into a python file. For
object types it adds '', otherwise it's the string representation of
the default value.
Return:
default_repr : str
The string representation of the default value.
"""
if self.dtype == 'object':
return '\'{}\''.format(self.default)
else:
return str(self.default)
@property
def values_repr(self):
"""The string representation of the list of values.
It can be used to output the list of values into a python file. For
object types it adds '' around the values, otherwise it's the list of
string representations of the values in brackets.
Return:
values_repr : list of str
The string representation of the list of values.
"""
s = '['
for v in self.values:
if self.dtype == 'object':
s += '\'{}\', '.format(v)
else:
s += '{}, '.format(v)
s += ']'
return s
@property
def python_repr(self):
"""The string representation of the hyperparameter.
It can be used to output the hyperparameter definition into a python
file:
<name> = Hyperparameter(
dtype=<dtype>, default=<default>, values=[<values>])
Return:
python_repr : str
The string representation of the hyperparameter.
"""
repr = '{} = Hyperparameter(\n'.format(self.name)
repr += '\tdtype={}'.format(str(self.dtype))
repr += ', default={}'.format(self.default_repr)
repr += ', values={})\n'.format(self.values_repr)
return repr
def set_names(self, name, workflow_element_name):
"""Set the name and workflow element name.
Used when a hyperparameter object is loaded from a workflow element.
Parameters:
name : str
The name of the hyperparameter, declared by the user in the
workflow element.
workflow_element_name : str
The name of the workflow element in which the hyperparameter
is defined.
"""
self.name = name
self.workflow_element_name = workflow_element_name
def get_index(self, value):
"""Get the index of a value.
Parameters:
value : any dtype
The value to look for.
"""
return list(self.values).index(value)
def set_default(self, default):
"""Set the default value.
Parameters:
default : any dtype
The new default value.
"""
self.default_index = self.get_index(default)
def __int__(self):
"""Cast the default value into an integer.
It can be used in the workflow element for an integer hyperparameter.
Return:
int(default) : int
The integer representation of the default value.
"""
return int(self.default)
def __float__(self):
"""Cast the default value into an float.
It can be used in the workflow element for an float hyperparameter.
Return:
float(default) : float
The float representation of the default value.
"""
return float(self.default)
def __str__(self):
"""Cast the default value into a string.
It can be used in the workflow element for a string hyperparameter.
Return:
str(default) : str
The string representation of the default value.
"""
return str(self.default)
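# Example (illustrative; the variable and estimator names are hypothetical):
# a workflow element declares a grid hyperparameter and casts it where used:
#
#     n_estimators = Hyperparameter(dtype='int', default=10, values=[5, 10, 20])
#     clf = RandomForestClassifier(n_estimators=int(n_estimators))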
def parse_hyperparameters(module_path, workflow_element_name):
"""Parse hyperparameters in a workflow element.
Load the module, take all Hyperparameter objects, and set the name of each
to the name of the hyperparameter the user chose and the workflow element
name of each to workflow_element_name.
Parameters:
module_path : str
The path to the submission directory.
workflow_element_name : string
The name of the workflow element.
Return:
hyperparameters : list of instances of Hyperparameter
"""
hyperparameters = []
workflow_element = import_file(module_path, workflow_element_name)
for object_name in dir(workflow_element):
o = getattr(workflow_element, object_name)
if type(o) == Hyperparameter:
o.set_names(object_name, workflow_element_name)
hyperparameters.append(o)
return hyperparameters
def parse_all_hyperparameters(module_path, workflow):
"""Parse hyperparameters in a submission.
    Load all the modules, take all Hyperparameter objects, and set the name
of each to the name of the hyperparameter the user chose and the workflow
element name of each to the corresponding workflow_element_name.
Parameters:
module_path : str
The path to the submission directory.
        workflow : instance of Workflow
            The workflow whose element names are iterated over.
Return:
hyperparameters : list of instances of Hyperparameter
"""
hyperparameters = []
for wen in workflow.element_names:
hyperparameters += parse_hyperparameters(module_path, wen)
return hyperparameters
def write_hyperparameters(submission_dir, output_submission_dir,
hypers_per_workflow_element):
"""Write hyperparameters in a submission.
Read workflow elements from submission_dir, replace the hyperparameter
section with the hyperparameters in the hypers_per_workflow_element
    dictionary (with new hyperparameter values set by, e.g., a hyperopt engine),
then write the new workflow elements into output_submission_dir (which
can be a temporary directory or submission_dir itself when the function
is called to replace the hyperparameters in the input submission with the
best hyperparameters.)
Parameters:
submission_dir : str
The path to the submission directory from which the submission is
read.
output_submission_dir : str
            The path to the output submission directory into which the
submission with the new hyperparameter values is written.
hypers_per_workflow_element : dictionary
Each key is a workflow element name and each value is a list of
            Hyperparameter instances, representing the hyperparameters in
the workflow element.
"""
for wen, hs in hypers_per_workflow_element.items():
hyper_section = '{}\n'.format(HYPERPARAMS_SECTION_START)
for h in hs:
hyper_section += h.python_repr
hyper_section += HYPERPARAMS_SECTION_END
f_name = os.path.join(submission_dir, wen + '.py')
with open(f_name) as f:
content = f.read()
content = HYPERPARAMS_REPL_REGEX.sub(hyper_section, content)
output_f_name = os.path.join(output_submission_dir, wen + '.py')
with open(output_f_name, 'w') as f:
f.write(content)
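# After write_hyperparameters, the marked section of a workflow element looks
# like this (illustrative values):
#
#     # RAMP START HYPERPARAMETERS
#     n_estimators = Hyperparameter(
#         dtype=int, default=10, values=[5, 10, 20, ])
#     # RAMP END HYPERPARAMETERS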
class RandomEngine(object):
"""Random search hyperopt engine.
Attributes:
hyperparameters: a list of Hyperparameters
"""
def __init__(self, hyperparameters):
self.hyperparameters = hyperparameters
def next_hyperparameter_indices(self, df_scores, n_folds):
"""Return the next hyperparameter indices to try.
Parameters:
            df_scores : pandas DataFrame
                It represents the results of the experiments that have been
                run so far.
            n_folds : int
                The number of cross-validation folds.
Return:
next_value_indices : list of int
                The indices, one per hyperparameter, of the values to try in
                hyperparameters.
"""
# First finish incomplete cv's.
hyperparameter_names = [h.name for h in self.hyperparameters]
df_n_folds = df_scores.groupby(hyperparameter_names).count()
incomplete_folds = df_n_folds[(df_n_folds['fold_i'] % n_folds > 0)]
if len(incomplete_folds) > 0:
incomplete_folds = incomplete_folds.reset_index()
next_values = incomplete_folds.iloc[0][
[h.name for h in self.hyperparameters]].values
next_value_indices = [
h.get_index(v) for h, v
in zip(self.hyperparameters, next_values)]
fold_i = incomplete_folds.iloc[0]['fold_i'] % n_folds
# Otherwise select hyperparameter values from those that haven't
# been selected yet, using also prior
else:
fold_i = 0
next_value_indices = []
plans.
* ``codegen``: Print a physical plan and generated codes if they are available.
* ``cost``: Print a logical plan and statistics if they are available.
* ``formatted``: Split explain output into two sections: a physical plan outline \
and node details.
Examples:
>>> ds.explain()
"""
self._data.explain( extended=extended, mode=mode)
def exceptAll(self, other):
"""
Return a new DataStream containing rows in this DataStream but not in another DataStream while preserving duplicates.
Args:
other: other DataStream object
Returns:
Datastream:
Examples:
>>> ds1.exceptAll(ds2).show()
"""
data = self._data.exceptAll(other=other._data)
return DataStream(data=data, metadata=Metadata())
def fillna(self, value, subset=None):
"""
Replace null values
Args:
value: int, long, float, string, bool or dict. Value to replace null values with. If the value is a dict, then subset is ignored and value must be a mapping from column name (string) to replacement value. The replacement value must be an int, long, float, boolean, or string.
subset: optional list of column names to consider. Columns specified in subset that do not have matching data type are ignored. For example, if value is a string, and subset contains a non-string column, then the non-string column is simply ignored.
Returns:
Datastream:
Examples:
            >>> ds.fillna(50).show()
            >>> ds.fillna({'col1': 50, 'col2': 'unknown'}).show()
"""
data = self._data.fillna(value=value, subset=subset)
return DataStream(data=data, metadata=Metadata())
def repartition(self, numPartitions, *cols):
"""
        Returns a new DataStream partitioned by the given partitioning expressions. The resulting DataStream is hash partitioned.
        Args:
            numPartitions: int or Column. If an int, the target number of partitions; if a Column, it is used as the first partitioning column. If not specified, the default number of partitions is used.
            *cols: additional partitioning columns.
        Returns:
            DataStream:
"""
data = self._data.repartition(numPartitions,*cols)
return DataStream(data=data, metadata=Metadata())
def filter(self, condition):
"""
Filters rows using the given condition
Args:
condition: a Column of types.BooleanType or a string of SQL expression.
Returns:
DataStream: this will return a new datastream object with blank metadata
Examples:
>>> ds.filter("age > 3")
>>> df.filter(df.age > 3)
"""
data = self._data.filter(condition)
return DataStream(data=data, metadata=Metadata())
def foreach(self, f):
"""
Applies the f function to all Row of DataStream. This is a shorthand for df.rdd.foreach()
Args:
f: function
Returns:
DataStream object
Examples:
>>> def f(person):
... print(person.name)
>>> ds.foreach(f)
"""
data = self._data.foreach(f)
return DataStream(data=data, metadata=Metadata())
def first(self):
"""
Returns the first row as a Row.
Returns:
First row of a DataStream
Examples:
>>> ds.first()
"""
return self._data.first()
def freqItems(self, cols, support=None):
"""
        Finding frequent items for columns, possibly with false positives. Using the frequent element count algorithm described in "http://dx.doi.org/10.1145/762471.762473", proposed by Karp, Schenker, and Papadimitriou.
Returns:
DataStream:
Examples:
>>> ds.freqItems("col-name")
"""
data = self._data.freqItems(cols=cols,support=support)
return DataStream(data=data, metadata=Metadata())
def groupby(self, *cols):
"""
        Groups the DataStream using the specified columns, so that aggregations can be run on them. This method returns a pyspark.sql.GroupedData object.
        Args:
            *cols: list of columns to group by. Each element should be a column name (string) or an expression (Column).
        Returns:
            pyspark.sql.GroupedData:
"""
data = self._data.groupby(*cols)
return data
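    # Note: unlike most wrappers here, groupby returns the raw
    # pyspark.sql.GroupedData, so aggregate on it directly, e.g.
    # (illustrative) ds.groupby('user').agg({'value': 'avg'})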
def show(self, n=20, truncate=True, vertical=False):
"""
Args:
n: Number of rows to show.
truncate: If set to ``True``, truncate strings longer than 20 chars by default.
If set to a number greater than one, truncates long strings to length ``truncate``
and align cells right.
vertical: If set to ``True``, print output rows vertically (one line
per column value).
Returns:
"""
from pyspark.sql.group import GroupedData
if isinstance(self._data, GroupedData):
raise Exception(
"show is not callable on windowed/grouped data.")
self._data.show(n=n, truncate=truncate, vertical=vertical)
def head(self, n=None):
"""
Returns the first n rows.
Args:
n (int): default 1. Number of rows to return.
Returns:
If n is greater than 1, return a list of Row. If n is 1, return a single Row.
Notes:
            This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory.
Examples:
>>> ds.head(5)
"""
return self._data.head(n=n)
def intersect(self, other):
"""
        Return a new DataStream containing rows only in both this DataStream and another DataStream. This is equivalent to INTERSECT in SQL.
        Args:
            other (DataStream): DataStream object
        Returns:
            DataStream:
Examples:
>>> ds.intersect(other=ds2)
"""
data = self._data.intersect(other=other._data)
return DataStream(data=data, metadata=Metadata())
def intersectAll(self, other):
"""
        Return a new DataStream containing rows in both this DataStream and the other DataStream while preserving duplicates.
        Args:
            other (DataStream): DataStream object
        Returns:
            DataStream:
Examples:
>>> ds.intersectAll(ds2).show()
"""
data = self._data.intersectAll(other=other._data)
return DataStream(data=data, metadata=Metadata())
def join(self, other, on=None, how=None):
"""
Joins with another DataStream, using the given join expression.
Args:
other (DataStream): Right side of the join
            on: a string for the join column name, a list of column names, a join expression (Column), or a list of Columns. If on is a string or a list of strings indicating the name of the join column(s), the column(s) must exist on both sides, and this performs an equi-join.
            how (str): default 'inner'. Must be one of: inner, cross, outer, full, full_outer, left, left_outer, right, right_outer, left_semi, and left_anti.
Examples:
>>> ds.join(ds2, 'user', 'outer').show()
Returns:
DataStream object with joined streams
"""
data = self._data.join(other=other._data, on=on, how=how)
return DataStream(data=data, metadata=Metadata())
def limit(self, num):
"""
Limits the result count to the number specified.
Args:
num:
Returns:
Datastream:
"""
data = self._data.limit(num=num)
return DataStream(data=data, metadata=Metadata())
def mapInPandas(self, func, schema):
"""
Maps an iterator of batches in the current :class:`DataFrame` using a Python native
function that takes and outputs a pandas DataFrame, and returns the result as a
:class:`DataFrame`.
The function should take an iterator of `pandas.DataFrame`\\s and return
another iterator of `pandas.DataFrame`\\s. All columns are passed
together as an iterator of `pandas.DataFrame`\\s to the function and the
returned iterator of `pandas.DataFrame`\\s are combined as a :class:`DataFrame`.
Each `pandas.DataFrame` size can be controlled by
`spark.sql.execution.arrow.maxRecordsPerBatch`.
Args:
func: function a Python native function that takes an iterator of `pandas.DataFrame`, and
outputs an iterator of `pandas.DataFrame`.
schema: :class:`pyspark.sql.types.DataType` or str
the return type of the `func` in PySpark. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
Returns:
Examples:
>>> def filter_func(iterator):
... for pdf in iterator:
... yield pdf[pdf.id == 1]
>>> ds.mapInPandas(filter_func, ds.schema).show()
"""
return DataStream(data=self._data.mapInPandas(func=func, schema=schema), metadata=Metadata())
def orderBy(self, *cols):
"""
order by column name
Args:
*cols:
Returns:
Datastream:
"""
data = self._data.orderBy(*cols)
return DataStream(data=data, metadata=Metadata())
def printSchema(self):
"""
Prints out the schema in the tree format.
Examples:
>>> ds.printSchema()
"""
self._data.printSchema()
def randomSplit(self, weights, seed=None):
"""
Randomly splits this :class:`DataFrame` with the provided weights.
Args:
weights: list of doubles as weights with which to split the :class:`DataFrame`.
Weights will be normalized if they don't sum up to 1.0.
seed: int, optional
The seed for sampling.
        Returns:
            list of DataStream: the randomly split DataStreams.
Examples:
>>> splits = ds.randomSplit([1.0, 2.0], 24)
>>> splits[0].count()
"""
def replace(self, to_replace, value, subset=None):
"""
        Returns a new DataStream replacing a value with another value. Values to_replace and value must have the same type and can only be numerics, booleans, or strings. Value can have None. When replacing, the new value will be cast to the type of the existing column. For numeric replacements all values to be replaced should have unique floating point representation. In case of conflicts (for example with {42: -1, 42.0: 1}) an arbitrary replacement will be used.
Args:
to_replace: bool, int, long, float, string, list or dict. Value to be replaced. If the value is a dict, then value is ignored or can be omitted, and to_replace must be a mapping between a value and a replacement.
            value: bool, int, long, float, string, list or None. The replacement value must be a bool, int, long, float, string or None. If value is a list, value should be of the same length and type as to_replace.
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 28 16:35:59 2020
@author: agarwal.270a
"""
#from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import numpy as np
import time
import matplotlib.pyplot as plt
import sys
import os
from tensorflow.keras import layers
#from tensorflow.keras import initializers as initizers
from .utils import find_batch_size, make_data_pipe
#from .networks_GAN import get_GAN_layers_default as get_GAN_layers
from .networks_GAN import get_GAN_layers_conv as get_GAN_layers
tf.keras.backend.set_floatx('float32')
tf_dtype=tf.float32
#%% Default GAN layers
#%%
class downsample(tf.keras.layers.Layer):
def __init__(self,filters, kernel_size, strides,activation=None, apply_batchnorm=True):
super(downsample, self).__init__()
initializer = tf.random_normal_initializer(0., 0.02)
self.layer_list=[layers.Conv2D(filters=filters, kernel_size=kernel_size,
strides=strides,activation=activation,padding='same',
kernel_initializer=initializer)]
if apply_batchnorm:
self.layer_list.append(layers.BatchNormalization())
self.layer_list.append(layers.LeakyReLU())
def call(self,x,training=None):
for lay in self.layer_list:
x=lay(x,training=training)
#print(x.shape.as_list())
return x
class upsample(tf.keras.layers.Layer):
def __init__(self,filters, kernel_size, strides,activation=None,
apply_batchnorm=True, apply_dropout=False):
super(upsample, self).__init__()
initializer = tf.random_normal_initializer(0., 0.02)
self.layer_list=[layers.Conv2DTranspose(filters=filters, kernel_size=kernel_size,
strides=strides,activation=activation,padding='same',
kernel_initializer=initializer)]
if apply_batchnorm:
self.layer_list.append(layers.BatchNormalization())
if apply_dropout:
self.layer_list.append(layers.Dropout(0.25))
self.layer_list.append(layers.ReLU())
def call(self,x,training=None):
for lay in self.layer_list:
x=lay(x,training=training)
#print(x.shape.as_list())
return x
#%%
class Generator(tf.keras.layers.Layer):
def __init__(self,layer_list,optimizer,use_x=True,n_classes=0):
super(Generator, self).__init__()
self.layer_list=layer_list
        self.class_layer_list=[upsample(filters=1, kernel_size=(1,3), strides=(1,2)) for _ in range(n_classes)]  # one distinct layer per class, not n references to the same layer
self.n_classes=None if n_classes<=0 else n_classes
self.use_x=use_x
if optimizer is not None:
self.optimizer=optimizer
else:
self.optimizer=tf.keras.optimizers.Adam(1e-4)
self.bc=tf.keras.losses.BinaryCrossentropy(from_logits=True)
def loss(self, fake_logit):
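        # WGAN generator loss: maximize the critic's score on generated samples.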
return -tf.reduce_mean(fake_logit)
#return self.bc(tf.ones_like(fake_output), fake_output)
def call(self,input_list,training=None):
z,x=input_list
if self.use_x:
x=tf.concat([z,x],axis=-1)
else:
x=z
for lay in self.layer_list:
x=lay(x,training=training)
#print(x.shape.as_list())
return x
class Discriminator(tf.keras.layers.Layer):
def __init__(self,layer_list,optimizer,use_x=True):
super(Discriminator, self).__init__()
self.layer_list=layer_list
self.use_x=use_x
if optimizer is not None:
self.optimizer=optimizer
else:
self.optimizer=tf.keras.optimizers.Adam(1e-4)
self.bc=tf.keras.losses.BinaryCrossentropy(from_logits=True)
def loss(self,real_logit, fake_logit):
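        # WGAN critic loss: E[D(fake)] - E[D(real)]; a gradient penalty term is
        # presumably added elsewhere (see grad_penalty_weight in Model_CondWGAN).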
real_loss = tf.reduce_mean(real_logit)
fake_loss = tf.reduce_mean(fake_logit)
total_loss = fake_loss - real_loss
#real_loss = self.bc(tf.ones_like(real_logit), real_logit)
#fake_loss = self.bc(tf.zeros_like(fake_logit), fake_logit)
#total_loss = fake_loss + real_loss
return total_loss
def call(self,input_list,training=None, return_all_out=False):
y,x=input_list
y=tf.cast(y, tf.float32)
if self.use_x:
x=tf.concat([y,x],axis=-1)
else:
x=y
out_list=[]
for lay in self.layer_list:
x=lay(x,training=training)
out_list.append(x)
#print(x.shape.as_list())
if return_all_out:
return x, out_list.pop()
else:
return x
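# A minimal sketch (not part of the original file) of a WGAN-GP gradient
# penalty that would pair with the Wasserstein losses above; Model_CondWGAN's
# grad_penalty_weight suggests such a term is used during training. The
# [N, T, C] signal layout and the call signature are assumptions.
def gradient_penalty(disc, y_real, y_fake, x, training=True):
    eps = tf.random.uniform([tf.shape(y_real)[0], 1, 1], 0., 1.)
    y_interp = eps * tf.cast(y_real, tf.float32) + (1. - eps) * y_fake
    with tf.GradientTape() as tape:
        tape.watch(y_interp)
        logit = disc([y_interp, x], training=training)
    grads = tape.gradient(logit, y_interp)  # dD/dy at the interpolates
    norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2]) + 1e-12)
    return tf.reduce_mean((norm - 1.) ** 2)  # drive the gradient norm towards 1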
#%%
# =============================================================================
# class Generator_pix2pix(Generator):
# def call(self,input_list,training=None):
# z,x=input_list
# if self.use_x:
# x=tf.concat([z,x],axis=-1)
# else:
# x=z
#
# #x=self.layer_list[-3](x,training=training)
# x = tf.expand_dims(x, axis=1) #insert a dummy dim to use conv2d
#
# # Downsampling
# skips=[]
# for i in range(4):
# x=self.layer_list[i](x,training=training)
# skips.append(x)
#
#
# #skips = reversed(skips[:-1])
# skips.pop() #remove last element
# skips.reverse()
#
# # Upsampling
# for i in range(4-1):
# x=self.layer_list[i+4](x,training=training)
# x=tf.concat([x,skips[i]],axis=-1)
# #if i!=0: x=tf.concat([x,skips[i]],axis=-1)
#
# x=self.layer_list[(4-1)+4](x,training=training) #last unet layer
#
# x = tf.squeeze(x, axis=1) #remove the dummy dim
#
# for i in range(-2,0):
# x=self.layer_list[i](x,training=training)
# return x
# =============================================================================
class Generator_pix2pix(Generator):
def __init__(self,layer_list,optimizer,latent_size,z_up_factor=8,
use_x=True,n_classes=0):
super(Generator, self).__init__()
self.layer_list=layer_list
self.class_layer_list=[upsample(filters=1,kernel_size=(1,3),
strides=(1,2)) for _ in range(n_classes)]
self.n_classes=None if n_classes<=0 else n_classes
self.z_up_factor=z_up_factor
self.z_up_layer_list=[upsample(filters=latent_size,kernel_size=(1,3),
strides=(1,2)) for _ in range(int(np.log2(z_up_factor)))]
self.use_x=use_x
if optimizer is not None:
self.optimizer=optimizer
else:
self.optimizer=tf.keras.optimizers.Adam(1e-4)
self.bc=tf.keras.losses.BinaryCrossentropy(from_logits=True)
def Unet(self,input_list,training):
x,rnn_state_in=input_list
# Downsampling
skips=[]
for i in range(4):
x=self.layer_list[i](x,training=training)
skips.append(x)
#skips = reversed(skips[:-1])
skips.pop() #remove last element
skips.reverse()
# Upsampling
for i in range(4-1):
x=self.layer_list[i+4](x,training=training)
x=tf.concat([x,skips[i]],axis=-1)
#if i!=0: x=tf.concat([x,skips[i]],axis=-1)
x=self.layer_list[(4-1)+4](x,training=training) #last unet layer
x = tf.squeeze(x, axis=1) #remove the dummy dim
x=self.layer_list[-2](x,training=training,initial_state=rnn_state_in)
self.rnn_state_out=x[:,-1,:]
x=self.layer_list[-1](x,training=training)
return x,self.rnn_state_out
def call(self,input_list,training=None,reps=2,rnn_state=None):
z,x=input_list
#Transform z to create correlated samples
z = tf.expand_dims(z, axis=1) #insert a dummy dim to use conv2d
for lay in self.z_up_layer_list:
z=lay(z,training=training)
z = tf.squeeze(z, axis=1) #remove the dummy dim
#Append condition
if self.use_x:
x=tf.concat([z,x],axis=-1)
else:
x=z
#x=self.layer_list[-3](x,training=training)
x = tf.expand_dims(x, axis=1) #insert a dummy dim to use conv2d
in_shape_Unet=x.get_shape().as_list() #[N,1,T,C]
if self.n_classes is not None:
            assert in_shape_Unet[-1] >= self.n_classes, ('input condition shape '
                                                         'must be >= no. of classes')
in_shape_Unet[0]=reps*1
in_shape_Unet[2]=int(in_shape_Unet[2]/reps)
#in_shape_Unet = (in_shape[1int(in_shape[2]/reps),in_shape[3])
inputs = layers.Reshape(in_shape_Unet)(x) #Check if reshaping in desired fashion
#tf.zeros([tf.shape(inputs)[0],8])
out_list=[]
for i in range(reps):
Unet_out,rnn_state=self.Unet([inputs[:,i,:,:,:],rnn_state],training=training)
out_list.append(Unet_out)
#dec_mem = z[:,-mem_shape[0]:]
out = tf.stack(out_list,axis=1)
out = layers.Reshape((int(in_shape_Unet[2]*reps),-1))(out)#Check if reshaping in desired fashion
return out
class Generator_pix2pix_mod(Generator_pix2pix):
def Unet(self,input_list,training):
x,rnn_state_in=input_list
x,class_signal=x[:,:,:,:-self.n_classes],x[:,0:1,0:1,-self.n_classes:]
# Downsampling
skips=[]
for i in range(4):
x=self.layer_list[i](x,training=training)
skips.append(x)
#skips = reversed(skips[:-1])
skips.pop() #remove last element
skips.reverse()
# Upsampling
for i in range(4-1):
x=self.layer_list[i+4](x,training=training)
x=tf.concat([x,skips[i]],axis=-1)
#if i!=0: x=tf.concat([x,skips[i]],axis=-1)
#last unet layer
#x=self.layer_list[(4-1)+4](x,training=training)
for j in range(len(self.class_layer_list)):
if j==0:
y=(self.class_layer_list[j](x,training=training)*
class_signal[:,:,:,j:j+1])
else:
y+=(self.class_layer_list[j](x,training=training)*
class_signal[:,:,:,j:j+1])
x = tf.squeeze(y, axis=1) #remove the dummy dim
x=self.layer_list[-2](x,training=training,initial_state=rnn_state_in)
self.rnn_state_out=x[:,-1,:]
x=self.layer_list[-1](x,training=training)
return x,self.rnn_state_out
#%%
class Model_CondWGAN(tf.keras.Model):
def __init__(self,model_path,gen_layers=None,disc_layers=None,
save_flag=True,optimizers=None,aux_losses=None,
aux_losses_weights=None,in_shape=None,out_shape=None,
latent_size=10,mode='GAN',use_x=True, T_steps=5, n_classes=1):
'''
Setting all the variables for our model.
'''
super(Model_CondWGAN, self).__init__()
self.model_path=model_path
self.save_flag=save_flag
self.mode=mode
self.latent_size=latent_size
self.use_x=use_x
self.z_up_factor=8
self.n_dims_x=None
self.n_dims_y=None
#self.optimizer=self.net.optimizer
#self.get_data=modify_get_data(get_data_old)
if optimizers is None:
optimizers=[tf.keras.optimizers.Adam(learning_rate=1e-4,beta_1=0.5,
beta_2=0.9,epsilon=1e-7,amsgrad=False),
tf.keras.optimizers.Adam(learning_rate=1e-4,beta_1=0.5,
beta_2=0.9,epsilon=1e-7,amsgrad=False)]
        if not isinstance(optimizers, list):
            raise AssertionError('optimizers must be a list of 2 optimizers, '
                                 'one each for the generator and the '
                                 'discriminator respectively.')
self.optimizers=optimizers
if gen_layers is None:
gen_layers=get_GAN_layers(req_list=['gen_layers'])
self.gen_layers=gen_layers
self.gen=Generator_pix2pix(self.gen_layers,self.optimizers[0],
latent_size,self.z_up_factor,use_x=use_x)#,
#n_classes=n_classes)
#self.gen=Generator(self.gen_layers,self.optimizers[0],use_x=use_x)
if disc_layers is None:
disc_layers=get_GAN_layers(req_list=['disc_layers'])
self.disc_layers=disc_layers
self.disc=Discriminator(self.disc_layers,self.optimizers[1],use_x=use_x)
self.aux_losses=aux_losses
if ((aux_losses is not None) and (aux_losses_weights is None)):
aux_losses_weights=len(aux_losses)*[1]
self.aux_losses_weights=aux_losses_weights
self.feat_loss_weight=5e-3
self.grad_penalty_weight = 10
#'Stateful' Metrics
self.train_loss1 = tf.keras.metrics.Mean(name='train_loss1')
self.train_loss2 = tf.keras.metrics.Mean(name='train_loss2')
self.train_loss = tf.keras.metrics.Mean(name='train_loss')
#self.test_loss1 = tf.keras.metrics.Mean(name='test_loss1')
#self.test_loss2 = tf.keras.metrics.Mean(name='test_loss2')
self.test_loss = tf.keras.metrics.Mean(name='test_loss')
#'Stateless' Losses
self.bc=tf.keras.losses.BinaryCrossentropy(from_logits=True)
self.mse = tf.keras.losses.MeanSquaredError()
#self.l1_loss=lambda z: tf.reduce_mean(tf.abs(z))
self.acc= lambda y,y_hat: tf.reduce_mean(tf.cast(tf.equal(
tf.argmax(y,axis=1),tf.argmax(y_hat,axis=1)),tf.float32))
#Checkpoint objects
self.ckpt = tf.train.Checkpoint(step=tf.Variable(0),
optimizer=self.gen.optimizer,
model=self)
self.manager = tf.train.CheckpointManager(self.ckpt,self.model_path
,max_to_keep=100,
keep_checkpoint_every_n_hours=(10/60))
#For fit function initialization
self.fit_init=False
#For switching D and G mechanism
#self.switch_loss = tf.keras.metrics.Mean(name='switch_loss')
self.switch_loss = tf.keras.metrics.Sum(name='switch_loss')
self.D_switch=True #start with Disc training
self.switch_loss_thres=-1. #0.5 #decide aptly
self.val_loss_min=1e7
#self.switch_loss_reduction=0.02
self.last_preserved_timestamp=-10
self.D_G_iter_ratio = 5
#self.trans2dB=lambda x: 20*(rescale_values(x,-30,5,-1,1))*np.log10(2)
self.T_steps = T_steps
return
def find_z_shape(self,x):
x_shape=tf.shape(x)
if isinstance(x.shape, tuple):
self.T_dim=x.shape[-2]
else:
self.T_dim=x.shape.as_list()[-2]
assert (self.T_dim%self.z_up_factor==0), ("z_up_factor"
" must be a factor of input's time dimension")
if self.n_dims_x is None:
self.n_dims_x=x_shape.shape.as_list()[0]
z_shape = [x_shape[i] for i in range(self.n_dims_x)]
#z_shape = [x_shape[i] for i in range(x_shape.shape.as_list()[0])]
z_shape[-2]=int(self.T_dim/self.z_up_factor) #(For correlated z)
z_shape[-1]=self.latent_size*1
return z_shape
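# Shape bookkeeping example (illustrative numbers, not from the source):
# for an input x of shape [N, T=64, C] with z_up_factor=8 and
# latent_size=10, find_z_shape returns [N, 8, 10]; the z_up_layer_list
# in the generator then upsamples the time axis back to T=64 before the
# latent is concatenated with the condition, which is why the assert
# requires T to be divisible by z_up_factor.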
def sample_z_like(self,x):
#z_shape=[x_shape[0],x_shape[1],self.latent_size]#TODO: change this based on need
z=tf.random.normal(shape=self.z_shape,mean=0.,stddev=1.,
dtype=tf.dtypes.float32)
return z
def update_D_switch(self):
if self.switch_loss.result()<=self.switch_loss_thres:
#print('\n Switch loss = {} is below threshold. Switching between G<->D\n'.format(self.switch_loss.result()))
#values=([int(self.ckpt.step)]+[m.result() for m in self.metrics_list[:3]]
# +[time.time()-self.start])
#print(self.template.format(*values))
self.D_switch=not(self.D_switch) #flip the switch
#self.train_loss1.reset_states()
#self.train_loss2.reset_states()
# =============================================================================
# if self.D_switch:
# print('Switch loss = {} is below threshold. Training Disc...\n'.format(self.switch_loss.result()))
# else:
# print('Switch loss = {} is below threshold. Training Gen...\n'.format(self.switch_loss.result()))
#
# =============================================================================
self.switch_loss.reset_states() #start new
return
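# Switching arithmetic (derived from the constants set in __init__):
# switch_loss is a Sum metric; each discriminator step adds
# -1/D_G_iter_ratio = -0.2 and each generator step adds -1.0, against a
# threshold of -1. The switch therefore flips after 5 discriminator
# steps or after a single generator step, realizing the usual
# WGAN-style 5:1 D:G update ratio.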
@tf.function
def train_step_recon(self,data):
'''
A compiled TensorFlow function, run once per training batch. We
run the forward pass first, then compute gradients with
GradientTape for the backward pass.
'''
#noise = tf.random.normal([BATCH_SIZE, noise_dim])
x,y=data
z=self.sample_z_like(x)
generator=self.gen
with tf.GradientTape() as gen_tape:
#print(z.get_shape().as_list(),cond_sig.get_shape().as_list())
sig_hat = generator([z,x], training=True)
recon_loss = self.mse(y,sig_hat)
gradients = gen_tape.gradient(recon_loss, generator.trainable_variables)
generator.optimizer.apply_gradients(zip(gradients, generator.trainable_variables))
self.train_loss1(recon_loss)
return
def train_step(self,data):
x,y=data
if self.D_switch:
#Update Discriminator
gen_loss, disc_loss, cost=self.train_disc(x, y)
self.switch_loss(-1/self.D_G_iter_ratio)
else:
#print(x.get_shape().as_list(),self.var_z.get_shape().as_list())
#Update Generator
gen_loss, disc_loss, cost=self.train_gen(x, y)
self.switch_loss(-1.)
self.train_loss(cost)
self.update_D_switch()
self.train_loss1(gen_loss)
self.train_loss2(-disc_loss)
#print('batch_no. = {}'.format(self.batch_counter))
return
@tf.function
def train_gen(self,x,y):
z=self.sample_z_like(x)
#z = random.normal((self.batch_size, 1, 1, self.z_dim))
#self.train_inverse_program(y,x,n_steps=self.T_steps)#updates self.var_z
with tf.GradientTape() as t:
y_hat = self.gen([z,x], training=True)
fake_logits, fake_out_list = self.disc([y_hat,x], training=False,
return_all_out=True)
gen_loss = self.gen.loss(fake_logits)
#y_hat_opt = self.gen([self.var_z,x], training=True)
#maml_loss=self.mse(y,y_hat_opt)
#cost = gen_loss# + 1e3*maml_loss
cost=[gen_loss]
if self.aux_losses is not None:
aux_loss=[self.aux_losses_weights[i]*
self.aux_losses[i](y, y_hat)
for i in range(len(self.aux_losses))]
cost+=aux_loss
cost = sum(cost)
#No grad tracking needed for disc loss
real_logits, real_out_list = self.disc([y,x], training=False,
return_all_out=True)
feat_loss=sum([self.mse(real_out_list[i],fake_out_list[i])
for i in range(len(fake_out_list))])
cost+=(self.feat_loss_weight*feat_loss)
disc_loss = self.disc.loss(real_logits,fake_logits)
grad = t.gradient(cost, self.gen.trainable_variables)
self.gen.optimizer.apply_gradients(zip(grad, self.gen.trainable_variables))
return gen_loss, disc_loss, cost
@tf.function
def train_disc(self, x, y):
z=self.sample_z_like(x)
#z = random.normal((self.batch_size, 1, 1, self.z_dim))
f_op4gp=lambda y_interp: self.disc([y_interp,x], training=True)
with tf.GradientTape() as t:
y_hat = self.gen([z,x], training=True)
fake_logits = self.disc([y_hat,x], training=True)
real_logits = self.disc([y,x], training=True)
disc_loss = self.disc.loss(real_logits,fake_logits)
gp = self.gradient_penalty(f_op4gp, y, y_hat)
#print(type(disc_loss),type(gp))
cost = disc_loss + self.grad_penalty_weight * gp
#No grad tracking needed for gen loss
gen_loss = self.gen.loss(fake_logits)
grad = t.gradient(cost, self.disc.trainable_variables)
self.disc.optimizer.apply_gradients(zip(grad, self.disc.trainable_variables))
return gen_loss, disc_loss, cost
def gradient_penalty(self, f, real, fake):
real_shape=tf.shape(real)
n_dims= self.n_dims_y*1#real_shape.shape.as_list()[0]
alpha_shape=n_dims*[1]
alpha_shape[0]=real_shape[0]*1
alpha = tf.random.uniform(alpha_shape, 0., 1.,dtype=tf_dtype)
diff = fake - real
inter = real + (alpha * diff)
with tf.GradientTape() as t:
t.watch(inter)
pred = f(inter)
grad = t.gradient(pred, [inter])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(grad), axis=list(range(1,n_dims))))
gp = tf.reduce_mean((slopes - 1.)**2)
#print(gp.shape,gp)
return gp
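# The term above is the standard WGAN-GP penalty (Gulrajani et al.,
# 2017):
#   x_hat = real + alpha * (fake - real),  alpha ~ U(0, 1) per sample
#   gp    = E[(||grad_{x_hat} D(x_hat)||_2 - 1)**2]
# alpha has shape [batch, 1, ..., 1], so each sample gets one
# interpolation coefficient broadcast over all non-batch axes, and the
# gradient norm is likewise reduced over axes 1..n_dims-1.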
def test_step_recon(self,data, in_prediction=False):
'''
x is either a condition or just a batch size indicator
'''
x,y=data
z=self.sample_z_like(x)
generator=self.gen
y_hat = generator([z,x], training=False)
if in_prediction:
return y_hat
recon_loss = self.mse(y,y_hat)
self.test_loss(recon_loss)
#self.test_metric(x, predictions)
return
def test_step(self,data, in_prediction=False, rnn_state=None):
'''
x is either a condition or just a batch size indicator
'''
x,y=data
z=self.sample_z_like(x)
y_hat = | |
[subent]
# Insert main menu entries.
self._menu_handler.insert(
MENU_OPTIONS['del'], callback=self._delete_primitive_cb)
self._grasp_menu_entries = []
if self._current_grasp_list:
self._menu_handler.insert(
MENU_OPTIONS['regen'], callback=self._regenerate_grasps_cb)
grasp_choice_entry = self._menu_handler.insert(
MENU_OPTIONS['choice'])
for i in range(len(self._current_grasp_list)):
grasp_ent = self._menu_handler.insert("grasp_" + str(i),
parent=grasp_choice_entry,
callback=self._switch_grasp_cb)
self._grasp_menu_entries += [grasp_ent]
for grasp_ent in self._grasp_menu_entries:
self._menu_handler.setCheckState(grasp_ent,
MenuHandler.UNCHECKED)
if self._current_grasp_num is not None:
self._menu_handler.setCheckState(
self._grasp_menu_entries[self._current_grasp_num],
MenuHandler.CHECKED)
else:
self._menu_handler.insert(
MENU_OPTIONS['gen'], callback=self._generate_grasps_cb)
# Make all unchecked to start.
for subent in self._sub_entries:
self._menu_handler.setCheckState(subent, MenuHandler.UNCHECKED)
# Check if necessary.
menu_id = self._get_menu_id(self._get_menu_ref())
if menu_id is not None:
# self.has_object = False
self._menu_handler.setCheckState(menu_id, MenuHandler.CHECKED)
def _get_menu_id(self, ref_name):
'''Returns the unique menu id from its name or None if the
object is not found.
Args:
ref_name (str)
Returns:
int (?)|None
'''
object_list = self._get_object_list_srv().object_list
refs = [obj.name for obj in object_list]
if self._number > 0:
refs.append(PREVIOUS_PRIMITIVE)
refs.append(BASE_LINK)
if ref_name in refs:
index = refs.index(ref_name)
if index < len(self._sub_entries):
return self._sub_entries[index]
else:
return None
else:
return None
def _get_menu_name(self, menu_id):
'''Returns the menu name from its unique menu id.
Args:
menu_id (int)
Returns:
str
'''
index = self._sub_entries.index(menu_id)
object_list = self._get_object_list_srv().object_list
refs = [obj.name for obj in object_list]
if self._number > 0:
refs.append(PREVIOUS_PRIMITIVE)
refs.append(BASE_LINK)
return refs[index]
def _set_ref(self, new_ref):
'''Changes the reference frame of the primitive to
new_ref_name.
Args:
new_ref_name
'''
# Get the id of the new ref (an int).
self._grasp_state.ref_type = ArmState.OBJECT
self._pre_grasp_state.ref_type = ArmState.OBJECT
new_ref_obj = self._get_object_from_name_srv(new_ref).obj
rospy.loginfo("Setting reference of primitive" +
"{} to object".format(self._number))
self._grasp_state.ref_landmark = new_ref_obj
self._pre_grasp_state.ref_landmark = new_ref_obj
self._grasp_state.ee_pose.header.frame_id = new_ref_obj.name
self._landmark_found = True
def _convert_ref_frame(self, new_landmark):
'''Convert grasp_state and pre_grasp_state to be in a different
reference frame
Args:
new_landmark (Landmark)
Returns:
ArmState
'''
ee_pose = PoseStamped()
if self._grasp_state.ref_type == ArmState.OBJECT:
rospy.loginfo("Relative to object")
if self._grasp_state.ref_landmark.name != new_landmark.name:
ee_pose = self._tf_listener.transformPose(
new_landmark.name,
self._grasp_state.ee_pose
)
self._grasp_state.ref_landmark = new_landmark
self._grasp_state.ee_pose = ee_pose
self._pre_grasp_state.ref_landmark = new_landmark
self._landmark_found = True
elif self._grasp_state.ref_type == ArmState.ROBOT_BASE:
ee_pose = self._tf_listener.transformPose(
BASE_LINK,
self._grasp_state.ee_pose
)
self._grasp_state.ee_pose = ee_pose
self._grasp_state.ref_landmark = Landmark()
self._pre_grasp_state.ref_landmark = Landmark()
self._landmark_found = False
elif self._grasp_state.ref_type == ArmState.PREVIOUS_TARGET:
prev_frame_name = "primitive_" + str(self._number - 1)
rospy.loginfo("Original pose: {}".format(self._grasp_state.ee_pose))
ee_pose = self._tf_listener.transformPose(
prev_frame_name,
self._grasp_state.ee_pose
)
rospy.loginfo("New pose: {}".format(ee_pose))
self._grasp_state.ee_pose = ee_pose
self._grasp_state.ref_landmark = Landmark()
self._pre_grasp_state.ref_landmark = Landmark()
self._landmark_found = False
self._set_pre_grasp_state_from_pose(ee_pose)
def _set_pre_grasp_state_from_pose(self, pose_stamped):
'''Sets pre_grasp_state based on a pose_stamped msg'''
rot_mat = Grasp._get_matrix_from_pose(pose_stamped)
x_axis = Vector3(rot_mat[0, 0], rot_mat[1, 0], rot_mat[2, 0])
self._pre_grasp_state.ee_pose = PoseStamped()
self._pre_grasp_state.ee_pose.header.frame_id = \
pose_stamped.header.frame_id
self._pre_grasp_state.ee_pose.pose.orientation.x = \
pose_stamped.pose.orientation.x
self._pre_grasp_state.ee_pose.pose.orientation.y = \
pose_stamped.pose.orientation.y
self._pre_grasp_state.ee_pose.pose.orientation.z = \
pose_stamped.pose.orientation.z
self._pre_grasp_state.ee_pose.pose.orientation.w = \
pose_stamped.pose.orientation.w
self._pre_grasp_state.ee_pose.pose.position.x = \
pose_stamped.pose.position.x \
- (x_axis.x * self._approach_dist)
self._pre_grasp_state.ee_pose.pose.position.y = \
pose_stamped.pose.position.y \
- (x_axis.y * self._approach_dist)
self._pre_grasp_state.ee_pose.pose.position.z = \
pose_stamped.pose.position.z \
- (x_axis.z * self._approach_dist)
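# Geometry note: the pre-grasp shares the grasp orientation and is
# backed off along the gripper's local x (approach) axis, i.e. the
# first column of the rotation matrix:
#   pre_position = grasp_position - approach_dist * x_axis
# so the end effector can approach the final grasp pose along a
# straight line.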
def _get_marker_pose(self):
'''Returns the pose of the marker for the primitive.
Returns:
Pose
'''
rospy.loginfo("Grasp frame is: {}".format(self.get_ref_frame_name()))
try:
if self._current_grasp_num is None:
base_pose = PoseStamped()
base_pose.header.frame_id = self.get_ref_frame_name()
base_pose.pose.orientation.w = 1.0
self._tf_listener.waitForTransform(BASE_LINK,
base_pose.header.frame_id,
rospy.Time.now(),
rospy.Duration(4.0))
intermediate_pose = self._tf_listener.transformPose(
BASE_LINK,
base_pose)
return intermediate_pose
else:
self._tf_listener.waitForTransform(BASE_LINK,
self._grasp_state.ee_pose.header.frame_id,
rospy.Time.now(),
rospy.Duration(4.0))
intermediate_pose = self._tf_listener.transformPose(
BASE_LINK,
self._grasp_state.ee_pose)
offset_pose = Grasp._offset_pose(intermediate_pose)
# return self._tf_listener.transformPose(self.get_ref_frame_name(),
# offset_pose)
return offset_pose
except Exception as e:
rospy.logwarn(e)
rospy.logwarn(
"Frame not available yet: {}".format(self.get_ref_frame_name()))
return None
def _update_viz_core(self, check_reachable=True):
'''Updates visualization after a change.
Args:
check_reachable (bool) : Check reachability of
pose before drawing marker
'''
# Create a new IM control.
menu_control = InteractiveMarkerControl()
menu_control.interaction_mode = InteractiveMarkerControl.BUTTON
menu_control.always_visible = True
frame_id = BASE_LINK
pose = self._get_marker_pose()
if self._current_grasp_num is None:
marker = Marker()
marker.type = Marker.CUBE
marker.action = Marker.ADD
marker.scale = self._grasp_state.ref_landmark.dimensions
marker.pose = Pose()
marker.pose.orientation.w = 1.0
if self._selected:
marker.color = COLOR_MESH_REACHABLE_SELECTED
else:
marker.color = COLOR_MESH_REACHABLE
menu_control.markers.append(marker)
else:
if pose is None:
return
if check_reachable and self._current_grasp_num is not None:
self.is_reachable()
menu_control = self._make_gripper_markers(
menu_control)
# Make and add interactive marker.
int_marker = InteractiveMarker()
int_marker.name = self.get_name()
int_marker.header.frame_id = frame_id
int_marker.pose = pose.pose
int_marker.scale = INT_MARKER_SCALE
#self._add_6dof_marker(int_marker, True)
rospy.loginfo("Marker name: {}".format(self.get_name()))
int_marker.controls.append(menu_control)
prev_marker = self._im_server.get(self.get_name())
prev_color = None
if prev_marker is not None:
if len(prev_marker.controls) > 0:
if len(prev_marker.controls[-1].markers) > 0:
prev_color = prev_marker.controls[-1].markers[-1].color
new_color = None
if len(int_marker.controls) > 0:
if len(int_marker.controls[-1].markers) > 0:
new_color = int_marker.controls[-1].markers[-1].color
if not prev_marker:
self._im_server.insert(
int_marker, self._marker_feedback_cb)
rospy.logwarn("Adding marker for primitive {}".format(self.get_number()))
return True
elif (prev_marker.pose != int_marker.pose) or (prev_color != new_color):
rospy.loginfo("Updating marker")
self._im_server.insert(
int_marker, self._marker_feedback_cb)
return True
rospy.logwarn("Not updating marker for primitive {}".format(self.get_number()))
return False
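# Change detection above: the pose and the color of the last mesh of
# the previously published marker are compared against the new values,
# and the marker is only re-inserted into the IM server when one of
# them differs (or when no marker existed yet), avoiding no-op updates
# on every visualization refresh.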
def _add_6dof_marker(self, int_marker, is_fixed):
'''Adds a 6 DoF control marker to the interactive marker.
Args:
int_marker (InteractiveMarker)
is_fixed (bool): Looks like whether position is fixed (?).
Currently only passed as True.
'''
# Each entry in options is (name, orientation, is_move)
options = [
('rotate_x', Quaternion(1, 0, 0, 1), False),
('move_x', Quaternion(1, 0, 0, 1), True),
('rotate_z', Quaternion(0, 1, 0, 1), False),
('move_z', Quaternion(0, 1, 0, 1), True),
('rotate_y', Quaternion(0, 0, 1, 1), False),
('move_y', Quaternion(0, 0, 1, 1), True),
]
for opt in options:
name, orient, is_move = opt
control = self._make_6dof_control(name, orient, is_move, is_fixed)
int_marker.controls.append(control)
def _make_6dof_control(self, name, orientation, is_move, is_fixed):
'''Creates and returns one component of the 6dof controller.
Args:
name (str): Name for the control
orientation (Quaternion): How the control should be
oriented.
is_move (bool): Looks like whether the marker moves the
object (?). Currently passed as True for moving markers,
False for rotating ones.
is_fixed (bool): Looks like whether position is fixed (?).
Currently always passed as True.
Returns:
InteractiveMarkerControl
'''
control = InteractiveMarkerControl()
control.name = name
control.orientation = orientation
control.always_visible = False
if self._is_control_visible:
if is_move:
control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS
else:
control.interaction_mode = InteractiveMarkerControl.ROTATE_AXIS
else:
control.interaction_mode = InteractiveMarkerControl.NONE
if is_fixed:
control.orientation_mode = InteractiveMarkerControl.FIXED
return control
def _set_new_pose(self, new_pose, frame_id):
'''Changes the pose of the primitive to new_pose.
Args:
new_pose (Pose)
'''
rospy.loginfo("Setting new pose for grasp primitive")
pose_stamped = PoseStamped()
pose_stamped.header.frame_id = frame_id
pose_stamped.pose = new_pose
pose_stamped_transformed = self._tf_listener.transformPose(
self.get_ref_frame_name(),
pose_stamped)
self._grasp_state.ee_pose = Grasp._offset_pose(
pose_stamped_transformed,
-1)
self._set_pre_grasp_state_from_pose(self._grasp_state.ee_pose)
self.update_viz()
def _make_gripper_markers(self, control):
'''Makes the grasp and pre-grasp gripper markers, adds them to
control, and returns control.
Args:
control (InteractiveMarkerControl): IM Control we're using.
Returns:
InteractiveMarkerControl: The passed control.
'''
if self._grasp_reachable:
grasp_mesh_color = self._color_mesh_reachable
else:
grasp_mesh_color = self._color_mesh_unreachable
if self._pre_grasp_reachable:
pre_grasp_mesh_color = self._color_mesh_reachable
else:
pre_grasp_mesh_color = self._color_mesh_unreachable
rospy.loginfo("Mesh color: {}".format(grasp_mesh_color))
# Make grasp marker
# Create mesh 1 (palm).
grasp_mesh1 = Grasp._make_mesh_marker(grasp_mesh_color)
grasp_mesh1.mesh_resource = STR_GRIPPER_PALM_FILE
grasp_mesh1.pose.position.x = Grasp._offset
grasp_mesh1.pose.orientation.w = 1
# Fingers
grasp_mesh2 = Grasp._make_mesh_marker(grasp_mesh_color)
grasp_mesh2.mesh_resource = STR_L_GRIPPER_FINGER_FILE
grasp_mesh2.pose.position.x = 0.08
grasp_mesh2.pose.position.y = -0.116
grasp_mesh2.pose.orientation.w = 1
grasp_mesh3 = Grasp._make_mesh_marker(grasp_mesh_color)
grasp_mesh3.mesh_resource = STR_R_GRIPPER_FINGER_FILE
grasp_mesh3.pose.position.x = 0.08
grasp_mesh3.pose.position.y = 0.116
grasp_mesh3.pose.orientation.w = 1
# make pre-grasp marker
pre_grasp_mesh1 = Grasp._make_mesh_marker(pre_grasp_mesh_color)
pre_grasp_mesh1.mesh_resource = STR_GRIPPER_PALM_FILE
pre_grasp_mesh1.pose.position.x = Grasp._offset - self._approach_dist
pre_grasp_mesh1.pose.orientation.w = 1
pre_grasp_mesh2 = Grasp._make_mesh_marker(pre_grasp_mesh_color)
pre_grasp_mesh2.mesh_resource = STR_L_GRIPPER_FINGER_FILE
pre_grasp_mesh2.pose.position.x = 0.08 - self._approach_dist
pre_grasp_mesh2.pose.position.y = -0.165
pre_grasp_mesh2.pose.orientation.w = 1
pre_grasp_mesh3 = Grasp._make_mesh_marker(pre_grasp_mesh_color)
pre_grasp_mesh3.mesh_resource = STR_R_GRIPPER_FINGER_FILE
pre_grasp_mesh3.pose.position.x = 0.08 - self._approach_dist
pre_grasp_mesh3.pose.position.y = 0.165
pre_grasp_mesh3.pose.orientation.w = 1
# Append all meshes we made.
control.markers.append(grasp_mesh1)
control.markers.append(grasp_mesh2)
control.markers.append(grasp_mesh3)
control.markers.append(pre_grasp_mesh1)
control.markers.append(pre_grasp_mesh2)
control.markers.append(pre_grasp_mesh3)
return control
def _delete_primitive_cb(self, feedback):
'''Callback for when delete is requested.
Args:
feedback (InteractiveMarkerFeedback): Unused
'''
self._marker_delete_cb(self._number)
def _regenerate_grasps_cb(self, feedback):
'''Callback for regenerating grasps upon request
Args:
feedback (InteractiveMarkerFeedback): Unused
'''
self.hide_marker()
resp = self._get_object_from_name_srv(self.get_ref_frame_name())
self._suggest_grasps(resp.obj)
self.update_viz()
self.show_marker()
self._action_change_cb()
self._pose_change_cb()
def _move_to_cb(self, feedback):
'''Callback for when moving to a pose is requested.
Args:
feedback (InteractiveMarkerFeedback): Unused
'''
# for now, "move to" this primitive will just mean execute
self.execute()
def _change_ref_cb(self, feedback):
'''Callback for when a reference frame change is requested.
Args:
feedback (InteractiveMarkerFeedback (?))
'''
self._menu_handler.setCheckState(
self._get_menu_id(self._get_menu_ref()), MenuHandler.UNCHECKED)
self._menu_handler.setCheckState(
feedback.menu_entry_id, MenuHandler.CHECKED)
new_ref = self._get_menu_name(feedback.menu_entry_id)
self._set_ref(new_ref)
rospy.loginfo(
'Switching reference frame to ' + new_ref + ' for primitive ' +
self.get_name())
self._menu_handler.reApply(self._im_server)
self._im_server.applyChanges()
self.update_viz(False)
self._action_change_cb()
def _marker_feedback_cb(self, feedback):
'''Callback for when an event occurs on the marker.
Args:
feedback (InteractiveMarkerFeedback)
'''
if feedback.event_type == InteractiveMarkerFeedback.MOUSE_UP:
# Set the visibility of the 6DOF controller.
# This happens a ton, and doesn't need to be logged like
# normal events (e.g. clicking on most marker controls
# fires here).
rospy.logdebug("Changing | |
'chat_id': chat_id,
'user_id': user_id,
}
data = await self._get('kickChatMember', args)
return data
async def leave_chat(self, chat_id: Union[int, str]) -> Awaitable[bool]:
args = {
'chat_id': chat_id,
}
data = await self._get('leaveChat', args)
return data
async def unban_chat_member(self, chat_id: Union[int, str],
user_id: int) -> Awaitable[bool]:
args = {
'chat_id': chat_id,
'user_id': user_id,
}
data = await self._get('unbanChatMember', args)
return data
async def get_chat(self, chat_id: Union[int, str]) -> Awaitable[types.Chat]:
args = {
'chat_id': chat_id,
}
data = await self._get('getChat', args)
return types.Chat(data)
async def get_chat_administrators(self, chat_id: Union[int, str]
) -> Awaitable[List[types.ChatMember]]:
args = {
'chat_id': chat_id,
}
data = await self._get('getChatAdministrators', args)
return [types.ChatMember(_) for _ in data]
async def get_chat_members_count(self, chat_id: Union[int, str]) -> Awaitable[int]:
args = {
'chat_id': chat_id,
}
data = await self._get('getChatMembersCount', args)
return data
async def get_chat_member(self, chat_id: Union[int, str],
user_id: int) -> Awaitable[types.ChatMember]:
args = {
'chat_id': chat_id,
'user_id': user_id,
}
data = await self._get('getChatMember', args)
return types.ChatMember(data)
async def answer_callback_query(self, callback_query_id: str,
text: str = None, show_alert: bool = None,
url: str = None, cache_time: int = None
) -> Awaitable[bool]:
args = {
'callback_query_id': callback_query_id,
}
if text is not None:
args['text'] = text
if show_alert is not None:
args['show_alert'] = show_alert
if url is not None:
args['url'] = url
if cache_time is not None:
args['cache_time'] = cache_time
data = await self._get('answerCallbackQuery', args)
return data
async def edit_message_text(self, text: str,
chat_id: Union[int, str] = None,
message_id: int = None,
inline_message_id: str = None,
parse_mode: str = None,
disable_web_page_preview: bool = None,
reply_markup: types.InlineKeyboardMarkup = None
) -> Awaitable[Union[types.Message, bool]]:
args = {
'text': text,
}
if chat_id is not None:
args['chat_id'] = chat_id
if message_id is not None:
args['message_id'] = message_id
if inline_message_id is not None:
args['inline_message_id'] = inline_message_id
if parse_mode is not None:
args['parse_mode'] = parse_mode
if disable_web_page_preview is not None:
args['disable_web_page_preview'] = disable_web_page_preview
if reply_markup is not None:
args['reply_markup'] = reply_markup
data = await self._get('editMessageText', args)
if isinstance(data, bool):
return data
return types.Message(data)
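# Telegram returns True instead of a Message when the edited message
# was sent via inline mode (only inline_message_id is known), which is
# why the edit_* methods check isinstance(data, bool) before wrapping
# the payload in types.Message.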
async def edit_message_caption(self, chat_id: Union[int, str] = None,
message_id: int = None,
inline_message_id: str = None,
caption: str = None,
reply_markup:
types.InlineKeyboardMarkup = None
) -> Awaitable[Union[types.Message, bool]]:
args = {}
if chat_id is not None:
args['chat_id'] = chat_id
if message_id is not None:
args['message_id'] = message_id
if inline_message_id is not None:
args['inline_message_id'] = inline_message_id
if caption is not None:
args['caption'] = caption
if reply_markup is not None:
args['reply_markup'] = reply_markup
data = await self._get('editMessageCaption', args)
if isinstance(data, bool):
return data
return types.Message(data)
async def edit_message_reply_markup(self, chat_id: Union[int, str] = None,
message_id: int = None,
inline_message_id: str = None,
reply_markup:
types.InlineKeyboardMarkup = None
) -> Awaitable[
Union[types.Message, bool]]:
args = {}
if chat_id is not None:
args['chat_id'] = chat_id
if message_id is not None:
args['message_id'] = message_id
if inline_message_id is not None:
args['inline_message_id'] = inline_message_id
if reply_markup is not None:
args['reply_markup'] = reply_markup
data = await self._get('editMessageReplyMarkup', args)
if isinstance(data, bool):
return data
return types.Message(data)
async def delete_message(self, chat_id: Union[int, str],
message_id: int) -> Awaitable[bool]:
args = {
'chat_id': chat_id,
'message_id': message_id,
}
data = await self._get('deleteMessage', args)
return data
async def send_sticker(self, chat_id: Union[int, str],
sticker: Union[types.InputFile, str],
disable_notification: bool = None,
reply_to_message_id: int = None,
reply_markup: ReplyMarkup = None
) -> Awaitable[types.Message]:
args = {
'chat_id': chat_id,
'sticker': sticker,
}
if disable_notification is not None:
args['disable_notification'] = disable_notification
if reply_to_message_id is not None:
args['reply_to_message_id'] = reply_to_message_id
if reply_markup is not None:
args['reply_markup'] = reply_markup
if isinstance(sticker, str):
data = await self._get('sendSticker', args)
else:
data = await self._post('sendSticker', args)
return types.Message(data)
async def get_sticker_set(self, name: str) -> Awaitable[types.StickerSet]:
args = {
'name': name,
}
data = await self._get('getStickerSet', args)
return types.StickerSet(data)
async def upload_sticker_file(self, user_id: int,
png_sticker: types.InputFile
) -> Awaitable[types.File]:
args = {
'user_id': user_id,
'png_sticker': png_sticker,
}
data = await self._post('uploadStickerFile', args)
return types.File(data)
async def create_new_sticker_set(self, user_id: int, name: str, title: str,
png_sticker: Union[types.InputFile, str],
emojis: str, contains_masks: bool = None,
mask_position: types.MaskPosition = None
) -> Awaitable[bool]:
args = {
'user_id': user_id,
'name': name,
'title': title,
'png_sticker': png_sticker,
'emojis': emojis,
}
if contains_masks is not None:
args['contains_masks'] = contains_masks
if mask_position is not None:
args['mask_position'] = mask_position
if isinstance(png_sticker, str):
data = await self._get('createNewStickerSet', args)
else:
data = await self._post('createNewStickerSet', args)
return data
async def add_sticker_to_set(self, user_id: int, name: str,
png_sticker: Union[types.InputFile, str],
emojis: str,
mask_position: types.MaskPosition = None
) -> Awaitable[types.Message]:
args = {
'user_id': user_id,
'name': name,
'png_sticker': png_sticker,
'emojis': emojis,
}
if mask_position is not None:
args['mask_position'] = mask_position
if isinstance(png_sticker, str):
data = await self._get('addStickerToSet', args)
else:
data = await self._post('addStickerToSet', args)
return data
async def set_sticker_position_in_set(self, sticker: str,
position: int) -> Awaitable[bool]:
args = {
'sticker': sticker,
'position': position,
}
data = await self._get('setStickerPositionInSet', args)
return data
async def delete_sticker_from_set(self, sticker: str) -> Awaitable[bool]:
args = {
'sticker': sticker,
}
data = await self._get('deleteStickerFromSet', args)
return data
async def answer_inline_query(self, inline_query_id: str,
results: List[types.InlineQueryResult],
cache_time: int = None,
is_personal: bool = None,
next_offset: str = None,
switch_pm_text: str = None,
switch_pm_parameter: str = None
) -> Awaitable[bool]:
args = {
'inline_query_id': inline_query_id,
'results': results,
}
if cache_time is not None:
args['cache_time'] = cache_time
if is_personal is not None:
args['is_personal'] = is_personal
if next_offset is not None:
args['next_offset'] = next_offset
if switch_pm_text is not None:
args['switch_pm_text'] = switch_pm_text
if switch_pm_parameter is not None:
args['switch_pm_parameter'] = switch_pm_parameter
data = await self._post('answerInlineQuery', args)
return data
async def send_game(self, chat_id: int, game_short_name: str,
disable_notification: bool = None,
reply_to_message_id: int = None,
reply_markup: types.InlineKeyboardMarkup = None,
) -> Awaitable[types.Message]:
args = {
'chat_id': chat_id,
'game_short_name': game_short_name,
}
if disable_notification is not None:
args['disable_notification'] = disable_notification
if reply_to_message_id is not None:
args['reply_to_message_id'] = reply_to_message_id
if reply_markup is not None:
args['reply_markup'] = reply_markup
data = await self._get('sendGame', args)
return types.Message(data)
async def set_game_score(self, user_id: int, score: int, force: bool = None,
disable_edit_message: bool = None,
chat_id: int = None, message_id: int = None,
inline_message_id: str = None
) -> Awaitable[Union[types.Message, bool]]:
args = {
'user_id': user_id,
'score': score,
}
if force is not None:
args['force'] = force
if disable_edit_message is not None:
args['disable_edit_message'] = disable_edit_message
if chat_id is not None:
args['chat_id'] = chat_id
if message_id is not None:
args['message_id'] = message_id
if inline_message_id is not None:
args['inline_message_id'] = inline_message_id
data = await self._get('setGameScore', args)
if isinstance(data, bool):
return data
return types.Message(data)
async def get_game_high_scores(self, user_id: int, chat_id: int = None,
message_id: int = None,
inline_message_id: str = None
) -> Awaitable[List[types.GameHighScore]]:
args = {
'user_id': user_id,
}
if chat_id is not None:
args['chat_id'] = chat_id
if message_id is not None:
args['message_id'] = message_id
if inline_message_id is not None:
args['inline_message_id'] = inline_message_id
data = await self._get('getGameHighScores', args)
return [types.GameHighScore(_) for _ in data]
def _get_api_url(self, api_method):
return _API_TEMPLATE.format(api_token=self._api_token,
api_method=api_method)
@staticmethod
def _parse_response(response):
data = response.body.decode('utf-8')
data = json.loads(data)
if not data['ok']:
raise BotError(data['description'])
return data['result']
async def _get(self, api_method, args=None):
url = self._get_api_url(api_method)
if args is not None:
args = util.normalize_args(args)
url = thu.url_concat(url, args)
link = thc.AsyncHTTPClient()
request = thc.HTTPRequest(url)
response = await link.fetch(request)
return self._parse_response(response)
async def _post(self, api_method, args):
url = self._get_api_url(api_method)
args = util.normalize_args(args)
content_type, stream = util.generate_multipart_formdata(args.items())
link = thc.AsyncHTTPClient()
request = thc.HTTPRequest(url, method='POST', headers={
'Content-Type': content_type,
}, body_producer=stream, request_timeout=0.0)
response = await link.fetch(request)
return self._parse_response(response)
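# Transport summary: methods with plain JSON-serializable arguments go
# through _get (url-encoded GET), while methods that may carry binary
# payloads (files, stickers) go through _post (multipart/form-data).
# A minimal hedged usage sketch, where `bot` is a hypothetical instance
# of the client class these helpers belong to, _API_TEMPLATE and the
# API token are configured, and we are inside a running event loop:
#   me = await bot._get('getMe')               # simple call, no args
#   msg = await bot._post('sendDocument', args)  # args contain a file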
class _DispatcherMixin(object):
def __init__(self, *args, **kwargs) -> None:
super(_DispatcherMixin, self).__init__()
async def on_text(self, message: types.Message) -> None:
pass
async def on_audio(self, message: types.Message) -> None:
pass
async def on_document(self, message: types.Message) -> None:
pass
async def on_game(self, message: types.Message) -> None:
pass
async def on_photo(self, message: types.Message) -> None:
pass
async def on_sticker(self, message: types.Message) -> None:
pass
async def on_video(self, message: types.Message) -> None:
pass
async def on_voice(self, message: types.Message) -> None:
pass
async def on_video_note(self, message: types.Message) -> None:
pass
async def on_caption(self, message: types.Message) -> None:
pass
async def on_contact(self, message: types.Message) -> None:
pass
async def on_location(self, message: types.Message) -> None:
pass
async def | |
<filename>grblas/_agg.py<gh_stars>0
from functools import partial
import numpy as np
from . import agg, binary, monoid, semiring, unary
from .dtypes import lookup_dtype, unify
from .matrix import Matrix
from .monoid import any as _any
from .operator import _normalize_type
from .scalar import Scalar
from .ss import diag
from .vector import Vector
def _get_types(ops, initdtype):
"""Determine the input and output types of an aggregator based on a list of ops"""
if initdtype is None:
prev = dict(ops[0].types)
else:
initdtype = lookup_dtype(initdtype)
prev = {key: unify(lookup_dtype(val), initdtype).name for key, val in ops[0].types.items()}
for op in ops[1:]:
cur = {}
types = op.types
for in_type, out_type in prev.items():
if out_type not in types: # pragma: no cover
continue
cur[in_type] = types[out_type]
prev = cur
return prev
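# Illustration of the chaining above (abstract example, not a real
# grblas type table): if ops[0] maps {INT64: INT64, FP32: FP32} and
# ops[1] maps {FP32: FP64}, only inputs whose intermediate type survives
# every stage are kept, so the composed mapping is {FP32: FP64}. When
# initdtype is given it is unified with each input type first, e.g. an
# integer input combined with a float initval promotes to a float type.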
class Aggregator:
opclass = "Aggregator"
def __init__(
self,
name,
*,
initval=None,
monoid=None,
semiring=None,
switch=False,
semiring2=None,
finalize=None,
composite=None,
custom=None,
types=None,
):
self.name = name
self._initval_orig = initval
self._initval = False if initval is None else initval
self._initdtype = lookup_dtype(type(self._initval))
self._monoid = monoid
self._semiring = semiring
self._semiring2 = semiring2
self._switch = switch
self._finalize = finalize
self._composite = composite
self._custom = custom
if types is None:
if monoid is not None:
types = [monoid]
elif semiring is not None:
types = [semiring, semiring2]
if finalize is not None:
types.append(finalize)
initval = self._initval
else: # pragma: no cover
raise TypeError("types must be provided for composite and custom aggregators")
self._types_orig = types
self._types = None
self._typed_ops = {}
@property
def types(self):
if self._types is None:
if type(self._semiring) is str:
self._semiring = semiring.from_string(self._semiring)
if type(self._types_orig[0]) is str: # pragma: no branch
self._types_orig[0] = semiring.from_string(self._types_orig[0])
self._types = _get_types(
self._types_orig, None if self._initval_orig is None else self._initdtype
)
return self._types
def __getitem__(self, dtype):
dtype = _normalize_type(dtype)
if dtype not in self.types:
raise KeyError(f"{self.name} does not work with {dtype}")
if dtype not in self._typed_ops:
self._typed_ops[dtype] = TypedAggregator(self, dtype)
return self._typed_ops[dtype]
def __contains__(self, dtype):
dtype = _normalize_type(dtype)
return dtype in self.types
def __repr__(self):
return f"agg.{self.name}"
class TypedAggregator:
opclass = "Aggregator"
def __init__(self, agg, dtype):
self.name = agg.name
self.parent = agg
self.type = dtype
self.return_type = agg.types[dtype]
def __repr__(self):
return f"agg.{self.name}[{self.type}]"
def _new(self, updater, expr, *, in_composite=False):
agg = self.parent
if agg._monoid is not None:
x = expr.args[0]
expr = getattr(x, expr.method_name)(agg._monoid[self.type])
if expr.output_type is Scalar and x._nvals == 0:
# Don't set scalar output to monoid identity if empty
expr = Scalar.new(expr.dtype)
updater << expr
if in_composite:
parent = updater.parent
if not parent._is_scalar:
return parent
rv = Vector.new(parent.dtype, size=1)
if parent._nvals != 0:
rv[0] = parent
return rv
return
if agg._composite is not None:
# Masks are applied throughout the aggregation, including composite aggregations.
# Aggregations done while `in_composite is True` should return the updater parent
# if the result is not a Scalar. If the result is a Scalar, then there can be no
# output mask, and a Vector of size 1 should be returned instead.
results = []
mask = updater.kwargs.get("mask")
for cur_agg in agg._composite:
cur_agg = cur_agg[self.type] # Hopefully works well enough
arg = expr.construct_output(cur_agg.return_type)
results.append(cur_agg._new(arg(mask=mask), expr, in_composite=True))
final_expr = agg._finalize(*results)
if expr.cfunc_name == "GrB_Matrix_reduce_Aggregator":
updater << final_expr
elif expr.cfunc_name.startswith("GrB_Vector_reduce") or expr.cfunc_name.startswith(
"GrB_Matrix_reduce"
):
final = final_expr.new()
expr = final.reduce(_any)
if final._nvals == 0:
expr = Scalar.new(expr.dtype)
updater << expr
else:
raise NotImplementedError(f"{agg.name} with {expr.cfunc_name}")
if in_composite:
parent = updater.parent
if not parent._is_scalar:
return parent
rv = Vector.new(parent.dtype, size=1)
if parent._nvals != 0:
rv[0] = parent
return rv
return
if agg._custom is not None:
return agg._custom(self, updater, expr, in_composite=in_composite)
dtype = unify(lookup_dtype(self.type), lookup_dtype(agg._initdtype))
semiring = agg._semiring[dtype]
if expr.cfunc_name == "GrB_Matrix_reduce_Aggregator":
# Matrix -> Vector
A = expr.args[0]
orig_updater = updater
if agg._finalize is not None:
step1 = expr.construct_output(semiring.return_type)
updater = step1(mask=updater.kwargs.get("mask"))
if expr.method_name == "reduce_columnwise":
A = A.T
size = A._ncols
init = Vector.new(agg._initdtype, size=size)
init[...] = agg._initval # O(1) dense vector in SuiteSparse 5
if agg._switch:
updater << semiring(init @ A.T)
else:
updater << semiring(A @ init)
if agg._finalize is not None:
orig_updater << agg._finalize[semiring.return_type](step1)
if in_composite:
return orig_updater.parent
elif expr.cfunc_name.startswith("GrB_Vector_reduce"):
# Vector -> Scalar
v = expr.args[0]
step1 = Vector.new(semiring.return_type, size=1)
init = Matrix.new(agg._initdtype, nrows=v._size, ncols=1)
init[...] = agg._initval # O(1) dense column vector in SuiteSparse 5
if agg._switch:
step1 << semiring(init.T @ v)
else:
step1 << semiring(v @ init)
if agg._finalize is not None:
finalize = agg._finalize[semiring.return_type]
if step1.dtype == finalize.return_type:
step1 << finalize(step1)
else:
step1 = finalize(step1).new(finalize.return_type)
if in_composite:
return step1
expr = step1.reduce(_any)
if step1._nvals == 0:
expr = Scalar.new(expr.dtype)
updater << expr
elif expr.cfunc_name.startswith("GrB_Matrix_reduce"):
# Matrix -> Scalar
A = expr.args[0]
# We need to compute in two steps: Matrix -> Vector -> Scalar.
# This has not been benchmarked or optimized.
# We may be able to intelligently choose the faster path.
init1 = Vector.new(agg._initdtype, size=A._ncols)
init1[...] = agg._initval # O(1) dense vector in SuiteSparse 5
step1 = Vector.new(semiring.return_type, size=A._nrows)
if agg._switch:
step1 << semiring(init1 @ A.T)
else:
step1 << semiring(A @ init1)
init2 = Matrix.new(agg._initdtype, nrows=A._nrows, ncols=1)
init2[...] = agg._initval # O(1) dense vector in SuiteSparse 5
semiring2 = agg._semiring2[semiring.return_type]
step2 = Vector.new(semiring2.return_type, size=1)
step2 << semiring2(step1 @ init2)
if agg._finalize is not None:
finalize = agg._finalize[semiring2.return_type]
if step2.dtype == finalize.return_type:
step2 << finalize(step2)
else:
step2 = finalize(step2).new(finalize.return_type)
if in_composite:
return step2
expr = step2.reduce(_any)
if step2._nvals == 0:
expr = Scalar.new(expr.dtype)
updater << expr
else:
raise NotImplementedError(f"{agg.name} with {expr.cfunc_name}")
# Monoid-only
agg.sum = Aggregator("sum", monoid=monoid.plus)
agg.prod = Aggregator("prod", monoid=monoid.times)
agg.all = Aggregator("all", monoid=monoid.land)
agg.any = Aggregator("any", monoid=monoid.lor)
agg.min = Aggregator("min", monoid=monoid.min)
agg.max = Aggregator("max", monoid=monoid.max)
agg.any_value = Aggregator("any_value", monoid=monoid.any)
agg.bitwise_all = Aggregator("bitwise_all", monoid=monoid.band)
agg.bitwise_any = Aggregator("bitwise_any", monoid=monoid.bor)
# Other monoids: bxnor bxor eq lxnor lxor
# Semiring-only
agg.count = Aggregator("count", semiring=semiring.plus_pair, semiring2=semiring.plus_first)
agg.count_nonzero = Aggregator(
"count_nonzero", semiring=semiring.plus_isne, semiring2=semiring.plus_first
)
agg.count_zero = Aggregator(
"count_zero", semiring=semiring.plus_iseq, semiring2=semiring.plus_first
)
agg.sum_of_squares = Aggregator(
"sum_of_squares", initval=2, semiring=semiring.plus_pow, semiring2=semiring.plus_first
)
agg.sum_of_inverses = Aggregator(
"sum_of_inverses",
initval=-1.0,
semiring=semiring.plus_pow,
semiring2=semiring.plus_first,
)
agg.exists = Aggregator("exists", semiring=semiring.any_pair, semiring2=semiring.any_pair)
# Semiring and finalize
agg.hypot = Aggregator(
"hypot",
initval=2,
semiring=semiring.plus_pow,
semiring2=semiring.plus_first,
finalize=unary.sqrt,
)
agg.logaddexp = Aggregator(
"logaddexp",
initval=np.e,
semiring=semiring.plus_pow,
switch=True,
semiring2=semiring.plus_first,
finalize=unary.log,
)
agg.logaddexp2 = Aggregator(
"logaddexp2",
initval=2,
semiring=semiring.plus_pow,
switch=True,
semiring2=semiring.plus_first,
finalize=unary.log2,
)
# Alternatives
# logaddexp = Aggregator('logaddexp', monoid=semiring.numpy.logaddexp)
# logaddexp2 = Aggregator('logaddexp2', monoid=semiring.numpy.logaddexp2)
# hypot as monoid doesn't work if single negative element!
# hypot = Aggregator('hypot', monoid=semiring.numpy.hypot)
agg.L0norm = agg.count_nonzero
agg.L1norm = Aggregator("L1norm", semiring="plus_absfirst", semiring2=semiring.plus_first)
agg.L2norm = agg.hypot
agg.Linfnorm = Aggregator("Linfnorm", semiring="max_absfirst", semiring2=semiring.max_first)
# Composite
def _mean_finalize(c, x):
return binary.truediv(x & c)
def _ptp_finalize(max, min):
return binary.minus(max & min)
def _varp_finalize(c, x, x2):
# <x2> / n - (<x> / n)**2
left = binary.truediv(x2 & c).new()
right = binary.truediv(x & c).new()
right << binary.pow(right, 2)
return binary.minus(left & right)
def _vars_finalize(c, x, x2):
# <x2> / (n-1) - <x>**2 / (n * (n-1))
x << binary.pow(x, 2)
right = binary.truediv(x & c).new()
c << binary.minus(c, 1)
right << binary.truediv(right & c)
left = binary.truediv(x2 & c).new()
return binary.minus(left & right)
def _stdp_finalize(c, x, x2):
val = _varp_finalize(c, x, x2).new()
return unary.sqrt(val)
def _stds_finalize(c, x, x2):
val = _vars_finalize(c, x, x2).new()
return unary.sqrt(val)
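# The variance finalizers above implement the one-pass formulas, with
# c = count, x = sum, x2 = sum of squares:
#   population: Var_p = x2/c - (x/c)**2
#   sample:     Var_s = x2/(c-1) - x**2/(c*(c-1))
# Both are algebraically equal to the mean squared deviation from the
# mean (with Bessel's n-1 correction in the sample case) but require
# only a single pass over the data; the std variants just take sqrt.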
def _geometric_mean_finalize(c, x):
right = unary.minv["FP64"](c).new()
return binary.pow(x & right)
def _harmonic_mean_finalize(c, x):
return binary.truediv(c & x)
def _root_mean_square_finalize(c, x2):
val = binary.truediv(x2 & c).new()
return unary.sqrt(val)
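# Hedged usage sketch for the aggregators registered below, assuming
# the public grblas reduce API (values here are illustrative):
#   from grblas import Vector, agg
#   v = Vector.from_values([0, 1, 2], [1.0, 2.0, 6.0])
#   v.reduce(agg.mean).new()              # -> 3.0
#   v.reduce(agg.root_mean_square).new()  # -> sqrt((1 + 4 + 36) / 3)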
agg.mean = Aggregator(
"mean",
composite=[agg.count, agg.sum],
finalize=_mean_finalize,
types=[binary.truediv],
)
agg.peak_to_peak = Aggregator(
"peak_to_peak",
composite=[agg.max, agg.min],
finalize=_ptp_finalize,
types=[monoid.min],
)
agg.varp = Aggregator(
"varp",
composite=[agg.count, agg.sum, agg.sum_of_squares],
finalize=_varp_finalize,
types=[binary.truediv],
)
agg.vars = Aggregator(
"vars",
composite=[agg.count, agg.sum, agg.sum_of_squares],
finalize=_vars_finalize,
types=[binary.truediv],
)
agg.stdp = Aggregator(
"stdp",
composite=[agg.count, agg.sum, agg.sum_of_squares],
finalize=_stdp_finalize,
types=[binary.truediv, unary.sqrt],
)
agg.stds = Aggregator(
"stds",
composite=[agg.count, agg.sum, agg.sum_of_squares],
finalize=_stds_finalize,
types=[binary.truediv, unary.sqrt],
)
agg.geometric_mean = Aggregator(
"geometric_mean",
composite=[agg.count, agg.prod],
finalize=_geometric_mean_finalize,
types=[binary.truediv],
)
agg.harmonic_mean = Aggregator(
"harmonic_mean",
composite=[agg.count, agg.sum_of_inverses],
finalize=_harmonic_mean_finalize,
types=[agg.sum_of_inverses, binary.truediv],
)
agg.root_mean_square = Aggregator(
"root_mean_square",
composite=[agg.count, agg.sum_of_squares],
finalize=_root_mean_square_finalize,
types=[binary.truediv, unary.sqrt],
)
# Special recipes
def _argminmaxij(
agg,
updater,
expr,
*,
in_composite,
monoid,
col_semiring,
row_semiring,
):
if expr.cfunc_name == "GrB_Matrix_reduce_Aggregator":
A = expr.args[0]
if expr.method_name == "reduce_rowwise":
step1 = A.reduce_rowwise(monoid).new()
# i, j = step1.to_values()
# D = Matrix.from_values(i, i, j, nrows=A._nrows, ncols=A._nrows)
D = diag(step1)
masked = semiring.any_eq(D @ A).new()
masked(mask=masked.V, replace=True) << masked # Could use select
init = Vector.new(bool, size=A._ncols)
init[...] = False # O(1) dense vector in SuiteSparse 5
updater << row_semiring(masked @ init)
if in_composite:
return updater.parent
else:
step1 = A.reduce_columnwise(monoid).new()
# i, j = step1.to_values()
# D = Matrix.from_values(i, i, j, nrows=A._ncols, ncols=A._ncols)
D = diag(step1)
masked = semiring.any_eq(A @ D).new()
masked(mask=masked.V, replace=True) << masked # Could use select
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_z003_mg_z003_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : test derivation by ext. with all with
base=empty content
"""
assert_bindings(
schema="msData/modelGroups/mgZ003.xsd",
instance="msData/modelGroups/mgZ003.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_q020_mg_q020_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : 2 particles with identical element
declarations (same type), one under 'choice', one under 'sequence'
inside 'choice' of 'choice'
"""
assert_bindings(
schema="msData/modelGroups/mgQ020.xsd",
instance="msData/modelGroups/mgQ020.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_q019_mg_q019_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : 2 particles with identical element
declarations (same type), one under 'choice', one under 'choice'
inside 'sequence' of 'choice'
"""
assert_bindings(
schema="msData/modelGroups/mgQ019.xsd",
instance="msData/modelGroups/mgQ019.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_q018_mg_q018_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : 2 particles with identical element
declarations (same type), one under 'choice', one under 'sequence'
inside 'sequence' of 'choice'
"""
assert_bindings(
schema="msData/modelGroups/mgQ018.xsd",
instance="msData/modelGroups/mgQ018.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_q017_mg_q017_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : 2 particles with identical element
declarations (same type), one under 'sequence', one under 'choice'
inside 'choice' of 'sequence'
"""
assert_bindings(
schema="msData/modelGroups/mgQ017.xsd",
instance="msData/modelGroups/mgQ017.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_q016_mg_q016_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : 2 particles with identical element
declarations (same type), one under 'sequence', one under 'sequence'
inside 'choice' of 'sequence'
"""
assert_bindings(
schema="msData/modelGroups/mgQ016.xsd",
instance="msData/modelGroups/mgQ016.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_q015_mg_q015_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : 2 particles with identical element
declarations (same type), one under 'sequence', one under 'choice'
inside 'sequence' of 'sequence'
"""
assert_bindings(
schema="msData/modelGroups/mgQ015.xsd",
instance="msData/modelGroups/mgQ015.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_q014_mg_q014_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : 2 particles with identical element
declarations (same type), one under 'sequence', one under 'sequence'
inside 'sequence' of 'sequence'
"""
assert_bindings(
schema="msData/modelGroups/mgQ014.xsd",
instance="msData/modelGroups/mgQ014.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_q009_mg_q009_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : 2 particles with identical element
declarations (same type), one under 'choice', one under 'sequence'
inside 'choice'
"""
assert_bindings(
schema="msData/modelGroups/mgQ009.xsd",
instance="msData/modelGroups/mgQ009.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_q008_mg_q008_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : 2 particles with identical element
declarations (same type), one under 'choice', one under 'choice'
inside 'choice'
"""
assert_bindings(
schema="msData/modelGroups/mgQ008.xsd",
instance="msData/modelGroups/mgQ008.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_q007_mg_q007_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : 2 particles with identical element
declarations (same type), one under 'sequence', one under 'sequence'
inside 'sequence'
"""
assert_bindings(
schema="msData/modelGroups/mgQ007.xsd",
instance="msData/modelGroups/mgQ007.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_q006_mg_q006_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : 2 particles with identical element
declarations (same type), one under 'sequence', one under 'choice'
inside 'sequence'
"""
assert_bindings(
schema="msData/modelGroups/mgQ006.xsd",
instance="msData/modelGroups/mgQ006.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_q003_mg_q003_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : 2 particles with identical element
declarations (same type), both under choice
"""
assert_bindings(
schema="msData/modelGroups/mgQ003.xsd",
instance="msData/modelGroups/mgQ003.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_q002_mg_q002_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : 2 particles with identical element
declarations (same type), both under sequence
"""
assert_bindings(
schema="msData/modelGroups/mgQ002.xsd",
instance="msData/modelGroups/mgQ002.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_o038_mg_o038_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : group' with 'all', appear under 'choice'
with maxOccurs=minOccurs=1 , which is part of a complexType
"""
assert_bindings(
schema="msData/modelGroups/mgO038.xsd",
instance="msData/modelGroups/mgO038.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_o037_mg_o037_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : group' with 'all', appear under 'sequence'
with maxOccurs=minOccurs=1, which is part of a complexType
"""
assert_bindings(
schema="msData/modelGroups/mgO037.xsd",
instance="msData/modelGroups/mgO037.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_o036_mg_o036_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : group' with 'all', appear under
'restriction', which is part of a complexType
"""
assert_bindings(
schema="msData/modelGroups/mgO036.xsd",
instance="msData/modelGroups/mgO036.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_o034_mg_o034_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : group' with 'all', appear under 'redefine',
which is part of a complexType
"""
assert_bindings(
schema="msData/modelGroups/mgO034.xsd",
instance="msData/modelGroups/mgO034.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_o031_mg_o031_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : group' with 'all', appear under 'choice'
with maxOccurs=minOccurs=1 , which is part of a complexType, and group
ref has minOccurs=0, maxOccurs=1
"""
assert_bindings(
schema="msData/modelGroups/mgO031.xsd",
instance="msData/modelGroups/mgO031.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_o030_mg_o030_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : 'all', and has minOccurs=0, maxOccurs=1
"""
assert_bindings(
schema="msData/modelGroups/mgO030.xsd",
instance="msData/modelGroups/mgO030.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_o029_mg_o029_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : 'all', appear under 'restriction', which is
part of a complexType, and has minOccurs=0, maxOccurs=1
"""
assert_bindings(
schema="msData/modelGroups/mgO029.xsd",
instance="msData/modelGroups/mgO029.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_o017_mg_o017_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : group' with 'all', appear under 'choice'
with maxOccurs=minOccurs=1, which is part of a complexType, and
particles in all have maxOccurs=minOccurs (absent)
"""
assert_bindings(
schema="msData/modelGroups/mgO017.xsd",
instance="msData/modelGroups/mgO017.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_o016_mg_o016_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : group' with 'all', appear under 'sequence'
with maxOccurs=minOccurs=1, which is part of a complexType, and
particles in all have maxOccurs=minOccurs (absent)
"""
assert_bindings(
schema="msData/modelGroups/mgO016.xsd",
instance="msData/modelGroups/mgO016.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_o015_mg_o015_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : group' with 'all', appear under
'restriction', which is part of a complexType, and particles in all
have maxOccurs=minOccurs (absent)
"""
assert_bindings(
schema="msData/modelGroups/mgO015.xsd",
instance="msData/modelGroups/mgO015.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_o011_mg_o011_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : group' with 'all', appear under
'complexType', which is part of a complexType, and particles in all
have maxOccurs=minOccurs (absent)
"""
assert_bindings(
schema="msData/modelGroups/mgO011.xsd",
instance="msData/modelGroups/mgO011.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_o010_mg_o010_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : 'group' with 'all', appears under 'choice',
which is part of a complexType
"""
assert_bindings(
schema="msData/modelGroups/mgO010.xsd",
instance="msData/modelGroups/mgO010.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_o009_mg_o009_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : 'group' with 'all', appears under 'sequence',
which is part of a complexType
"""
assert_bindings(
schema="msData/modelGroups/mgO009.xsd",
instance="msData/modelGroups/mgO009.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_o008_mg_o008_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : 'group' with 'all', appears under
'restriction', which is part of a complexType
"""
assert_bindings(
schema="msData/modelGroups/mgO008.xsd",
instance="msData/modelGroups/mgO008.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_o006_mg_o006_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : 'group' with 'all', appears under 'redefine',
which is part of a complexType
"""
assert_bindings(
schema="msData/modelGroups/mgO006.xsd",
instance="msData/modelGroups/mgO006.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_o005_mg_o005_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : 'group' with 'all', appears under 'schema',
which is part of a complexType and has maxOccurs=minOccurs (0 | 1)
"""
assert_bindings(
schema="msData/modelGroups/mgO005.xsd",
instance="msData/modelGroups/mgO005.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_o004_mg_o004_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : 'all' appears under 'complexType', which is
part of a complexType, and particles in 'all' have maxOccurs=minOccurs
(0 | 1)
"""
assert_bindings(
schema="msData/modelGroups/mgO004.xsd",
instance="msData/modelGroups/mgO004.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_o002_mg_o002_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : 'all' has a particle with
minOccurs=maxOccurs=1
"""
assert_bindings(
schema="msData/modelGroups/mgO002.xsd",
instance="msData/modelGroups/mgO002.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_n012_mg_n012_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : parent is sequence, has 2 sequences (E1,E2)
(F1,F2) and 2 choices (C1 | C2) (D1 | D2); the instance XML document
contains (E1, E2, F1, F2, C1, D1)
"""
assert_bindings(
schema="msData/modelGroups/mgN012.xsd",
instance="msData/modelGroups/mgN012.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_n005_mg_n005_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : parent is sequence, has 2 sequences as
children (E1,E2) (F1,F2); in the instance XML document, no element is
specified
"""
assert_bindings(
schema="msData/modelGroups/mgN005.xsd",
instance="msData/modelGroups/mgN005.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_n001_mg_n001_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : parent is sequence with more than one child
sequence, each of which again has more than one sequence child node;
the instance XML conforms to the declaration
"""
assert_bindings(
schema="msData/modelGroups/mgN001.xsd",
instance="msData/modelGroups/mgN001.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_mg_m013_mg_m013_v(mode, save_output, output_format):
"""
TEST :model groups (ALL) : all: with 2
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Tests for the ast2select module."""
import datetime
import time
import unittest
from framework import sql
from proto import ast_pb2
from proto import tracker_pb2
from search import ast2select
from search import query2ast
from tracker import tracker_bizobj
BUILTIN_ISSUE_FIELDS = query2ast.BUILTIN_ISSUE_FIELDS
ANY_FIELD = query2ast.BUILTIN_ISSUE_FIELDS['any_field']
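# As exercised by the tests below, ast2select.BuildSQLQuery(ast) returns a
# (left_joins, where) pair; each element is a list of
# (sql_fragment, parameter_list) tuples suitable for parameterized execution.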
class AST2SelectTest(unittest.TestCase):
def setUp(self):
self.config = tracker_bizobj.MakeDefaultProjectIssueConfig(789)
def testBuildSQLQuery_EmptyAST(self):
ast = ast_pb2.QueryAST(conjunctions=[ast_pb2.Conjunction()]) # No conds
left_joins, where = ast2select.BuildSQLQuery(ast)
self.assertEqual([], left_joins)
self.assertEqual([], where)
def testBuildSQLQuery_Normal(self):
owner_field = BUILTIN_ISSUE_FIELDS['owner']
reporter_id_field = BUILTIN_ISSUE_FIELDS['reporter_id']
conds = [
ast_pb2.MakeCond(
ast_pb2.QueryOp.TEXT_HAS, [owner_field], ['example.com'], []),
ast_pb2.MakeCond(
ast_pb2.QueryOp.EQ, [reporter_id_field], [], [111L])]
ast = ast_pb2.QueryAST(conjunctions=[ast_pb2.Conjunction(conds=conds)])
left_joins, where = ast2select.BuildSQLQuery(ast)
self.assertEqual(
[('User AS Cond0 ON (Issue.owner_id = Cond0.user_id '
'OR Issue.derived_owner_id = Cond0.user_id)', [])],
left_joins)
self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
self.assertEqual(
[('(LOWER(Cond0.email) LIKE %s)', ['%example.com%']),
('Issue.reporter_id = %s', [111L])],
where)
self.assertTrue(sql._IsValidWhereCond(where[0][0]))
def testBlockingIDCond_SingleValue(self):
fd = BUILTIN_ISSUE_FIELDS['blocking_id']
txt_cond = ast_pb2.MakeCond(
ast_pb2.QueryOp.EQ, [fd], ['1'], [])
num_cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [1L])
for cond, expected in ((txt_cond, '1'), (num_cond, 1L)):
left_joins, where = ast2select._ProcessBlockingIDCond(
cond, 'Cond1', 'Issue1')
self.assertEqual(
[('IssueRelation AS Cond1 ON Issue.id = Cond1.dst_issue_id AND '
'Cond1.kind = %s AND Cond1.issue_id = %s',
['blockedon', expected])],
left_joins)
self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
self.assertEqual(
[('Cond1.dst_issue_id IS NOT NULL', [])],
where)
self.assertTrue(sql._IsValidWhereCond(where[0][0]))
def testBlockingIDCond_NegatedSingleValue(self):
fd = BUILTIN_ISSUE_FIELDS['blocking_id']
txt_cond = ast_pb2.MakeCond(
ast_pb2.QueryOp.NE, [fd], ['1'], [])
num_cond = ast_pb2.MakeCond(ast_pb2.QueryOp.NE, [fd], [], [1L])
for cond, expected in ((txt_cond, '1'), (num_cond, 1L)):
left_joins, where = ast2select._ProcessBlockingIDCond(
cond, 'Cond1', 'Issue1')
self.assertEqual(
[('IssueRelation AS Cond1 ON Issue.id = Cond1.dst_issue_id AND '
'Cond1.kind = %s AND Cond1.issue_id = %s',
['blockedon', expected])],
left_joins)
self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
self.assertEqual(
[('Cond1.dst_issue_id IS NULL', [])],
where)
self.assertTrue(sql._IsValidWhereCond(where[0][0]))
def testBlockingIDCond_MultiValue(self):
fd = BUILTIN_ISSUE_FIELDS['blocking_id']
txt_cond = ast_pb2.MakeCond(
ast_pb2.QueryOp.EQ, [fd], ['1', '2', '3'], [])
num_cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [1L, 2L, 3L])
for cond, expected in ((txt_cond, ['1', '2', '3']),
(num_cond, [1L, 2L, 3L])):
left_joins, where = ast2select._ProcessBlockingIDCond(
cond, 'Cond1', 'Issue1')
self.assertEqual(
[('IssueRelation AS Cond1 ON Issue.id = Cond1.dst_issue_id AND '
'Cond1.kind = %s AND Cond1.issue_id IN (%s,%s,%s)',
['blockedon'] + expected)],
left_joins)
self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
self.assertEqual(
[('Cond1.dst_issue_id IS NOT NULL', [])],
where)
self.assertTrue(sql._IsValidWhereCond(where[0][0]))
def testBlockingIDCond_NegatedMultiValue(self):
fd = BUILTIN_ISSUE_FIELDS['blocking_id']
txt_cond = ast_pb2.MakeCond(
ast_pb2.QueryOp.NE, [fd], ['1', '2', '3'], [])
num_cond = ast_pb2.MakeCond(ast_pb2.QueryOp.NE, [fd], [], [1L, 2L, 3L])
for cond, expected in ((txt_cond, ['1', '2', '3']),
(num_cond, [1L, 2L, 3L])):
left_joins, where = ast2select._ProcessBlockingIDCond(
cond, 'Cond1', 'Issue1')
self.assertEqual(
[('IssueRelation AS Cond1 ON Issue.id = Cond1.dst_issue_id AND '
'Cond1.kind = %s AND Cond1.issue_id IN (%s,%s,%s)',
['blockedon'] + expected)],
left_joins)
self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
self.assertEqual(
[('Cond1.dst_issue_id IS NULL', [])],
where)
self.assertTrue(sql._IsValidWhereCond(where[0][0]))
def testBlockedOnIDCond_SingleValue(self):
fd = BUILTIN_ISSUE_FIELDS['blockedon_id']
txt_cond = ast_pb2.MakeCond(
ast_pb2.QueryOp.EQ, [fd], ['1'], [])
num_cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [1L])
for cond, expected in ((txt_cond, '1'), (num_cond, 1L)):
left_joins, where = ast2select._ProcessBlockedOnIDCond(
cond, 'Cond1', 'Issue1')
self.assertEqual(
[('IssueRelation AS Cond1 ON Issue.id = Cond1.issue_id AND '
'Cond1.kind = %s AND Cond1.dst_issue_id = %s',
['blockedon', expected])],
left_joins)
self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
self.assertEqual(
[('Cond1.issue_id IS NOT NULL', [])],
where)
self.assertTrue(sql._IsValidWhereCond(where[0][0]))
def testBlockedOnIDCond_NegatedSingleValue(self):
fd = BUILTIN_ISSUE_FIELDS['blockedon_id']
txt_cond = ast_pb2.MakeCond(
ast_pb2.QueryOp.NE, [fd], ['1'], [])
num_cond = ast_pb2.MakeCond(ast_pb2.QueryOp.NE, [fd], [], [1L])
for cond, expected in ((txt_cond, '1'), (num_cond, 1L)):
left_joins, where = ast2select._ProcessBlockedOnIDCond(
cond, 'Cond1', 'Issue1')
self.assertEqual(
[('IssueRelation AS Cond1 ON Issue.id = Cond1.issue_id AND '
'Cond1.kind = %s AND Cond1.dst_issue_id = %s',
['blockedon', expected])],
left_joins)
self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
self.assertEqual(
[('Cond1.issue_id IS NULL', [])],
where)
self.assertTrue(sql._IsValidWhereCond(where[0][0]))
def testBlockedOnIDCond_MultiValue(self):
fd = BUILTIN_ISSUE_FIELDS['blockedon_id']
txt_cond = ast_pb2.MakeCond(
ast_pb2.QueryOp.EQ, [fd], ['1', '2', '3'], [])
num_cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [1L, 2L, 3L])
for cond, expected in ((txt_cond, ['1', '2', '3']),
(num_cond, [1L, 2L, 3L])):
left_joins, where = ast2select._ProcessBlockedOnIDCond(
cond, 'Cond1', 'Issue1')
self.assertEqual(
[('IssueRelation AS Cond1 ON Issue.id = Cond1.issue_id AND '
'Cond1.kind = %s AND Cond1.dst_issue_id IN (%s,%s,%s)',
['blockedon'] + expected)],
left_joins)
self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
self.assertEqual(
[('Cond1.issue_id IS NOT NULL', [])],
where)
self.assertTrue(sql._IsValidWhereCond(where[0][0]))
def testBlockedOnIDCond_NegatedMultiValue(self):
fd = BUILTIN_ISSUE_FIELDS['blockedon_id']
txt_cond = ast_pb2.MakeCond(
ast_pb2.QueryOp.NE, [fd], ['1', '2', '3'], [])
num_cond = ast_pb2.MakeCond(ast_pb2.QueryOp.NE, [fd], [], [1L, 2L, 3L])
for cond, expected in ((txt_cond, ['1', '2', '3']),
(num_cond, [1L, 2L, 3L])):
left_joins, where = ast2select._ProcessBlockedOnIDCond(
cond, 'Cond1', 'Issue1')
self.assertEqual(
[('IssueRelation AS Cond1 ON Issue.id = Cond1.issue_id AND '
'Cond1.kind = %s AND Cond1.dst_issue_id IN (%s,%s,%s)',
['blockedon'] + expected)],
left_joins)
self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
self.assertEqual(
[('Cond1.issue_id IS NULL', [])],
where)
self.assertTrue(sql._IsValidWhereCond(where[0][0]))
def testMergedIntoIDCond_MultiValue(self):
fd = BUILTIN_ISSUE_FIELDS['mergedinto_id']
txt_cond = ast_pb2.MakeCond(
ast_pb2.QueryOp.EQ, [fd], ['1', '2', '3'], [])
num_cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [1L, 2L, 3L])
for cond, expected in ((txt_cond, ['1', '2', '3']),
(num_cond, [1L, 2L, 3L])):
left_joins, where = ast2select._ProcessMergedIntoIDCond(
cond, 'Cond1', 'Issue1')
self.assertEqual(
[('IssueRelation AS Cond1 ON Issue.id = Cond1.issue_id AND '
'Cond1.kind = %s AND Cond1.dst_issue_id IN (%s,%s,%s)',
['mergedinto'] + expected)],
left_joins)
self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
self.assertEqual(
[('Cond1.issue_id IS NOT NULL', [])],
where)
self.assertTrue(sql._IsValidWhereCond(where[0][0]))
def testHasBlockedCond(self):
for op, expected in ((ast_pb2.QueryOp.IS_DEFINED, 'IS NOT NULL'),
(ast_pb2.QueryOp.IS_NOT_DEFINED, 'IS NULL')):
fd = BUILTIN_ISSUE_FIELDS['blockedon_id']
cond = ast_pb2.MakeCond(op, [fd], [], [])
left_joins, where = ast2select._ProcessBlockedOnIDCond(
cond, 'Cond1', None)
self.assertEqual(
[('IssueRelation AS Cond1 ON Issue.id = Cond1.issue_id AND '
'Cond1.kind = %s', ['blockedon'])],
left_joins)
self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
self.assertEqual([('Cond1.issue_id %s' % expected, [])], where)
self.assertTrue(sql._IsValidWhereCond(where[0][0]))
def testHasBlockingCond(self):
for op, expected in ((ast_pb2.QueryOp.IS_DEFINED, 'IS NOT NULL'),
(ast_pb2.QueryOp.IS_NOT_DEFINED, 'IS NULL')):
fd = BUILTIN_ISSUE_FIELDS['blocking_id']
cond = ast_pb2.MakeCond(op, [fd], [], [])
left_joins, where = ast2select._ProcessBlockingIDCond(cond, 'Cond1', None)
self.assertEqual(
[('IssueRelation AS Cond1 ON Issue.id = Cond1.dst_issue_id AND '
'Cond1.kind = %s', ['blockedon'])],
left_joins)
self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
self.assertEqual([('Cond1.dst_issue_id %s' % expected, [])], where)
self.assertTrue(sql._IsValidWhereCond(where[0][0]))
def testProcessOwnerCond(self):
fd = BUILTIN_ISSUE_FIELDS['owner']
cond = ast_pb2.MakeCond(
ast_pb2.QueryOp.TEXT_HAS, [fd], ['example.com'], [])
left_joins, where = ast2select._ProcessOwnerCond(cond, 'Cond1', 'User1')
self.assertEqual(
[('User AS Cond1 ON (Issue.owner_id = Cond1.user_id '
'OR Issue.derived_owner_id = Cond1.user_id)', [])],
left_joins)
self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
self.assertEqual(
[('(LOWER(Cond1.email) LIKE %s)', ['%example.com%'])],
where)
self.assertTrue(sql._IsValidWhereCond(where[0][0]))
def testProcessOwnerIDCond(self):
fd = BUILTIN_ISSUE_FIELDS['owner_id']
cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [111L])
left_joins, where = ast2select._ProcessOwnerIDCond(cond, 'Cond1', 'User1')
self.assertEqual([], left_joins)
self.assertEqual(
[('(Issue.owner_id = %s OR Issue.derived_owner_id = %s)',
[111L, 111L])],
where)
self.assertTrue(sql._IsValidWhereCond(where[0][0]))
def testProcessOwnerLastVisitCond(self):
fd = BUILTIN_ISSUE_FIELDS['ownerlastvisit']
NOW = 1234567890
cond = ast_pb2.MakeCond(ast_pb2.QueryOp.LT, [fd], [], [NOW])
left_joins, where = ast2select._ProcessOwnerLastVisitCond(
cond, 'Cond1', 'User1')
self.assertEqual(
[('User AS Cond1 ON (Issue.owner_id = Cond1.user_id OR '
'Issue.derived_owner_id = Cond1.user_id)',
[])],
left_joins)
self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
self.assertEqual(
[('Cond1.last_visit_timestamp < %s',
[NOW])],
where)
self.assertTrue(sql._IsValidWhereCond(where[0][0]))
def testProcessIsOwnerBouncing(self):
fd = BUILTIN_ISSUE_FIELDS['ownerbouncing']
cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [])
left_joins, where = ast2select._ProcessIsOwnerBouncing(
cond, 'Cond1', 'User1')
self.assertEqual(
[('User AS Cond1 ON (Issue.owner_id = Cond1.user_id OR '
'Issue.derived_owner_id = Cond1.user_id)',
[])],
left_joins)
self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
self.assertEqual(
[('(Cond1.email_bounce_timestamp IS NOT NULL AND'
' Cond1.email_bounce_timestamp != %s)',
[0])],
where)
self.assertTrue(sql._IsValidWhereCond(where[0][0]))
def testProcessReporterCond(self):
fd = BUILTIN_ISSUE_FIELDS['reporter']
cond = ast_pb2.MakeCond(
ast_pb2.QueryOp.TEXT_HAS, [fd], ['example.com'], [])
left_joins, where = ast2select._ProcessReporterCond(cond, 'Cond1', 'User1')
self.assertEqual(
[('User AS Cond1 ON Issue.reporter_id = Cond1.user_id', [])],
left_joins)
self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
self.assertEqual(
[('(LOWER(Cond1.email) LIKE %s)', ['%example.com%'])],
where)
self.assertTrue(sql._IsValidWhereCond(where[0][0]))
def testProcessReporterIDCond(self):
fd = BUILTIN_ISSUE_FIELDS['reporter_id']
cond = ast_pb2.MakeCond(ast_pb2.QueryOp.EQ, [fd], [], [111L])
left_joins, where = ast2select._ProcessReporterIDCond(
cond, 'Cond1', 'User1')
self.assertEqual([], left_joins)
self.assertEqual(
[('Issue.reporter_id = %s', [111L])],
where)
self.assertTrue(sql._IsValidWhereCond(where[0][0]))
def testProcessCcCond_SinglePositive(self):
fd = BUILTIN_ISSUE_FIELDS['cc']
cond = ast_pb2.MakeCond(
ast_pb2.QueryOp.TEXT_HAS, [fd], ['example.com'], [])
left_joins, where = ast2select._ProcessCcCond(cond, 'Cond1', 'User1')
self.assertEqual(
[('(Issue2Cc AS Cond1 JOIN User AS User1 '
'ON Cond1.cc_id = User1.user_id AND (LOWER(User1.email) LIKE %s)) '
'ON Issue.id = Cond1.issue_id AND Issue.shard = Cond1.issue_shard',
['%example.com%'])],
left_joins)
self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
self.assertEqual(
[('User1.email IS NOT NULL', [])],
where)
self.assertTrue(sql._IsValidWhereCond(where[0][0]))
def testProcessCcCond_MultiplePositive(self):
fd = BUILTIN_ISSUE_FIELDS['cc']
cond = ast_pb2.MakeCond(
ast_pb2.QueryOp.TEXT_HAS, [fd], ['.com', '.org'], [])
left_joins, where = ast2select._ProcessCcCond(cond, 'Cond1', 'User1')
self.assertEqual(
[('(Issue2Cc AS Cond1 JOIN User AS User1 '
'ON Cond1.cc_id = User1.user_id AND '
'(LOWER(User1.email) LIKE %s OR LOWER(User1.email) LIKE %s)) '
'ON Issue.id = Cond1.issue_id AND Issue.shard = Cond1.issue_shard',
['%.com%', '%.org%'])],
left_joins)
self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
self.assertEqual(
[('User1.email IS NOT NULL', [])],
where)
self.assertTrue(sql._IsValidWhereCond(where[0][0]))
def testProcessCcCond_SingleNegative(self):
fd = BUILTIN_ISSUE_FIELDS['cc']
cond = ast_pb2.MakeCond(
ast_pb2.QueryOp.NOT_TEXT_HAS, [fd], ['example.com'], [])
left_joins, where = ast2select._ProcessCcCond(cond, 'Cond1', 'User1')
self.assertEqual(
[('(Issue2Cc AS Cond1 JOIN User AS User1 '
'ON Cond1.cc_id = User1.user_id AND (LOWER(User1.email) LIKE %s)) '
'ON Issue.id = Cond1.issue_id AND Issue.shard = Cond1.issue_shard',
['%example.com%'])],
left_joins)
self.assertTrue(sql._IsValidJoin(left_joins[0][0]))
self.assertEqual(
[('User1.email IS NULL', [])],
where)
self.assertTrue(sql._IsValidWhereCond(where[0][0]))
def testProcessCcCond_MultipleNegative(self):
fd = BUILTIN_ISSUE_FIELDS['cc']
cond = ast_pb2.MakeCond(
ast_pb2.QueryOp.NOT_TEXT_HAS, [fd], ['.com', '.org'], [])
left_joins, where = ast2select._ProcessCcCond(cond, 'Cond1', 'User1')
self.assertEqual(
[('(Issue2Cc AS Cond1 JOIN User AS User1 '
'ON Cond1.cc_id = User1.user_id AND '
'(LOWER(User1.email) LIKE %s OR LOWER(User1.email) LIKE %s)) '
'ON Issue.id
import pkgutil
import traceback
import re
import sys
from os import path
import types
import threading
__running = {}
REPOS = ['https://github.com/martinpihrt/OSPy-plugins/archive/master.zip'] # repository with plugins
################################################################################
# Plugin Options #
################################################################################
class PluginOptions(dict):
def __init__(self, plugin, defaults):
super(PluginOptions, self).__init__(defaults.iteritems())
self._defaults = defaults.copy()
from ospy.options import options
my_dir = path.dirname(path.abspath(__file__))
plugin = 'plugin_unknown'
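# Walk the call stack (most recent frame first) to discover which plugin
# package constructed this options object; that package's directory name
# determines the 'plugin_<name>' key used in the options store.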
stack = traceback.extract_stack()
for tb in reversed(stack):
abspath = path.dirname(path.abspath(tb[0]))
if abspath.startswith(my_dir) and abspath != path.abspath(__file__):
parts = abspath[len(my_dir):].split(path.sep)
while parts and not parts[0]:
del parts[0]
if parts:
plugin = 'plugin_' + parts[0]
break
if plugin in options:
for key, value in options[plugin].iteritems():
if key in self:
value_type = type(value)
if value_type == unicode:
value_type = str
default_type = type(self[key])
if default_type == unicode:
default_type = str
if value_type == default_type:
self[key] = value
self._plugin = plugin
def __setitem__(self, key, value):
try:
super(PluginOptions, self).__setitem__(key, value)
if hasattr(self, '_plugin'):
from ospy.options import options
options[self._plugin] = self.copy()
except ValueError: # No index available yet
pass
def web_update(self, qdict, skipped=None):
for key in self.keys():
try:
if skipped is not None and key in skipped:
continue
default_value = self._defaults[key]
old_value = self[key]
if isinstance(default_value, bool):
self[key] = True if qdict.get(key, 'off') == 'on' else False
elif isinstance(default_value, int):
self[key] = int(qdict.get(key, old_value))
elif isinstance(default_value, float):
self[key] = float(qdict.get(key, old_value))
elif isinstance(default_value, str) or isinstance(default_value, unicode):
self[key] = qdict.get(key, old_value)
elif isinstance(default_value, list):
self[key] = [int(x) for x in qdict.get(key, old_value)]
except ValueError:
import web
raise web.badrequest(_(u'Invalid value for') + ' ' + '%s:%s' % (key, qdict.get(key)))
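# A minimal usage sketch (hypothetical plugin code; the option names
# 'enabled' and 'interval' are illustrative, not part of any real plugin).
# Note that the plugin key is derived from the calling module via the stack
# walk above, not from the first constructor argument:
#
#   plugin_options = PluginOptions(
#       'my_plugin',
#       {'enabled': False, 'interval': 60})
#
#   # In a web handler, refresh the options from submitted form data:
#   #   plugin_options.web_update(web.input())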
################################################################################
# Plugin Repositories #
################################################################################
class _PluginChecker(threading.Thread):
def __init__(self):
super(_PluginChecker, self).__init__()
self.daemon = True
self._sleep_time = 0
self._repo_data = {}
self._repo_contents = {}
self.start()
def update(self):
self._sleep_time = 10
def _sleep(self, secs):
import time
self._sleep_time = secs
while self._sleep_time > 0:
time.sleep(1)
self._sleep_time -= 1
def run(self):
from ospy.options import options
from ospy.log import log
import logging
while True:
try:
if options.use_plugin_update:
for repo in REPOS:
self._repo_data[repo] = self._download_zip(repo)
self._repo_contents[repo] = self.zip_contents(self._get_zip(repo))
status = options.plugin_status
if options.auto_plugin_update and options.use_plugin_update and not log.active_runs():
for plugin in available():
update = self.available_version(plugin)
if update is not None and plugin in status and status[plugin]['hash'] != update['hash']:
logging.info(_(u'Updating the {} plug-in.').format(plugin))
self.install_repo_plugin(update['repo'], plugin)
except Exception:
logging.error(_(u'Failed to update the plug-ins information:') + ' ' + str(traceback.format_exc()))
finally:
self._sleep(3600)
def available_version(self, plugin):
result = None
for repo_index, repo in enumerate(REPOS):
repo_contents = self.get_repo_contents(repo)
if plugin in repo_contents:
result = repo_contents[plugin]
result['repo_index'] = repo_index
result['repo'] = repo
break
return result
@staticmethod
def _download_zip(repo):
import urllib2
import logging
import io
response = urllib2.urlopen(repo)
zip_data = response.read()
logging.debug(_(u'Downloaded') + ' ' + str(repo))
return io.BytesIO(zip_data)
def _get_zip(self, repo):
if repo not in self._repo_data:
self._repo_data[repo] = self._download_zip(repo)
return self._repo_data[repo]
@staticmethod
def zip_contents(zip_file_data, load_read_me=True):
import zipfile
import os
import datetime
import hashlib
import logging
from ospy.options import options
import web
from ospy.helpers import template_globals
result = {}
try:
zip_file = zipfile.ZipFile(zip_file_data)
infos = zip_file.infolist()
files = zip_file.namelist()
inits = [f for f in files if f.endswith('__init__.py')]
for init in inits:
init_dir = os.path.dirname(init)
plugin_id = os.path.basename(init_dir)
read_me = ''
# Version information:
plugin_hash = ''
plugin_date = datetime.datetime(1970, 1, 1)
if init_dir + '/README.md' in files:
# Check all files:
for zip_info in infos:
zip_name = zip_info.filename
if zip_name.startswith(init_dir):
relative_name = zip_name[len(init_dir):].lstrip('/')
if relative_name and not relative_name.endswith('/'):
plugin_date = max(plugin_date, datetime.datetime(*zip_info.date_time))
plugin_hash += hex(zip_info.CRC)
has_error = False
if load_read_me:
try:
import markdown2
converted = markdown2.markdown(zip_file.read(init_dir + '/README.md').decode('utf-8'))
read_me = web.template.Template(converted, globals=template_globals())()
except Exception:
has_error = True
converted = zip_file.read(init_dir + '/README.md').decode('utf-8')
read_me = web.template.Template(converted, globals=template_globals())()
logging.error(_(u'Failed in markdown:') + ' ' + str(traceback.format_exc()))
if options.plugin_readme_error != has_error:
options.plugin_readme_error = has_error
result[plugin_id] = {
'name': _plugin_name(zip_file.read(init).splitlines()),
'hash': hashlib.md5(plugin_hash).hexdigest(),
'date': plugin_date,
'read_me': read_me,
'dir': init_dir
}
except Exception:
logging.error(_(u'Failed to read a plug-in zip file:') + ' ' + str(traceback.format_exc()))
return result
def get_repo_contents(self, repo):
import logging
try:
if repo not in self._repo_contents:
self._repo_contents[repo] = self.zip_contents(self._get_zip(repo))
except Exception:
logging.error(_(u'Failed to get contents of') + ': ' + str(repo) + str(traceback.format_exc()))
return {}
return self._repo_contents[repo]
@staticmethod
def _install_plugin(zip_file_data, plugin, p_dir):
import os
import shutil
import zipfile
import datetime
import hashlib
from ospy.helpers import mkdir_p
from ospy.helpers import del_rw
from ospy.options import options
# First stop it if it is running:
enabled = plugin in options.enabled_plugins
if enabled:
options.enabled_plugins.remove(plugin)
start_enabled_plugins()
# Clean the target directory and create it if needed:
target_dir = plugin_dir(plugin)
if os.path.exists(target_dir):
old_files = os.listdir(target_dir)
for old_file in old_files:
if old_file != 'data':
shutil.rmtree(os.path.join(target_dir, old_file), onerror=del_rw)
else:
mkdir_p(target_dir)
# Load the zip file:
zip_file = zipfile.ZipFile(zip_file_data)
infos = zip_file.infolist()
# Version information:
plugin_hash = ''
plugin_date = datetime.datetime(1970, 1, 1)
# Extract all files:
for zip_info in infos:
zip_name = zip_info.filename
if zip_name.startswith(p_dir):
relative_name = zip_name[len(p_dir):].lstrip('/')
target_name = os.path.join(target_dir, relative_name)
if relative_name:
if relative_name.endswith('/'):
mkdir_p(target_name)
else:
plugin_date = max(plugin_date, datetime.datetime(*zip_info.date_time))
plugin_hash += hex(zip_info.CRC)
contents = zip_file.read(zip_name)
with open(target_name, 'wb') as fh:
fh.write(contents)
options.plugin_status[plugin] = {
'hash': hashlib.md5(plugin_hash).hexdigest(),
'date': plugin_date
}
# Re-assign so the mutated dict is written back through the options store.
options.plugin_status = options.plugin_status
# Start again if needed:
if enabled:
options.enabled_plugins.append(plugin)
start_enabled_plugins()
def install_repo_plugin(self, repo, plugin_filter):
self.install_custom_plugin(self._get_zip(repo), plugin_filter)
def install_custom_plugin(self, zip_file_data, plugin_filter=None):
contents = self.zip_contents(zip_file_data, False)
for plugin, info in contents.iteritems():
if plugin_filter is None or plugin == plugin_filter:
self._install_plugin(zip_file_data, plugin, info['dir'])
checker = _PluginChecker()
################################################################################
# Plugin App #
################################################################################
def get_app():
import web
class PluginApp(web.application):
def handle(self):
from ospy.server import session
mapping = []
for module in running():
import_name = __name__ + '.' + module
plugin = get(module)
mapping += _get_urls(import_name, plugin)
fn, args = self._match(mapping, web.ctx.path)
if session['category'] == 'admin':
return self._delegate(fn, self.fvars, args)
else:
return ''
return PluginApp(fvars=locals())
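# get_app() is intended to be mounted as a web.py sub-application: it routes
# URLs for every running plugin's WebPage classes, and sessions that are not
# in the 'admin' category receive an empty response.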
################################################################################
# Plugin directories #
################################################################################
def plugin_dir(module=None):
my_dir = path.dirname(path.abspath(__file__))
if module is not None:
if module.startswith('plugins.'):
module = module[8:]
else:
stack = traceback.extract_stack()
module = ''
for tb in reversed(stack):
tb_dir = path.dirname(path.abspath(tb[0]))
if 'plugins' in tb_dir and tb_dir != my_dir:
module = path.basename(tb_dir)
break
return path.join(my_dir, module)
def plugin_data_dir(module=None):
return path.join(plugin_dir(module), 'data')
def plugin_docs_dir(module=None):
return path.join(plugin_dir(module), 'docs')
################################################################################
# Plugin information + urls #
################################################################################
def available():
plugins = []
for imp, module, is_pkg in pkgutil.iter_modules(['plugins']):
_protect(module)
if plugin_name(module) is not None:
plugins.append(module)
return plugins
def _plugin_name(lines):
result = None
for line in lines:
if 'NAME' in line:
match = re.search('NAME\\s=\\s("|\')([^"\']+)("|\')', line)
if match is not None:
result = match.group(2)
return result
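# _plugin_name() looks for a module-level assignment such as (illustrative):
#   NAME = 'My Plugin'
# with single or double quotes; if several lines match, the last one wins.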
__name_cache = {}
def plugin_name(plugin):
"""Tries to find the name of the given plugin without importing it yet."""
if plugin not in __name_cache:
__name_cache[plugin] = None
filename = path.join(path.dirname(__file__), plugin, '__init__.py')
try:
with open(filename) as fh:
__name_cache[plugin] = _plugin_name(fh)
except Exception:
pass
return __name_cache[plugin]
def plugin_names():
return {plugin: (plugin_name(plugin)) for plugin in available() if plugin_name(plugin)}
def plugin_url(cls, prefix='/plugins/'):
from ospy.webpages import WebPage
import inspect
if cls is None:
result = cls
else:
if inspect.isclass(cls) and issubclass(cls, WebPage):
cls = cls.__module__ + '.' + cls.__name__
parts = cls.split('.')
if len(parts) >= 3:
result = prefix + '/'.join(parts[1:])
elif len(parts) >= 2:
result = prefix + '/'.join(parts)
else:
result = prefix + cls
if result.endswith('_page'):
result = result[:-5]
if result.endswith('_json'):
result = result[:-5] + '.json'
if result.endswith('_csv'):
result = result[:-4] + '.csv'
return result
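# For illustration (hypothetical class names): 'plugins.my_plugin.settings_page'
# maps to '/plugins/my_plugin/settings', while 'plugins.my_plugin.status_json'
# maps to '/plugins/my_plugin/status.json'.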
__urls_cache = {}
def _get_urls(import_name, plugin):
if plugin not in __urls_cache:
from ospy.webpages import WebPage
import inspect
result = []
for element in dir(plugin):
if inspect.isclass(getattr(plugin, element)) and issubclass(getattr(plugin, element), WebPage):
if import_name == getattr(plugin, element).__module__:
classname = import_name + '.' + element
result.append((plugin_url(classname, '/'), classname))
__urls_cache[plugin] = result
return __urls_cache[plugin]
################################################################################
# Plugin start/stop #
################################################################################
def start_enabled_plugins():
from ospy.helpers import mkdir_p
from ospy.options import options
import logging
import time
for module in available():
if module in options.enabled_plugins and module not in __running:
plugin_n = module
import_name = __name__ + '.' + module
try:
time.sleep(0.1)
plugin = getattr(__import__(import_name), module)
plugin = reload(plugin)
plugin_n = plugin.NAME
mkdir_p(plugin_data_dir(module))
mkdir_p(plugin_docs_dir(module))
plugin.start()
__running[module] = plugin
logging.info(_(u'Started the') + ': ' + str(plugin_n))
if plugin.LINK is not None and not (plugin.LINK.startswith(module) or plugin.LINK.startswith(__name__)):
plugin.LINK = module + '.' + plugin.LINK
except Exception:
logging.error(_(u'Failed to load the') + ' ' + str(plugin_n) + ' ' + str(traceback.format_exc()))
options.enabled_plugins.remove(module)
for module, plugin in __running.copy().iteritems():
if module not in options.enabled_plugins:
plugin_n = plugin.NAME
try:
plugin.stop()
del __running[module]
#!/usr/bin/env python3
#
# Copyright (c) 2020 Google LLC.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Unit tests for schema qualifiers.
#
import unittest
from .testutils import TLVSchemaTestCase
class Test_Qualifiers(TLVSchemaTestCase):
_quals = ['extensible',
'optional',
'private',
'invariant',
'nullable',
'tag-order',
'schema-order',
'any-order',
'range 0..100',
'length 0..100',
'tag 42',
'id 42']
_qualNames = [ qual.split(' ', 1)[0] for qual in _quals ]
_allQuals = ', '.join(_quals)
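# _allQuals renders every qualifier as one comma-separated bracket body; each
# test below splices it into schema text, e.g.:
#   test => INTEGER [ extensible, optional, ..., id 42 ]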
def _checkQualifierNotAllowedErrors(self, errs, allowedQuals, construct):
errText = ", ".join((str(err) for err in errs))
for qual in self._qualNames:
qualAllowed = qual in allowedQuals
# The qualifier was accepted if the validator did NOT emit a
# "<qual> qualifier not allowed" error for it.
qualAccepted = not (('%s qualifier not allowed' % qual) in errText)
if qualAccepted and not qualAllowed:
self.fail('%s qualifier unexpectedly *allowed* on %s' % (qual, construct))
elif not qualAccepted and qualAllowed:
self.fail('%s qualifier unexpectedly *disallowed* on %s' % (qual, construct))
def test_Qualifiers_AllowedQualifiers_TypeDef(self):
schemaText = 'test [ %s ] => INTEGER' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=('tag',),  # one-element tuple, not a plain string
construct='Type definition')
def test_Qualifiers_AllowedQualifiers_VENDOR(self):
schemaText = 'test => VENDOR [ %s ]' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=('id',),
construct='VENDOR definition')
def test_Qualifiers_AllowedQualifiers_PROFILE(self):
schemaText = 'test => PROFILE [ %s ]' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=('id',),
construct='PROFILE definition')
def test_Qualifiers_AllowedQualifiers_MESSAGE(self):
schemaText = '''
profile1 => PROFILE [ id 1 ]
{
test => MESSAGE [ %s ]
}
''' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=('id',),
construct='MESSAGE definition')
def test_Qualifiers_AllowedQualifiers_STATUS_CODE(self):
schemaText = '''
profile1 => PROFILE [ id 1 ]
{
test => STATUS CODE [ %s ]
}
''' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=('id',),
construct='STATUS CODE definition')
def test_Qualifiers_AllowedQualifiers_STRUCTURE(self):
schemaText = 'test => STRUCTURE [ %s ] { }' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=('extensible', 'private', 'nullable', 'invariant', 'tag-order', 'schema-order', 'any-order'),
construct='STRUCTURE type')
def test_Qualifiers_AllowedQualifiers_FIELD_GROUP(self):
schemaText = 'test => FIELD GROUP [ %s ] { }' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=(),
construct='FIELD GROUP type')
def test_Qualifiers_AllowedQualifiers_ARRAY(self):
schemaText = 'test => ARRAY [ %s ] { }' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=('nullable', 'length'),
construct='ARRAY type')
def test_Qualifiers_AllowedQualifiers_ARRAY_OF(self):
schemaText = 'test => ARRAY [ %s ] OF ANY' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=('nullable', 'length'),
construct='ARRAY OF type')
def test_Qualifiers_AllowedQualifiers_LIST(self):
schemaText = 'test => LIST [ %s ] { }' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=('nullable', 'length'),
construct='LIST type')
def test_Qualifiers_AllowedQualifiers_LIST_OF(self):
schemaText = 'test => LIST [ %s ] OF ANY' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=('nullable', 'length'),
construct='LIST OF type')
def test_Qualifiers_AllowedQualifiers_CHOICE_OF(self):
schemaText = 'test => CHOICE [ %s ] OF { }' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=('nullable',),
construct='CHOICE OF type')
def test_Qualifiers_AllowedQualifiers_INTEGER(self):
schemaText = 'test => INTEGER [ %s ]' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=('nullable', 'range'),
construct='INTEGER type')
def test_Qualifiers_AllowedQualifiers_UNSIGNED_INTEGER(self):
schemaText = 'test => UNSIGNED INTEGER [ %s ]' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=('nullable', 'range'),
construct='UNSIGNED INTEGER type')
def test_Qualifiers_AllowedQualifiers_FLOAT(self):
schemaText = 'test => FLOAT [ %s ]' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=('nullable', 'range'),
construct='FLOAT type')
def test_Qualifiers_AllowedQualifiers_BOOLEAN(self):
schemaText = 'test => BOOLEAN [ %s ]' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=('nullable',),
construct='BOOLEAN type')
def test_Qualifiers_AllowedQualifiers_STRING(self):
schemaText = 'test => STRING [ %s ]' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=('nullable', 'length'),
construct='STRING type')
def test_Qualifiers_AllowedQualifiers_BYTE_STRING(self):
schemaText = 'test => BYTE STRING [ %s ]' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=('nullable', 'length'),
construct='BYTE STRING type')
def test_Qualifiers_AllowedQualifiers_ANY(self):
schemaText = 'test => ANY [ %s ]' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=(),
construct='ANY type')
def test_Qualifiers_AllowedQualifiers_NULL(self):
schemaText = 'test => NULL [ %s ]' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=(),
construct='NULL type')
def test_Qualifiers_AllowedQualifiers_Fields(self):
schemaText = '''
test => STRUCTURE
{
field1 [ %s ] : INTEGER
}
''' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=('tag', 'optional'),
construct='STRUCTURE field')
schemaText = '''
test => FIELD GROUP
{
field1 [ %s ] : INTEGER
}
''' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=('tag', 'optional'),
construct='FIELD GROUP field')
def test_Qualifiers_AllowedQualifiers_Elements(self):
schemaText = '''
test => ARRAY
{
elem1 [ %s ] : INTEGER
}
''' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=(),
construct='ARRAY element')
schemaText = '''
test => LIST
{
elem1 [ %s ] : INTEGER
}
''' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=('tag',),
construct='LIST element')
def test_Qualifiers_AllowedQualifiers_Alternates(self):
schemaText = '''
test => CHOICE OF
{
alt1 [ %s ] : INTEGER
}
''' % self._allQuals
(tlvSchema, errs) = self.loadValidate(schemaText)
self._checkQualifierNotAllowedErrors(errs,
allowedQuals=('tag',),
construct='CHOICE alternate')
def test_Qualifiers_DuplicateQualifiers(self):
schemaText = 'test => STRUCTURE [ extensible, extensible ] { }'
(tlvSchema, errs) = self.loadValidate(schemaText)
self.assertEqual(len(errs), 1, msg='Expected 1 error')
self.assertError(errs, 'duplicate qualifier')
def test_Qualifiers_RangeArguments(self):
schemaText = '''
test => ARRAY
{
INTEGER [ range 0..1 ],
INTEGER [ range 0..18446744073709551618 ],
INTEGER [ range -100..100 ],
INTEGER [ range -100.0..100.00000000 ],
INTEGER [ range -18446744073709551618..18446744073709551618 ],
INTEGER [ range -18446744073709551618..-18446744073709551616 ],
INTEGER [ range 8bit ],
INTEGER [ range 16bit ],
INTEGER [ range 32bit ],
INTEGER [ range 64bit ],
UNSIGNED INTEGER [ range 0..1 ],
UNSIGNED INTEGER [ range 0..18446744073709551618 ],
UNSIGNED INTEGER [ range -100..100 ],
UNSIGNED INTEGER [ range -18446744073709551618..18446744073709551618 ],
UNSIGNED INTEGER [ range -18446744073709551618..-18446744073709551616 ],
UNSIGNED INTEGER [ range 8bit ],
UNSIGNED INTEGER [ range 16bit ],
UNSIGNED INTEGER [ range 32bit ],
UNSIGNED INTEGER [ range 64bit ],
FLOAT [ range 0..1 ],
FLOAT [ range 0..18446744073709551618 ],
FLOAT [ range -100..100 ],
FLOAT [ range -100.5..100.5 ],
FLOAT [ range -18446744073709551618..18446744073709551618 ],
FLOAT [ range -18446744073709551618..-18446744073709551616 ],
FLOAT [ range -18446744073709551618.5..18446744073709551618.00007 ],
FLOAT [ range 32bit ],
FLOAT [ range 64bit ]
}
'''
(tlvSchema, errs) = self.loadValidate(schemaText)
self.assertNoErrors(errs)
schemaText = 'test => INTEGER [ range 1..0 ]'
(tlvSchema, errs) = self.loadValidate(schemaText)
self.assertErrorCount(errs, 1)
self.assertError(errs, 'must be >=')
schemaText = 'test => INTEGER [ range 100..-100 ]'
(tlvSchema, errs) = self.loadValidate(schemaText)
self.assertErrorCount(errs, 1)
self.assertError(errs, 'must be >=')
schemaText = 'test => INTEGER [ range 0..1.5 ]'
(tlvSchema, errs) = self.loadValidate(schemaText)
self.assertErrorCount(errs, 1)
self.assertError(errs, 'must be integers')
schemaText = 'test => FLOAT [ range 8bit ]'
(tlvSchema, errs) = self.loadValidate(schemaText)
self.assertErrorCount(errs, 1)
self.assertError(errs, 'only 32bit and 64bit range')
schemaText = 'test => FLOAT [ range 16bit ]'
(tlvSchema, errs) = self.loadValidate(schemaText)
self.assertErrorCount(errs, 1)
self.assertError(errs, 'only 32bit and 64bit range')
def test_Qualifiers_LengthArguments(self):
schemaText = '''
test => ARRAY
{
STRING [ length 42 ],
STRING [ length 0..1 ],
STRING [ length 100..18446744073709551618 ],
STRING [ length 0.. ],
BYTE STRING [ length 0 ],
BYTE STRING [ length 0..1 ],
BYTE STRING [ length 100..18446744073709551618 ],
BYTE STRING [ length 100.. ],
ARRAY [ length 18446744073709551618 ] OF BOOLEAN,
ARRAY [ length 1..1 ] OF BOOLEAN,
ARRAY [ length 100..18446744073709551618 ] OF NULL,
ARRAY [ length 0..0 ] { ANY * },
ARRAY [ length 18446744073709551618.. ] { },
LIST [ length 1 ] OF ANY,
LIST [ length 100..101 ] OF INTEGER,
LIST [ length 100..18446744073709551618 ] OF BYTE STRING,
LIST [ length 18446744073709551618..18446744073709551618 ] { },
LIST [ length 1.. ] OF STRUCTURE { },
}
'''
(tlvSchema, errs) = self.loadValidate(schemaText)
self.assertNoErrors(errs)
schemaText = 'test => STRING [ length 1..0 ]'
(tlvSchema, errs) = self.loadValidate(schemaText)
self.assertErrorCount(errs, 1)
self.assertError(errs, 'must be >= lower bound')
schemaText = 'test => STRING [ length -1..0 ]'
(tlvSchema, errs) = self.loadValidate(schemaText)
(self.time_shifts_df.round(1) <= self.time_slider.v_model[0])) | \
((self.time_shifts_df.round(1) >= self.time_slider.v_model[1]) &
(self.time_shifts_df.round(1) <= self.time_slider.max))
else:
# Keep pairs whose time shift falls inside the selected slider range.
boolean_time_shifts = ((self.time_shifts_df.round(1) >= self.time_slider.v_model[0]) &
(self.time_shifts_df.round(1) <= self.time_slider.v_model[1]))
# A pair is displayed only if it passes both the coefficient and time-shift filters.
boolean_df = boolean_coeffs & boolean_time_shifts  # type: pd.DataFrame
return boolean_df
def update_display(self):
# get output_values
boolean_df = self.get_boolean_df() # type: pd.DataFrame
# noinspection PyArgumentList
self.current_signals = list(
set(
list(boolean_df.columns[boolean_df.any(axis='columns')]) +
list(boolean_df.columns[boolean_df.any(axis='index')])
)
)
self.save_dialog.target_dropdown.items = self.current_signals
if self.output_values_toggle.v_model == 0:
time_shift_plot = False
table_df = self.coeffs_df.loc[self.current_signals][self.current_signals]
primary_df = self.coeffs_df
secondary_df = self.time_shifts_df
elif self.output_values_toggle.v_model == 1:
time_shift_plot = True
table_df = self.time_shifts_df.loc[self.current_signals][self.current_signals]
primary_df = self.time_shifts_df
secondary_df = self.coeffs_df
else:
time_shift_plot = None
table_df = None
primary_df = None
secondary_df = None
if self.output_type_toggle.v_model == 0:
# get the new plot
heatmap_fig = _heatmap_plot(pickle.dumps(primary_df), pickle.dumps(secondary_df),
time_unit=self.time_unit, lags_plot=time_shift_plot, boolean_df=boolean_df)
self.create_displayed_fig(heatmap_fig)
self.visualization.children = [self.graph]
if self.output_type_toggle.v_model == 1:
# let's try to keep the same signal order as in the heatmap
signals_ordered = [x for x in primary_df.columns if x in table_df.columns]
self.visualization.children = [
self.table_widget(table_df[signals_ordered].reindex(signals_ordered), time_shift_plot)]
def recalculate_coeffs_and_time_shifts(self):
self.validate_units_()
self.max_time_shifts.error_messages = self.max_time_str_error
if self.max_time_str_error != '':
self.max_time_shifts.error = True
return
self.max_time_shifts.error = False
self.coeffs_time_shifts_calc(self.max_time_str, self.time_output_unit)
def time_shifts_switch_events(self, *_):
if self.time_shifts_switch.v_model:
self.max_time_shifts.disabled = False
self.max_time_shifts.style_ = self.max_time_shifts.style_.replace('display: none;', '')
if self.max_time_shifts.v_model is None or self.max_time_shifts.v_model == '':
self.max_time_shifts.v_model = 'auto'
else:
self.max_time_shifts.style_ = self.max_time_shifts.style_ + 'display: none;'
self.max_time_shifts.disabled = True
self.max_time_shifts.v_model = None
self.visualization.children = [self.progress]
self.recalculate_coeffs_and_time_shifts()
self.update_display()
def max_shifts_events(self, *_):
if self.max_time_str == self.max_time_shifts.v_model:
return
self.visualization.children = [self.progress]
self.recalculate_coeffs_and_time_shifts()
if self.max_time_shifts.error:
return
self.update_display()
def slider_colors(self, checkbox_name, slider_name):
checkbox = getattr(self, checkbox_name)
slider = getattr(self, slider_name)
if checkbox.v_model:
slider.color = self.colors['slider_track_unselected']
slider.track_color = self.colors['slider_selected']
else:
slider.color = self.colors['slider_selected']
slider.track_color = self.colors['slider_track_unselected']
def coeff_range_checkbox_events(self, *_):
self.slider_colors('coeff_range_checkbox', 'coeff_slider')
self.update_display()
def time_range_checkbox_events(self, *_):
self.slider_colors('time_range_checkbox', 'time_slider')
self.update_display()
def coeff_slider_events(self, *_):
self.coeff_lower_bound.v_model = self.coeff_slider.v_model[0]
self.coeff_upper_bound.v_model = self.coeff_slider.v_model[1]
self.update_display()
def time_slider_events(self, *_):
self.time_lower_bound.v_model = self.time_slider.v_model[0]
self.time_upper_bound.v_model = self.time_slider.v_model[1]
self.update_display()
def bound_boxes_events(self, *_):
bounds = [self.coeff_lower_bound,
self.coeff_upper_bound,
self.time_lower_bound,
self.time_upper_bound]
for x in bounds:
try:
float(x.v_model)
except ValueError:
x.error_messages = 'not a number'
return
if float(self.coeff_lower_bound.v_model) < -1:
self.coeff_lower_bound.error_messages = 'out of range'
return
if float(self.coeff_upper_bound.v_model) > 1:
self.coeff_upper_bound.error_messages = 'out of range'
return
if float(self.time_lower_bound.v_model) < self.time_shift_min:
self.time_lower_bound.error_messages = 'out of range'
return
if float(self.time_upper_bound.v_model) > self.time_shift_max:
self.time_upper_bound.error_messages = 'out of range'
return
self.coeff_lower_bound.error_messages = ''
self.coeff_upper_bound.error_messages = ''
self.time_lower_bound.error_messages = ''
self.time_upper_bound.error_messages = ''
self.coeff_slider.v_model = [float(self.coeff_lower_bound.v_model), float(self.coeff_upper_bound.v_model)]
self.time_slider.v_model = [float(self.time_lower_bound.v_model), float(self.time_upper_bound.v_model)]
self.update_display()
def output_toggle_events(self, *_):
self.visualization.children = [self.progress]
self.update_display()
def output_type_events(self, *_):
self.visualization.children = [self.progress]
self.update_display()
def get_start_end_times(self):
self.start_time = self.current_df.start.tz_convert('utc').isoformat().replace('+00:00', 'Z')
self.end_time = self.current_df.end.tz_convert('utc').isoformat().replace('+00:00', 'Z')
def signal_pairs_selected(self):
self.signals_dict = self.current_df.query_df.set_index('New Name').to_dict('index')
self.signal_pairs_ids = []
bool_df = self.get_boolean_df().copy()
bool_df.columns = [self.signals_dict[x]['ID'] for x in bool_df.columns]
bool_df.index = [self.signals_dict[x]['ID'] for x in bool_df.index]
time_shifts_df = self.time_shifts_df.copy()
time_shifts_df.columns = bool_df.columns
time_shifts_df.index = bool_df.index
for col in bool_df.columns:
trues = bool_df.index[bool_df[col]].tolist()
# By convention, the shifted signal is item 0 in the tuple and it's shifted by the time in pair_time_shifts
pair_ids = [(col, sig) for sig in trues if (sig, col) not in self.signal_pairs_ids and col != sig]
self.signal_pairs_ids.extend(pair_ids)
def worksheet_input_params(self):
self.signals_dict = self.current_df.query_df.set_index('New Name').to_dict('index')
self.signal_ids = [self.signals_dict[x]['ID'] for x in self.current_signals]
self.signal_names = [self.signals_dict[x]['Name'] for x in self.current_signals]
self.time_shifts = self.time_shifts_df[self.current_signals].loc[
self.save_dialog.target_dropdown.v_model].values
def dialog_button_on_click(self, *_):
self.save_dialog.create_signals_dropdown_events()
self.save_dialog.v_model = True
def close_snackbar_events(self, *_):
self.signals_created.v_model = False
def run(self):
# noinspection PyTypeChecker
display(HTML("<style>.container { width:100% !important; }</style>"))
display(HTML(self.additional_styles))
self.app.children = [self.appBar, self.controls, self.visualization]
callbacks = dict(
time_shifts_switch=dict(event_name='change', callback_fn='time_shifts_switch_events'),
max_time_shifts=dict(event_name='change', callback_fn='max_shifts_events'),
output_values_toggle=dict(event_name='change', callback_fn='output_toggle_events'),
coeff_range_checkbox=dict(event_name='change', callback_fn='coeff_range_checkbox_events'),
time_range_checkbox=dict(event_name='change', callback_fn='time_range_checkbox_events'),
coeff_slider=dict(event_name='change', callback_fn='coeff_slider_events'),
time_slider=dict(event_name='change', callback_fn='time_slider_events'),
coeff_lower_bound=dict(event_name='change', callback_fn='bound_boxes_events'),
coeff_upper_bound=dict(event_name='change', callback_fn='bound_boxes_events'),
time_lower_bound=dict(event_name='change', callback_fn='bound_boxes_events'),
time_upper_bound=dict(event_name='change', callback_fn='bound_boxes_events'),
output_type_toggle=dict(event_name='change', callback_fn='output_toggle_events'),
dialog_button=dict(event_name='click', callback_fn='dialog_button_on_click'),
close_snackbar=dict(event_name='click', callback_fn='close_snackbar_events'),
)
for widget_name, event_props in callbacks.items():
widget = getattr(self, widget_name)
widget.on_event(event_props['event_name'], getattr(self, event_props['callback_fn']))
return self.app
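# A minimal usage sketch (hypothetical; the enclosing class name is not shown
# in this excerpt). In a Jupyter notebook one would construct the tool and
# display the layout returned by run(), which wires the widget callbacks and
# returns self.app:
#
#   tool = CorrelationHeatmapApp(...)  # assumed constructor name and arguments
#   tool.run()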
class CreateSignalsMenu(v.Dialog):
"""
This class creates an ipyvuetify Dialog window with the options required
to create correlation and time shifted signals in Seeq
"""
def __init__(self, parent, **kwargs):
self.parent = parent
self._signal_writing_counter = {signal: 0 for signal in self.parent.df.columns}
self._condition_id = None
self.dialog_instructions = v.Html(tag='p', children=[])
self.target_dropdown = v.Select(label="Target signal", items=self.parent.current_signals, dense=True,
outlined=True,
color=self.parent.colors['seeq_primary'], filled=True, item_color='primary',
v_model='',
disabled=self.parent.export_disabled, class_='mt-3')
self.create_signals = v.Btn(color='success', children=['Create signals'], v_on='tooltip.on',
target="_blank", disabled=True, loading=False,
class_='', style_='text-transform: capitalize;')
self.create_signals_tooltip = v.Tooltip(bottom=True, v_slots=[{
'name': 'activator',
'variable': 'tooltip',
'children': self.create_signals
}], children=['Save time shifted signals to the Analysis worksheet'])
self.output_display = v.Html(tag='p', children=[])
self.create_signals_dropdown = v.Select(label="Select type of signals to create",
items=['Create Correlation or Time Shift Signals',
'Shift Signals with Respect to a Target'],
dense=True,
outlined=True,
color=self.parent.colors['seeq_primary'], filled=True,
item_color='primary',
v_model='Create Correlation or Time Shift Signals',
disabled=self.parent.export_disabled, class_='mt-3')
signal_options_btns = [
dict(name='Cross-Correlations', v_model='', style_='text-transform: capitalize; min-width: 200px',
tooltip='Creates one signal of the Pearson correlation coefficient per signal pair selected'),
dict(name='Time Shifts', v_model='', style_='text-transform: capitalize; min-width: 150px',
tooltip='Creates one signal per signal pair of the time shifts needed to maximize cross correlation'),
dict(name='Correlations and Time Shifts', v_model='', style_='text-transform: capitalize; min-width: 250px',
tooltip='Creates one signal per signal pair of the maximized Pearson coefficient resulting '
'from dynamically shifting the signals')
]
self.rolling_window_options = ToggleButtons(signal_options_btns, v_model=0, mandatory=True, tile=True,
color=self.parent.colors['seeq_primary'],
borderless=False, dense=True, class_='flex-wrap pt-1 pb-4',
style_='background: transparent;')
self.rolling_window_options_container = v.Html(
tag='div',
class_='d-flex flex-row flex-wrap justify-space-between',
children=[])
self.signals_type_option_card = v.CardText(style_=self.parent.info_style, class_='pa-0',
children=[])
# Input box for timespan of the sliding window
self.window_size, self.window_size_tooltip = create_input_param_box(
v_model='24 h', label="Window Size", color=self.parent.colors['seeq_primary'],
style_='max-width: 120px; font-size: small; text-align-last: end;', class_='mr-5',
tooltip='Enter the timespan of the sliding window (e.g. 1h, 2min) ')
# Input box for period of the sliding window
self.window_period, self.window_period_tooltip = create_input_param_box(
v_model='6 h', label="Window Period", color=self.parent.colors['seeq_primary'],
style_='max-width: 130px; font-size: small; text-align-last: end;', class_='mr-5',
tooltip='Enter the period of the sliding window (e.g. 1h, 2min) ')
# Input box for minimum correlation threshold
self.corr_thrs, self.corr_thrs_tooltip = create_input_param_box(
v_model='0.8', label="Correlation Threshold", color=self.parent.colors['seeq_primary'],
style_='max-width: 130px; font-size: small; text-align-last: end;', class_='mr-5',
tooltip='Enter the minimum acceptable correlation coefficient value to determine the time shifts')
self.seeq_output_time_unit = v.Select(label="Signal time units",
items=['seconds', 'minutes', 'hours', 'days', 'years'],
dense=True,
outlined=True,
color=self.parent.colors['seeq_primary'], filled=True,
item_color='primary',
v_model='minutes',
style_='max-width: 150px; font-size: small; text-align-last: end;',
disabled=self.parent.export_disabled, class_='')
self.create_signals_inputs = v.Html(tag='div',
class_='d-flex flex-row flex-wrap pa-0',
children=[])
super().__init__(children=[
v.Card(children=[
v.CardTitle(class_='headline gray lighten-2', primary_title=True, children=[
"Create signals"
]),
v.CardText(style_=self.parent.info_style, children=[
self.parent.info_message,
self.create_signals_dropdown,
self.rolling_window_options_container,
self.signals_type_option_card,
v.Html(tag='div', class_='d-flex flex-row justify-end',
children=[self.create_signals_tooltip]),
self.output_display
]),
])
], **kwargs)
# callbacks
self.target_dropdown.on_event('change', self.dropdown_events)
self.corr_thrs.on_event('change', self.input_box_events)
self.window_size.on_event('change', self.input_box_events)
self.window_period.on_event('change', self.input_box_events)
self.rolling_window_options.on_event('change', self.signal_type_checkboxes_events)
self.create_signals_dropdown.on_event('change', self.create_signals_dropdown_events)
self.create_signals.on_event('click', self.shifted_signals_btn_on_click)
@property
def condition_id(self):
return self._condition_id
@condition_id.setter
def condition_id(self, value):
self._condition_id = value
@property
def signal_writing_counter(self):
return self._signal_writing_counter
@signal_writing_counter.setter
def signal_writing_counter(self, value):
self._signal_writing_counter = value
def check_invalid_input_boxes(self):
self.create_signals.disabled = False
if self.window_period.error_messages != '' or self.window_size.error_messages != '':
self.create_signals.disabled = True
if self.create_signals_dropdown.v_model in self.create_signals_dropdown.items:
if self.corr_thrs.error_messages != '':
self.create_signals.disabled = True
def toggle_button_loading(self):
self.create_signals.loading = not self.create_signals.loading
self.create_signals.disabled = self.create_signals.loading
def dropdown_events(self, *_):
if self.target_dropdown.v_model != '':
self.create_signals.disabled = False
else:
self.create_signals.disabled = True
def input_box_events(self, widget, __, ___):
self.create_signals.disabled = False
if widget.label == 'Window Size' or widget.label == 'Window Period':
widget.v_model, widget.error_messages, _ = validate_units(widget.v_model,
time_shifts_on=True,
auto_allowed=False)
if widget.label == 'Correlation Threshold':
try:
widget.error_messages = ''
val = float(widget.v_model)
if val < 0:
widget.error_messages = 'Value must be greater than or equal to 0'
if val > 1:
widget.error_messages = 'Value must be less than or equal to 1'
except ValueError:
widget.error_messages = 'Value must be a float'
self.check_invalid_input_boxes()
def signal_type_checkboxes_events(self, *_):
if self.rolling_window_options.v_model == 1 or self.rolling_window_options.v_model == 2:
self.create_signals_inputs.children = [self.window_size_tooltip,
self.window_period_tooltip,
self.corr_thrs_tooltip,
self.seeq_output_time_unit]
self.signals_type_option_card.children = ['Adjust the sliding window parameters and time '
'shift options',
v.Html(tag='p'),
self.create_signals_inputs,
v.Html(tag='p')]
elif self.rolling_window_options.v_model == 0:
self.create_signals_inputs.children = [self.window_size_tooltip,
self.window_period_tooltip]
self.signals_type_option_card.children = ['Adjust the sliding window parameters',
v.Html(tag='p'),
self.create_signals_inputs,
v.Html(tag='p')]
self.check_invalid_input_boxes()
def create_signals_dropdown_events(self, *_):
if self.parent.export_disabled:
self.export_disabled()
return
self.create_signals.disabled = False
self.target_dropdown.disabled = False
self.rolling_window_options.correlations_and_time_shifts.disabled = False
self.rolling_window_options.time_shifts.disabled = False
if self.create_signals_dropdown.v_model == self.create_signals_dropdown.items[0]:
if not self.parent.time_shifts_switch.v_model:
self.rolling_window_options.v_model = 0
self.rolling_window_options.correlations_and_time_shifts.disabled = True
self.rolling_window_options.time_shifts.disabled = True
self.rolling_window_options_container.children = ["Select the type of signals to create",
self.rolling_window_options]
self.create_signals_inputs.children = []
self.signals_type_option_card.children = []
self.signal_type_checkboxes_events()
elif self.create_signals_dropdown.v_model == self.create_signals_dropdown.items[1]:
self.rolling_window_options_container.children = []
self.create_signals_inputs.children = [self.target_dropdown]
if not self.parent.time_shifts_switch.v_model:
self.signals_type_option_card.children = ['This option is not available if Time Shift is turned off']
return
self.signals_type_option_card.children = ['Shifts signals with respect to a target signal to '
'maximize cross correlation']
<filename>yaxil/__init__.py
import io
import os
import csv
import sys
import gzip
import json
import time
import arrow
import random
import sqlite3
import zipfile
import logging
import requests
from requests_toolbelt.adapters.socket_options import TCPKeepAliveAdapter
import itertools
import getpass as gp
import tempfile as tf
import subprocess as sp
import collections as col
from argparse import Namespace
from contextlib import contextmanager
import xml.etree.ElementTree as etree
import yaxil.commons as commons
import yaxil.functools as functools
from .session import Session
from .exceptions import (AuthError, MultipleAccessionError, NoAccessionError,
AccessionError, DownloadError, ResultSetError,
ScanSearchError, EQCNotFoundError, RestApiError,
AutoboxError, NoExperimentsError, NoSubjectsError,
CommandNotFoundError)
# Whether to verify SSL certificates. Primarily of use during testing.
CHECK_CERTIFICATE = True
logger = logging.getLogger(__name__)
logging.getLogger("requests").setLevel(logging.WARNING)
class Format(object):
'''
A container to hold possible XNAT response formats: Format.JSON,
Format.XML, and Format.CSV.
'''
JSON = "json"
XML = "xml"
CSV = "csv"
XnatAuth = col.namedtuple("XnatAuth", [
"url",
"username",
"password"
])
'''
Container to hold XNAT authentication information. Fields include the ``url``,
``username``, and ``password``.
'''
def test_auth(auth):
'''
Validate auth input against XNAT.
'''
url = '{0}/data/version'.format(auth.url.rstrip('/'))
r = requests.get(url, auth=basicauth(auth))
if r.status_code == requests.codes.UNAUTHORIZED:
return False
return True
def basicauth(auth):
'''
Create basic auth tuple for requests.
'''
if auth.username and auth.password:
return (auth.username, auth.password)
return None
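# Usage sketch (URL and credentials are placeholders):
#
#   auth = XnatAuth(url='https://xnat.example.org', username='me', password='secret')
#   if not test_auth(auth):
#       raise AuthError('could not authenticate against {0}'.format(auth.url))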
@contextmanager
def session(auth):
'''
Create a session context to avoid explicitly passing authentication to
every function.
Example:
.. code-block:: python
import yaxil
auth = yaxil.XnatAuth(url='...', username='...', password='...')
with yaxil.session(auth) as sess:
aid = sess.accession('AB1234C')
experiment = sess.experiment('AB1234C')
sess.download('AB1234C', [1,3,14], out_dir='./dicomz')
:param auth: XNAT authentication
:type auth: :mod:`yaxil.XnatAuth`
:returns: YAXIL session object
:rtype: :mod:`yaxil.session.Session`
'''
sess = Session(auth)
yield sess
def auth2(alias=None, host=None, username=None, password=None, cfg='~/.xnat_auth'):
result = tuple()
# First, look for authentication data in ~/.xnat_auth
if alias:
logger.debug('returning authentication data from %s', cfg)
return auth(alias)
# Second, look for authentication data from --host, --user, --password function arguments
authargs = (host, username)
if any(authargs):
if not all(authargs):
raise AuthError('you must supply --host, --username and --password (or password prompt)')
logger.debug('returning authentication data from command line')
if not password:
password = gp.getpass('Enter XNAT passphrase:')
return XnatAuth(url=host, username=username, password=password)
# Third, look for authentication data in environment variables
host = os.environ.get('XNAT_HOST', None)
username = os.environ.get('XNAT_USER', None)
password = os.environ.get('XNAT_PASS', None)
authargs = (host, username)
if any(authargs):
if not all(authargs):
raise AuthError('you must set $XNAT_HOST, $XNAT_USER, and $XNAT_PASS (or password prompt)')
logger.debug('returning authentication data from environment variables')
if not password:
password = gp.getpass('Enter XNAT passphrase:')
return XnatAuth(url=host, username=username, password=password)
raise AuthError('you must provide authentication data using xnat_auth, command line, or environment variables')
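# The three lookup tiers illustrated (all values are placeholders):
#
#   auth2(alias='xnatastic')                          # 1. entry in ~/.xnat_auth
#   auth2(host='https://xnat.example.org',
#         username='me', password='secret')           # 2. explicit arguments
#   os.environ['XNAT_HOST'] = 'https://xnat.example.org'
#   os.environ['XNAT_USER'] = 'me'
#   auth2()                                           # 3. environment variables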
def auth(alias=None, url=None, cfg="~/.xnat_auth"):
'''
Read connection details from an xnat_auth XML file
Example:
>>> import yaxil
>>> auth = yaxil.auth('xnatastic')
>>> auth.url, auth.username, auth.password
('https://www.xnatastic.org/', 'username', '********')
:param alias: XNAT alias
:type alias: str
:param url: XNAT URL
:type url: str
:param cfg: Configuration file
:type cfg: str
:returns: Named tuple of (url, username, password)
:rtype: :mod:`yaxil.XnatAuth`
'''
if not alias and not url:
raise ValueError('you must provide an alias or url argument')
if alias and url:
raise ValueError('cannot provide both alias and url arguments')
# check and parse config file
cfg = os.path.expanduser(cfg)
if not os.path.exists(cfg):
raise AuthError("could not locate auth file %s" % cfg)
tree = etree.parse(os.path.expanduser(cfg))
# search by alias or url
res = None
if alias:
res = tree.findall("./%s" % alias)
if url:
res = tree.findall("./*/[url='%s']" % url)
if not res:
raise AuthError("failed to locate xnat credentials within %s" % cfg)
elif len(res) > 1:
raise AuthError("found too many sets of credentials within %s" % cfg)
res = res.pop()
# get url
url = res.findall("url")
if not url:
raise AuthError("no url for %s in %s" % (alias, cfg))
elif len(url) > 1:
raise AuthError("too many urls for %s in %s" % (alias, cfg))
# get username
username = res.findall("username")
if not username:
raise AuthError("no username for %s in %s" % (alias, cfg))
elif len(username) > 1:
raise AuthError("too many usernames for %s in %s" % (alias, cfg))
# get password
password = res.findall("password")
if not password:
password = gp.getpass('Enter XNAT passphrase:')
elif len(password) > 1:
raise AuthError("too many passwords for %s in %s" % (alias, cfg))
else:
password = password.pop().text
return XnatAuth(
url=url.pop().text,
username=username.pop().text,
password=password
)
Subject = col.namedtuple('Subject', [
'uri',
'label',
'id',
'project'
])
'''
Container to hold XNAT Subject information. Fields include the Subject URI
(``uri``), Accession ID (``id``), Project (``project``), and Label (``label``).
'''
def subjects(auth, label=None, project=None):
'''
Query XNAT for subjects and yield a Subject tuple for each match.
Example:
>>> import yaxil
>>> auth = yaxil.XnatAuth(url='...', username='...', password='...')
>>> yaxil.subjects(auth, 'AB1234C')
Subject(uri=u'/data/subjects/XNAT_S0001', label=u'AB1234C', id=u'XNAT_S0001',
project=u'MyProject')
:param auth: XNAT authentication
:type auth: :mod:`yaxil.XnatAuth`
:param label: XNAT Subject label
:type label: str
:param project: XNAT Subject Project
:type project: str
:returns: Subject objects
:rtype: :mod:`yaxil.Subject`
'''
url = '{0}/data/subjects'.format(auth.url.rstrip('/'))
logger.debug('issuing http request %s', url)
# compile query string
columns = [
'ID',
'label',
'project'
]
payload = {
'columns': ','.join(columns)
}
if label:
payload['label'] = label
if project:
payload['project'] = project
# submit the request
r = requests.get(url, params=payload, auth=basicauth(auth),
verify=CHECK_CERTIFICATE)
# validate response
if r.status_code != requests.codes.ok:
raise AccessionError('response not ok ({0}) from {1}'.format(r.status_code, r.url))
try:
results = r.json()
__quick_validate(results)
except ResultSetError as e:
raise ResultSetError('{0} from {1}'.format(e.message, r.url))
results = results['ResultSet']
if int(results['totalRecords']) == 0:
raise NoSubjectsError('no records returned from {0}'.format(r.url))
# start generating consumable results for the caller
for item in results['Result']:
yield Subject(uri=item['URI'],
id=item['ID'],
project=item['project'],
label=item['label'])
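# subjects() is a generator, so iterate over it (or wrap it in list()), e.g.:
#
#   for subject in subjects(auth, project='MyProject'):  # project name is illustrative
#       print(subject.label, subject.id)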
Experiment = col.namedtuple('Experiment', [
'uri',
'label',
'id',
'project',
'subject_id',
'subject_label',
'archived_date'
])
'''
Container to hold XNAT Experiment information. Fields include the Experiment URI
(``uri``), Accession ID (``id``), Project (``project``), Label (``label``),
Subject Accession ID (``subject_id``), Subject label (``subject_label``), and
archived date (``archived_date``).
'''
def experiments(auth, label=None, project=None, subject=None, daterange=None):
'''
Query XNAT for experiments and yield an Experiment tuple for each match.
Example:
>>> import yaxil
>>> auth = yaxil.XnatAuth(url='...', username='...', password='...')
>>> yaxil.experiments(auth, 'AB1234C')
Experiment(uri=u'/data/experiments/XNAT_E0001', label=u'AB1234C', id=u'XNAT_E0001',
project=u'MyProject', subject_id=u'XNAT_S0001', subject_label='ABC')
:param auth: XNAT authentication
:type auth: :mod:`yaxil.XnatAuth`
:param label: XNAT Experiment label
:type label: str
:param project: XNAT Experiment Project
:type project: str
:param subject: YAXIL Subject
:type subject: :mod:`yaxil.Subject`
:param daterange: Start and end dates
:type daterange: tuple
:returns: Experiment object
:rtype: :mod:`yaxil.Experiment`
'''
if subject and (label or project):
raise ValueError('cannot provide subject with label or project')
url = '{0}/data/experiments'.format(auth.url.rstrip('/'))
logger.debug('issuing http request %s', url)
# compile query string
columns = [
'ID',
'label',
'project',
'xnat:subjectassessordata/subject_id',
'subject_label',
'insert_date'
]
payload = {
'columns': ','.join(columns)
}
if label:
payload['label'] = label
if project:
payload['project'] = project
if subject:
payload['project'] = subject.project
payload['xnat:subjectassessordata/subject_id'] = subject.id
if daterange:
start = arrow.get(daterange[0]).format('MM/DD/YYYY')
stop = arrow.get(daterange[1]).format('MM/DD/YYYY')
payload['date'] = '{0}-{1}'.format(start, stop)
# submit request
r = requests.get(url, params=payload, auth=basicauth(auth), verify=CHECK_CERTIFICATE)
# validate response
if r.status_code != requests.codes.ok:
raise AccessionError('response not ok ({0}) from {1}'.format(r.status_code, r.url))
try:
results = r.json()
__quick_validate(results)
except ResultSetError as e:
raise ResultSetError('{0} from {1}'.format(e.message, r.url))
results = results['ResultSet']
if int(results['totalRecords']) == 0:
raise NoExperimentsError('no records returned for {0}'.format(r.url))
for item in results['Result']:
yield Experiment(uri=item['URI'],
id=item['ID'],
project=item['project'],
label=item['label'],
subject_id=item['subject_ID'],
subject_label=item['subject_label'],
archived_date=item['insert_date'])
@functools.lru_cache
def accession(auth, label, project=None):
'''
Get the Accession ID for any Experiment label.
Example:
>>> import yaxil
>>> auth = yaxil.XnatAuth(url='...', username='...', password='...')
>>> yaxil.accession(auth, 'AB1234C')
u'XNAT_E00001'
:param auth: XNAT authentication
:type auth: :mod:`yaxil.XnatAuth`
:param label: XNAT Experiment label
:type label: str
:param project: XNAT Experiment Project
:type project: str
:returns: Accession ID
:rtype: str
'''
expts = list(experiments(auth, label, project))
if len(expts) > 1:
raise MultipleAccessionError(f'label={label}, project={project}')
return expts[0].id
def download(auth, label, scan_ids=None, project=None, aid=None,
out_dir='.', in_mem=True, progress=False, attempts=1,
out_format='flat'):
'''
Download scan data from XNAT.
Example:
>>> import yaxil
>>> auth = yaxil.XnatAuth(url='...', username='...', password='...')
>>> yaxil.download(auth, 'AB1234C', ['1', '2'], out_dir='./data')
:param auth: XNAT authentication object
:type auth: :mod:`yaxil.XnatAuth`
:param label: XNAT MR Session label
:type label: str
:param scan_ids: Scan numbers to return; use None for all
:type scan_ids: list
:param project: XNAT MR Session project
:type project: str
:param aid: XNAT Accession ID
:type aid: str
:param out_dir: Output directory
:type out_dir: str
:param out_format: Extract all files or leave native structure
:type out_format: str
:param in_mem: Keep download content in memory; faster but uses more memory
:type in_mem: bool
:param progress: Show download progress every N bytes
:type progress: int
:param attempts: Number of download attempts
:type attempts: int
'''
if not scan_ids:
scan_ids =
t_chunks.pop()
t_chunks[-1] = np.hstack([t_chunks[-1], last_chunk])
n_chunks = len(t_chunks)
self.logger.info(
f"simulate with n_times_per_chunk={n_times_per_chunk}"
f" n_times={len(t)} n_chunks={n_chunks}")
# construct the simulator payload
def data_generator():
with simobj.mapping_context(
mapping=mapping, sources=sources
) as obs, simobj.probe_context(
fp=None, sources=sources,
f_smp=obs_params['f_smp_probing']) as probe:
n_chunks = len(t_chunks)
for i, t in enumerate(t_chunks):
s, obs_info = obs(t)
self.logger.info(
f'simulate chunk {i} of {n_chunks}:'
f' t=[{t.min()}, {t.max()}] '
f's=[{s.min()} {s.max()}]')
rs, xs, iqs, probe_info = probe(
s, alt=obs_info['alt'].T)
data = {
'time': t,
'flux': s,
'rs': rs,
'xs': xs,
'iqs': iqs,
'obs_info': obs_info,
'probe_info': probe_info,
}
yield data
return SimulatorResult(
simctx=self,
config=self.config,
simobj=simobj,
obs_params=obs_params,
sources=sources,
mapping=mapping,
data_generator=data_generator
)
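# Consumption sketch for the lazy result above (process() is a placeholder):
#
#   for chunk in result.data_generator():
#       process(chunk['time'], chunk['flux'], chunk['iqs'])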
def run_mapping_only(self):
"""Run the simulator to generate mapping file only."""
simobj = self.get_instrument_simulator()
mapping = self.get_mapping_model()
self.logger.info(f"mapping: {mapping}")
obs_params = self.get_obs_params()
t0 = mapping.t0
ref_frame = mapping.ref_frame
ref_coord = mapping.target
# make t grid
t = np.arange(
0, obs_params['t_exp'].to_value(u.s),
(1 / obs_params['f_smp_mapping']).to_value(u.s)) << u.s
time_obs = t0 + t
_ref_frame = resolve_sky_map_ref_frame(
ref_frame, observer=simobj.observer, time_obs=time_obs)
_ref_coord = ref_coord.transform_to(_ref_frame)
obs_coords = mapping.evaluate_at(_ref_coord, t)
# transform all obs_coords to equatorial
obs_coords_icrs = obs_coords.transform_to('icrs')
self.logger.debug(f"time_obs size: {time_obs.shape}")
return SimulatorResult(
simctx=self,
config=self.config,
simobj=simobj,
obs_params=obs_params,
obs_info=locals(),
mapping=mapping,
)
def run_coverage_only(self, write_output=True, mask_with_holdflags=False):
"""Run the simualtor to generate an approximate coverage map."""
simobj = self.get_instrument_simulator()
self.logger.debug(f"simobj: {simobj}")
mapping = self.get_mapping_model()
self.logger.info(f"mapping: {mapping}")
obs_params = self.get_obs_params()
t0 = mapping.t0
target_icrs = simobj.resolve_target(
mapping.target,
mapping.t0,
).transform_to('icrs')
# make t grid
f_smp = obs_params['f_smp_mapping']
dt_smp = (1 / f_smp).to(u.s)
t_exp = obs_params['t_exp']
t_pattern = mapping.get_total_time()
self.logger.debug(f"mapping pattern time: {t_pattern}")
if t_exp.unit.is_equivalent(u.ct):
ct_exp = t_exp.to_value(u.ct)
t_exp = t_pattern * ct_exp
self.logger.info(f"resolve t_exp={t_exp} from count={ct_exp}")
t = np.arange(
0, t_exp.to_value(u.s),
dt_smp.to_value(u.s)) << u.s
time_obs = t0 + t
_ref_frame = simobj.resolve_sky_map_ref_frame(
ref_frame=mapping.ref_frame, time_obs=time_obs)
target_in_ref_frame = target_icrs.transform_to(_ref_frame)
obs_coords = mapping.evaluate_at(target_in_ref_frame, t)
hold_flags = mapping.evaluate_holdflag(t)
obs_coords_icrs = obs_coords.transform_to('icrs')
if isinstance(_ref_frame, AltAz):
target_in_altaz = target_in_ref_frame
else:
target_in_altaz = target_icrs.transform_to(
simobj.resolve_sky_map_ref_frame(
ref_frame='altaz', time_obs=time_obs)
)
apt = simobj.table
def get_detector_coords(array_name, approximate=True):
mapt = apt[apt['array_name'] == array_name]
if approximate:
m_proj = simobj.get_sky_projection_model(
ref_coord=target_icrs,
time_obs=np.mean(t) + t0)
a_ra, a_dec = m_proj(mapt['x_t'], mapt['y_t'], frame='icrs')
else:
m_proj = simobj.get_sky_projection_model(
ref_coord=obs_coords,
time_obs=time_obs)
n_samples = len(time_obs)
x_t = np.tile(mapt['x_t'], (n_samples, 1))
y_t = np.tile(mapt['y_t'], (n_samples, 1))
a_ra, a_dec = m_proj(x_t, y_t, frame='icrs')
return a_ra, a_dec
def get_sky_bbox(lon, lat):
lon = Angle(lon).wrap_at(360. << u.deg)
lon_180 = Angle(lon).wrap_at(180. << u.deg)
w, e = np.min(lon), np.max(lon)
w1, e1 = np.min(lon_180), np.max(lon_180)
if (e1 - w1) < (e - w):
# use wrapping at 180d
w = w1
e = e1
lon = lon_180
self.logger.debug("re-wrapping coordinates at 180d")
s, n = np.min(lat), np.max(lat)
self.logger.debug(
f"data bbox: w={w} e={e} s={s} n={n} "
f"size=[{(e-w).to(u.arcmin)}, {(n-s).to(u.arcmin)}]")
return w, e, s, n
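# Worked example of the re-wrap decision above: for lon spanning [350d, 10d],
# wrapping at 360d gives w=10d, e=350d (span 340d), while wrapping at 180d
# gives w=-10d, e=10d (span 20d), so the 180d wrapping is kept.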
def make_wcs(pixscale, bbox):
delta_pix = (1 << u.pix).to(u.arcsec, equivalencies=pixscale)
w, e, s, n = bbox
pad = 4 << u.arcmin
nx = ((e - w + pad) / delta_pix).to_value(u.dimensionless_unscaled)
ny = ((n - s + pad) / delta_pix).to_value(u.dimensionless_unscaled)
nx = int(np.ceil(nx))
ny = int(np.ceil(ny))
self.logger.debug(f"wcs pixel shape: {nx=} {ny=} {delta_pix=}")
# to avoid making too large a map, we limit the output image to
# at most 5000 x 5000 pixels (about 200MB of data)
# TODO add this to config
size_max = 25e6
if nx * ny > size_max:
scale = nx * ny / size_max
nx = int(nx / scale)
ny = int(ny / scale)
delta_pix = delta_pix * scale
self.logger.debug(
f"wcs adjusted pixel shape: {nx=} {ny=} {delta_pix=}")
# base the wcs on these values
wcsobj = WCS(naxis=2)
wcsobj.pixel_shape = (nx, ny)
wcsobj.wcs.crpix = [nx / 2, ny / 2]
wcsobj.wcs.cdelt = np.array([
-delta_pix.to_value(u.deg),
delta_pix.to_value(u.deg),
])
wcsobj.wcs.ctype = ["RA---TAN", "DEC--TAN"]
wcsobj.wcs.crval = [target_icrs.ra.degree, target_icrs.dec.degree]
return wcsobj
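# Worked example (illustrative numbers): with pixscale = 1 arcsec / pix and a
# 10 arcmin x 10 arcmin bbox, nx = ny = ceil((10' + 4') / 1") = 840, i.e.
# 840 x 840 = ~7.1e5 pixels, well under size_max, so no rescaling happens.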
def make_cov_hdu(pixscale, array_name, approximate=True):
a_ra, a_dec = get_detector_coords(
array_name, approximate=approximate)
# this is ugly...
# get the common bbox of the array and mapping pattern
w, e, s, n = get_sky_bbox(a_ra, a_dec)
w1, e1, s1, n1 = get_sky_bbox(
obs_coords_icrs.ra,
obs_coords_icrs.dec,
)
bbox = get_sky_bbox(
list(map(
lambda v: v.to_value(u.deg),
[w, w, e, e, w1, w1, e1, e1])) << u.deg,
list(map(
lambda v: v.to_value(u.deg),
[s, n, s, n, s1, n1, s1, n1])) << u.deg)
wcsobj = make_wcs(pixscale, bbox)
if mask_with_holdflags:
m = hold_flags
xy_tel = wcsobj.world_to_pixel_values(
obs_coords_icrs.ra.degree[m == 0],
obs_coords_icrs.dec.degree[m == 0],
)
else:
xy_tel = wcsobj.world_to_pixel_values(
obs_coords_icrs.ra.degree,
obs_coords_icrs.dec.degree,
)
xy_array = wcsobj.world_to_pixel_values(a_ra, a_dec)
xbins = np.arange(wcsobj.pixel_shape[0])
ybins = np.arange(wcsobj.pixel_shape[1])
xbins_array = np.arange(
np.floor(xy_array[0].min()),
np.ceil(xy_array[0].max()) + 1
)
ybins_array = np.arange(
np.floor(xy_array[1].min()),
np.ceil(xy_array[1].max()) + 1
)
im_tel, _, _ = np.histogram2d(
xy_tel[1],
xy_tel[0],
bins=[ybins, xbins])
im_tel *= dt_smp.to_value(u.s) # scale to coverage
im_array, _, _ = np.histogram2d(
xy_array[1],
xy_array[0],
bins=[ybins_array, xbins_array]
)
# convolve
with timeit("convolve with array layout"):
im_cov = convolve_fft(
im_tel, im_array,
normalize_kernel=False, allow_huge=True)
with timeit("convolve with beam"):
fwhm_x = simobj.beam_model_cls.get_fwhm('x', array_name)
fwhm_y = simobj.beam_model_cls.get_fwhm('y', array_name)
g = Gaussian2DKernel(
(fwhm_x / GAUSSIAN_SIGMA_TO_FWHM).to_value(
u.pix, equivalencies=pixscale),
(fwhm_y / GAUSSIAN_SIGMA_TO_FWHM).to_value(
u.pix, equivalencies=pixscale),
)
im_cov = convolve_fft(im_cov, g, normalize_kernel=False)
# import matplotlib.pyplot as plt
# fig, axes = plt.subplots(1, 3)
# axes[0].imshow(im_tel)
# axes[1].imshow(im_array)
# axes[2].imshow(im_cov)
# plt.show()
self.logger.debug(
f'total time from coverage map: {im_cov.sum()} s')
self.logger.debug(
f'total time expected: {im_array.sum() * t_exp}')
imhdr = wcsobj.to_header()
return fits.ImageHDU(data=im_cov, header=imhdr)
# create output
phdr = fits.Header()
phdr.append((
'ORIGIN', 'The TolTEC Project',
'Organization generating this FITS file'
))
phdr.append((
'CREATOR', 'tolteca.simu',
'The software used to create this FITS file'
))
phdr.append((
'TELESCOP', 'LMT',
'Large Millimeter Telescope'
))
phdr.append((
'INSTRUME', 'TolTEC',
'TolTEC Camera'
))
phdr.append((
'EXPTIME', f'{t_exp.to_value(u.s):.3g}',
'Exposure time (s)'
))
phdr.append((
'OBSDUR', f'{t_exp.to_value(u.s):g}',
'Observation duration (s)'
))
phdr.append((
'MEANALT', '{0:f}'.format(
target_in_altaz.alt.mean().to_value(u.deg)),
'Mean altitude of the observation (deg)'))
hdulist = [fits.PrimaryHDU(header=phdr)]
pixscale = u.pixel_scale(1. << u.arcsec / u.pix)
for array_name in apt.meta['array_names']:
hdu = make_cov_hdu(pixscale, array_name, approximate=True)
hdulist.append(hdu)
hdulist = fits.HDUList(hdulist)
if write_output:
output_dir = self.get_or_create_output_dir()
output_path = output_dir.joinpath(
f'{output_dir.name}_coverage.fits')
hdulist.writeto(output_path, overwrite=True)
return hdulist
def update(self, config):
cfg = self.config_backend._override_config
rupdate(cfg, config)
self.config_backend.set_override_config(cfg)
if 'config' in self.__dict__:
del self.__dict__['config']
@cached_property
def config(self):
cfg = super().config
return self.config_schema().validate(cfg)
@timeit
def cli_run(self, args=None):
"""Run the simulator and save the result.
"""
if args is not None:
self.logger.debug(f"update config with command line args: {args}")
parser = argparse.ArgumentParser()
n_args = len(args)
re_arg = re.compile(r'^--(?P<key>[a-zA-Z_](\w|\.|_)*)')  # '.' escaped so only word chars, dots and underscores match
for i, arg in enumerate(args):
m = re_arg.match(arg)
if m is None:
continue
# g = m.groupdict()
next_arg = args[i + 1] if i < n_args - 1 else None
arg_kwargs = dict()
if next_arg is None:
arg_kwargs['action'] = 'store_true'
else:
arg_kwargs['type'] = yaml.safe_load
parser.add_argument(arg, **arg_kwargs)
args = parser.parse_args(args)
self.logger.debug(f'parsed config: {pformat_yaml(args.__dict__)}')
self.update({'simu': args.__dict__})
cfg = self.config['simu']
# configure the logging to log to file
logfile = self.logdir.joinpath('simu.log')
self.logger.info(f'setup logging to file {logfile}')
with log_to_file(
filepath=logfile, level='DEBUG', disable_other_handlers=False):
mapping_only = cfg['mapping_only']
coverage_only = cfg['coverage_only']
exports_only = cfg['exports_only']
if exports_only:
exports = cfg['exports']
if not exports:
raise ValueError("no export settings found.")
result = list()
for export_kwargs in exports:
result.append(self.export(**export_kwargs))
return
if coverage_only:
result = self.run_coverage_only()
return
if mapping_only:
result = self.run_mapping_only()
else:
result = self.run()
if cfg['plot']:
result.plot_animation()
if cfg['save']:
result.save(
self.get_or_create_output_dir(), mapping_only=mapping_only)
def export(self, format, **kwargs):
"""Export the simulator context as various external formats.
Supported `format`:
* "lmtot": The script used by the LMT observation tool.
"""
if format not in _simu_runtime_exporters:
raise ValueError(f"invalid export format: {format}")
return _simu_runtime_exporters[format](self)
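# e.g. simctx.export(format='lmtot') dispatches to the exporter registered
# under 'lmtot' in _simu_runtime_exporters.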
class SimulatorResult(Namespace):
"""A class to hold simulator results."""
logger = get_logger()
outdir_lockfile = 'simresult.lock'
outdir_statefile = 'simresult.state'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if hasattr(self, 'data_generator') and hasattr(self, 'data'):
raise ValueError("invalid result. can only have data"
"or data_generator")
self._lazy = hasattr(self, 'data_generator')
# wrap data in an iterator so we have a uniform implementation
if not self._lazy:
def _data_gen():
yield self.data
self.data_generator = _data_gen
self.reset_iterdata()
def reset_iterdata(self):
"""Reset the data iterator."""
self._iterdata = self.data_generator()
return self._iterdata
def iterdata(self, reset=False):
"""Return data from the data iterator."""
if reset:
self.reset_iterdata()
return next(self._iterdata)
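# Usage sketch: drain the iterator chunk by chunk, e.g.
#
#   result.reset_iterdata()
#   while True:
#       try:
#           chunk = result.iterdata()
#       except StopIteration:
#           break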
def _save_lmt_tcs_tel(self, outdir):
simctx = self.simctx
cfg = self.config['simu']
output_tel = outdir.joinpath('tel.nc')
nc_tel = netCDF4.Dataset(output_tel, 'w', format='NETCDF4')
def add_str(ds, name, s, dim=128):
if not isinstance(dim, str) or dim is None:
if dim is None:
dim = len(s)
dim_name = f'{name}_slen'
ds.createDimension(dim_name, dim)
else:
dim_name = dim
v = ds.createVariable(name, 'S1', (dim_name, ))
v[:] = netCDF4.stringtochar(np.array([s], dtype=f'S{dim}'))
add_str(
nc_tel,
increase the
overall speed.
:param skip_save_processed_input: (bool, default: `False`) if input
dataset is provided it is preprocessed and cached by saving an HDF5
and JSON files to avoid running the preprocessing again. If this
parameter is `False`, the HDF5 and JSON file are not saved.
:param output_directory: (str, default: `'results'`) the directory that
will contain the training statistics, TensorBoard logs, the saved
model and the training progress files.
:param random_seed: (int, default: `42`) a random seed that will be
used anywhere there is a call to a random number generator: data
splitting, parameter initialization and training set shuffling
:param debug: (bool, default: `False`) if `True` turns on `tfdbg` with
`inf_or_nan` checks.
# Return
:return: (Tuple[dict, Union[dict, pd.DataFrame], str]) tuple containing
`(training_statistics, preprocessed_data, output_directory)`.
`training_statistics` is a dictionary of training statistics
for each output feature containing loss and metrics values
for each epoch.
`preprocessed_data` is the tuple containing these three data sets
`(training_set, validation_set, test_set)`.
`output_directory` filepath to where training results are stored.
"""
# setup directories and file names
if model_resume_path is not None:
if path_exists(model_resume_path):
output_directory = model_resume_path
else:
if self.backend.is_coordinator():
logger.info(
'Model resume path does not exists, '
'starting training from scratch'
)
model_resume_path = None
if model_resume_path is None:
if self.backend.is_coordinator():
output_directory = get_output_directory(
output_directory,
experiment_name,
model_name
)
else:
output_directory = None
# if we are skipping all saving,
# there is no need to create a directory that will remain empty
should_create_output_directory = not (
skip_save_training_description and
skip_save_training_statistics and
skip_save_model and
skip_save_progress and
skip_save_log and
skip_save_processed_input
)
output_url = output_directory
with upload_output_directory(output_directory) as (output_directory, upload_fn):
train_callbacks = self.callbacks
if upload_fn is not None:
# Upload output files (checkpoints, etc.) to remote storage at the end of
# each epoch, in case of failure in the middle of training.
class UploadOnEpochEndCallback(Callback):
def on_epoch_end(self, trainer, progress_tracker, save_path):
upload_fn()
train_callbacks = train_callbacks + \
[UploadOnEpochEndCallback()]
description_fn = training_stats_fn = model_dir = None
if self.backend.is_coordinator():
if should_create_output_directory:
makedirs(output_directory, exist_ok=True)
description_fn, training_stats_fn, model_dir = get_file_names(
output_directory)
if isinstance(training_set, Dataset) and training_set_metadata is not None:
preprocessed_data = (
training_set, validation_set, test_set, training_set_metadata)
else:
# save description
if self.backend.is_coordinator():
description = get_experiment_description(
self.config,
dataset=dataset,
training_set=training_set,
validation_set=validation_set,
test_set=test_set,
training_set_metadata=training_set_metadata,
data_format=data_format,
random_seed=random_seed
)
if not skip_save_training_description:
save_json(description_fn, description)
# print description
logger.info('Experiment name: {}'.format(experiment_name))
logger.info('Model name: {}'.format(model_name))
logger.info(
'Output directory: {}'.format(output_directory))
logger.info('\n')
for key, value in description.items():
logger.info('{}: {}'.format(
key, pformat(value, indent=4)))
logger.info('\n')
for callback in self.callbacks:
callback.on_preprocess_start(self.config)
try:
preprocessed_data = self.preprocess(
dataset=dataset,
training_set=training_set,
validation_set=validation_set,
test_set=test_set,
training_set_metadata=training_set_metadata,
data_format=data_format,
experiment_name=experiment_name,
model_name=model_name,
model_resume_path=model_resume_path,
skip_save_training_description=skip_save_training_description,
skip_save_training_statistics=skip_save_training_statistics,
skip_save_model=skip_save_model,
skip_save_progress=skip_save_progress,
skip_save_log=skip_save_log,
skip_save_processed_input=skip_save_processed_input,
output_directory=output_directory,
random_seed=random_seed,
debug=debug,
**kwargs,
)
(training_set,
validation_set,
test_set,
training_set_metadata) = preprocessed_data
finally:
for callback in self.callbacks:
callback.on_preprocess_end(
training_set, validation_set, test_set, training_set_metadata
)
self.training_set_metadata = training_set_metadata
if self.backend.is_coordinator():
logger.info('Training set: {0}'.format(len(training_set)))
if validation_set is not None:
logger.info('Validation set: {0}'.format(
len(validation_set)))
if test_set is not None:
logger.info('Test set: {0}'.format(len(test_set)))
if not skip_save_model:
# save train set metadata
os.makedirs(model_dir, exist_ok=True)
save_json(
os.path.join(
model_dir,
TRAIN_SET_METADATA_FILE_NAME
),
training_set_metadata
)
for callback in self.callbacks:
callback.on_train_init(
base_config=self.base_config,
experiment_directory=output_directory,
experiment_name=experiment_name,
model_name=model_name,
output_directory=output_directory,
resume=model_resume_path is not None
)
# Build model if not provided
# if it was provided it means it was already loaded
if not self.model:
if self.backend.is_coordinator():
print_boxed('MODEL', print_fun=logger.debug)
# update config with metadata properties
update_config_with_metadata(
self.config,
training_set_metadata
)
self.model = LudwigModel.create_model(self.config,
random_seed=random_seed)
# init trainer
with self.backend.create_trainer(
**self.config[TRAINING],
model=self.model,
resume=model_resume_path is not None,
skip_save_model=skip_save_model,
skip_save_progress=skip_save_progress,
skip_save_log=skip_save_log,
callbacks=train_callbacks,
random_seed=random_seed,
debug=debug
) as trainer:
for callback in self.callbacks:
callback.on_train_start(
model=self.model,
config=self.config,
config_fp=self.config_fp,
)
# auto tune batch size
if self.config[TRAINING][BATCH_SIZE] == AUTO or \
self.config[TRAINING][EVAL_BATCH_SIZE] == AUTO:
# TODO (ASN): add support for substitute_with_max parameter
tuned_batch_size = trainer.tune_batch_size(
self.config,
training_set,
random_seed=random_seed
)
# TODO(travis): pass these in as args to trainer when we call train,
# to avoid setting state on possibly remote trainer
if self.config[TRAINING][BATCH_SIZE] == AUTO:
self.config[TRAINING][BATCH_SIZE] = tuned_batch_size
trainer.batch_size = tuned_batch_size
if self.config[TRAINING][EVAL_BATCH_SIZE] == AUTO:
self.config[TRAINING][EVAL_BATCH_SIZE] = tuned_batch_size
trainer.eval_batch_size = tuned_batch_size
# auto tune learning rate
if self.config[TRAINING][LEARNING_RATE] == AUTO:
tuned_learning_rate = trainer.tune_learning_rate(
self.config,
LudwigModel.create_model(self.config, random_seed),
training_set,
random_seed=random_seed
)
self.config[TRAINING][LEARNING_RATE] = tuned_learning_rate
trainer.learning_rate = tuned_learning_rate
# train model
if self.backend.is_coordinator():
print_boxed('TRAINING')
if not skip_save_model:
self.save_config(model_dir)
try:
train_stats = trainer.train(
self.model,
training_set,
validation_set=validation_set,
test_set=test_set,
save_path=model_dir,
)
self.model, train_trainset_stats, train_valiset_stats, train_testset_stats = train_stats
train_stats = {
TRAINING: train_trainset_stats,
VALIDATION: train_valiset_stats,
TEST: train_testset_stats
}
# save training statistics
if self.backend.is_coordinator():
if not skip_save_training_statistics and path_exists(os.path.dirname(training_stats_fn)):
save_json(training_stats_fn, train_stats)
# grab the results of the model with the best validation performance
validation_field = trainer.validation_field
validation_metric = trainer.validation_metric
validation_field_result = train_valiset_stats[validation_field]
best_function = get_best_function(validation_metric)
# results of the model with the best validation performance
if self.backend.is_coordinator() and validation_set is not None:
epoch_best_vali_metric, best_vali_metric = best_function(
enumerate(
validation_field_result[validation_metric]),
key=lambda pair: pair[1]
)
logger.info(
'Best validation model epoch: {0}'.format(
epoch_best_vali_metric + 1)
)
logger.info(
'Best validation model {0} on validation set {1}: {2}'.format(
validation_metric, validation_field, best_vali_metric
))
if test_set is not None:
best_vali_metric_epoch_test_metric = train_testset_stats[
validation_field][validation_metric][
epoch_best_vali_metric]
logger.info(
'Best validation model {0} on test set {1}: {2}'.format(
validation_metric,
validation_field,
best_vali_metric_epoch_test_metric
)
)
logger.info(
'\nFinished: {0}_{1}'.format(experiment_name, model_name))
logger.info('Saved to: {0}'.format(output_directory))
finally:
for callback in self.callbacks:
callback.on_train_end(output_directory)
self.training_set_metadata = training_set_metadata
if not skip_save_model:
# Load the best weights from saved checkpoint
self.load_weights(model_dir)
return train_stats, preprocessed_data, output_url
def train_online(
self,
dataset: Union[str, dict, pd.DataFrame],
training_set_metadata: Union[str, dict] = None,
data_format: str = 'auto',
random_seed: int = default_random_seed,
debug: bool = False
) -> None:
"""Performs one epoch of training of the model on `dataset`.
# Inputs
:param dataset: (Union[str, dict, pandas.DataFrame], default: `None`)
source containing the entire dataset to be used in the experiment.
If it has a split column, it will be used for splitting (0 for train,
1 for validation, 2 for test), otherwise the dataset will be
randomly split.
:param training_set_metadata: (Union[str, dict], default: `None`)
metadata JSON file or loaded metadata. Intermediate preprocessed
structure containing the mappings of the input
dataset created the first time an input file is used in the same
directory with the same name and a '.meta.json' extension.
:param data_format: (str, default: `None`) format to interpret data
sources. Will be inferred automatically if not specified. Valid
formats are `'auto'`, `'csv'`, `'df'`, `'dict'`, `'excel'`, `'feather'`,
`'fwf'`, `'hdf5'` (cache file produced during previous training),
`'html'` (file containing a single HTML `<table>`), `'json'`, `'jsonl'`,
`'parquet'`, `'pickle'` (pickled Pandas DataFrame), `'sas'`, `'spss'`,
`'stata'`, `'tsv'`.
:param random_seed: (int, default: `42`) a random seed that is going to be
used anywhere there is a call to a random number generator: data
splitting, parameter initialization and training set shuffling
:param debug: (bool, default: `False`) If `True` turns on `tfdbg`
with `inf_or_nan` checks.
# Return
:return: (None) `None`
"""
training_set_metadata = training_set_metadata or self.training_set_metadata
training_dataset, _, _, training_set_metadata = preprocess_for_training(
self.config,
training_set=dataset,
training_set_metadata=training_set_metadata,
data_format=data_format,
skip_save_processed_input=True,
preprocessing_params=self.config[PREPROCESSING],
backend=self.backend,
random_seed=random_seed,
callbacks=self.callbacks
)
if not self.training_set_metadata:
self.training_set_metadata = training_set_metadata
if not self.model:
update_config_with_metadata(
self.config,
training_set_metadata
)
self.model = LudwigModel.create_model(self.config,
random_seed=random_seed)
if not self._online_trainer:
self._online_trainer = self.backend.create_trainer(
**self.config[TRAINING],
model=self.model,
random_seed=random_seed,
debug=debug
)
self.model = self._online_trainer.train_online(
self.model,
training_dataset,
)
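# Usage sketch, assuming this method lives on LudwigModel (config and file
# names are placeholders):
#
#   model = LudwigModel(config)
#   for path in ['day0.csv', 'day1.csv']:
#       model.train_online(path)   # one epoch of training per call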
def predict(
self,
dataset: Union[str, dict, pd.DataFrame] = None,
data_format: str = None,
split: str = FULL,
batch_size: int = 128,
skip_save_unprocessed_output: bool = True,
skip_save_predictions: bool = True,
output_directory: str = 'results',
return_type: Union[str, dict, pd.DataFrame] = pd.DataFrame,
debug=False,
**kwargs
) -> Tuple[Union[dict, pd.DataFrame], str]:
"""
Using a trained model, make predictions from the provided dataset.
# Inputs
:param dataset: (Union[str, dict, pandas.DataFrame]) source containing
the entire dataset to be evaluated.
:param data_format: (str, default: `None`) format to interpret data
sources. Will be inferred automatically if not specified. Valid
formats are `'auto'`, `'csv'`, `'df'`, `'dict'`, `'excel'`, `'feather'`,
`'fwf'`, `'hdf5'` (cache file produced during previous training),
`'html'` (file containing a single HTML `<table>`), `'json'`, `'jsonl'`,
`'parquet'`, `'pickle'` (pickled Pandas DataFrame), `'sas'`, `'spss'`,
`'stata'`, `'tsv'`.
:param split: (str, default: `'full'`) if the input dataset contains
a split column, this parameter indicates which split of the data
to use. Possible values are `'full'`, `'training'`,
`'validation'`, `'test'`.
:param batch_size: (int, default: 128)
<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2015 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Containing files tree class, to organize projects """
__authors__ = "<NAME>"
import os
import shutil
import io
import json
import raw.data_input as data_input
class project_error(Exception):
pass
class DecodeError(project_error):
"""
Dictionary keys have no correspondence with object parameters
"""
pass
class DictDB(dict):
"""
It is a DBM-style persistent dict, serialized in JSON format.
This code is based on a Raymond Hettinger `recipe`_.
Wed, 4 Feb 2009 (MIT)
:ivar filename: the db filename
:ivar flag: type of action:
* r=readonly,
* c=create (if file exists open), or
* n=new (if file exists truncate)
:ivar mode: the permission, you can choose None or an octal triple like 0o666
.. _`recipe`:
http://code.activestate.com/recipes/576642/
"""
def __init__(self, filename, flag=None, mode=None, *args, **kwds):
self.flag = flag or 'c' # r=readonly, c=create, or n=new
self.mode = mode # None or octal triple like 0o666
self.filename = filename
if flag != 'n' and os.access(filename, os.R_OK):
file_handle = io.open(filename, 'rb')
try:
self.load(file_handle)
except OSError:
if flag == 'n' and os.access(filename, os.W_OK):
os.remove(filename)
else:
raise OSError, 'File exists: {0}'.format(os.path.abspath(filename))
finally:
file_handle.close()
self.update(*args, **kwds)
def sync(self):
"""
Synchronizes the dictionary with the serialized file.
All changes are first written to a temporary file, then atomically moved into place.
"""
if self.flag == 'r':
return
filename = self.filename
tempname = '{0}.tmp'.format(filename)
file_handle = io.open(tempname, 'wb')
try:
self.dump(file_handle)
except Exception:
file_handle.close()
os.remove(tempname)
raise
file_handle.close()
shutil.move(tempname, self.filename) # atomic commit
if self.mode is not None:
os.chmod(self.filename, self.mode)
def close(self):
self.sync()
# --
# the following two magic methods permit
# use in a with-as block
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
# --
def dump(self, file_handle):
try:
json.dump(
self,
file_handle,
separators=(',', ':'),
sort_keys=True,
indent=4
)
except TypeError:
# to determine how serialization fault
for k,v in self.iteritems():
try:
print k
json.dumps(
v,
sort_keys=True,
indent=4,
separators=(',', ': ')
)
except TypeError:
print "TypeError ", v
def load(self, file_handle):
try:
return self.update(json.load(file_handle))
except Exception:
pass
raise ValueError('File not in recognized format')
def dbopen(filename, flag=None, mode=None):
"""
A facade for the previous class DictDB
>>> s = dbopen('tmp.shl', flag='n')
>>> print s, 'start'
{} start
>>> s['abc'] = '123'
>>> s['num'] = 10000
>>> s.close()
>>> print s
{'abc': '123', 'num': 10000}
>>> import io
>>> f = io.open('tmp.shl', 'rb')
>>> print f.read()
{
"abc":"123",
"num":10000
}
>>> f.close()
"""
return DictDB(filename, flag, mode)
class EPPI_DictDB(DictDB):
"""
A version that:
* also creates a folder to store the file, and
* imposes a predefined keyword:
* files: the files added to the project
"""
def __init__(self, filename, flag='c', mode=None, *args, **kwds):
if flag == 'n':
filename = os.path.abspath(filename)
# this part is for create a
# a folder with the same name
# of the file, where store it.
old_folder, only_file = os.path.split(filename)
folder_name = os.path.splitext(only_file)[0]
new_folder = os.path.join(old_folder, folder_name)
if not os.path.exists(new_folder):
os.mkdir(new_folder)
filename = os.path.join(new_folder, only_file)
DictDB.__init__(self, filename, flag, mode, *args, **kwds)
self.__setitem__("files", [])
else:
DictDB.__init__(self, filename, flag=flag, mode=mode, *args, **kwds)
def append_file(self, f):
"""
Adds a file inside project file list
>>> foo = EPPI_DictDB('bar.prj', flag='n')
>>> print foo
{'files': []}
>>> import io
>>> baz = io.open('bar/quux', 'w')
>>> baz.close()
>>> foo.append_file("quux")
>>> print foo
{'files': ['quux']}
>>> import os
>>> os.unlink('bar/quux')
:param f: the file to append
:raises IOError: if the f is not in the path
"""
filepath = os.path.join(self.get_project_dir(), f)
if not os.path.exists(filepath):
msg = "[Errno 2] No such file or directory: {0}"
raise IOError, msg.format(filepath)
temp = self.__getitem__("files")
temp.append(f)
self.__setitem__("files",temp)
def extend_files(self, files):
"""
Adds more files inside project file list
>>> foo = EPPI_DictDB('bar.prj', flag='n')
>>> print foo
{'files': []}
>>> import io
>>> baz = io.open('bar/quux', 'w')
>>> baz.close()
>>> baz = io.open('bar/quuz', 'w')
>>> baz.close()
>>> foo.extend_files(["quux", "quuz"])
>>> print foo
{'files': ['quux', 'quuz']}
>>> import os
>>> os.unlink('bar/quux')
>>> os.unlink('bar/quuz')
:param files: the files for extending
:raises IOError: if any file does not exist in the path
"""
not_here = lambda x: not(os.path.exists(os.path.join(self.get_project_dir(), x)))
if any(not_here(f) for f in files):
files_list = ', '.join([each_file for each_file in files if not_here(each_file)])
msg = "[Errno 2] No such files or directories: {0}"
raise IOError, msg.format(files_list)
temp = self.__getitem__("files")
temp.extend(files)
self.__setitem__("files",temp)
def get_project_name(self):
"""
To obtain the project name
>>> s = EPPI_dbopen('tmp.prj', flag='n')
>>> print s.get_project_name()
tmp
:return project_name: the name of the project
"""
#print dir(self)
return os.path.splitext(os.path.basename(self.filename))[0]
def get_project_dir(self):
"""
To obtain the project directory
>>> from os import curdir
>>> import os.path as path
>>> from os.path import abspath
>>> s = EPPI_dbopen('tmp.prj', flag='n')
>>> s.get_project_dir() == path.join(abspath(curdir), 'tmp')
True
:return project_name: the directory of the project
"""
return os.path.split(os.path.abspath(self.filename))[0]
def set_parser(self, p):
"""
Add parser variable as dictionary items
>>> import json
>>> import raw.data_input as data_input
>>> p = data_input.parser()
>>> class seq():
... def __str__(self):
... return self.name
>>> data1 = seq()
>>> data1.name = "data1"
>>> data1.proteins = ['acc1','acc2']
>>> data1.peptides = [('acc1',['SEQTWO']),
... ('acc2',['SEQONE'])]
>>> p.parse(data1)
>>> s = EPPI_dbopen('tmp.prj', flag='n')
>>> s.set_parser(p)
>>> print json.dumps(obj=s, indent=4) # doctest: +NORMALIZE_WHITESPACE
{
"files": [],
"p_proteins": {
"acc1": 1,
"acc2": 1
},
"p_sources": [
"data1"
],
"p_spectral_count": false,
"p_peptides": {
"acc1": {
"SEQTWO": 1
},
"acc2": {
"SEQONE": 1
}
}
}
:param p: a parser
:type p: raw.data_input.parser
:raises TypeError: if p is not a raw.data_input.parser
"""
# this means that I kill the duck typing now... think about
if not isinstance(p, data_input.parser):
raise TypeError, "accepting only a parser"
result = p.__dict__
for k, v in result.iteritems():
self.__setitem__('p_{0}'.format(k), v)
def get_parser(self):
"""
To obtain a stored parser
>>> import json
>>> import raw.data_input as data_input
>>> p = data_input.parser()
>>> class seq():
... def __str__(self):
... return self.name
>>> data1 = seq()
>>> data1.name = "data1"
>>> data1.proteins = ['acc1','acc2']
>>> data1.peptides = [('acc1',['SEQTWO']),
... ('acc2',['SEQONE'])]
>>> p.parse(data1)
>>> s = EPPI_dbopen('tmp.prj', flag='n')
>>> s.set_parser(p)
>>> print s.get_parser() # doctest: +NORMALIZE_WHITESPACE
Spectral_count: False
accession freq_prot sequence freq_pept
acc1 1 SEQTWO 1
acc2 1 SEQONE 1
<BLANKLINE>
:return: a parser
:rtype: raw.data_input.parser
"""
p = data_input.parser()
p.proteins = self.__getitem__("p_proteins")
p.peptides = self.__getitem__("p_peptides")
p.spectral_count = self.__getitem__("p_spectral_count")
p.sources = self.__getitem__("p_sources")
return p
def set_selected(self, s):
"""
set parser variable as dictionary items
>>> import json
>>> import raw.data_input as data_input
>>> p = data_input.parser()
>>> class seq():
... def __str__(self):
... return self.name
>>> data1 = seq()
>>> data1.name = "data1"
>>> data1.proteins = ['acc1','acc2']
>>> data1.peptides = [('acc1',['SEQONE', 'SEQTWO','SEQTHREE']),
... ('acc2',['SEQONE', 'SEQTWO',])]
>>> p.parse(data1)
>>> data2 = seq()
>>> data2.name = "data2"
>>> data2.proteins = ['acc1','acc3']
>>> data2.peptides = [('acc1',['SEQONE', 'SEQTWO']),
... ('acc3',['SEQFIVE', 'SEQSIX'])]
>>> p.parse(data2)
>>> p_2 = data_input.selected(p, peptThr=1., protThr=1.)
>>> s = EPPI_dbopen('tmp.prj', flag='n')
>>> s.set_selected(p_2)
>>> print json.dumps(obj=s, indent=4) # doctest: +NORMALIZE_WHITESPACE
{
"files": [],
"s_peptThr": 1.0,
"s_protThr": 1.0,
"s_proteins": {
"acc1": 2
},
"s_sources": [
"data1",
"data2"
],
"s_spectral_count": false,
"s_peptides": {
"acc1": {
"SEQONE": 2,
"SEQTWO": 2
}
}
}
:param s: a selected parser
:type s: raw.data_input.selected
:raises TypeError: if s is not a raw.data_input.selected
"""
# this means that I kill the duck typing now... think about
if not isinstance(s, data_input.selected):
raise TypeError, "accepting only a selected parser"
result = s.__dict__
for k, v in result.iteritems():
self.__setitem__('s_{0}'.format(k), v)
def get_selected(self):
"""
To obtain a stored parser
>>> import json
>>> import raw.data_input as data_input
>>> p = data_input.parser()
>>> class seq():
... pass
>>> data1 = seq()
>>> data1.proteins = ['acc1','acc2']
>>> data1.peptides = [('acc1',['SEQONE', 'SEQTWO','SEQTHREE']),
... ('acc2',['SEQONE', 'SEQTWO',])]
>>> p.parse(data1)
- At first object of list...'
else:
newent = np.where(self.objlist == self.currentobj)[0]-1
self.currentobj = self.objlist[newent][0]
self.openpngs()
self.labelvar.set(self.infostring())
self.updateimage()
if self.Npa != 2: self.checkboxes2(self.cbpos2,disable=True) # disable checkboxes2 if Npa not 2
# load new data for plot and replot
self.dataPlot_loaddata()
self.dataPlot_plot(refresh=True,newobj=True)
self.DPxlow_full, self.DPxhigh_full, self.DPylow_full, self.DPyhigh_full = \
self.dataPlot_getwindowinfo() # store full window
if self.fitsauto: # loading fits files automatically
if self.xpa:
self.openfits_but_cmd_xpa()
else:
self.openfits_but_cmd()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def quit_but(self,position):
"""
Set up the quit button
"""
self.QUIT = Button(self)
self.QUIT["text"] = "QUIT GiGz"
self.QUIT["command"] = self.quit_but_cmd
self.QUIT.grid(row=position[0],column=position[1],columnspan=position[2],sticky=W)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def quit_but_cmd(self):
"""
Command for quit button
"""
if self.quitting == False: self.reset() # Only reset if quit_but_cmd was activated by quit button
self.quitting = True
self.fout.close()
self.closewindows()
self.dataPlotManager.destroy()
if self.outcheck: self.checkoutput()
self.quit()
if self.vb: print ' - Quit GiGz successfully'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def checkoutput(self):
"""
Checking the output to see if it is as expected
"""
data = np.genfromtxt(self.outfile,comments='#',skip_header=2,names=True)
Nobjout = len(np.unique(data['ID']))
Npaout = len(np.unique(data['PA']))
if self.vb: print ' - OUTPUTCHECK: Found '+str(Nobjout)+' objects in output. '+\
'Input objlist contained '+str(len(self.objlist))+' objects'
if self.vb: print ' - OUTPUTCHECK: Found '+str(Npaout)+' PAs in output. '+\
'Input objlist had '+str(self.Npamax)+' PAs'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def subtractcontam(self,twodfits):
"""
Subtract continuum from science fram
"""
filename, fileext = os.path.splitext(twodfits)
output = filename+'_SCI-CONTAM'+fileext
if os.path.isfile(output): # check if file already exists
if self.vb: print ' - ',output,' already exists'
else:
if self.vb: print ' - Create ',output
hduimg = pyfits.open(twodfits) # Load the FITS hdulist
hdrsci = hduimg['SCI'].header # extracting science header
sci = hduimg['SCI'].data
contam = hduimg['CONTAM'].data
pyfits.writeto(output, sci-contam, hdrsci, clobber=False)
return output
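# e.g. subtractcontam('spec2D.fits') would write 'spec2D_SCI-CONTAM.fits'
# holding SCI - CONTAM under the SCI header (the file name is illustrative).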
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def keyboard_cmd(self,event):
"""
Commands for keyboard shortcuts
"""
cmd = event.char
focuson = self.focus_get() # check where the focus is
if (focuson == self.comments) or (focuson == self.comments2) or \
(focuson == self.byhandz):
pass
else:
keycmd = []
keynames = []
keynumber = []
for ii, key in enumerate(self.keys):
keycmd.append(key[1])
keynames.append(key)
keynumber.append(ii)
keycmd2 = []
keynames2 = []
keynumber2 = []
for ii, key2 in enumerate(self.keys2):
keycmd2.append(key2[1])
keynames2.append(key2)
keynumber2.append(ii)
if cmd in keycmd:
thiskey = keynames[np.where(np.asarray(cmd) == np.asarray(keycmd))[0]]
if cmd in self.sliders:
sliderval = int(self.keys[thiskey].get())
if sliderval == 4:
self.sliderdic[thiskey].set(0)
else:
self.sliderdic[thiskey].set(sliderval+1)
elif cmd in self.empty:
pass
else:
self.cbdic[thiskey].toggle()
elif (cmd in keycmd2) & (self.Npa == 2):
thiskey2 = keynames2[np.where(np.asarray(cmd) == np.asarray(keycmd2))[0]]
if cmd in self.sliders:
sliderval2 = int(self.keys2[thiskey2].get())
if sliderval2 == 4:
self.sliderdic2[thiskey2].set(0)
else:
self.sliderdic2[thiskey2].set(sliderval2+1)
elif cmd in self.empty:
pass
else:
self.cbdic2[thiskey2].toggle()
elif cmd == 'l':
self.comments.focus_set()
elif cmd == 'L':
self.comments2.focus_set()
elif cmd == 'm':
sliderval = float(self.slidersmooth.get())
self.slidersmooth.set(sliderval+0.1)
elif cmd == 'M':
sliderval = float(self.slidersmooth.get())
self.slidersmooth.set(sliderval-0.1)
elif cmd == 'n':
sliderval = float(self.sliderz.get())
self.sliderz.set(sliderval+0.1)
elif cmd == 'N':
sliderval = float(self.sliderz.get())
self.sliderz.set(sliderval-0.1)
elif cmd == 'o':
self.modelbox.toggle()
elif cmd == 'p':
self.GiGlinesbox.toggle()
elif cmd == 'q':
sliderval = int(self.sliderzqual.get())
if sliderval == 4:
self.sliderzqual.set(0)
else:
self.sliderzqual.set(sliderval+1)
elif cmd == 'r':
self.dataPlot_redrawbutton_cmd()
elif cmd == 's':
self.dataPlot_savebutton_cmd()
elif cmd == 't':
self.mzsbox.toggle()
elif cmd == 'u':
self.byhandz.focus_set()
elif cmd == 'z':
self.dataPlot_fullzoombutton_cmd()
elif cmd == '0':
if self.xpa:
self.openfits_but_cmd_xpa()
else:
self.openfits_but_cmd()
elif cmd == '7':
self.prev_but_cmd()
elif cmd == '8':
self.next_but_cmd()
else:
pass
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def infostring(self):
"""
Return string with information to display in GUI window
"""
self.cluster, redshift = vi.getclusterz(self.file)
infostr = "--- Currently looking at object "+str(self.currentobj)+\
', PA(s) = '+str(self.PAs)+\
' ('+self.cluster+' redshift = '+str(redshift)+') ---'
return infostr
#-------------------------------------------------------------------------------------------------------------
class Application_m(Frame):
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def __init__(self,pdir,outfile,master=None,infofile=None,objlist=None,clusters=None,verbose=True,iname='<NAME>',
ACSinspection=False,clobber=False,ds9xpa=False,openfitsauto=False,outputcheck=False,skipempty=False,
openpngseperately=False):
"""
Initialize the GUI
-- INPUT --
pdir Directory containing the postage stamps
outfile Name of output file to create if it doesn't exist. Use clobber to overwrite.
master Provide another 'master' display. If None GUI created from scratch.
objlist List of objects to inspect. If 'None' all objects in 'pdir' will be
inspected.
clusters If objlist is not None, provide the list of clusters the IDs correspond to
verbose Toggle verbosity.
iname Name of inspector to write in output file.
ACSinspection If inspecting ACS objects (not enabled as of 150423).
clobber Overwrites the output file if it already exists
ds9xpa If xpa is available for communicating commands to ds9,
set this keyword to True and xpa will be used instead
of opening ds9 every time the fits files are requested.
NB! XPA fixes the number of frames. If more than Nframes images are available they
will not be shown. If all objects only have Nframes that's not a problem;
otherwise set ds9xpa = False
openfitsauto Automatically load the fits files into the DS9 window
when advancing to next (or previous) object.
outputcheck Check the written output to see if it contains the expected number
of objects etc.
skipempty Set to True to ignore unedited objects when writing to output file.
Hence, if skipempty = True objects with no comments, flags set or sliders changed
will not be written to the output
openpngseperately By default the pngs are not opened in Preview/GThumb to avoid biasing the inspections.
However, setting this keyword to True will do that.
"""
pp = subprocess.Popen('ds9 -version',shell=True,executable=os.environ["SHELL"],stdout=subprocess.PIPE)
ppout = pp.communicate()[0]
self.ds9version = ppout.split()
self.now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.vb = verbose
self.pdir = pdir
self.master = master
self.infofile = infofile
self.ds9open = False # set ds9 indicator (used for ds9xpa = False)
self.ds9windowopen = False # set ds9 indicator (used for ds9xpa = True)
self.ACSins = ACSinspection
self.quitting = False
self.xpa = ds9xpa # check if user indicated that xpa was available for ds9
self.fitsauto = openfitsauto # Open fits files automatically?
self.outcheck = outputcheck
self.skipempty = skipempty
self.openpngssep = openpngseperately
if self.xpa:
self.ds9windowopen = False
if os.path.exists(self.pdir):
self.twodfits = glob.glob(self.pdir)
else:
sys.exit(' - The directory '+self.pdir+' does not exist --> ABORTING')
# -------- GET OBJIDS --------
if objlist is None:
searchext = '_rgb.png'
self.file_2Dpng = glob.glob(self.pdir+'*'+searchext)
self.clusterlist = np.asarray([self.file_2Dpng[jj].split('/')[-1].split('_')[0]
for jj in xrange(len(self.file_2Dpng))])
self.objlist = np.asarray([int(self.file_2Dpng[jj].split('/')[-1].split('_')[1])
copying pass
"""
if self in memo:
return memo[self]
robot = memo.get(self.robot, self.robot) # copy.deepcopy(self.robot, memo)
joints = copy.deepcopy(self.joints)
bounds = copy.deepcopy(self.bounds)
kp = copy.deepcopy(self.kp)
kd = copy.deepcopy(self.kd)
max_force = copy.deepcopy(self.max_force)
discrete_values = copy.deepcopy(self.discrete_values)
action = self.__class__(robot=robot, joint_ids=joints, bounds=bounds, kp=kp, kd=kd, max_force=max_force,
discrete_values=discrete_values)
memo[self] = action
return action
class JointPositionChangeAction(JointPositionAction):
r"""Joint Position Change Action
Set the joint positions using position control; this class expects to receive a change in the joint positions
(i.e. instantaneous joint velocities). That is, the current joint positions are added to the given joint position
changes. If none are provided, it will stay at the current configuration.
"""
def __init__(self, robot, joint_ids=None, bounds=(None, None), kp=None, kd=None, max_force=None,
discrete_values=None):
"""
Initialize the joint position change action.
Args:
robot (Robot): robot instance.
joint_ids (int, list of int, None): joint id(s). If None, it will take all the actuated joints.
bounds (tuple of 2 float / np.array[N] / None): lower and upper bound in the case of continuous action.
If None it will use the default joint position limits.
kp (float, np.array[N], None): position gain(s)
kd (float, np.array[N], None): velocity gain(s)
max_force (float, np.array[N], None, bool): maximum motor torques / forces. If True, it will apply the
default maximum force values.
discrete_values (np.array[M], np.array[N,M], list of np.array[M], None): discrete values for each joint.
Note that by specifying this, the joint action is no longer continuous but becomes discrete. By default,
the first values along the first axis / dimension are the ones that are set if no data
is provided.
"""
super(JointPositionChangeAction, self).__init__(robot, joint_ids, bounds=bounds, kp=kp, kd=kd,
max_force=max_force, discrete_values=discrete_values)
# set data if continuous
if self.discrete_values is None:
self.data = np.zeros(len(self.joints))
def _write_continuous(self, data):
"""apply the action data on the robot."""
# add the original joint positions
data += self.robot.get_joint_positions(self.joints)
super(JointPositionChangeAction, self)._write_continuous(data)
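# Hedged usage sketch (not part of the library): illustrates the delta
# semantics of JointPositionChangeAction. `robot` stands for a pyrobolearn
# Robot instance created elsewhere; the values are arbitrary.
#   action = JointPositionChangeAction(robot, joint_ids=[0, 1, 2])
#   action.data = np.array([0.01, 0.0, -0.01])  # joint-angle deltas [rad]
#   # _write_continuous adds the current joint positions, so the robot is
#   # driven to q_current + delta on the next control step.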
class JointVelocityAction(JointAction):
r"""Joint Velocity Action
Set the joint velocities using velocity control.
"""
def __init__(self, robot, joint_ids=None, bounds=(None, None), discrete_values=None):
"""
Initialize the joint velocity action.
Args:
robot (Robot): robot instance.
joint_ids (int, list of int, None): joint id, or list of joint ids. If None, get all the actuated joints.
bounds (tuple of 2 float / np.array[N] / None): lower and upper bound in the case of continuous action.
If None it will use the default joint velocity limits.
discrete_values (np.array[M], np.array[N,M], list of np.array[M], None): discrete values for each joint.
Note that by specifying this, the joint action is no longer continuous but becomes discrete. By default,
the first values along the first axis / dimension are the ones that are set if no data
is provided.
"""
super(JointVelocityAction, self).__init__(robot, joint_ids, discrete_values=discrete_values)
# set data and space if continuous
if self.discrete_values is None:
self.data = robot.get_joint_velocities(self.joints)
bounds = self._check_continuous_bounds(bounds)
if bounds == (None, None):
bounds = self.robot.get_joint_max_velocities(self.joints)
if np.allclose(bounds, 0):
bounds = np.array([-np.infty * np.ones(len(self.joints)),
np.infty * np.ones(len(self.joints))])
self._space = gym.spaces.Box(low=bounds[:, 0], high=bounds[:, 1])
def _write_continuous(self, data):
"""apply the action data on the robot."""
self.robot.set_joint_velocities(data, self.joints)
class JointVelocityChangeAction(JointVelocityAction):
r"""Joint Velocity Change Action
Set the joint velocities using velocity control; this class expects to receive a change in the joint velocities
(i.e. instantaneous joint accelerations). That is, the current joint velocities are added to the given joint
velocity changes. If none are provided, it will keep the current joint velocities.
"""
def __init__(self, robot, joint_ids=None, bounds=(None, None), discrete_values=None):
"""
Initialize the joint velocity change action.
Args:
robot (Robot): robot instance.
joint_ids (int, list of int, None): joint id, or list of joint ids. If None, get all the actuated joints.
bounds (tuple of 2 float / np.array[N] / None): lower and upper bound in the case of continuous action.
If None it will use the default joint velocity limits.
discrete_values (np.array[M], np.array[N,M], list of np.array[M], None): discrete values for each joint.
Note that by specifying this, the joint action is no longer continuous but becomes discrete. By default,
the first values along the first axis / dimension are the ones that are set if no data
is provided.
"""
super(JointVelocityChangeAction, self).__init__(robot, joint_ids, bounds=bounds,
discrete_values=discrete_values)
# set data if continuous
if self.discrete_values is None:
self.data = np.zeros(len(self.joints))
def _write_continuous(self, data):
"""apply the action data on the robot."""
data += self.robot.get_joint_velocities(self.joints)
super(JointVelocityChangeAction, self)._write_continuous(data)
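# Hedged sketch: with JointVelocityChangeAction the data is a velocity delta,
# so feeding a constant value acts like a crude acceleration command:
#   action = JointVelocityChangeAction(robot, joint_ids=[0])
#   action.data = np.array([0.1])  # rad/s added to the current velocity each step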
class JointPositionAndVelocityAction(JointAction): # TODO: discrete values
r"""Joint position and velocity action
Set the joint positions and velocities using PD position control, where the constraint error to be minimized is
given by: :math:`error = kp * (q^* - q) - kd * (\dot{q}^* - \dot{q})`.
"""
def __init__(self, robot, joint_ids=None, bounds=(None, None), kp=None, kd=None, max_force=None,
discrete_values=None):
"""
Initialize the joint position and velocity action.
Args:
robot (Robot): robot instance.
joint_ids (int, list of int, None): joint id(s). If None, it will take all the actuated joints.
kp (float, np.array[N], None): position gain(s)
kd (float, np.array[N], None): velocity gain(s)
max_force (float, np.array[N], None, bool): maximum motor torques / forces. If True, it will apply the
default maximum force values.
discrete_values (np.array[M], np.array[N,M], list of np.array[M], None): discrete values for each joint.
Note that by specifying this, the joint action is no longer continuous but becomes discrete. By default,
the first values along the first axis / dimension are the ones that are set if no data
is provided.
"""
super(JointPositionAndVelocityAction, self).__init__(robot, joint_ids, discrete_values=discrete_values)
self.kp, self.kd, self.max_force = kp, kd, max_force
# set data if continuous
if self.discrete_values is None:
pos, vel = robot.get_joint_positions(self.joints), robot.get_joint_velocities(self.joints)
self.data = np.concatenate((pos, vel))
self.idx = len(self.joints)
def _write_continuous(self, data):
"""apply the action data on the robot."""
self.robot.set_joint_positions(data[:self.idx], self.joints, kp=self.kp, kd=self.kd,
velocities=data[self.idx:], forces=self.max_force)
def __copy__(self):
"""Return a shallow copy of the action. This can be overridden in the child class."""
return self.__class__(robot=self.robot, joint_ids=self.joints, kp=self.kp, kd=self.kd, max_force=self.max_force)
def __deepcopy__(self, memo={}):
"""Return a deep copy of the action. This can be overridden in the child class.
Args:
memo (dict): memo dictionary of objects already copied during the current copying pass
"""
if self in memo:
return memo[self]
robot = memo.get(self.robot, self.robot) # copy.deepcopy(self.robot, memo)
joints = copy.deepcopy(self.joints)
kp = copy.deepcopy(self.kp)
kd = copy.deepcopy(self.kd)
max_force = copy.deepcopy(self.max_force)
action = self.__class__(robot=robot, joint_ids=joints, kp=kp, kd=kd, max_force=max_force)
memo[self] = action
return action
class JointPositionAndVelocityChangeAction(JointPositionAndVelocityAction):
r"""Joint position and velocity action
Set the joint position using position control using PD control, where the constraint error to be minimized is
given by: :math:`error = kp * (q^* - q) - kd * (\dot{q}^* - \dot{q})`.
"""
def __init__(self, robot, joint_ids=None, bounds=(None, None), kp=None, kd=None, max_force=None,
discrete_values=None):
"""
Initialize the joint position and velocity change action.
Args:
robot (Robot): robot instance.
joint_ids (int, list of int, None): joint id(s). If None, it will take all the actuated joints.
kp (float, np.array[N], None): position gain(s)
kd (float, np.array[N], None): velocity gain(s)
max_force (float, np.array[N], None, bool): maximum motor torques / forces. If True, it will apply the
default maximum force values.
discrete_values (np.array[M], np.array[N,M], list of np.array[M], None): discrete values for each joint.
Note that by specifying this, the joint action is no longer continuous but becomes discrete. By default,
the first values along the first axis / dimension are the ones that are set if no data
is provided.
"""
super(JointPositionAndVelocityChangeAction, self).__init__(robot, joint_ids, kp=kp, kd=kd, max_force=max_force,
discrete_values=discrete_values)
# set data if continuous
if self.discrete_values is None:
self.data = np.zeros(2*len(self.joints))
def _write_continuous(self, data):
"""apply the action data on the robot."""
pos, vel = self.robot.get_joint_positions(self.joints), self.robot.get_joint_velocities(self.joints)
data += np.concatenate((pos, vel))
super(JointPositionAndVelocityChangeAction, self)._write_continuous(data)
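# Hedged sketch: JointPositionAndVelocityAction expects the concatenation
# [q_desired, qd_desired]; the change variant above adds the current state
# to a concatenated delta. E.g. for N joints:
#   delta = np.concatenate((dq, dqd))       # dq, dqd each of length N
#   action._write_continuous(delta.copy())  # drives to (q + dq, qd + dqd)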
# class JointPositionVelocityAccelerationAction(JointAction):
# r"""Set the joint positions, velocities, and accelerations.
#
# Set the joint positions, velocities, and accelerations by computing the necessary torques / forces using inverse
# dynamics.
# """
# pass
class JointTorqueAction(JointAction):
r"""Joint Torque/Force Action
Set the joint force/torque using force/torque control.
"""
def __init__(self, robot, joint_ids=None, bounds=(None, None), discrete_values=None):
"""
Initialize the joint torque/force action.
Args:
robot (Robot): robot instance.
joint_ids (int, list of int, None): joint id, or list of joint ids. If None, get all the actuated joints.
"""
Script to verify all examples in the readme.
Run from the project directory (i.e. parent) with
python test_readme_examples.py
"""
from __future__ import print_function, division
#import sys
#import os
#sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import numpy as np
from scipy import misc
def main():
example_standard_situation()
example_heavy_augmentations()
example_show()
example_grayscale()
example_determinism()
example_keypoints()
example_single_augmenters()
example_unusual_distributions()
example_hooks()
def example_standard_situation():
print("Example: Standard Situation")
# -------
# dummy functions to make the example runnable here
def load_batch(batch_idx):
return np.random.randint(0, 255, (1, 16, 16, 3), dtype=np.uint8)
def train_on_images(images):
pass
# -------
from imgaug import augmenters as iaa
seq = iaa.Sequential([
iaa.Crop(px=(0, 16)), # crop images from each side by 0 to 16px (randomly chosen)
iaa.Fliplr(0.5), # horizontally flip 50% of the images
iaa.GaussianBlur(sigma=(0, 3.0)) # blur images with a sigma of 0 to 3.0
])
for batch_idx in range(1000):
# 'images' should be either a 4D numpy array of shape (N, height, width, channels)
# or a list of 3D numpy arrays, each having shape (height, width, channels).
# Grayscale images must have shape (height, width, 1) each.
# All images must have numpy's dtype uint8. Values are expected to be in
# range 0-255.
images = load_batch(batch_idx)
images_aug = seq.augment_images(images)
train_on_images(images_aug)
# -----
# Make sure that the example really does something
if batch_idx == 0:
assert not np.array_equal(images, images_aug)
def example_heavy_augmentations():
print("Example: Heavy Augmentations")
import imgaug as ia
from imgaug import augmenters as iaa
# random example images
images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
# Sometimes(0.5, ...) applies the given augmenter in 50% of all cases,
# e.g. Sometimes(0.5, GaussianBlur(0.3)) would blur roughly every second image.
st = lambda aug: iaa.Sometimes(0.5, aug)
# Define our sequence of augmentation steps that will be applied to every image
# All augmenters with per_channel=0.5 will sample one value _per image_
# in 50% of all cases. In all other cases they will sample new values
# _per channel_.
seq = iaa.Sequential([
iaa.Fliplr(0.5), # horizontally flip 50% of all images
iaa.Flipud(0.5), # vertically flip 50% of all images
st(iaa.Crop(percent=(0, 0.1))), # crop images by 0-10% of their height/width
st(iaa.GaussianBlur((0, 3.0))), # blur images with a sigma between 0 and 3.0
st(iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5)), # add gaussian noise to images
st(iaa.Dropout((0.0, 0.1), per_channel=0.5)), # randomly remove up to 10% of the pixels
st(iaa.Add((-10, 10), per_channel=0.5)), # change brightness of images (by -10 to 10 of original value)
st(iaa.Multiply((0.5, 1.5), per_channel=0.5)), # change brightness of images (50-150% of original value)
st(iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5)), # improve or worsen the contrast
st(iaa.Grayscale((0.0, 1.0))), # blend with grayscale image
st(iaa.Affine(
scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis
translate_px={"x": (-16, 16), "y": (-16, 16)}, # translate by -16 to +16 pixels (per axis)
rotate=(-45, 45), # rotate by -45 to +45 degrees
shear=(-16, 16), # shear by -16 to +16 degrees
order=[0, 1], # use scikit-image's interpolation orders 0 (nearest neighbour) and 1 (bilinear)
cval=(0, 1.0), # if mode is constant, use a cval between 0 and 1.0
mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)
)),
st(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)) # apply elastic transformations with random strengths
],
random_order=True # do all of the above in random order
)
images_aug = seq.augment_images(images)
# -----
# Make sure that the example really does something
assert not np.array_equal(images, images_aug)
def example_show():
print("Example: Show")
from imgaug import augmenters as iaa
images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
seq = iaa.Sequential([iaa.Fliplr(0.5), iaa.GaussianBlur((0, 3.0))])
# show an image with 8*8 augmented versions of image 0
seq.show_grid(images[0], cols=8, rows=8)
# Show an image with 8*8 augmented versions of image 0 and 8*8 augmented
# versions of image 1. The identical augmentations will be applied to
# image 0 and 1.
seq.show_grid([images[0], images[1]], cols=8, rows=8)
def example_grayscale():
print("Example: Grayscale")
from imgaug import augmenters as iaa
images = np.random.randint(0, 255, (16, 128, 128), dtype=np.uint8)
seq = iaa.Sequential([iaa.Fliplr(0.5), iaa.GaussianBlur((0, 3.0))])
# The library expects a list of images (3D inputs) or a single array (4D inputs).
# So we add an axis to our grayscale array to convert it to shape (16, 128, 128, 1).
images_aug = seq.augment_images(images[:, :, :, np.newaxis])
# -----
# Make sure that the example really does something
assert not np.array_equal(images, images_aug)
def example_determinism():
print("Example: Determinism")
from imgaug import augmenters as iaa
# Standard scenario: You have N RGB-images and additionally 21 heatmaps per image.
# You want to augment each image and its heatmaps identically.
images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
heatmaps = np.random.randint(0, 255, (16, 128, 128, 21), dtype=np.uint8)
seq = iaa.Sequential([iaa.GaussianBlur((0, 3.0)), iaa.Affine(translate_px={"x": (-40, 40)})])
# Convert the stochastic sequence of augmenters to a deterministic one.
# The deterministic sequence will always apply the exactly same effects to the images.
seq_det = seq.to_deterministic() # call this for each batch again, NOT only once at the start
images_aug = seq_det.augment_images(images)
heatmaps_aug = seq_det.augment_images(heatmaps)
# -----
# Make sure that the example really does something
import imgaug as ia
assert not np.array_equal(images, images_aug)
assert not np.array_equal(heatmaps, heatmaps_aug)
images_show = []
for img_idx in range(len(images)):
images_show.extend([images[img_idx], images_aug[img_idx], heatmaps[img_idx][..., 0:3], heatmaps_aug[img_idx][..., 0:3]])
ia.show_grid(images_show, cols=4)
def example_keypoints():
print("Example: Keypoints")
import imgaug as ia
from imgaug import augmenters as iaa
from scipy import misc
import random
images = np.random.randint(0, 50, (4, 128, 128, 3), dtype=np.uint8)
# Generate random keypoints.
# The augmenters expect a list of imgaug.KeypointsOnImage.
keypoints_on_images = []
for image in images:
height, width = image.shape[0:2]
keypoints = []
for _ in range(4):
x = random.randint(0, width-1)
y = random.randint(0, height-1)
keypoints.append(ia.Keypoint(x=x, y=y))
keypoints_on_images.append(ia.KeypointsOnImage(keypoints, shape=image.shape))
seq = iaa.Sequential([iaa.GaussianBlur((0, 3.0)), iaa.Affine(scale=(0.5, 0.7))])
seq_det = seq.to_deterministic() # call this for each batch again, NOT only once at the start
# augment keypoints and images
images_aug = seq_det.augment_images(images)
keypoints_aug = seq_det.augment_keypoints(keypoints_on_images)
# Example code to show each image and print the new keypoints coordinates
for img_idx, (image_before, image_after, keypoints_before, keypoints_after) in enumerate(zip(images, images_aug, keypoints_on_images, keypoints_aug)):
image_before = keypoints_before.draw_on_image(image_before)
image_after = keypoints_after.draw_on_image(image_after)
misc.imshow(np.concatenate((image_before, image_after), axis=1)) # before and after
for kp_idx, keypoint in enumerate(keypoints_after.keypoints):
keypoint_old = keypoints_on_images[img_idx].keypoints[kp_idx]
x_old, y_old = keypoint_old.x, keypoint_old.y
x_new, y_new = keypoint.x, keypoint.y
print("[Keypoints for image #%d] before aug: x=%d y=%d | after aug: x=%d y=%d" % (img_idx, x_old, y_old, x_new, y_new))
def example_single_augmenters():
print("Example: Single Augmenters")
from imgaug import augmenters as iaa
images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
flipper = iaa.Fliplr(1.0) # always horizontally flip each input image
images[0] = flipper.augment_image(images[0]) # horizontally flip image 0
vflipper = iaa.Flipud(0.9) # vertically flip each input image with 90% probability
images[1] = vflipper.augment_image(images[1]) # probably vertically flip image 1
blurer = iaa.GaussianBlur(3.0)
images[2] = blurer.augment_image(images[2]) # blur image 2 by a sigma of 3.0
images[3] = blurer.augment_image(images[3]) # blur image 3 by a sigma of 3.0 too
translater = iaa.Affine(translate_px={"x": -16}) # move each input image by 16px to the left
images[4] = translater.augment_image(images[4]) # move image 4 to the left
scaler = iaa.Affine(scale={"y": (0.8, 1.2)}) # scale each input image to 80-120% on the y axis
images[5] = scaler.augment_image(images[5]) # scale image 5 by 80-120% on the y axis
def example_unusual_distributions():
print("Example: Unusual Distributions")
from imgaug import augmenters as iaa
from imgaug import parameters as iap
images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
# Blur by a value sigma which is sampled from a uniform distribution
# of range 0.1 <= x < 3.0.
# The convenience shortcut for this is: iaa.GaussianBlur((0.1, 3.0))
blurer = iaa.GaussianBlur(iap.Uniform(0.1, 3.0))
images_aug = blurer.augment_images(images)
# Blur by a value sigma which is sampled from a normal distribution N(1.0, 0.1),
# i.e. sample a value that is usually around 1.0.
# Clip the resulting value so that it never gets below 0.1 or above 3.0.
blurer = iaa.GaussianBlur(iap.Clip(iap.Normal(1.0, 0.1), 0.1, 3.0))
images_aug = blurer.augment_images(images)
# Same again, but this time the mean of the normal distribution is not constant,
# but comes itself from a uniform distribution.
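# (continuation sketch: the original text is cut off here; assuming the mean
# is drawn from a uniform distribution, as the comment above suggests)
blurer = iaa.GaussianBlur(iap.Clip(iap.Normal(iap.Uniform(0.5, 1.5), 0.1), 0.1, 3.0))
images_aug = blurer.augment_images(images)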
to also check if the `to` value is None
has_more = False
logger.info(f"No more results left to retrieve, ending loop after {loop_counter} iterations")
return results
else:
offset = response.get('to')
sleep_counter = 0
logger.debug(f"Results: {response}")
results.append(deepcopy(response))
logger.info(f"Loop count: {loop_counter}")
loop_counter += 1
else:
exc_str = f"API call to {url} failed. Request payload {params}, response code {r.status_code}, text {r.text}\n"
error = json.loads(r.text)
if error['code'] == 'err_too_many_requests':
# assuming failure due to API rate limiting, so sleep and try again
# implement a sleep counter after which we raise an exception
if sleep_counter == max_sleep:
logger.error(exc_str)
raise Exception(exc_str)
else:
sleep_counter += 1
logger.info("Sleeping for 5 seconds before re-trying last call")
sleep(5)
continue
else:
logger.info("Ignoring error and continuing")
logger.error(exc_str)
has_more = False
# figure out how to gracefully handle r.text.code == generic_err_command_not_found
return results
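# Hedged sketch (not in the original script): the rate-limit handling above,
# condensed into a reusable pattern. `do_request` is a hypothetical zero-arg
# callable returning a requests.Response; constants mirror the loop above.
#   for attempt in range(max_sleep):
#       r = do_request()
#       if r.ok or json.loads(r.text).get('code') != 'err_too_many_requests':
#           return r
#       sleep(5)  # back off before retrying the throttled call
#   raise Exception("rate-limit retries exhausted")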
def get_api_versions(device_name: Text, device_ip: Text, credentials: Dict, output_path: Text, logger, ) -> Text:
results = []
url = get_url(device_name, HTTPS_PORT)
api_key = create_session(
url,
"login",
{"user": credentials['username'], "password": credentials['password'],},
"",
logger,
)
if api_key is not None:
logger.info(f"Login successful. Retrieved API key: {api_key}")
else:
exc_str = f"Unable to log into server and retrieve API key. api_call returned None"
logger.error(exc_str)
raise Exception(exc_str)
resource = "show-api-versions"
params = {}
headers = {"Content-Type": "application/json", "X-chkp-sid": api_key}
resource_url = f"{url}/{resource}"
logger.info(f"Attempting to retrieve {resource_url} with params {params}")
r = requests.post(
resource_url, data=json.dumps(params), headers=headers, verify=False)
if r.ok:
response = r.json()
logger.debug(f"Data Keys: {response.keys()}\n")
logger.info(f"Current API version: {response.get('current-version', None)}\n")
logger.debug(f"Supported API versions: {response.get('supported-versions', None)}\n")
api_version = response.get('current-version', None)
results.append(response)
else:
# should see if we can differentiate the error types based on the text
# want to sleep only if error is err_too_many_requests
exc_str = f"API call to {url} failed. Request payload {params}, response code {r.status_code}, text {r.text}\n"
logger.error(exc_str)
raise Exception(exc_str)
save_output_to_file(
device_name, "Parent", resource, results, output_path,
)
# log-out
end_session(url, "logout", {}, api_key, logger)
if api_version is None:
exc_str = f"Empty API version returned by show-api-versions API call {results}"
logger.error(exc_str)
raise Exception(exc_str)
return api_version
def create_session(url: Text, resource: Text, body: Dict, api_key: Text, logger,) -> Text:
"""
:param url:
:param resource:
:param body:
:param api_key:
:return: output of REST API Post call
"""
url = f"{url}/{resource}"
params = body
# Header for API Key
headers = {"Content-Type": "application/json"}
r = requests.post(
url, data=json.dumps(params), headers=headers, verify=False)
if r.ok:
try:
api_key = r.json()['sid']
return api_key
except:
exc_str = f"Login succeeded, but failed to retrieve api-key. Return status {r}"
logger.error(exc_str)
else:
exc_str = f"Failed to login and retrieve api-key. Return status {r}"
logger.error(exc_str)
def end_session(url: Text, resource: Text, body: Dict, api_key: Text,logger, ) -> None:
"""
:param url:
:param resource:
:param body:
:param api_key:
:return: None
"""
url = f"{url}/{resource}"
params = body
logger.info(f"Attempting to logout with {url} with params {params}")
headers = {"Content-Type": "application/json", "X-chkp-sid": api_key}
r = requests.post(
url, data=json.dumps(params), headers=headers, verify=False)
if r.ok:
logger.info("Log-out successful")
else:
logger.error(f"{r}")
exc_str = "Session Logout failed"
logger.error(exc_str)
raise Exception(exc_str)
def get_url(device_name: Text, port: Text) -> Text:
"""
:param device_name:
:param port:
:return: URL for checkpoint manager
"""
return f"https://{device_name}:{port}/web_api"
def save_output_to_file(
device_name: Text, domain: Text, resource: Text, output: List, file_path: Text
) -> None:
"""
:param device_name:
:param domain:
:param resource:
:param output:
:param file_path:
:return: Save REST API call output to file
"""
file_path = f"{file_path}/{device_name}/{domain}/{resource}.json"
save_output_to_file_path(file_path, output)
def save_output_to_package_file(
device_name: Text, domain: Text, package: Text, resource: Text, output: List, file_path: Text
) -> None:
"""
:param device_name:
:param domain:
:param package:
:param resource:
:param output:
:param file_path:
:return: Save REST API call output to file
"""
file_path = f"{file_path}/{device_name}/{domain}/{package}/{resource}.json"
save_output_to_file_path(file_path, output)
def save_output_to_file_path(file_path: Text, output: List) -> None:
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, "w") as f:
json.dump(output, f)
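# Example of the resulting layout (hypothetical values):
#   save_output_to_file("fw-mgr01", "Parent", "show-domains", output, "/data/cp")
#   -> /data/cp/fw-mgr01/Parent/show-domains.json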
def get_domain_list(device_name: Text, device_ip: Text, credentials: Dict, output_path: Text, logger, ) -> List:
domain_list = []
url = get_url(device_name, HTTPS_PORT)
api_key = create_session(
url,
"login",
{"user": credentials['username'], "password": credentials['password'],},
"",
logger,
)
if api_key is not None:
logger.info(f"Login successful. Retrieved API key: {api_key}")
else:
exc_str = f"Unable to log into server and retrieve API key. api_call returned None"
logger.error(exc_str)
raise Exception(exc_str)
logger.info(f"Retrieving show-domains for device {device_name}")
resource = "show-domains"
body = {"details-level": "full"}
output = api_call(
url, resource, body, api_key, pagination=True, logger=logger
)
save_output_to_file(
device_name, "Parent", resource, output, output_path,
)
logger.info(f"Retrieved show-domains for device {device_name}")
logger.debug(f"show-domains output:\n {output}")
for entry in output:
domain_objects = entry.get("objects", [])
for domain_object in domain_objects:
domain_list.append(domain_object.get("name"))
# log-out
end_session(url, "logout", {}, api_key, logger)
return domain_list
def get_api_data(device_name: Text, device_ip: Text, credentials: Dict, output_path: Text, logger, ) -> None:
# todo: add handling to use device_ip if device_name is not resolvable via DNS
api_version = get_api_versions(device_name, device_ip, credentials, output_path, logger)
domains = get_domain_list(device_name, device_ip, credentials, output_path, logger)
MAX_LOGIN_RETRIES = 3
for domain in domains:
login_retries = 0
TRY_LOGIN = True
while TRY_LOGIN:
logger.info(f"Connecting to {device_name} domain {domain}")
url = get_url(device_name, HTTPS_PORT)
api_key = create_session(
url,
"login",
{"user": credentials['username'], "password": credentials['password'], "domain": domain},
"",
logger,
)
if api_key is not None:
logger.info(f"Login successful. Retrieved API key: {api_key}")
TRY_LOGIN = False
elif login_retries == MAX_LOGIN_RETRIES:
exc_str = f"Unable to log into server domain {domain} and retrieve API key. api_call returned None."
logger.error(exc_str)
TRY_LOGIN = False
else:
login_retries += 1
sleep(5)
if api_key is not None:
# retrieve all of the policy packages and associated data
get_policy_components(device_name, device_ip, domain, url, api_key, output_path, logger)
# retrieve data that is not embedded within the policy packages
for request_component in get_request_components_management_solo(api_version):
for resource, body in request_component.items():
output = api_call(
url, resource, body, api_key, pagination=True, logger=logger
)
save_output_to_file(
device_name, domain, resource, output, output_path,
)
# logout
end_session(url, "logout", {}, api_key, logger)
def main():
parser = configargparse.ArgParser()
parser.add_argument("--inventory", help="Absolute path to inventory file to use", required=True)
parser.add_argument("--output_dir", help="Absolute path to directory where results are to be written",
required=True)
parser.add_argument("--debug", help="set log_level to DEBUG instead of INFO",
action='store_true')
parser.add_argument("--vault", help="Vault file to use",
required=True)
parser.add_argument("--vault-password-file", help="Vault passowrd file to use",
required=True)
parser.add_argument("--max-threads", help="Set max threads for data collection. Default = 10, Maximum is 100",
type=int, default=10, choices=range(1,101))
args = parser.parse_args()
# set log level
DEBUG = args.debug
log_level = logging.INFO
if DEBUG:
log_level = logging.DEBUG
# check if inventory file exists
# inventory must be a valid ansible YAML inventory with this hierarchy
# example:
#
# all:
# children:
# checkpoint_mgmt:
# vars:
# ansible_connection: local
# device_os: checkpoint_mgmt
# hosts:
# fake11: null
# fake12: null
inv_file = args.inventory
if Path(inv_file).exists():
inventory = get_inventory(inv_file)
else:
raise Exception(f"{inv_file} does not exist")
# check if output directory exists
output_dir = args.output_dir
if not Path(output_dir).exists() or not Path(output_dir).is_dir():
raise Exception(f"{output_dir} does not exist or is not a directory")
# retrieve device credentials - username and password
# expected keys in vault are `svc_account_user` and `svc_account_password`
# which will get mapped to `username` and `password` respectively
# note: code assumes only a single account to connect to all devices
vault_file = args.vault
vault_pass_file = args.vault_password_file
credentials = get_device_credentials(vault_file, vault_pass_file)
if credentials.get("username") is None or credentials.get("password") is None:
raise Exception("Unable to retrieve credentials from Ansible Vault")
max_threads = args.max_threads
pool = ThreadPoolExecutor(max_threads)
future_list = []
start_time = datetime.now()
print(f"###Starting Checkpoint manager data collection: {start_time}")
for grp, grp_data in inventory.items():
device_os = grp_data['vars'].get('device_os')
if device_os is None or device_os != 'checkpoint_mgmt':
continue
for device_name, device_params in grp_data.get('hosts').items():
log_file = f"{output_dir}/{device_name}/cp_manager.log"
try:
os.makedirs(os.path.dirname(log_file), exist_ok=True)
except:
exc_str = f"Could not create directory for log_file {log_file}"
raise Exception(exc_str)
logger = my_custom_logger(device_name, log_file, log_level)
logger.info(f"Starting data collection for {device_name}")
logger.debug(f"Group {grp}, Group_data {grp_data}")
# REST API calls
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
if device_params is None:
device_ip = None
else:
device_ip = device_params.get("ansible_host")
# code assumes DNS entry exists for the device name that is in the inventory
# code also assumes the use of standard HTTPS_PORT (443) and no need for a proxy
# device in the middle.
future = pool.submit(get_api_data, device_name=device_name,
device_ip=device_ip, credentials=credentials,
output_path=output_dir, logger=logger)
future_list.append(future)
count = 0
for future in as_completed(future_list):
try:
data = future.result()
except Exception as exc:
print(f"Exception generated: \n {exc}")
print(f"Finished device number {count}")
count += 1
end_time = datetime.now()
print(f"###Completed | |
#!/usr/bin/env python
#
# Copyright (c) 2019 by VMware, Inc. ("VMware")
# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. ("NetDEF")
# in this file.
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
#
"""
Following tests are covered to test BGP Graceful Restart functionality.
Basic Common Test steps for all the test case below :
- Create topology (setup module)
Creating 7 routers topology
- Bring up topology
- Verify for bgp to converge
- Configure BGP Graceful Restart on both the routers.
TC_1_2:
Verify that EOR message is sent out only after initial convergence
Verify whether EOR message is received from all the peers after restart
TC_3:
Verify the selection deferral timer functionality when EOR is not sent
by the helper router
TC_11:
Verify that selection-deferral timer sets the maximum time to
avoid deadlock during which the best-path
TC_10:
Test Objective : Test GR scenarios on helper router by enabling
Graceful Restart for multiple address families.
TC_15:
Test Objective : Test GR scenarios by enabling Graceful Restart
for multiple address families..
TC_16:
Test Objective : Verify BGP-GR feature when restarting node
is a transit router for it's iBGP peers.
TC_18:
Test Objective : Verify that GR helper router deletes stale routes
received from restarting node, if GR capability is not present in
TC_19:
Test Objective : Verify that GR routers keeps all the routes
received from restarting node if both the routers are
TC_26:
Test Objective : Test GR scenarios on helper router by enabling
Graceful Restart for multiple address families.
TC_28:
Test Objective : Verify if helper node goes down before restarting
node comes up online, helper node sets the R-bit to avoid dead-lock
TC_29:
Test Objective : Change timers on the fly, and
verify if it takes immediate effect.
TC_33:
Test Objective : Helper router receives same prefixes from two
different routers (GR-restarting and GR-disabled). Keeps the
TC_34_1:
Test Objective : Restarting node doesn't preserve forwarding
state, helper router should not keep the stale entries.
TC_34_2:
Test Objective : Restarting node doesn't preserve the forwarding
state verify the behaviour on helper node, if it still keeps the
TC_32:
Test Objective : Restarting node is connected to multiple helper
nodes, one of them doesn't send EOR to restarting router. Verify
TC_37:
Test Objective : Verify if helper node restarts before sending the
EOR message, restarting node doesn't wait until stale path timer
TC_30:
Test Objective : Restarting node removes stale routes from Zebra
after receiving an EOR from helper router.
"""
import os
import sys
import time
import pytest
from time import sleep
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join("../"))
sys.path.append(os.path.join("../lib/"))
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib.topogen import Topogen, get_topogen
from lib.topolog import logger
# Required to instantiate the topology builder class.
# Import topoJson from lib, to create topology and initial configuration
from lib.topojson import build_config_from_json
from lib.bgp import (
clear_bgp,
verify_bgp_rib,
verify_graceful_restart,
create_router_bgp,
verify_r_bit,
verify_eor,
verify_f_bit,
verify_bgp_convergence,
verify_gr_address_family,
modify_bgp_config_when_bgpd_down,
verify_graceful_restart_timers,
verify_bgp_convergence_from_running_config,
)
from lib.common_config import (
write_test_header,
reset_config_on_routers,
start_topology,
kill_router_daemons,
start_router_daemons,
verify_rib,
check_address_types,
write_test_footer,
check_router_status,
step,
get_frr_ipv6_linklocal,
required_linux_kernel_version,
)
pytestmark = [pytest.mark.bgpd]
# Global variables
BGP_CONVERGENCE = False
GR_RESTART_TIMER = 5
GR_SELECT_DEFER_TIMER = 5
GR_STALEPATH_TIMER = 5
PREFERRED_NEXT_HOP = "link_local"
NEXT_HOP_4 = ["192.168.1.1", "192.168.4.2"]
NEXT_HOP_6 = ["fd00:0:0:1::1", "fd00:0:0:4::2"]
def setup_module(mod):
"""
Sets up the pytest environment
* `mod`: module name
"""
# Required linux kernel version for this suite to run.
result = required_linux_kernel_version("4.16")
if result is not True:
pytest.skip("Kernel requirements are not met")
global ADDR_TYPES
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
json_file = "{}/bgp_gr_topojson_topo2.json".format(CWD)
tgen = Topogen(json_file, mod.__name__)
global topo
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
# Api call verify whether BGP is converged
ADDR_TYPES = check_address_types()
for addr_type in ADDR_TYPES:
BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
assert BGP_CONVERGENCE is True, "setup_module : Failed \n Error:" " {}".format(
BGP_CONVERGENCE
)
logger.info("Running setup_module() done")
def teardown_module(mod):
"""
Teardown the pytest environment
* `mod`: module name
"""
logger.info("Running teardown_module to delete topology")
tgen = get_topogen()
# Stop topology and remove tmp files
tgen.stop_topology()
logger.info(
"Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
)
logger.info("=" * 40)
def configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut, peer):
"""
This function groups the repetitive function calls into one function.
"""
logger.info("configure_gr_followed_by_clear: dut %s peer %s", dut, peer)
result = create_router_bgp(tgen, topo, input_dict)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
for addr_type in ADDR_TYPES:
neighbor = topo["routers"][peer]["links"][dut][addr_type].split("/")[0]
clear_bgp(tgen, addr_type, dut, neighbor=neighbor)
for addr_type in ADDR_TYPES:
neighbor = topo["routers"][dut]["links"][peer][addr_type].split("/")[0]
clear_bgp(tgen, addr_type, peer, neighbor=neighbor)
result = verify_bgp_convergence_from_running_config(tgen)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
return True
def next_hop_per_address_family(tgen, dut, peer, addr_type, next_hop_dict):
"""
This function returns link_local or global next_hop per address-family
"""
interface = topo["routers"][peer]["links"]["{}-link1".format(dut)]["interface"]
if addr_type == "ipv6" and "link_local" in PREFERRED_NEXT_HOP:
next_hop = get_frr_ipv6_linklocal(tgen, peer, intf=interface)
else:
next_hop = next_hop_dict[addr_type]
return next_hop
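# Hedged usage sketch: with PREFERRED_NEXT_HOP = "link_local", IPv6 lookups
# resolve the peer's link-local address on the dut-facing interface;
# everything else falls back to the supplied dict, e.g.
#   next_hop_per_address_family(tgen, "r1", "r3", "ipv4",
#                               {"ipv4": NEXT_HOP_4[0], "ipv6": NEXT_HOP_6[0]})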
def test_BGP_GR_TC_1_2_p0(request):
"""
Verify that EOR message is sent out only after initial convergence
Verify whether EOR message is received from all the peers after restart
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
# Check router status
check_router_status(tgen)
# Don't run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
# Creating configuration from JSON
reset_config_on_routers(tgen)
logger.info(
"Verify EOR Sent and Received : BGP_GR_TC_1_2 >> "
"BGP GR [Helper Mode]R3-----R1[Restart Mode] "
)
# Configure graceful-restart
input_dict = {
"r3": {
"bgp": {
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r3": {"graceful-restart-helper": True}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r3": {"graceful-restart-helper": True}
}
}
}
}
},
}
}
},
"r1": {
"bgp": {
"graceful-restart": {
"graceful-restart": True,
"preserve-fw-state": True,
},
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r3": {"dest_link": {"r1": {"graceful-restart": True}}}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r3": {"dest_link": {"r1": {"graceful-restart": True}}}
}
}
},
},
}
},
}
configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r3")
for addr_type in ADDR_TYPES:
result = verify_graceful_restart(
tgen, topo, addr_type, input_dict, dut="r1", peer="r3"
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
# Verifying BGP RIB routes received from router R3
dut = "r1"
input_dict_1 = {key: topo["routers"][key] for key in ["r3"]}
result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
# Verifying RIB routes
result = verify_rib(tgen, addr_type, dut, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
logger.info("R1 goes for reload")
kill_router_daemons(tgen, "r1", ["bgpd"])
for addr_type in ADDR_TYPES:
# Verifying RIB routes
input_dict_1 = {key: topo["routers"][key] for key in ["r3"]}
# Verifying RIB routes
result = verify_rib(tgen, addr_type, dut, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
logger.info("Starting bgpd process")
start_router_daemons(tgen, "r1", ["bgpd"])
logger.info("R1 is UP Now")
for addr_type in ADDR_TYPES:
result = verify_graceful_restart(
tgen, topo, addr_type, input_dict, dut="r1", peer="r3"
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
# Verifying BGP RIB routes received from router R3
input_dict_1 = {key: topo["routers"][key] for key in ["r3"]}
result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
# Verifying RIB routes
in Refs:
if k in D:
#print"k is", k, "value", D[k]
D[k] = document.Reference(D[k])
dict = PDFDictionary(D)
return format(dict, document)
def showOutline(self):
self.setPageMode("UseOutlines")
def showFullScreen(self):
self.setPageMode("FullScreen")
def setPageLayout(self,layout):
if layout:
self.PageLayout = PDFName(layout)
def setPageMode(self,mode):
if mode:
self.PageMode = PDFName(mode)
def check_format(self, document):
"""for use in subclasses"""
pass
class PDFPages(PDFCatalog):
"""PAGES TREE WITH ONE INTERNAL NODE, FOR "BALANCING" CHANGE IMPLEMENTATION"""
__Comment__ = "page tree"
__RefOnly__ = 1
# note: could implement page attribute inheritance...
__Defaults__ = {"Type": PDFName("Pages"),
}
__NoDefault__ = string.split("Kids Count Parent")
__Refs__ = ["Parent"]
def __init__(self):
self.pages = []
def __getitem__(self, item):
return self.pages[item]
def addPage(self, page):
self.pages.append(page)
def check_format(self, document):
# convert all pages to page references
pages = self.pages
kids = PDFArray(pages)
# make sure all pages are references
kids.References(document)
self.Kids = kids
self.Count = len(pages)
class PDFPage(PDFCatalog):
__Comment__ = "Page dictionary"
# all PDF attributes can be set explicitly
# if this flag is set, the "usual" behavior will be suppressed
Override_default_compilation = 0
__RefOnly__ = 1
__Defaults__ = {"Type": PDFName("Page"),
# "Parent": PDFObjectReference(Pages), # no! use document.Pages
}
__NoDefault__ = string.split(""" Parent
MediaBox Resources Contents CropBox Rotate Thumb Annots B Dur Hid Trans AA
PieceInfo LastModified SeparationInfo ArtBox TrimBox BleedBox ID PZ
Trans
""")
__Refs__ = string.split("""
Contents Parent ID
""")
pagewidth = 595
pageheight = 842
stream = None
hasImages = 0
compression = 0
XObjects = None
_colorsUsed = {}
_shadingsUsed = {}
Trans = None
# transitionstring?
# xobjects?
# annotations
def __init__(self):
# set all nodefaults to None
for name in self.__NoDefault__:
setattr(self, name, None)
def setCompression(self, onoff):
self.compression = onoff
def setStream(self, code):
if self.Override_default_compilation:
raise ValueError, "overridden! must set stream explicitly"
from types import ListType
if type(code) is ListType:
code = string.join(code, LINEEND)+LINEEND
self.stream = code
def setPageTransition(self, tranDict):
self.Trans = PDFDictionary(tranDict)
def check_format(self, document):
# set up parameters unless usual behaviour is suppressed
if self.Override_default_compilation:
return
self.MediaBox = self.MediaBox or PDFArray(self.Rotate in (90,270) and [0,0,self.pageheight,self.pagewidth] or [0, 0, self.pagewidth, self.pageheight])
if not self.Annots:
self.Annots = None
else:
#print self.Annots
#raise ValueError, "annotations not reimplemented yet"
if not hasattr(self.Annots,'__PDFObject__'):
self.Annots = PDFArray(self.Annots)
if not self.Contents:
stream = self.stream
if not stream:
self.Contents = teststream()
else:
S = PDFStream()
if self.compression:
S.filters = rl_config.useA85 and [PDFBase85Encode, PDFZCompress] or [PDFZCompress]
S.content = stream
S.__Comment__ = "page stream"
self.Contents = S
if not self.Resources:
resources = PDFResourceDictionary()
# fonts!
resources.basicFonts()
if self.hasImages:
resources.allProcs()
else:
resources.basicProcs()
if self.XObjects:
#print "XObjects", self.XObjects.dict
resources.XObject = self.XObjects
if self.ExtGState:
resources.ExtGState = self.ExtGState
resources.setShading(self._shadingsUsed)
resources.setColorSpace(self._colorsUsed)
self.Resources = resources
if not self.Parent:
pages = document.Pages
self.Parent = document.Reference(pages)
#this code contributed by <NAME> <<EMAIL>>
class PDFPageLabels(PDFCatalog):
__comment__ = None
__RefOnly__ = 0
__Defaults__ = {}
__NoDefault__ = ["Nums"]
__Refs__ = []
def __init__(self):
self.labels = []
def addPageLabel(self, page, label):
""" Adds a new PDFPageLabel to this catalog.
The 'page' argument, an integer, is the page number in the PDF document
with which the 'label' should be associated. Page numbering in the PDF
starts at zero! Thus, to change the label on the first page, '0' should be
provided as an argument, and to change the 6th page, '5' should be provided
as the argument.
The 'label' argument should be a PDFPageLabel instance, which describes the
format of the labels starting on page 'page' in the PDF and continuing
until the next encounter of a PDFPageLabel.
The order in which labels are added is not important.
"""
self.labels.append((page, label))
def format(self, document):
self.labels.sort()
labels = []
for page, label in self.labels:
labels.append(page)
labels.append(label)
self.Nums = PDFArray(labels) #PDFArray makes a copy with list()
return PDFCatalog.format(self, document)
class PDFPageLabel(PDFCatalog):
__Comment__ = None
__RefOnly__ = 0
__Defaults__ = {}
__NoDefault__ = "Type S P St".split()
__convertible__ = 'ARABIC ROMAN_UPPER ROMAN_LOWER LETTERS_UPPER LETTERS_LOWER'
ARABIC = 'D'
ROMAN_UPPER = 'R'
ROMAN_LOWER = 'r'
LETTERS_UPPER = 'A'
LETTERS_LOWER = 'a'
def __init__(self, style=None, start=None, prefix=None):
"""
A PDFPageLabel changes the style of page numbering as displayed in a PDF
viewer. PDF page labels have nothing to do with 'physical' page numbers
printed on a canvas, but instead influence the 'logical' page numbers
displayed by PDF viewers. However, when using roman numerals (i, ii,
iii...) or page prefixes for appendices (A.1, A.2...) on the physical
pages PDF page labels are necessary to change the logical page numbers
displayed by the PDF viewer to match up with the physical numbers. A
PDFPageLabel changes the properties of numbering at the page on which it
appears (see the class 'PDFPageLabels' for specifying where a PDFPageLabel
is associated) and all subsequent pages, until a new PDFPageLabel is
encountered.
The arguments to this initialiser determine the properties of all
subsequent page labels. 'style' determines the numbering style: arabic,
roman, letters; 'start' specifies the starting number; and 'prefix' any
prefix to be applied to the page numbers. All these arguments can be left
out or set to None.
* style:
- None: No numbering, can be used to display the prefix only.
- PDFPageLabel.ARABIC: Use arabic numbers: 1, 2, 3, 4...
- PDFPageLabel.ROMAN_UPPER: Use upper case roman numerals: I, II, III...
- PDFPageLabel.ROMAN_LOWER: Use lower case roman numerals: i, ii, iii...
- PDFPageLabel.LETTERS_UPPER: Use upper case letters: A, B, C, D...
- PDFPageLabel.LETTERS_LOWER: Use lower case letters: a, b, c, d...
* start:
- An integer specifying the starting number for this PDFPageLabel. This
can be used when numbering style changes to reset the page number back
to one, i.e. from roman to arabic, or from arabic to appendices. Can be
any positive integer or None. I'm not sure what the effect of
specifying None is, probably that page numbering continues with the
current sequence, I'd have to check the spec to clarify though.
* prefix:
- A string which is prefixed to the page numbers. Can be used to display
appendices in the format: A.1, A.2, ..., B.1, B.2, ... where a
PDFPageLabel is used to set the properties for the first page of each
appendix to restart the page numbering at one and set the prefix to the
appropriate letter for current appendix. The prefix can also be used to
display text only, if the 'style' is set to None. This can be used to
display strings such as 'Front', 'Back', or 'Cover' for the covers on
books.
"""
if style:
if style.upper() in self.__convertible__: style = getattr(self,style.upper())
self.S = PDFName(style)
if start: self.St = PDFnumber(start)
if prefix: self.P = PDFString(prefix)
#ends code contributed by <NAME> <<EMAIL>>
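#hedged usage sketch (not from the original source); mirrors the docstrings above:
# labels = PDFPageLabels()
# labels.addPageLabel(0, PDFPageLabel(style=PDFPageLabel.ROMAN_LOWER)) # i, ii, iii...
# labels.addPageLabel(4, PDFPageLabel(style=PDFPageLabel.ARABIC, start=1)) # 1, 2, 3...
# labels.addPageLabel(10, PDFPageLabel(style=PDFPageLabel.ARABIC, start=1, prefix='A.')) # A.1, A.2...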
def testpage(document):
P = PDFPage()
P.Contents = teststream()
pages = document.Pages
P.Parent = document.Reference(pages)
P.MediaBox = PDFArray([0, 0, 595, 841])
resources = PDFResourceDictionary()
resources.allProcs() # enable all procsets
resources.basicFonts()
P.Resources = resources
pages.addPage(P)
#### DUMMY OUTLINES IMPLEMENTATION FOR testing
DUMMYOUTLINE = """
<<
/Count
0
/Type
/Outlines
>>"""
class PDFOutlines0:
__PDFObject__ = True
__Comment__ = "TEST OUTLINE!"
text = string.replace(DUMMYOUTLINE, "\n", LINEEND)
__RefOnly__ = 1
def format(self, document):
return self.text
class OutlineEntryObject:
"an entry in an outline"
__PDFObject__ = True
Title = Dest = Parent = Prev = Next = First = Last = Count = None
def format(self, document):
D = {}
D["Title"] = PDFString(self.Title)
D["Parent"] = self.Parent
D["Dest"] = self.Dest
for n in ("Prev", "Next", "First", "Last", "Count"):
v = getattr(self, n)
if v is not None:
D[n] = v
PD = PDFDictionary(D)
return PD.format(document)
class PDFOutlines:
"""
takes a recursive list of outline destinations like::
out = PDFOutline1()
out.setNames(canvas, # requires canvas for name resolution
"chapter1dest",
("chapter2dest",
["chapter2section1dest",
"chapter2section2dest",
"chapter2conclusiondest"]
), # end of chapter2 description
"chapter3dest",
("chapter4dest", ["c4s1", "c4s2"])
)
Higher layers may build this structure incrementally. KISS at base level.
"""
__PDFObject__ = True
# first attempt, many possible features missing.
#no init for now
mydestinations = ready = None
counter = 0
currentlevel = -1 # ie, no levels yet
def __init__(self):
self.destinationnamestotitles = {}
self.destinationstotitles = {}
self.levelstack = []
self.buildtree = []
self.closedict = {}
normalize):
self.normalize = normalize
def forward(self, input):
if self.mode == 'loss':
loss = self.crit(input, self.target)
if self.normalize:
loss = ScaleGradients.apply(loss, self.strength)
self.loss = loss * self.strength
elif self.mode == 'capture':
self.target = input.detach()
return input
######################################################
# Style Loss module (Gram/Covariance loss with masks)
class MaskedStyleLoss(nn.Module):
def __init__(self, strength, normalize):
super(MaskedStyleLoss, self).__init__()
self.strength = strength
self.crit = nn.MSELoss()
self.mode = 'none'
self.blend_weight = None
self.set_statistic('gram')
self.set_masks(None, None)
self.normalize = normalize
def set_normalize_gradients(self, normalize):
self.normalize = normalize
def set_statistic(self, style_stat):
if style_stat == 'gram':
self.gram = GramMatrix()
elif style_stat == 'covariance':
self.gram = CovarianceMatrix()
def set_masks(self, content_masks, style_masks):
self.content_masks = copy.deepcopy(content_masks)
self.style_masks = copy.deepcopy(style_masks)
self.target_grams = []
self.masked_grams = []
self.masked_features = []
self.capture_count = 0
def forward(self, input):
if self.mode == 'capture':
if self.style_masks != None:
masks = self.style_masks[self.capture_count]
else:
masks = None
self.capture_count += 1
elif self.mode == 'loss':
masks = self.content_masks
self.style_masks = None
if self.mode != 'none':
if self.strength == 0:
self.loss = 0
return input
loss = 0.0
for j in range(self.capture_count):
if masks != None:
l_mask_ori = masks[j].clone()
l_mask = l_mask_ori.repeat(1,1,1).expand(input.size())
l_mean = l_mask_ori.mean()
masked_feature = l_mask.mul(input)
masked_gram = self.gram(masked_feature).clone()
if l_mean > 0:
masked_gram = masked_gram.div(input.nelement() * l_mean)
else:
l_mean = 1.0
masked_feature = input
masked_gram = self.gram(input).clone()
masked_gram = masked_gram.div(input.nelement())
if self.mode == 'capture':
if j >= len(self.target_grams):
self.target_grams.append(masked_gram.detach().mul(self.blend_weight))
self.masked_grams.append(self.target_grams[j].clone())
self.masked_features.append(masked_feature)
else:
self.target_grams[j] += masked_gram.detach().mul(self.blend_weight)
elif self.mode == 'loss':
self.masked_grams[j] = masked_gram
self.masked_features[j] = masked_feature
loss_ = self.crit(self.masked_grams[j], self.target_grams[j]) * l_mean
if self.normalize:
loss_ = ScaleGradients.apply(loss_, self.strength)
loss += loss_
self.loss = self.strength * loss
return input
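# Hedged note (not from the original source): the capture/loss protocol these
# modules share, based on how `mode` is used above:
#   loss_mod.mode = 'capture'; net(style_image)   # record target statistics
#   loss_mod.mode = 'loss';    net(generated)     # accumulate into loss_mod.loss
# `net` and the image names are hypothetical.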
######################################################
# Style Loss module (histogram loss with masks)
class MaskedHistLoss_old(nn.Module):
def __init__(self, strength, normalize):
super(MaskedHistLoss_old, self).__init__()
self.strength = strength
self.crit = nn.MSELoss()
self.mode = 'none'
self.blend_weight = 1.0
self.set_masks(None, None)
self.normalize = normalize
def set_masks(self, content_masks, style_masks):
self.content_masks = copy.deepcopy(content_masks)
self.style_masks = copy.deepcopy(style_masks)
self.target_hists = []
self.target_maxs = []
self.target_mins = []
self.capture_count = 0
def set_normalize_gradients(self, normalize):
self.normalize = normalize
def minmax(self, input):
return torch.min(input[0].view(input.shape[1], -1), 1)[0].data.clone(), \
torch.max(input[0].view(input.shape[1], -1), 1)[0].data.clone()
def calcHist(self, input, target, min_val, max_val):
res = input.data.clone()
cpp.matchHistogram(res, target.clone())
for c in range(res.size(0)):
res[c].mul_(max_val[c] - min_val[c])
res[c].add_(min_val[c])
return res.data.unsqueeze(0)
def forward(self, input):
if self.mode == 'capture':
            if self.style_masks is not None:
masks = self.style_masks[self.capture_count]
else:
masks = None
self.capture_count += 1
elif self.mode == 'loss':
masks = self.content_masks
self.style_masks = None
if self.mode != 'none':
if self.strength == 0:
self.loss = 0
return input
loss = 0
for j in range(self.capture_count):
                if masks is not None:
l_mask_ori = masks[j].clone()
l_mask = l_mask_ori.repeat(1,1,1).expand(input.size())
masked_feature = l_mask.mul(input)
else:
masked_feature = input
target_min, target_max = self.minmax(masked_feature)
target_hist = cpp.computeHistogram(masked_feature[0], 256)
if self.mode == 'capture':
if j >= len(self.target_hists):
self.target_mins.append(target_min)
self.target_maxs.append(target_max)
self.target_hists.append(target_hist.mul(self.blend_weight))
else:
self.target_hists[j] += target_hist.mul(self.blend_weight)
self.target_mins[j] = torch.min(self.target_mins[j], target_min)
self.target_maxs[j] = torch.max(self.target_maxs[j], target_max)
elif self.mode == 'loss':
target = self.calcHist(masked_feature[0], self.target_hists[j], self.target_mins[j], self.target_maxs[j])
loss += self.crit(masked_feature, target) # * self.strength
if self.normalize:
loss = ScaleGradients.apply(loss, self.strength)
self.loss = 0.01 * self.strength * loss
return input
class MaskedHistLoss(nn.Module):
def __init__(self, strength, normalize):
super(MaskedHistLoss, self).__init__()
self.strength = strength
self.crit = nn.MSELoss()
self.mode = 'none'
self.blend_weight = 1.0
self.set_masks(None, None)
self.normalize = normalize
def set_normalize_gradients(self, normalize):
self.normalize = normalize
def double_mean(self, tensor):
tensor = tensor.squeeze(0).permute(2, 1, 0)
return tensor.mean(0).mean(0)
def set_masks(self, content_masks, style_masks):
self.content_masks = copy.deepcopy(content_masks)
self.style_masks = copy.deepcopy(style_masks)
self.targets = []
self.capture_count = 0
def forward(self, input):
if self.mode == 'capture':
            if self.style_masks is not None:
masks = self.style_masks[self.capture_count]
else:
masks = None
self.capture_count += 1
elif self.mode == 'loss':
masks = self.content_masks
self.style_masks = None
if self.mode != 'none':
if self.strength == 0:
self.loss = 0
return input
loss = 0
for j in range(self.capture_count):
                if masks is not None:
l_mask_ori = masks[j].clone()
l_mask = l_mask_ori.repeat(1,1,1).expand(input.size())
masked_feature = l_mask.mul(input)
else:
masked_feature = input
if self.mode == 'capture':
target = self.double_mean(masked_feature.detach())
if j >= len(self.targets):
self.targets.append(target.mul(self.blend_weight))
else:
self.targets[j] += target.mul(self.blend_weight)
elif self.mode == 'loss':
loss += self.crit(self.double_mean(masked_feature.clone()), self.targets[j])
if self.normalize:
loss = ScaleGradients.apply(loss, self.strength)
self.loss = self.strength * loss
return input
######################################################
# Histogram matching function (unused at the moment)
# Define a module to match histograms
class MatchHistogram(nn.Module):
def __init__(self, eps=1e-5, mode='pca'):
super(MatchHistogram, self).__init__()
self.eps = eps or 1e-5
self.mode = mode or 'pca'
self.dim_val = 3
def get_histogram(self, tensor):
m = tensor.mean(0).mean(0)
h = (tensor - m).permute(2,0,1).reshape(tensor.size(2),-1)
if h.is_cuda:
ch = torch.mm(h, h.T) / h.shape[1] + self.eps * torch.eye(h.shape[0], device=h.get_device())
else:
ch = torch.mm(h, h.T) / h.shape[1] + self.eps * torch.eye(h.shape[0])
return m, h, ch
def convert_tensor(self, tensor):
if tensor.dim() == 4:
tensor = tensor.squeeze(0).permute(2, 1, 0)
self.dim_val = 4
elif tensor.dim() == 3 and self.dim_val != 4:
tensor = tensor.permute(2, 1, 0)
elif tensor.dim() == 3 and self.dim_val == 4:
tensor = tensor.permute(2, 1, 0).unsqueeze(0)
return tensor
def nan2zero(self, tensor):
tensor[tensor != tensor] = 0
return tensor
def chol(self, t, c, s):
chol_t, chol_s = torch.cholesky(c), torch.cholesky(s)
return torch.mm(torch.mm(chol_s, torch.inverse(chol_t)), t)
def sym(self, t, c, s):
p = self.pca(t, c)
psp = torch.mm(torch.mm(p, s), p)
eval_psp, evec_psp = torch.symeig(psp, eigenvectors=True, upper=True)
e = self.nan2zero(torch.sqrt(torch.diagflat(eval_psp)))
evec_mm = torch.mm(torch.mm(evec_psp, e), evec_psp.T)
return torch.mm(torch.mm(torch.mm(torch.inverse(p), evec_mm), torch.inverse(p)), t)
def pca(self, t, c):
eval_t, evec_t = torch.symeig(c, eigenvectors=True, upper=True)
e = self.nan2zero(torch.sqrt(torch.diagflat(eval_t)))
return torch.mm(torch.mm(evec_t, e), evec_t.T)
def match(self, target_tensor, source_tensor):
source_tensor = self.convert_tensor(source_tensor)
target_tensor = self.convert_tensor(target_tensor)
_, t, ct = self.get_histogram(target_tensor)
ms, s, cs = self.get_histogram(source_tensor)
if self.mode == 'pca':
mt = torch.mm(torch.mm(self.pca(s, cs), torch.inverse(self.pca(t, ct))), t)
elif self.mode == 'sym':
mt = self.sym(t, ct, cs)
elif self.mode == 'chol':
mt = self.chol(t, ct, cs)
matched_tensor = mt.reshape(*target_tensor.permute(2,0,1).shape).permute(1,2,0)
matched_tensor += ms
return self.convert_tensor(matched_tensor)
def forward(self, input, source_tensor):
return self.match(input, source_tensor)
######################################################
# Blurred MaxPool2D (see https://github.com/ProGamerGov/neural-style-pt/issues/71)
class GaussianBlur(nn.Module):
def __init__(self, kernel_size=6, sigma = math.pi / 2):
super().__init__()
        if type(kernel_size) is not list and type(kernel_size) is not tuple:
kernel_size = [kernel_size] * 2
if type(sigma) is not list and type(sigma) is not tuple:
sigma = [sigma] * 2
kernel = 1
meshgrid_tensor = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size])
for size, std, mgrid in zip(kernel_size, sigma, meshgrid_tensor):
kernel *= 1 / (std * math.sqrt(2 * math.pi)) * \
torch.exp(-((mgrid - ((size - 1) / 2)) / std) ** 2 / 2)
self.kernel = (kernel / torch.sum(kernel)).view(1, 1, *kernel.size()).cuda()
def forward(self, x):
assert x.dim() == 4
groups = x.size(1)
weight = self.kernel.repeat(groups, * [1] * (self.kernel.dim() - 1))
x = torch.nn.functional.pad(x, (3,2,3,2), mode='reflect') # No idea if this is a good idea for keeping input the same size
x = torch.nn.functional.conv2d(x, weight=weight, groups=groups)
return x
class MaxPool2d(torch.nn.MaxPool2d):
def forward(self, x):
global blur_input
if blur_input is None:
blur_input = GaussianBlur(6, sigma = 0.25)
x = blur_input(x)
x = x.unfold(2, self.kernel_size, self.stride).unfold(3, self.kernel_size, self.stride)
x = x.contiguous().view(x.size()[:4] + (-1,))
pool, _ = torch.max(x, dim=-1)
return pool
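# A minimal usage sketch (assumption: swapping the blurred pool into a
# torchvision VGG, the usual way anti-aliased pooling is applied; requires
# CUDA because GaussianBlur builds its kernel on the GPU):
#
#   import torchvision
#   cnn = torchvision.models.vgg19(pretrained=True).features
#   for i, layer in enumerate(cnn):
#       if isinstance(layer, torch.nn.MaxPool2d):
#           cnn[i] = MaxPool2d(kernel_size=2, stride=2)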
######################################################
# TV regularization
class TVLoss(nn.Module):
def __init__(self, strength):
super(TVLoss, self).__init__()
self.strength = strength
def forward(self, input):
self.x_diff = input[:,:,1:,:] - input[:,:,:-1,:]
self.y_diff = input[:,:,:,1:] - input[:,:,:,:-1]
self.loss = self.strength * (torch.sum(torch.abs(self.x_diff)) + torch.sum(torch.abs(self.y_diff)))
return input
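# Example (hedged): for an image tensor `img` of shape (1, 3, H, W), the module
# passes its input through unchanged and stores the weighted penalty:
#
#   tv = TVLoss(strength=1e-3)
#   _ = tv(img)
#   tv.loss  # strength * (sum of |diffs| along both spatial axes)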
######################################################
# Optimizer
def setup_optimizer(img, params, num_iterations, verbose=True):
if params.optimizer == 'lbfgs':
if verbose:
print("Running optimization with L-BFGS")
optim_state = {
'max_iter': num_iterations,
'tolerance_change': -1,
'tolerance_grad': -1,
}
if params.lbfgs_num_correction != 100:
optim_state['history_size'] = params.lbfgs_num_correction
optimizer = optim.LBFGS([img], **optim_state)
loopVal = 1
elif params.optimizer == 'adam':
if verbose:
print("Running optimization with ADAM")
optimizer = optim.Adam([img], lr = params.learning_rate)
loopVal = num_iterations - 1
return optimizer, loopVal
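# A minimal sketch of driving the returned optimizer (the `feval` closure and
# `compute_total_loss` below are illustrative, not part of this module):
#
#   optimizer, loopVal = setup_optimizer(img, params, num_iterations)
#   def feval():
#       optimizer.zero_grad()
#       loss = compute_total_loss()   # e.g. sum of the loss modules' .loss fields
#       loss.backward()
#       return loss
#   for _ in range(loopVal):
#       optimizer.step(feval)         # L-BFGS runs all iterations inside one step()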
######################################################
# GPU config
def setup_gpu(params):
def setup_cuda():
if 'cudnn' in params.backend:
torch.backends.cudnn.enabled = True
if params.cudnn_autotune:
torch.backends.cudnn.benchmark = True
else:
torch.backends.cudnn.enabled = False
def setup_cpu():
if 'mkl' in params.backend and 'mkldnn' not in params.backend:
torch.backends.mkl.enabled = True
elif 'mkldnn' in params.backend:
raise ValueError("MKL-DNN is not supported yet.")
elif 'openmp' in params.backend:
torch.backends.openmp.enabled = True
multidevice = False
if "," in str(params.gpu):
devices = params.gpu.split(',')
multidevice = True
if 'c' in str(devices[0]).lower():
backward_device = "cpu"
setup_cuda(), setup_cpu()
else:
backward_device = "cuda:" + | |
# (c) British Crown Copyright 2020, the Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the Met Office nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import netCDF4,argparse,sys
import numpy as np
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import os
def collapse_dimensions_for_plotting(longitude, latitude, vname, vx, vd, dims):
"""
Pre-processing of COSP variable for plotting.
Arguments:
longitude: longitude from COSP output file [loc].
latitude: latitude from COSP output file [loc].
vname: variable name.
vx: variable from COSP output file [..., loc]
vd: dictionary with metadata about variable.
dims: dictionary with additional dimensions.
Return:
x: x axis values.
y: y axis values.
z: data array for plotting.
d: dictionary with plot configuration.
"""
yflip = False
xticks, yticks, xticks_labels, yticks_labels, xlabel, ylabel, vmax = (None,)*7
if vd['plot_type'] == 'map':
d = None
x = longitude[0]
y = latitude[:,0]
z = vx
if vname == 'parasolGrid_refl': z = vx[2]
# Roll longitude if there are values > 180
m = (x > 180.0)
        if m.any():
Nroll = longitude.shape[1] // 2
x[m] = x[m] - 360.0
x = np.roll(x, Nroll)
z = np.roll(z,Nroll,axis=1)
# Calculate latitude and longitude edge points.
# Assume they are increasing monotonically.
# Extend length to N+2 and calculate midpoints.
x = midpoints_to_edges(x)
y = midpoints_to_edges(y)
xticks = np.arange(-180,181,60)
yticks = np.arange(-90,91,30)
xlabel = 'Longitude (deg)'
ylabel = 'Latitude (deg)'
if vd['plot_type'] == '2Dhist':
weights = np.cos(latitude * np.pi / 180.0)
weights = weights / weights.sum()
z = np.sum(vx * weights, axis=2)
x = np.arange(z.shape[1]+1)
y = np.arange(z.shape[0]+1)
if vd['plot_type'] == 'zonal_cross_section':
z = np.average(vx, axis=2)
x = midpoints_to_edges(latitude[:,0])
y = np.arange(z.shape[0] + 1)
if vd['plot_type'] in ('2Dhist','zonal_cross_section'):
if vd['xaxis_type'] == 'tau7':
xticks_labels = ('0', '0.3', '1.3', '3.6', '9.4', '23', '60', '')
xticks = x
xlabel = 'Cloud optical depth'
if vd['xaxis_type'] == 'cloudsat_DBZE_BINS':
x = np.arange(-50,26,5)
xticks = x
xticks_labels = None
xlabel = 'Radar reflectivity (dBZ)'
if vd['xaxis_type'] == 'SR_BINS':
xticks_labels = ('0', '0.01', '1.2', '3', '5', '7', '10', '15', '20',
'25', '30', '40', '50', '60', '80', '')
xticks = x
xlabel = 'Lidar scattering ratio'
if vd['xaxis_type'] == 'latitude':
xticks_labels = None
xticks = np.arange(-90,91,30)
xlabel = 'Latitude (deg)'
if vd['yaxis_type'] == 'pres7':
yticks_labels = ('1000', '800', '680', '560', '440', '310', '180','')
yticks = y
ylabel = 'Cloud Top Pressure (hPa)'
if vd['yaxis_type'] == 'hgt16':
yticks_labels = ('', '0', '500', '1000', '1500', '2000', '2500',
'3000', '4000', '5000', '7000', '9000', '11000',
'13000', '15000', '17000', '')
yticks = y
ylabel = 'Cloud Top Height (m)'
if vd['yaxis_type'] == 'REICE_MODIS':
yticks_labels = ('0', '10', '20', '30', '40', '60', '90')
yticks = y
ylabel = 'Ice particle size (micron)'
if vd['yaxis_type'] == 'RELIQ_MODIS':
yticks_labels = ('0', '8', '10', '13', '15', '20', '30')
yticks = y
ylabel = 'Liquid particle size (micron)'
if vd['yaxis_type'] == 'levStat':
y = 480*np.arange(41)
yticks = y[0::4]
yticks_labels = None
ylabel = 'Altitude (m)'
yflip = True
if vd['yaxis_type'] == 'lev':
yticks = y[0::4]
yticks_labels = None
ylabel = 'Model level'
yflip = True
# Extra processing for specific variables
vmax = None
if vname == 'cfadLidarsr355': vmax = 0.03
if vname == 'cfadLidarsr532': vmax = 0.03
if vname == 'cfadLidarsr532gr': vmax = 0.03
if vname == 'cfadDbze94': vmax = 0.05
if vname == 'iwpmodis': vmax = 2.0
if vname == 'lwpmodis': vmax = 1.0
if vname == 'tauisccp': vmax = 100.0
if vname == 'tautmodis': vmax = 100.0
if vname == 'tauwmodis': vmax = 100.0
d = {'xticks':xticks,
'yticks':yticks,
'xticks_labels':xticks_labels,
'yticks_labels':yticks_labels,
'xlabel':xlabel,
'ylabel':ylabel,
'vmax':vmax}
# Flip y axis?
if yflip: z = np.flip(z, axis=0)
return x, y, z, d
def midpoints_to_edges(x):
"""
Calculate edge points. Midpoints must increase monotonically.
Arguments:
x: vector with mid points. Dimension N.
Return:
y: numpy vector with edges. Dimension N+1.
"""
y = np.append(np.append(2 * x[0] - x[1], x), 2 * x[-1] - x[-2])
return 0.5 * (y[1:] + y[:-1])
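# Worked example for the edge construction above:
#   midpoints_to_edges(np.array([0.0, 1.0, 2.0])) -> array([-0.5, 0.5, 1.5, 2.5])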
def plot_pcolormesh(x, y, v, d, fig_name, title=None, coastlines=False):
"""
Plot pcolormesh and write the output to a png file.
Arguments:
x: x axis values.
y: y axis values.
v: data array. Dimensions [Nx,Ny,Np]
d: dictionary with plot configuration.
fig_name: output file name.
Keywords:
title: plot title.
coastlines: plot coast lines.
"""
fig = plt.figure(figsize=(10,5))
cmap = plt.get_cmap('YlOrRd', 20)
cmap.set_bad('grey', 1)
if coastlines:
ax = plt.axes(projection=ccrs.PlateCarree(central_longitude=0))
ax.coastlines()
h = plt.pcolormesh(x, y, v, cmap=cmap, vmax=d['vmax'])
if d['xticks_labels']:
plt.xticks(d['xticks'],d['xticks_labels'])
else:
plt.xticks(d['xticks'])
if d['yticks_labels']:
plt.yticks(d['yticks'],d['yticks_labels'])
else:
plt.yticks(d['yticks'])
plt.xlabel(d['xlabel'])
plt.ylabel(d['ylabel'])
plt.colorbar(h,orientation='vertical')
if title is not None: plt.title(title)
plt.savefig(fig_name, dpi=200)
plt.close()
def read_dimensions(fname):
"""
Read useful dimensions from COSP output file.
Arguments:
fname: path to NetCDF file.
Return:
d: dictionary with the following dimensions:
'cloudsat_DBZE_BINS', 'hgt16', 'REICE_MODIS',
'RELIQ_MODIS', 'levStat', 'SR_BINS', 'lev'
"""
dim_names = ['cloudsat_DBZE_BINS', 'hgt16', 'REICE_MODIS', 'RELIQ_MODIS',
'levStat', 'SR_BINS', 'lev']
d = {}
f_id = netCDF4.Dataset(fname, 'r')
for dim in dim_names:
d[dim] = f_id.variables[dim][:]
f_id.close()
return d
def read_var_to_masked_array(fname, vname, fill_value, Nlat_lon = None):
"""
Reads a variable from a NetCDF file, and produces a masked array.
Arguments:
fname: path to NetCDF file.
vname: variable name.
fill_value: missing data value.
Keywords:
Nlat_lon: tuple (Nrows, Ncols). If defined, variable is
reshaped to a lat-lon grid.
Return:
x: variable data array.
lon: longitude array.
lat: latitude array
units: units attribute.
long_name: long name attribute.
"""
f_id = netCDF4.Dataset(fname, 'r')
x = np.ma.masked_equal(f_id.variables[vname][:], fill_value)
lon = np.ma.masked_equal(f_id.variables['longitude'][:], fill_value)
lat = np.ma.masked_equal(f_id.variables['latitude'][:], fill_value)
units = f_id.variables[vname].getncattr('units')
long_name = f_id.variables[vname].getncattr('long_name')
f_id.close()
if Nlat_lon is not None:
x = np.reshape(x, x.shape[:-1]+Nlat_lon)
lon = np.reshape(lon, lon.shape[:-1] + Nlat_lon)
lat = np.reshape(lat, lat.shape[:-1] + Nlat_lon)
return x, lon, lat, units, long_name
def produce_cosp_summary_plots(fname, variables, output_dir, Nlat_lon = None):
"""
Wrapper function that iterates over a list of COSP variables and produces
a PNG figure for each of them.
Arguments:
fname: COSP output filename.
variables: list of variable names.
output_dir: output directory.
Keywords:
Nlat_lon: tuple with (lat,lon) dimensions model's grid.
"""
fill_value = -1.0e30
dimensions = read_dimensions(fname)
for vname, vd in variables.items():
new_shape = None
if vd['reshape']: new_shape = Nlat_lon
vx, longitude, latitude, units, long_name = read_var_to_masked_array(fname, vname, fill_value, Nlat_lon = new_shape)
x, y, z, pkw = collapse_dimensions_for_plotting(longitude, latitude, vname, vx, vd, dimensions)
title = long_name + ' (' + units + ')'
fig_name = os.path.join(output_dir, ".".join([os.path.basename(fname), vname, 'png']))
coastlines = False
if vd['plot_type'] == 'map': coastlines = True
plot_pcolormesh(x, y, z, pkw, fig_name, title=title, coastlines=coastlines)
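# A minimal usage sketch (file name, variable list and grid shape are
# hypothetical placeholders):
#
#   variables = variable2D_metadata(['cltisccp'], 'cosp_output.nc')
#   produce_cosp_summary_plots('cosp_output.nc', variables, 'plots',
#                              Nlat_lon=(96, 192))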
def variable2D_metadata(var_list, fname):
"""
Return dictionary with metadata for each variable.
Arguments:
var_list: list of variable names.
fname: COSP output filename.
Return:
d: dictionary of dictionaries with relevant metadata.
"""
map_dims = (('loc',),('PARASOL_NREFL','loc'))
hist2D_dims = (('pres7', 'tau7', 'loc'),
('levStat', 'cloudsat_DBZE_BINS', 'loc'),
('hgt16', 'tau7', 'loc'),
('REICE_MODIS', 'tau7', 'loc'),
('RELIQ_MODIS', 'tau7', 'loc'),
('levStat', 'SR_BINS', 'loc'))
zcs_dims = (('levStat','loc'), ('lev','loc'))
f_id = netCDF4.Dataset(fname, 'r')
vmeta = {}
        # use bsmBI
tail = 'taaattCGTCTCA'
return bsmBI, tail, 2
if not bseFlag:
        # use bseRI
tail = 'taaattGAGGAGattcccta'
return bseRI, tail, 1
return 0, 0, 0
#given a parent plasmid and a desired product plasmid, design the eipcr primers
#use difflib to figure out where the differences are
#if there is a convenient restriction site in or near the modification, use that
# otherwise, check if there exists bseRI or bsaI sites, and design primers using those
# print/return warning if can't do this via eipcr (insert span too long)
def DesignEIPCR(product, insert, diffTuple, template):
# use 60 bp to right of mutation as domain for annealing region design
(fwdStart, fwdEnd) = (diffTuple[1], diffTuple[1]+60)
enz, tail, halfSiteSize = chooseReachover(template)
if enz == 0:
return 0, 0
# accounting for the wrap around case
if fwdEnd > len(product.sequence):
fwdEnd = fwdEnd % len(product.sequence)
fwdAnneal = getAnnealingRegion(product.sequence[fwdStart:] + product.sequence[:fwdEnd], 1)
else:
fwdAnneal = getAnnealingRegion(product.sequence[fwdStart:fwdEnd], 1)
# same with the 60 bp to the left of the mutation
(revStart, revEnd) = (diffTuple[0]-60, diffTuple[0])
if revStart < 0:
revAnneal = getAnnealingRegion(product.sequence[revStart:] + product.sequence[:revEnd], 0)
else:
revAnneal = getAnnealingRegion(product.sequence[revStart:revEnd], 0)
# use BsaI 'taaGGTCTCx1234' to do reachover digest and ligation
# wrap around case
if not diffTuple[1] > diffTuple[0]:
half = ((diffTuple[1] + len(product.sequence) - diffTuple[0]) / 2) + diffTuple[0]
else:
half = ((diffTuple[1] - diffTuple[0]) / 2) + diffTuple[0]
# the 4 bp in the overhang must not contain any N's --> otherwise, ligation won't work
overhang = product.sequence[half - halfSiteSize : half + halfSiteSize]
while 'N' in overhang.upper():
half = half + 1
overhang = product.sequence[half - halfSiteSize : half + halfSiteSize]
# Accounting for the == 0 case, which would otherwise send the mutagenic region to ''
if diffTuple[1] == 0:
fwdPrimer = DNA('primer','fwd EIPCR primer for '+product.name, tail + product.sequence[half - halfSiteSize :] + fwdAnneal)
else:
# Originally: product.sequence[half - 2 : diffTuple[1] + 1]
fwdPrimer = DNA('primer','fwd EIPCR primer for '+product.name, tail + product.sequence[half - halfSiteSize : diffTuple[1]] + fwdAnneal)
# print 'AFTER TAIL', product.sequence[half - halfSiteSize : diffTuple[1] + 1]
if half + halfSiteSize == 0:
revPrimer = DNA('primer','rev EIPCR primer for '+product.name, tail + reverseComplement(product.sequence[ diffTuple[0] :]) + revAnneal)
else:
revPrimer = DNA('primer','rev EIPCR primer for '+product.name, tail + reverseComplement(product.sequence[ diffTuple[0] : half + halfSiteSize]) + revAnneal)
# print 'REV AFTER TAIL', reverseComplement(product.sequence[ diffTuple[0] : half + halfSiteSize])
return (fwdPrimer, revPrimer), enz
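# Hedged usage sketch (the plasmid objects and diff tuple come from the rest of
# this module; DesignEIPCR returns (0, 0) when no reachover enzyme is usable):
#
#   primers, enzyme = DesignEIPCR(product, insert, diffTuple, template)
#   if primers:
#       fwdPrimer, revPrimer = primers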
# TODO: Implement this, along with restriction site checking?
def DesignWobble(parent, product):
return 0
def Distinguish2DNABands(a, b):
#case of 2
#for a standard 1-2% agarose gel,
#we can distinguish a and b if
#do the following in wolframalpha: LogLogPlot[|a - b| > (0.208*a+42), {a, 0, 9000}, {b, 0, 9000}]
return ( abs(a.length - b.length) > (0.208*a.length+42)) & (min(a.length, b.length) > 250 )
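# Worked example: fragments of 1000 bp and 1500 bp satisfy
# |1500 - 1000| = 500 > 0.208*1000 + 42 = 250 with both lengths > 250 bp,
# so they are distinguishable; 1000 bp vs 1100 bp fails (100 < 250).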
#only returns True if can distinguish between all of the DNA bands
def DistinguishDNABands(list_of_dnas):
ret_val = True
for i in range(len(list_of_dnas)-1):
ret_val = ret_val & Distinguish2DNABands(list_of_dnas[i], list_of_dnas[i+1])
return ret_val
def FindDistinguishingEnzyme(list_of_dnas):
#find the REase that can distinguish between the input DNAs
#DistinguishDNABands(a, b) returns true if we can
# tell apart bands a, b on a gel and a and b are both > 300bp, < 7kb
#Let n be the number of DNAs in the list. Let E be the enzyme under question
# Then we construct a n-dimensional matrix
# where the dimensions have max value defined by the number of fragments generated by E
# E can be used to distinguish between the DNAs if there is a complete row or column
# that is distinguishable (all True by DistinguishDNABands)
#ASSUMPTION, for now, only consider n=3
#iterate over all enzymes (enzyme list should be prioritized by availability and "goodness")
#execute find good enz
#iterate over all combinations of 2 enzymes
#execute find good enz
##find good enz
#for each enzyme/combo in the list
#calculate fragments for each input DNA
#skip if any DNA has # fragments > 6
#n-length list, each character represents the DNA fragment currently under investigation
#iterate to fill in the hypermatrix values
#find if the hypermatrix has a column/row that has all True
#returns top 5 list of enzymes/combos that work
return 0
def FindDistEnz():
return FindDistinguishingEnzyme(list_of_dnas)
# Description: SetFlags() returns overhang information about a DNA() digest object
def SetFlags(frag):
(TL,TR,BL,BR) = (0,0,0,0)
if frag.topLeftOverhang.sequence != '':
TL = 1
if frag.topRightOverhang.sequence != '':
TR = 1
if frag.bottomLeftOverhang.sequence != '':
BL = 1
if frag.bottomRightOverhang.sequence != '':
BR = 1
return (TL,TR,BL,BR)
def ligatePostProcessing(ligated, childrenTuple, message):
ligated.setChildren(childrenTuple)
for child in childrenTuple:
child.addParent(ligated)
ligated.setTimeStep(0.5)
ligated.addMaterials(['DNA Ligase','DNA Ligase Buffer','ddH20'])
ligated.instructions = message
return ligated
def isComplementary(seq1, seq2):
if seq1 == '' or seq2 == '':
return False
elif seq1 == Complement(seq2):
return True
return False
def isReverseComplementary(seq1, seq2):
if seq1 == '' or seq2 == '':
return False
elif seq1 == reverseComplement(seq2):
return True
return False
# Description: Ligate() function accepts a list of DNA() digest objects, and outputs list of DNA
def Ligate(inputDNAs):
products = []
# self ligation
for fragment in inputDNAs:
if not isinstance(fragment, DNA):
print '\n*Ligate Error*: Ligate function was passed a non-DNA argument. Argument discarded.\n'
continue
(TL,TR,BL,BR) = SetFlags(fragment)
if fragment.DNAclass == 'plasmid':
print '\n*Ligate Warning*: for ligation reaction, invalid input molecule removed -- ligation input DNA objects must be of class \'digest\' or be PNK treated linear molecules.\n'
elif TL+TR+BL+BR == 1:
pass
elif TL+TR+BL+BR == 0:
# blunt end self ligation case --> need to identify that both sides were digested (i.e. both ecoRV blunt ends)
# and then return circular product of same sequence.
pass
elif fragment.topLeftOverhang.sequence != '':
if isComplementary(fragment.topLeftOverhang.sequence.lower(), fragment.bottomRightOverhang.sequence.lower()):
ligated = DNA('plasmid',fragment.name+' self-ligation',fragment.topLeftOverhang.sequence+fragment.sequence)
products.append(ligatePostProcessing(ligated, (fragment, ), 'Self-ligate ('+fragment.name+') with DNA ligase for 30 minutes at room-temperature.'))
elif fragment.bottomLeftOverhang.sequence != '':
if isComplementary(fragment.bottomLeftOverhang.sequence.lower(), fragment.topRightOverhang.sequence.lower()):
ligated = DNA('plasmid',fragment.name+' self-ligation',fragment.sequence+fragment.topRightOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragment, ), 'Self-ligate ('+fragment.name+') with DNA ligase for 30 minutes at room-temperature.'))
if len(products) > 0 or len(inputDNAs) == 1:
return products
i = 0
while i < len(inputDNAs):
fragOne = inputDNAs[i]
if not isinstance(fragOne, DNA):
print '\n*Ligate Warning*: Ligate function was passed a non-DNA argument. Argument discarded.\n'
i += 1
continue
elif fragOne.DNAclass == 'plasmid':
i += 1
continue
j = i + 1
while j < len(inputDNAs):
fragTwo = inputDNAs[j]
if not isinstance(fragOne, DNA) or not isinstance(fragTwo, DNA):
j += 1
continue
elif fragTwo.DNAclass == 'plasmid':
j += 1
continue
(LTL,LTR,LBL,LBR) = SetFlags(fragOne)
(RTL,RTR,RBL,RBR) = SetFlags(fragTwo)
# first3 is the number of 3' overhangs for the left fragment, and so on for the other three classifiers
(first3, first5, second3, second5) = (LTR + LBL, LBR + LTL, RTR + RBL, RBR + RTL)
# blunt end ligation:
firstFlag = first3 + first5
secondFlag = second3 + second5
if fragOne.pnkTreated and fragTwo.pnkTreated and firstFlag <= 1 and secondFlag <= 1:
                if (not firstFlag and secondFlag) or (firstFlag and not secondFlag):
pass
elif not firstFlag and not secondFlag:
ligated = DNA('plasmid', fragOne.name+', '+fragTwo.name+' ligation product', fragOne.sequence + fragTwo.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
elif firstFlag and secondFlag:
if first3 and second3:
if isComplementary(fragOne.topRightOverhang.sequence.upper(), fragTwo.bottomLeftOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragOne.topRightOverhang.sequence+fragTwo.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
if isComplementary(fragOne.bottomLeftOverhang.sequence.upper(), fragTwo.topRightOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragTwo.sequence+fragTwo.topRightOverhang.sequence+fragOne.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
if isReverseComplementary(fragOne.topRightOverhang.sequence.upper(), fragTwo.topRightOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragOne.topRightOverhang.sequence+reverseComplement(fragTwo.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
if isReverseComplementary(fragOne.bottomLeftOverhang.sequence.upper(), fragTwo.bottomLeftOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',reverseComplement(fragTwo.sequence)+reverse(fragTwo.bottomLeftOverhang.sequence)+fragOne.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
else:
if isComplementary(fragOne.topLeftOverhang.sequence.upper(), fragTwo.bottomRightOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragTwo.sequence+fragOne.topLeftOverhang.sequence+fragOne.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
if isComplementary(fragOne.bottomRightOverhang.sequence.upper(), fragTwo.topLeftOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragTwo.topLeftOverhang.sequence+fragTwo.sequence)
                            products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
from flopter.core.ivanalyser import IVAnalyser
import numpy as np
import pathlib as pth
import matplotlib.pyplot as plt
import flopter.magnum.adcdata as md
import flopter.core.ivdata as iv
import pandas as pd
import scipy.signal as sig
import xarray as xr
# import flopter.databases.magnum as mag
import flopter.magnum.readfastadc as adc
# from flopter.core.magopter import Magopter
# from codac.datastore import client
# import Ice
from flopter.core import filtering as filt, constants as c, fitters as f
import glob
import os
class Magoptoffline(IVAnalyser):
# Default values
_FOLDER_STRUCTURE = '/Data/Magnum/'
_TAR_VOLTAGE_CHANNEL = 0
_SRC_VOLTAGE_CHANNEL = 2
_VOLTAGE_CHANNEL = 5
_PROBE_CHANNEL_3 = 6
_PROBE_CHANNEL_4 = 7
_COAX_CONVERSION = {
3: 0,
4: 1
}
_ADC_TIMER_OFFSET = 1.0
_ARCING_THRESHOLD = 15
_ACCEPTED_FREQS = np.array([10.0, 20.0, 50.0, 100.0, 200.0, 500.0, 1000.0, 2000.0, 5000.0, 1.0e4, 2.0e4])
_SHUNT_RESISTANCE = 1
_CABLING_RESISTANCE = (0.0, 0.0)
_FIT_FILE_STRING = 'fit{}_{}.csv'
_FIT_FILE_GLOBSTR = '*fit*.csv'
def __init__(self, directory, filename, ts_filename=None, coaxes=2, combine_sweeps_fl=True, shunt_resistor=None,
cabling_resistance=None):
super().__init__()
# Check for leading/trailing forward slashes?
self.directory = directory
self.file = filename
self.ts_file = ts_filename
self.full_path = str(pth.Path.home() / self._FOLDER_STRUCTURE / directory / filename)
self.coaxes = coaxes
self.combine_sweeps_fl = combine_sweeps_fl
self.shunt_resistance = self._SHUNT_RESISTANCE
self.cabling_resistance = self._CABLING_RESISTANCE
if shunt_resistor is not None:
self.shunt_resistance = shunt_resistor
if cabling_resistance is not None:
self.cabling_resistance = cabling_resistance
self.m_data = md.MagnumAdcData(self.full_path, filename)
self.adc_duration = max(self.m_data.time)
self.raw_time = None
self.raw_voltage = None
# self.raw_current = [[] for i in range(self.coaxes)]
self.raw_current = []
self.time = None
# self.voltage = [[] for i in range(self.coaxes)]
# self.current = [[] for i in range(self.coaxes)]
self.voltage = []
self.current = []
self.peaks = None
self.max_voltage = []
self.arcs = []
self.iv_arrs = [[] for i in range(self.coaxes)]
self.fit_arrs = None
self.trim_beg = 0.0
self.trim_end = 1.0
self.timestamp = int(adc.get_magnumdb_timestamp(filename))
self.ts_timestamp = None
self.ts_temp = None
self.ts_temp_d = None
self.ts_dens = None
self.ts_dens_d = None
self.ts_coords = None
print('Running an offline magopter object, operating in offline mode.')
self.offline = True
self.beam_down_timestamp = None
self.magnum_db = None
self.magnum_data = None
def prepare(self, down_sampling_rate=5, plot_fl=False, filter_arcs_fl=False, roi_b_plasma=False, crit_freq=640,
crit_ampl=1.1e-3, homogenise_fl=True):
"""
Preparation consists of downsampling (if necessary) and applying the
correct calibration to the raw voltages to convert properly into
current and voltage
Optionally also homogenises, which chooses the region of interest and
sections the time trace into individual sweeps, populating the member
variable 'iv_arrs' with an IVData object for each sweep before combining
them all into a separate numpy array for each coax
"""
# Downsample by factor given
arr_size = len(self.m_data.data[self.m_data.channels[0]])
downsample = np.arange(0, arr_size, down_sampling_rate, dtype=np.int64)
for ch, data in self.m_data.data.items():
self.m_data.data[ch] = data[downsample]
self.m_data.time = self.m_data.time[downsample] + self._ADC_TIMER_OFFSET
# self.m_data.data[self._VOLTAGE_CHANNEL] = self.m_data.data[self._VOLTAGE_CHANNEL] * 100.
start = 0
end = len(self.m_data.time)
# Account for offset in ADC channels at lower sensitivity
if self.shunt_resistance < 1.1:
adc_voltage_offset = 0.116111
adc_voltage_multiplier = 100
adc_current_offset = [0.064491972847753478, 0.0496745709740604]
else:
adc_voltage_offset = 0.0
adc_voltage_multiplier = 10
adc_current_offset = [-0.000925, 0.001166]
# Read in raw values from adc file - these are the time and the voltages measured on each channel of the ADC
# These must be offset and scaled to the appropriate values
self.raw_time = np.array(self.m_data.time[start:end])
self.raw_voltage = ((np.array(self.m_data.data[self._VOLTAGE_CHANNEL][start:end]) - adc_voltage_offset)
* adc_voltage_multiplier)
for i, probe_index in enumerate([self._PROBE_CHANNEL_3, self._PROBE_CHANNEL_4]):
self.raw_current.append(np.array(self.m_data.data[probe_index][start:end]) - adc_current_offset[i])
# Convert the adc voltages into the measured values
for i in range(self.coaxes):
# Current is ohmicly calculated from the voltage across a shunt resistor
self.current.append((self.raw_current[i]) / self.shunt_resistance)
            # Separate voltages are applied to each probe depending on the current they draw
self.voltage.append(self.raw_voltage - self.current[i] - (self.cabling_resistance[i] * self.current[i]))
self.filter(crit_ampl=crit_ampl, crit_freq=crit_freq, plot_fl=plot_fl)
if homogenise_fl:
self.homogenise(filter_arcs_fl=filter_arcs_fl, plot_fl=plot_fl)
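    # A minimal usage sketch (directory and file names are hypothetical):
    #
    #   magopter = Magoptoffline('2019-06-04', 'shot01.adc', shunt_resistor=1.0)
    #   magopter.prepare(down_sampling_rate=5, homogenise_fl=True)
    #   fit_dfs = magopter.fit(save_fl=True)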
def homogenise(self, frequency=None, filter_arcs_fl=False, plot_fl=True):
"""
Chooses the region of interest and sections the time trace into
individual sweeps, populating the member variable 'iv_arrs' with an
IVData object for each sweep and combining these into a numpy array for
each coax
:param frequency: (float) The frequency of sweeps used in the shot
If not specified it will be calculated from the
raw voltage trace using FFT (which may be slow).
:param filter_arcs_fl: (bool) Boolean flag, if true will attempt to
automatically filter out arcs by excluding
sweeps which have abnormally high max/min
voltages
:param plot_fl: (bool) Boolean flag, controls whether the method
plots various useful figures
"""
triangle = f.TriangleWaveFitter()
if frequency is None:
# Use fourier decomposition to get frequency if none given
frequency = triangle.get_frequency(self.raw_time, self.raw_voltage, accepted_freqs=self._ACCEPTED_FREQS)
# Take the first 5% of data to run the sweep partitioning algorithm on
slc_oi = slice(0, int(0.05 * len(self.raw_time)))
# Smooth the voltage to get a first read of the peaks on the triangle wave
smoothed_voltage = sig.savgol_filter(self.raw_voltage, 21, 2)
top = sig.argrelmax(smoothed_voltage[slc_oi], order=100)[0]
bottom = sig.argrelmin(smoothed_voltage[slc_oi], order=100)[0]
_peaks = self.raw_time[np.concatenate([top, bottom])]
_peaks.sort()
# Get distances between the peaks and filter based on the found frequency
_peak_distances = np.diff(_peaks)
threshold = (1 / (2 * frequency)) - 0.001
_peaks_ind = np.where(_peak_distances > threshold)[0]
# Starting from the first filtered peak, arrange a period-spaced array
peaks_refined = np.arange(_peaks[_peaks_ind[0]], self.raw_time[-1], 1 / (2 * frequency))
self.peaks = peaks_refined
if plot_fl:
plt.figure()
plt.plot(self.raw_time, self.raw_voltage)
plt.plot(self.raw_time, triangle.fit(self.raw_time, self.raw_voltage).fit_y)
for peak in self.peaks:
plt.axvline(x=peak, linestyle='dashed', linewidth=1, color='r')
if self.combine_sweeps_fl:
skip = 2
sweep_fitter = triangle
else:
skip = 1
sweep_fitter = f.StraightLineFitter()
for i in range(self.coaxes):
for j in range(len(self.peaks) - skip):
sweep_start = np.abs(self.raw_time - self.peaks[j]).argmin()
sweep_stop = np.abs(self.raw_time - self.peaks[j + skip]).argmin()
sweep_voltage = self.voltage[i][sweep_start:sweep_stop]
sweep_time = self.raw_time[sweep_start:sweep_stop]
if filter_arcs_fl:
# TODO: Fix this
sweep_fit = sweep_fitter.fit(sweep_time, sweep_voltage)
self.max_voltage.append((np.max(np.abs(sweep_voltage - sweep_fit.fit_y))))
if i == 0 and plot_fl:
sweep_fit.plot()
if np.max(np.abs(sweep_voltage - sweep_fit.fit_y)) > self._ARCING_THRESHOLD:
self.arcs.append(np.mean(sweep_time))
continue
# sweep_current = [current[sweep_start:sweep_stop] for current in self.current]
sweep_current = self.current[i][sweep_start:sweep_stop]
                # Reverse alternate sweeps if not operating in combined-sweeps mode, so that all sweeps run in the same voltage direction
if not self.combine_sweeps_fl and sweep_voltage[0] > sweep_voltage[-1]:
# sweep_voltage = np.array(list(reversed(sweep_voltage)))
# sweep_time = np.array(list(reversed(sweep_time)))
# sweep_current = np.array(list(reversed(sweep_current)))
sweep_time = np.flip(sweep_time)
sweep_voltage = np.flip(sweep_voltage)
sweep_current = np.flip(sweep_current)
# Create IVData objects for each sweep (or sweep pair)
# TODO: What's the std_err_scaler doing here? Look through commits
self.iv_arrs[i].append(iv.IVData(sweep_voltage, sweep_current, sweep_time, std_err_scaler=0.95))
# for j in range(self.coaxes):
# self.iv_arrs[j].append(iv.IVData(sweep_voltage - sweep_current[j], sweep_current[j], sweep_time,
# std_err_scaler=0.95))
def trim(self, trim_beg=0.0, trim_end=1.0):
self.trim_beg = trim_beg
self.trim_end = trim_end
for iv_arr in self.iv_arrs:
for _iv_data in iv_arr:
_iv_data.trim_beg = trim_beg
_iv_data.trim_end = trim_end
def denormalise(self):
pass
def fit(self, fitter=None, initial_vals=None, bounds=None, load_fl=False, save_fl=False, print_fl=False):
if load_fl and save_fl:
print('WARNING: Unnecessary to save and load at the same time - loading will be prioritised if successful.')
# Looks for csv files containing previously fitted data if asked for by the load_fl boolean flag.
fit_files = [self._FIT_FILE_STRING.format(i, self.timestamp) for i in range(self.coaxes)]
if load_fl:
start_dir = os.getcwd()
os.chdir('{}{}{}'.format(pth.Path.home(), self._FOLDER_STRUCTURE, self.directory))
directory_fit_files = glob.glob(self._FIT_FILE_GLOBSTR)
if set(fit_files).issubset(directory_fit_files):
return [pd.read_csv(filepath_or_buffer=ff) for ff in fit_files]
else:
print('Could not find fit files - they should be in {} with names of the format {}'
.format(self.directory, fit_files[0]))
print('Continuing with regular fit...')
os.chdir(start_dir)
# Fitting routine
if not fitter:
fitter = f.FullIVFitter()
if all(iv_arr is None or len(iv_arr) == 0 for iv_arr in self.iv_arrs):
raise ValueError('No iv_data found to fit in self.iv_arrs')
# pool = mp.Pool()
fit_arrs = [[] for dummy in range(self.coaxes)]
fit_time = [[] for dummy in range(self.coaxes)]
for i in range(self.coaxes):
for iv_data in self.iv_arrs[i]:
try:
# Parallelised using multiprocessing.pool
# TODO: Not currently working according to system monitor.
fit_data = iv_data.multi_fit(plot_fl=False)
# result = pool.apply_async(iv_data.multi_fit)
# fit_data = result.get(timeout=10)
except RuntimeError:
if print_fl:
print('Error encountered in fit, skipping timestep {}'.format(np.mean(iv_data.time)))
continue
if all(param.error >= (param.value * 0.5) for param in fit_data.fit_params):
if print_fl:
print('All fit parameters exceeded good fit voltage_threshold, skipping time step {}'
.format(np.mean(iv_data[c.TIME])))
continue
fit_arrs[i].append(fit_data)
fit_time[i].append(np.mean(iv_data[c.TIME]))
fit_dfs = [pd.DataFrame([fit_data.to_dict() for fit_data in fit_arrs[i]], index=fit_time[i]) for i in range(self.coaxes)]
if save_fl:
for i in range(self.coaxes):
fit_dfs[i].to_csv(path_or_buf='{}{}{}{}'.format(pth.Path.home(), self._FOLDER_STRUCTURE,
self.directory, fit_files[i]))
return fit_dfs
# def plot_thomson(self, fig=None, show_fl=False):
# if self.ts_temp is not None:
# if not fig:
# fig = plt.figure()
#
# plt.subplot(211)
# for i in range(len(self.ts_dens[0])):
# plt.errorbar(self.ts_coords[mag.DATA][i], self.ts_dens[mag.DATA][i], fmt='x-',
# label='t = {:.1f}'.format(self.ts_dens[mag.TIMES][i]),
# yerr=self.ts_dens_d[mag.DATA][i])
# plt.xlabel('Radial position (mm)')
# plt.ylabel(r'Density (m$^{-3}$)')
# plt.legend()
| |
Liquid Densities of Normal Fluids."
AIChE Journal 24, no. 6 (November 1, 1978): 1127-31.
doi:10.1002/aic.690240630
'''
Tr = T/Tc
if Tr <= 0.98:
lnU0 = 1.39644 - 24.076*Tr + 102.615*Tr**2 - 255.719*Tr**3 \
+ 355.805*Tr**4 - 256.671*Tr**5 + 75.1088*Tr**6
lnU1 = 13.4412 - 135.7437*Tr + 533.380*Tr**2-1091.453*Tr**3 \
+ 1231.43*Tr**4 - 728.227*Tr**5 + 176.737*Tr**6
elif Tr > 1:
raise Exception('Critical phase, correlation does not apply')
else:
lnU0 = Bhirud_normal_lnU0_interp(Tr)
lnU1 = Bhirud_normal_lnU1_interp(Tr)
Unonpolar = exp(lnU0 + omega*lnU1)
Vm = Unonpolar*R*T/Pc
return Vm
def COSTALD(T, Tc, Vc, omega):
r'''Calculate saturation liquid density using the COSTALD CSP method.
A popular and accurate estimation method. If possible, fit parameters are
used; alternatively critical properties work well.
The density of a liquid is given by:
.. math::
V_s=V^*V^{(0)}[1-\omega_{SRK}V^{(\delta)}]
V^{(0)}=1-1.52816(1-T_r)^{1/3}+1.43907(1-T_r)^{2/3}
- 0.81446(1-T_r)+0.190454(1-T_r)^{4/3}
V^{(\delta)}=\frac{-0.296123+0.386914T_r-0.0427258T_r^2-0.0480645T_r^3}
{T_r-1.00001}
Units are that of critical or fit constant volume.
Parameters
----------
T : float
Temperature of fluid [K]
Tc : float
Critical temperature of fluid [K]
Vc : float
Critical volume of fluid [m^3/mol].
This parameter is alternatively a fit parameter
omega : float
(ideally SRK) Acentric factor for fluid, [-]
This parameter is alternatively a fit parameter.
Returns
-------
Vs : float
Saturation liquid volume
Notes
-----
196 constants are fit to this function in [1]_.
Range: 0.25 < Tr < 0.95, often said to be to 1.0
This function has been checked with the API handbook example problem.
Examples
--------
Propane, from an example in the API Handbook
>>> Vm_to_rho(COSTALD(272.03889, 369.83333, 0.20008161E-3, 0.1532), 44.097)
530.3009967969841
References
----------
.. [1] Hankinson, <NAME>., and <NAME>. "A New Correlation for
Saturated Densities of Liquids and Their Mixtures." AIChE Journal
25, no. 4 (1979): 653-663. doi:10.1002/aic.690250412
'''
Tr = T/Tc
V_delta = (-0.296123 + 0.386914*Tr - 0.0427258*Tr**2
- 0.0480645*Tr**3)/(Tr - 1.00001)
V_0 = 1 - 1.52816*(1-Tr)**(1/3.) + 1.43907*(1-Tr)**(2/3.) \
- 0.81446*(1-Tr) + 0.190454*(1-Tr)**(4/3.)
return Vc*V_0*(1-omega*V_delta)
def Campbell_Thodos(T, Tb, Tc, Pc, M, dipole=None, hydroxyl=False):
r'''Calculate saturation liquid density using the Campbell-Thodos [1]_
CSP method.
An old and uncommon estimation method.
.. math::
V_s = \frac{RT_c}{P_c}{Z_{RA}}^{[1+(1-T_r)^{2/7}]}
Z_{RA} = \alpha + \beta(1-T_r)
\alpha = 0.3883-0.0179s
s = T_{br} \frac{\ln P_c}{(1-T_{br})}
\beta = 0.00318s-0.0211+0.625\Lambda^{1.35}
\Lambda = \frac{P_c^{1/3}} { M^{1/2} T_c^{5/6}}
For polar compounds:
.. math::
\theta = P_c \mu^2/T_c^2
\alpha = 0.3883 - 0.0179s - 130540\theta^{2.41}
\beta = 0.00318s - 0.0211 + 0.625\Lambda^{1.35} + 9.74\times
10^6 \theta^{3.38}
    Polar compounds with hydroxyl groups (water, alcohols):
.. math::
\alpha = \left[0.690T_{br} -0.3342 + \frac{5.79\times 10^{-10}}
{T_{br}^{32.75}}\right] P_c^{0.145}
\beta = 0.00318s - 0.0211 + 0.625 \Lambda^{1.35} + 5.90\Theta^{0.835}
Parameters
----------
T : float
Temperature of fluid [K]
Tb : float
Boiling temperature of the fluid [K]
Tc : float
Critical temperature of fluid [K]
Pc : float
Critical pressure of fluid [Pa]
M : float
Molecular weight of the fluid [g/mol]
dipole : float, optional
Dipole moment of the fluid [debye]
hydroxyl : bool, optional
        Switch to use the hydroxyl variant for polar fluids
Returns
-------
Vs : float
Saturation liquid volume
Notes
-----
If a dipole is provided, the polar chemical method is used.
The paper is an excellent read.
Pc is internally converted to atm.
Examples
--------
Ammonia, from [1]_.
>>> Campbell_Thodos(T=405.45, Tb=239.82, Tc=405.45, Pc=111.7*101325, M=17.03, dipole=1.47)
7.347363635885525e-05
References
----------
.. [1] Campbell, <NAME>., and <NAME>. "Prediction of Saturated
Liquid Densities and Critical Volumes for Polar and Nonpolar
Substances." Journal of Chemical & Engineering Data 30, no. 1
(January 1, 1985): 102-11. doi:10.1021/je00039a032.
'''
Tr = T/Tc
Tbr = Tb/Tc
Pc = Pc/101325.
s = Tbr * log(Pc)/(1-Tbr)
Lambda = Pc**(1/3.)/(M**0.5*Tc**(5/6.))
alpha = 0.3883 - 0.0179*s
beta = 0.00318*s - 0.0211 + 0.625*Lambda**(1.35)
if dipole:
theta = Pc*dipole**2/Tc**2
alpha -= 130540 * theta**2.41
beta += 9.74E6 * theta**3.38
if hydroxyl:
beta = 0.00318*s - 0.0211 + 0.625*Lambda**(1.35) + 5.90*theta**0.835
alpha = (0.69*Tbr - 0.3342 + 5.79E-10/Tbr**32.75)*Pc**0.145
Zra = alpha + beta*(1-Tr)
Vs = R*Tc/(Pc*101325)*Zra**(1+(1-Tr)**(2/7.))
return Vs
def SNM0(T, Tc, Vc, omega, delta_SRK=None):
r'''Calculates saturated liquid density using the Mchaweh, Moshfeghian
model [1]_. Designed for simple calculations.
.. math::
        V_s = V_c/(1+1.169\tau^{1/3}+1.818\tau^{2/3}-2.658\tau+2.161\tau^{4/3})
\tau = 1-\frac{(T/T_c)}{\alpha_{SRK}}
\alpha_{SRK} = [1 + m(1-\sqrt{T/T_C}]^2
m = 0.480+1.574\omega-0.176\omega^2
If the fit parameter `delta_SRK` is provided, the following is used:
.. math::
V_s = V_C/(1+1.169\tau^{1/3}+1.818\tau^{2/3}-2.658\tau+2.161\tau^{4/3})
/\left[1+\delta_{SRK}(\alpha_{SRK}-1)^{1/3}\right]
Parameters
----------
T : float
Temperature of fluid [K]
Tc : float
Critical temperature of fluid [K]
Vc : float
Critical volume of fluid [m^3/mol]
omega : float
Acentric factor for fluid, [-]
delta_SRK : float, optional
Fitting parameter [-]
Returns
-------
Vs : float
Saturation liquid volume, [m^3/mol]
Notes
-----
73 fit parameters have been gathered from the article.
Examples
--------
Argon, without the fit parameter and with it. Tabulated result in Perry's
is 3.4613e-05. The fit increases the error on this occasion.
>>> SNM0(121, 150.8, 7.49e-05, -0.004)
3.4402256402733416e-05
>>> SNM0(121, 150.8, 7.49e-05, -0.004, -0.03259620)
3.493288100008123e-05
References
----------
.. [1] <NAME>., <NAME>, <NAME>, and <NAME>.
"A Simplified Method for Calculating Saturated Liquid Densities."
Fluid Phase Equilibria 224, no. 2 (October 1, 2004): 157-67.
doi:10.1016/j.fluid.2004.06.054
'''
Tr = T/Tc
m = 0.480 + 1.574*omega - 0.176*omega*omega
alpha_SRK = (1. + m*(1. - Tr**0.5))**2
tau = 1. - Tr/alpha_SRK
rho0 = 1. + 1.169*tau**(1/3.) + 1.818*tau**(2/3.) - 2.658*tau + 2.161*tau**(4/3.)
V0 = 1./rho0
if not delta_SRK:
return Vc*V0
else:
return Vc*V0/(1. + delta_SRK*(alpha_SRK - 1.)**(1/3.))
def CRC_inorganic(T, rho0, k, Tm):
r'''Calculates liquid density of a molten element or salt at temperature
above the melting point. Some coefficients are given nearly up to the
boiling point.
The mass density of the inorganic liquid is given by:
.. math::
\rho = \rho_{0} - k(T-T_m)
Parameters
----------
T : float
Temperature of the liquid, [K]
rho0 : float
Mass density of the liquid at Tm, [kg/m^3]
k : float
Linear temperature dependence of the mass density, [kg/m^3/K]
Tm : float
The normal melting point, used in the correlation [K]
Returns
-------
rho : float
Mass density of molten metal or salt, [kg/m^3]
Notes
-----
[1]_ has units of g/mL. While the individual densities could have been
converted to molar units, the temperature coefficient could only be
converted by refitting to calculated data. To maintain compatibility with
the form of the equations, this was not performed.
This linear form is useful only in small temperature ranges.
Coefficients for one compound could be used to predict the temperature
dependence of density of a similar compound.
Examples
--------
>>> CRC_inorganic(300, 2370.0, 2.687, 239.08)
2206.30796
References
----------
.. [1] <NAME>., <NAME>, and <NAME>. CRC Handbook of
Chemistry and Physics, 95E. [Boca Raton, FL]: CRC press, 2014.
'''
return rho0 - k*(T-Tm)
COOLPROP = 'COOLPROP'
PERRYDIPPR = 'PERRYDIPPR'
VDI_PPDS = 'VDI_PPDS'
MMSNM0 = 'MMSNM0'
MMSNM0FIT = 'MMSNM0FIT'
VDI_TABULAR = 'VDI_TABULAR'
HTCOSTALD = 'HTCOSTALD'
HTCOSTALDFIT = 'HTCOSTALDFIT'
COSTALD_COMPRESSED = 'COSTALD_COMPRESSED'
RACKETT = 'RACKETT'
RACKETTFIT = 'RACKETTFIT'
YEN_WOODS_SAT = 'YEN_WOODS_SAT'
YAMADA_GUNN = 'YAMADA_GUNN'
BHIRUD_NORMAL = 'BHIRUD_NORMAL'
TOWNSEND_HALES = 'TOWNSEND_HALES'
CAMPBELL_THODOS = 'CAMPBELL_THODOS'
EOS = 'EOS'
CRC_INORG_L = 'CRC_INORG_L'
CRC_INORG_L_CONST = 'CRC_INORG_L_CONST'
volume_liquid_methods = [PERRYDIPPR, VDI_PPDS, COOLPROP, MMSNM0FIT, VDI_TABULAR,
HTCOSTALDFIT, RACKETTFIT, CRC_INORG_L,
CRC_INORG_L_CONST, MMSNM0, HTCOSTALD,
YEN_WOODS_SAT, RACKETT, YAMADA_GUNN,
BHIRUD_NORMAL, TOWNSEND_HALES, CAMPBELL_THODOS]
'''Holds all low-pressure methods available for the VolumeLiquid class, for use
in iterating over them.'''
volume_liquid_methods_P = [COOLPROP, COSTALD_COMPRESSED, EOS]
'''Holds all high-pressure methods available for the VolumeLiquid class, for
use in iterating over them.'''
class VolumeLiquid(TPDependentProperty):
r'''Class for dealing with liquid molar volume as a function of
temperature and pressure.
For low-pressure (at 1 atm while under the vapor pressure; along the
saturation line otherwise) liquids, there are six coefficient-based methods
from five data sources, one source of tabular information, one source of
constant values, eight corresponding-states estimators, and the external
library CoolProp.
For high-pressure liquids (also, <1 atm liquids), there is one
corresponding-states estimator, and the external library CoolProp.
Parameters
----------
CASRN : str, optional
The CAS number of the chemical
MW : float, optional
Molecular weight, [g/mol]
Tb : float, optional
Boiling point, [K]
Tc : float, optional
Critical temperature, [K]
Pc : float, optional
Critical pressure, [Pa]
Vc : float, optional
Critical volume, [m^3/mol]
Zc : float, optional
Critical compressibility
omega : float, optional
Acentric factor, [-]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tree structure in `treelib`.
The :class:`Tree` object defines the tree-like structure based on :class:`Node` objects.
A new tree can be created from scratch without any parameter or a shallow/deep copy of another tree.
When deep=True, a deepcopy operation is performed on feeding tree parameter and more memory
is required to create the tree.
"""
import uuid
from copy import deepcopy
from itertools import chain
from functools import partial
from .exceptions import *
from .node import Node
class Tree:
"""
Tree objects are made of Node(s) stored in nodes dictionary.
"""
#: ROOT, DEPTH, WIDTH, ZIGZAG constants :
(DEPTH, WIDTH, ZIGZAG) = list(range(3))
node_class = Node
def __init__(self, tree=None, deep: bool = False, node_class=None, tree_id=None):
"""
Initiate a new tree or copy another tree with a shallow or deep copy.
"""
# Initialize self.tree_id
if tree_id is None:
tree_id = str(uuid.uuid1())
self.tree_id = tree_id
if node_class is not None:
if not issubclass(node_class, Node):
                raise TypeError('node_class should be type of Node or subclass of Node!')
self.node_class = node_class
#: Dict of nodes in a tree: {node ID (nid): node_instance}.
self.nodes = {}
#: Get or set the node ID (nid) of the root. This attribute can be accessed and modified
#: with ``.`` and ``=`` operator respectively.
self.root = None
if tree is not None:
self.root = tree.root
for nid, node in tree.nodes.items():
new_node = deepcopy(node) if deep else node
self.nodes[nid] = new_node
if tree.tree_id != self.tree_id:
new_node.clone_pointers(tree.tree_id, self.tree_id)
# Render characters
self._dt = {
'ascii': ('|', '|-- ', '+-- '),
'ascii-ex': ('\u2502', '\u251c\u2500\u2500 ', '\u2514\u2500\u2500 '),
'ascii-exr': ('\u2502', '\u251c\u2500\u2500 ', '\u2570\u2500\u2500 '),
'ascii-em': ('\u2551', '\u2560\u2550\u2550 ', '\u255a\u2550\u2550 '),
'ascii-emv': ('\u2551', '\u255f\u2500\u2500 ', '\u2559\u2500\u2500 '),
'ascii-emh': ('\u2502', '\u255e\u2550\u2550 ', '\u2558\u2550\u2550 '),
}
# HELPER FUNCTIONS -------------------------------------------------------------------------------------------------
@staticmethod
def _create_sort_fun(key, reverse):
if key is None:
if reverse:
key_fun = reversed
else:
key_fun = partial(lambda x: x) # Do not sort at all!
else:
key_fun = partial(sorted, key=key, reverse=reverse)
return key_fun
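    # Example: with key=None and reverse=True the iterable is simply reversed;
    # with a key it is sorted by that key:
    #   list(_create_sort_fun(None, True)(['a', 'b']))    -> ['b', 'a']
    #   _create_sort_fun(lambda x: x, False)(['b', 'a'])  -> ['a', 'b']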
def _get_nid(self, node):
"""
Get the node ID (nid) for the given Node instance or node ID (the inverse of ``get_node``, used internally).
"""
if isinstance(node, self.node_class):
nid = node.nid
else:
nid = node
if nid not in self.nodes: # TODO extra check on node instance equality?
raise NodeIDAbsentError(f'Node ({nid}) is not in the tree!')
return nid
def _get_node(self, node):
"""
Get the Node instance for the given Node instance or node ID (similar to ``get_node``, used internally).
"""
if isinstance(node, self.node_class):
curr_node = self.nodes.get(node.nid) # Node is given as parameter and is not found in the tree
if curr_node != node:
raise NodeIDAbsentError(f'Node ({node}) is not in the tree!')
else:
curr_node = self.nodes.get(node) # Node ID (nid) is given as parameter
if curr_node is None:
raise NodeIDAbsentError(f'Node ({node}) is not in the tree!')
return curr_node
# SIMPLE READER FUNCTIONS ------------------------------------------------------------------------------------------
def size(self, level: int = None):
"""
Get the number of nodes in the whole tree if ``level`` is not given.
Otherwise, the total number of nodes at specific level is returned.
        0 is returned if the specified level is deeper than the tree.
:param level: The level number in the tree. It must be greater or equal to 0, None to return len(tree).
"""
if level is None:
return len(self.nodes) # Same as __len__()
elif level == 0:
return 1 # On the root level it is trivially only one node
elif level > 0:
return sum(1 for node in self.nodes.values() if self.depth(node.nid) == level)
else:
raise InvalidLevelNumber(f'Level cannot be negative ({level})!')
def __getitem__(self, nid):
"""
Return a Node instance for a node ID (nid) if the tree contains it else raises NodeIDAbsentError.
"""
try:
return self.nodes[nid]
except KeyError:
raise NodeIDAbsentError(f'Node ({nid}) is not in the tree!')
def __len__(self):
"""
Return the number of nodes (node IDs (nid)) in the tree.
"""
return len(self.nodes)
def __contains__(self, node):
if isinstance(node, self.node_class):
nid = node.nid
return self.nodes.get(nid) == node # Only True if Node instances are equal, node ID (nid) is not enough!
else:
return node in self.nodes # Check if node ID (nid) is in the tree
def is_ancestor(self, ancestor, child) -> bool:
"""
        Check if ``ancestor`` is one of the preceding nodes of ``child``.
:param ancestor: The Node instance or node ID (nid).
:param child: The Node instance or node ID (nid).
:return: True or False
"""
if self.root is not None:
ancestor_node = self._get_node(ancestor)
ancestor_nid = ancestor_node.nid
child_node = self._get_node(child)
parent_nid = child_node.predecessor(self.tree_id)
while parent_nid is not None: # If parent is None we are at the root node
parent_node = self.nodes[parent_nid]
if parent_nid == ancestor_nid and parent_node == ancestor_node:
return True
else:
parent_nid = parent_node.predecessor(self.tree_id) # The parent of the parent
return False
def depth(self, node=None, filter_fun=lambda x: True) -> int:
"""
Get the level for the given Node instance or node ID (nid) in the tree.
If node is None get the maximum depth of this tree.
:param node: The Node instance or node ID (nid).
:param filter_fun: a function with one parameter executed on a :class:`Node` object.
When this parameter is specified, the traversal will NOT visit those nodes
(and their children) which do not pass the filtering (evaluate to false).
:return int: An integer (level) starting with 0 at the root. In other words, the root lives at level 0.
:throw NodeIDAbsentError:
"""
if node is None: # Get the maximum level of this tree
level = max((sum(1 for _ in self.busearch(leaf.nid, filter_fun)) - 1 for leaf in self.leaves()), default=0)
else: # Get level of the given node
level = sum(1 for _ in self.busearch(node, filter_fun)) - 1
return level
# Node returning READER FUNCTIONS -----------------------------------------------------------------------------------
def get_node(self, nid):
"""
Get the Node instance with node ID ``nid``.
An alternative way is using ``__getitem__`` ('[]') operation on the tree.
But ``get_node()`` will return None if ``nid`` is absent, whereas '[]' will raise ``NodeIDAbsentError``.
"""
return self.nodes.get(nid)
def get_nodes(self, filter_fun=None, lookup_nodes: bool = True):
"""
Returns all Node instances in an iterator if filter function is not specified or None.
Else traverses the tree top down and filters subtrees by the given function.
:param filter_fun: a function with one parameter executed on a :class:`Node` object.
When this parameter is specified, the traversal will NOT visit those nodes
(and their children) which do not pass the filtering (evaluate to false).
:param lookup_nodes: return Node instances (default) or node IDs (nids).
:return: return an iterator of Node instances (default) or node IDs (nids).
"""
if filter_fun is None:
if lookup_nodes:
return self.nodes.values()
else:
return (curr_node.nid for curr_node in self.nodes.values())
else:
return self.expand_tree(self.root, filter_fun=filter_fun, lookup_nodes=lookup_nodes)
def parent(self, node, level: int = -1, lookup_nodes: bool = True):
"""
For a given Node instance or node ID (nid), get parent Node instance at a given level.
Corner cases are evaluated in this order:
- If level equals 0 the root Node instance is returned.
- If the given Node instance or node ID (nid) equals the root node, None is returned.
- If no level is provided or level equals -1, the parent Node instance is returned.
NodeIDAbsentError exception is raised if the ``node`` does not exist in the tree.
"""
nid = self._get_nid(node) # TODO more rigorous check is needed!
if lookup_nodes:
lookup_nodes_fun = partial(lambda x: self.nodes[x])
else:
lookup_nodes_fun = partial(lambda x: x)
if level == 0: # Root level of any element -> Root Node instance or node ID (nid)
return lookup_nodes_fun(self.root)
elif nid == self.root: # Root node for every non-root level -> None
return None
tree_id = self.tree_id
ancestor = self.nodes[nid].predecessor(tree_id) # Direct parent nid (None for root node is already handled)
if level == -1: # Direct parent is required
return lookup_nodes_fun(ancestor) # Parent Node instance (root node where parent is None already handled)
elif level >= self.depth(nid): # Root node is already handled, so depth(nid) cannot be <= 0
raise InvalidLevelNumber(f'The given node\'s | |
<filename>ddc_packages/hddump/hddump/hddumpMain.py
"""
Demonstration handle dump for CMIP/ESGF files ..
USAGE
=====
-h: print this message;
-v: print version;
-t: run a test
-f <file name>: examine file, print path to replacement if this file is obsolete, print path to sibling files (or replacements).
-id <tracking id>: examine handle record of tracking id.
-V: verbose
--debug: debug
--DEBUG: debug with extended output
"""
## see https://www.handle.net/proxy_servlet.html for info on the RESTful API
import collections, json, os, re
##import cfdm
try:
import netCDF4
NETCDF_SUPPORTED = True
except ImportError:
NETCDF_SUPPORTED = False
import xml
import http
from xml.dom import minidom
import hddump.packageConfig as packageConfig
import urllib
from urllib import request
from hddump.testdata import *
class Phandle(object):
def __init__(self, hdlDict, k='values'):
"""Obsolete class to parse handle metadat ... replaced by Open class"""
self.h = hdlDict
self.d = {}
try:
for r in hdlDict[k]:
self.d[r['type']] = r['data']
except:
print ( hdlDict[k] )
raise
class Remote(object):
htmpl = 'http://hdl.handle.net/api/handles/%s'
dh = dummyHandles()
httpcodes = {x.value:x.name for x in http.HTTPStatus}
def __init__(self,hdl,url=None):
"""Class to retrieve a handle .. optionally to retrieve from test data.
Still needs some error handling based on the HTTP response code."""
if hdl[:5] == 'xxxxx':
self.msg = self.dh.hh[hdl]
else:
if url is None:
thisid = hdl.replace('hdl:999999', '10876.test' )
if thisid[:4] == 'hdl:':
thisid = thisid[4:]
url = self.htmpl % thisid
self.fetch(url)
def fetch(self,url):
"""Retrieve the handle data, using urllib ir requests library; handle metadata is stored in self.msg"""
try:
fh = request.urlopen( url )
self.httpCode = fh.getcode()
except urllib.error.HTTPError as e:
print ( e.headers )
print( '%s: %s' % (e.code,self.httpcodes.get(e.code,'????')) )
self.httpCode = e.code
raise
msg = json.loads( fh.read() ) # the handle.net API returns JSON; eval() here was unsafe and breaks on true/false/null
assert isinstance( msg, dict ), 'Response of wrong type'
for k in ['responseCode', 'handle']:
assert k in msg, 'Required key %s not found: %s' % (k, str( msg.keys() ) )
self.msg = msg
class Open(object):
"""Create a handle object defined by a handle ID.
Initial object simply holds the id, to retrieve the object, execute the get() method.
If the argument is a ";" separated list, the tolist() method should be executed to convert to
a list of handle objects before executing the get() method on each element of the list.
This approach is perhaps a little unusual for an "Open" class ... but works well with the given
handle record structure.
"""
cache = {}
htmpl = 'http://hdl.handle.net/api/handles/%s'
dh = dummyHandles()
def __init__(self,id,debug=False):
self.REC_id = id
self.REC_got = False
self.debug = debug
self.rec = dict()
def __repr__(self):
return self.REC_id
def tolist(self):
if self.REC_id.find( ';' ) == -1:
return [self, ]
else:
this = []
for id in self.REC_id.split(';'):
this.append( Open( id.strip() ) )
return this
def toDict(self):
if not self.REC_got:
self.get()
return self.rec
def dump(self):
print (self.toDict() )
def get(self, extract=True):
if self.REC_got:
return
#
# using the class object to store the cache of retrieved handles.
# This enables some caching ... NOT TESTED
#
if self.REC_id not in self.__class__.cache:
self.remote = Remote( self.REC_id )
self.__class__.cache[self.REC_id] = self.remote.msg
if extract:
self._extract( self.__class__.cache[self.REC_id] )
def _extract( self, msg ):
"""Extract alues from a handle message dictionary, and insert into self.rec"""
if self.debug:
print( msg.keys() )
print( msg['handle'] )
for r in msg['values']:
if str(r['type']) in ['IS_PART_OF','HAS_PARTS','replaces', 'replacedBy', 'isReplacedBy','parent','REPLACED_BY']:
self.rec[r['type']] = Open( r['data']['value'] ).tolist()
else:
self.rec[r['type']] = r['data']['value']
if self.rec['AGGREGATION_LEVEL'] == 'DATASET':
self.obsolete = 'REPLACED_BY' in self.rec
def addLatest(self):
"""Retrieve handle records for replacements until a current dataset is found."""
if not self.obsolete:
return
replacement = self.rec['REPLACED_BY'][0] # _extract() stores REPLACED_BY as a list
replacement.get()
self.replacements = [replacement, ]
while self.replacements[-1].obsolete:
nxt = self.replacements[-1].rec['REPLACED_BY'][0]
nxt.get() # get() fills in the record in place and returns None
self.replacements.append( nxt )
self.latest = self.replacements[-1]
def addSiblings(self):
if self.rec['AGGREGATION_LEVEL'] != 'FILE':
print( 'No known siblings .....' )
return
if 'IS_PART_OF' not in self.rec:
print( 'No parent' )
return
for p in self.rec['IS_PART_OF']:
p.get()
self.obsolete = all( p.obsolete for p in self.rec['IS_PART_OF'] )
self.siblings = []
for p in self.rec['IS_PART_OF']:
for c in p.rec['HAS_PARTS']:
c.get()
self.siblings.append( c )
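# Hedged usage sketch (not part of the original module; the handle ID below is
# hypothetical): resolve one handle record and inspect its key/value pairs.
def _example_resolve_handle(tracking_id='10876.test/some-id'):
    """Fetch a handle via the handle.net REST API and return its record dict."""
    record = Open(tracking_id)
    record.get()  # populates record.rec; retrieved handles are cached per class
    return record.toDict()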
class Main(object):
"""Main: entry point, parsing comman line arguments.
USAGE
-----
m = Main( argList )
"""
knownargs0 = ['-h','-v','-t','-V','--debug', '--DEBUG']
knownargs1 = ['-f','-id']
re1 = re.compile( 'host="(.*?)"' )
re2 = re.compile( '<location(.*?)/>' )
def __init__(self, args):
self.re_dict = dict()
for k in ['host','href']:
self.re_dict[k] = re.compile( '%s="(.*?)"' % k )
self.htmpl = 'http://hdl.handle.net/api/handles/%s'
self.version = packageConfig.version
self.args = args
self.parseArgs()
if self.d.get( '-v', False ):
print ( self.version )
return
if self.d.get( '-h', False ):
print (self.version)
print ( __doc__ )
return
if self.d.get( '-t', False ):
self.runTest()
return
self.debugPlus = '--DEBUG' in self.d
self.debug = '--debug' in self.d or self.debugPlus
self.verbose = ( '-V' in self.d ) or self.debug
if '-f' in self.d:
fn = self.d['-f']
self.dumpF(fn)
if '-id' in self.d:
id = self.d['-id']
self.dumpF('',id=id)
def dumpF(self,fn, id=None):
"""Dump information about a file"""
if id is None:
assert os.path.isfile( fn ), 'File %s not found' % fn
nchead = NcHead( fn )
thisid = nchead['tracking_id'].replace( 'hdl:999999', '10876.test' )
else:
thisid = id.replace('hdl:999999', '10876.test' )
if self.debug:
print (thisid)
self.res = {'id':thisid, 'name':fn}
self.p = Open( thisid )
self.p.get()
if self.debug:
print( 'KEYS: ', self.p.rec.keys() )
if self.debugPlus:
for k in sorted( self.p.rec.keys() ):
print( '%s: %s' % (k, self.p.rec[k] ) )
self._globals( self.p )
thisType = 'none'
if 'IS_PART_OF' in self.p.rec:
thisType = 'file'
for p in self.p.rec['IS_PART_OF']:
p.get()
self.p.obsolete = all( [p.obsolete for p in self.p.rec['IS_PART_OF']] )
self.res['parents'] = [(p.REC_id, p.rec['DRS_ID'], p.rec['VERSION_NUMBER']) for p in self.p.rec['IS_PART_OF']]
self.res['obsolete'] = self.p.obsolete
self.res['RC'] = {False:'OK', True:'OBSOLETE'}[self.res['obsolete']]
self.res['name'] = self.p.rec['FILE_NAME']
if not self.p.obsolete:
current = [p for p in self.p.rec['IS_PART_OF'] if not p.obsolete]
if not len(current) == 1:
print ('ERROR: dataset has more than one current version ...' )
self._extractDataset( current[0] )
#
# Extract replica information. Results will be stored to self.res['replicas']
#
self._extractReplicas( current[0] )
print( 'File: %(name)s [%(id)s] %(RC)s' % self.res )
elif 'HAS_PARTS' in self.p.rec:
thisType = 'ds'
self._extractDataset( self.p )
#
# Extract replica information. Results will be stored to self.res['replicas']
#
self._extractReplicas( self.p )
self.res['obsolete'] = self.p.obsolete
self.res['RC'] = {False:'OK', True:'OBSOLETE'}[self.res['obsolete']]
self.res['name'] = self.p.rec['DRS_ID']
print( 'Dataset: %(name)s [%(id)s] %(RC)s' % self.res )
elif 'IS_PART_OF' not in self.p.rec:
print ( 'dumpF - 01' )
print ( self.p.rec.keys() )
if self.debug:
print( 'No parent' )
self.res['parents'] = None
return
if self.verbose:
if thisType == 'file':
print( 'Master host: %(master_host)s' % self.res )
print( '\nDatasets:' )
for p in self.res['parents'] :
print( 'ID: %s, NAME: %s, VERSION: %s' % p )
print( '\nSiblings:' )
for p in sorted(self.res['siblings'], key=lambda x: x[0]):
if p[1] != self.res['id']:
print( 'NAME: %s, ID: %s' % p )
if len( self.res['replicas'] ) > 0:
print( '\nReplicas:' )
for p in self.res['replicas'] :
print( 'Host: %s' % p )
else:
print( '\nNo replicas.' )
elif thisType == 'ds':
print( 'Master host: %(master_host)s' % self.res )
print( '\nFiles:' )
#for p in self.res['siblings'] :
for p in sorted(self.res['siblings'], key=lambda x: x[0]):
print( 'NAME: %s, ID: %s' % p )
if len( self.res['replicas'] ) > 0:
print( '\nReplicas:' )
for p in self.res['replicas'] :
print( 'Host: %s' % p )
else:
print( '\nNo replicas.' )
def _globals(self,current):
if NETCDF_SUPPORTED:
self._extractFileURL(current)
dods = self.res['href'].replace('fileServer','dodsC')
nc = netCDF4.Dataset( dods )
for a in sorted( nc.ncattrs() ):
print (' %s:: %s' % (a, nc.getncattr( a )) )
else:
print ( "Netcdf not supported ... check installation of netCDF4 module")
def _extractFileURL( self, current ):
"""Extract the file URL from a file handle object"""
if "URL_ORIGINAL_DATA" in current.rec:
this = current.rec['URL_ORIGINAL_DATA']
locs = self.re2.findall( this )
href = self.re_dict['href'].findall( locs[0] )[0]
self.res['href'] = href
else:
print ('NO URL_ORIGINAL_DATA')
def _extractReplicas( self, current ):
"""Extract replica information from a DATASET handle object"""
if 'REPLICA_NODE' in current.rec:
rep = current.rec['REPLICA_NODE']
locs = self.re2.findall( rep )
reps = [self.re_dict['host'].findall(l)[0] for l in locs]
self.res['replicas'] = reps
else:
self.res['replicas'] = []
def _extractDataset( self, current ):
for c in current.rec['HAS_PARTS']:
c.get()
self.res['siblings'] = [(c.rec['FILE_NAME'],c.REC_id) for c in current.rec['HAS_PARTS']]
master = current.rec['HOSTING_NODE']
this = self.re1.findall( master )
assert len(this) == 1, 'Unexpected matches in search for master host'
self.res['master_host'] = this[0]
def runTest(self):
"""This test does not work any more ... the 10876.test handles appear to have been | |
<filename>django_tables/tests/test_models.py
"""Test ModelTable specific functionality.
Sets up a temporary Django project using a memory SQLite database.
"""
from unittest.mock import Mock
from nose.tools import assert_raises, assert_equal
from django.conf import settings
from django.core.paginator import Paginator, QuerySetPaginator
import django_tables as tables
from django_tables.tests.testapp.models import City, Country
def setup_module(module):
# create a couple of objects
berlin = City.objects.create(name="Berlin", population=30)
amsterdam = City.objects.create(name="Amsterdam", population=6)
Country.objects.create(
name="Austria", tld="au", population=8, system="republic")
Country.objects.create(
name="Germany", tld="de", population=81, capital=berlin)
Country.objects.create(
name="France", tld="fr", population=64, system="republic")
Country.objects.create(
name="Netherlands",
tld="nl",
population=16,
system="monarchy",
capital=amsterdam,
)
class TestDeclaration:
"""
Test declaration, declared columns and default model field columns.
"""
def test_autogen_basic(self):
class CountryTable(tables.ModelTable):
class Meta:
model = Country # noqa
assert len(CountryTable.base_columns) == 8
assert 'name' in CountryTable.base_columns
assert not hasattr(CountryTable, 'name')
# Override one model column, add another custom one, exclude one
class CountryTable(tables.ModelTable):
capital = tables.TextColumn(verbose_name='Name of capital')
projected = tables.Column(verbose_name="Projected Population")
class Meta:
model = Country # noqa
exclude = ['tld']
assert len(CountryTable.base_columns) == 8
assert 'projected' in CountryTable.base_columns
assert 'capital' in CountryTable.base_columns
assert 'tld' not in CountryTable.base_columns
# Inheritance (with a different model) + field restrictions
class CityTable(CountryTable):
class Meta:
model = City # noqa
columns = ['id', 'name']
exclude = ['capital']
assert len(CityTable.base_columns) == 4
assert 'id' in CityTable.base_columns
assert 'name' in CityTable.base_columns
assert 'projected' in CityTable.base_columns # declared in parent
# not in Meta:columns
# assert 'population' not in CityTable.base_columns
# in exclude, but only works on model fields (is that the right
# behaviour?)
assert 'capital' in CityTable.base_columns
# Define one column so that all automatically-generated columns are
# excluded
class CountryTable(tables.ModelTable):
capital = tables.TextColumn(verbose_name='Name of capital')
projected = tables.Column(verbose_name="Projected Population")
class Meta:
model = Country # noqa
columns = ['capital']
num_columns = len(CountryTable.base_columns)
assert num_columns == 2, "Actual: %s" % num_columns
assert 'projected' in CountryTable.base_columns
assert 'capital' in CountryTable.base_columns
assert 'tld' not in CountryTable.base_columns
def test_columns_custom_order(self):
"""Using the columns meta option, you can also modify the ordering.
"""
class CountryTable(tables.ModelTable):
foo = tables.Column()
class Meta:
model = Country # noqa
columns = ('system', 'population', 'foo', 'tld',)
assert [c.name for c in CountryTable().columns] == ['system', 'population', 'foo', 'tld'] # noqa
def test_columns_verbose_name(self):
"""Tests that the model field's verbose_name is used for the column
"""
class CountryTable(tables.ModelTable):
class Meta:
model = Country # noqa
columns = ('tld',)
assert [
c.column.verbose_name for c in CountryTable().columns
] == ['Domain Extension']
def _test_country_table(table):
for r in table.rows:
# "normal" fields exist
assert 'name' in r
# unknown fields are removed/not accessible
assert 'does-not-exist' not in r
# ...so are excluded fields
assert 'id' not in r
# [bug] access to data that might be available, but does not
# have a corresponding column is denied.
assert_raises(Exception, lambda: r['id'])
# missing data is available with default values
assert 'null' in r
assert r['null'] == "foo" # note: different from prev. line!
# if everything else fails (no default), we get None back
assert r['null2'] is None
# all that still works when name overrides are used
assert 'tld' in r
assert 'domain' in r
assert len(r['domain']) == 2 # valid country tld
def test_basic():
"""
Some tests here are copied from ``test_basic.py`` but need to be
rerun with a ModelTable, as the implementation is different.
"""
class CountryTable(tables.ModelTable):
null = tables.Column(default="foo")
domain = tables.Column(model_rel="tld")
tld = tables.Column()
class Meta:
model = Country # noqa
exclude = ('id',)
countries = CountryTable()
_test_country_table(countries)
# repeat the above tests with a table that is not associated with a
# model, and all columns being created manually.
class CountryTable(tables.ModelTable):
name = tables.Column()
population = tables.Column()
capital = tables.Column()
system = tables.Column()
null = tables.Column(default="foo")
null2 = tables.Column()
domain = tables.Column(model_rel="tld")
tld = tables.Column()
countries = CountryTable(Country) # noqa
_test_country_table(countries)
def test_with_filter():
class CountryTable(tables.ModelTable):
null = tables.Column(default="foo")
domain = tables.Column(model_rel="tld")
class Meta:
model = Country # noqa
exclude = ('id',)
countries = CountryTable(Country.objects.filter(name="France")) # noqa
assert len(countries.rows) == 1
row = countries.rows[0]
assert row['name'] == 'France'
_test_country_table(countries)
def test_with_empty_list():
class CountryTable(tables.ModelTable):
null = tables.Column(default="foo")
domain = tables.Column(model_rel="tld")
class Meta:
model = Country # noqa
exclude = ('id',)
# Should be able to pass in an empty list and call order_by on it
countries = CountryTable([], order_by='domain')
assert len(countries.rows) == 0
def test_with_no_results_query():
class CountryTable(tables.ModelTable):
null = tables.Column(default="foo")
domain = tables.Column(model_rel="tld")
class Meta:
model = Country # noqa
exclude = ('id',)
# Should be able to pass in an empty list and call order_by on it
countries = CountryTable(
Country.objects.filter(name='does not exist'), # noqa
order_by='domain',
)
assert len(countries.rows) == 0
def test_invalid_accessor():
"""Test that a column being backed by a non-existent model property
is handled correctly.
Regression-Test: There used to be a NameError here.
"""
class CountryTable(tables.ModelTable):
name = tables.Column(model_rel='something-i-made-up')
countries = CountryTable(Country) # noqa
assert_raises(ValueError, countries[0].__getitem__, 'name')
def test_sort():
class CountryTable(tables.ModelTable):
domain = tables.Column(model_rel="tld")
population = tables.Column()
system = tables.Column(default="republic")
custom1 = tables.Column()
custom2 = tables.Column(sortable=True)
class Meta:
model = Country # noqa
countries = CountryTable(Country.objects.all()) # noqa
def test_order(order, expected, table=countries):
table.order_by = order
actual = [r['id'] for r in table.rows]
assert actual == expected, "actual= %s" % repr(actual)
# test various orderings
test_order(('population',), [1, 4, 3, 2])
test_order(('-population',), [2, 3, 4, 1])
test_order(('name',), [1, 3, 2, 4])
# test sorting with a "rewritten" column name
countries.order_by = 'domain,tld' # "tld" would be invalid...
assert countries.order_by == ('domain',) # ...and is therefore removed
countries.order_by = ('-domain', 'tld') # "tld" would be invalid...
assert countries.order_by == ('-domain',) # ...and is therefore removed
test_order(('-domain',), [4, 3, 2, 1])
# test multiple order instructions; note: one row is missing a "system"
# value, but has a default set; however, that has no effect on sorting.
test_order(('system', '-population'), [2, 4, 3, 1])
# using a simple string (for convenience as well as querystring passing)
test_order('-population', [2, 3, 4, 1])
test_order('system,-population', [2, 4, 3, 1])
# test column with a default ``direction`` set to descending
class CityTable(tables.ModelTable):
name = tables.Column(direction='desc')
class Meta:
model = City # noqa
cities = CityTable(City.objects.all()) # noqa
test_order('name', [1, 2], table=cities) # Berlin to Amsterdam
test_order('-name', [2, 1], table=cities) # Amsterdam to Berlin
# test invalid order instructions...
countries.order_by = 'invalid_field,population'
assert countries.order_by == ('population',)
# ...in case of ModelTables, this primarily means that only
# model-based columns are currently sortable at all.
countries.order_by = ('custom1', 'custom2')
assert countries.order_by == (), "Actual: %s" % repr(countries.order_by)
def test_default_sort():
class SortedCountryTable(tables.ModelTable):
class Meta:
model = Country # noqa
order_by = '-name'
countries = Country.objects.all() # noqa
# the order_by option is provided by TableOptions
assert_equal('-name', SortedCountryTable(countries)._meta.order_by)
# the default order can be inherited from the table
assert_equal(('-name',), SortedCountryTable(countries).order_by)
assert_equal(4, SortedCountryTable(countries).rows[0]['id'])
# and explicitly set (or reset) via __init__
assert_equal(
2,
SortedCountryTable(
countries,
order_by='system',
).rows[0]['id'],
)
assert_equal(1, SortedCountryTable(countries, order_by=None).rows[0]['id'])
def test_callable():
"""Some of the callable code is reimplemented for modeltables, so
test some specifics again.
"""
class CountryTable(tables.ModelTable):
null = tables.Column(default=lambda s: s['example_domain'])
example_domain = tables.Column()
class Meta:
model = Country # noqa
countries = CountryTable(Country) # noqa
# model method is called
assert [
row['example_domain'] for row in countries
] == [
'example.'+row['tld'] for row in countries
]
# column default method is called
assert [
row['example_domain'] for row in countries
] == [
row['null'] for row in countries
]
def test_relationships():
"""Test relationship spanning."""
class CountryTable(tables.ModelTable):
# add relationship spanning columns (using different approaches)
capital_name = tables.Column(model_rel='capital__name')
capital_name_link = tables.Column(model_rel='capital__name')
cap_pop = tables.Column(model_rel="capital__population")
invalid = tables.Column(model_rel="capital__invalid")
class Meta:
model = Country # noqa
def render_capital_name_link(self, country):
return '<a href="http://en.wikipedia.org/wiki/%s">%s</a>' % (
country.capital.name, country.capital.name)
countries = CountryTable(Country.objects.select_related('capital')) # noqa
# ordering and field access works
countries.order_by = 'capital_name'
assert [row['capital_name'] for row in countries.rows] == \
[None, None, 'Amsterdam', 'Berlin']
countries.order_by = 'cap_pop'
actual = [row['cap_pop'] for row in countries.rows]
assert actual == \
[None, None, 6, 30], "Actual: %s" % repr(actual)
# ordering by a column with an invalid relationship fails silently
countries.order_by = 'invalid'
assert countries.order_by == (), "Actual: %s" % repr(countries.order_by)
def test_pagination():
"""
test_pagination
Pretty much the same as static table pagination, but make sure we
provide the | |
# Copyright 2020 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Library of functions for differentiable digital signal processing (DDSP)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections.abc
from typing import Any, Dict, Text, TypeVar
import gin
import numpy as np
from scipy import fftpack
import tensorflow.compat.v2 as tf
Number = TypeVar('Number', int, float, np.ndarray, tf.Tensor)
# Utility Functions ------------------------------------------------------------
def tf_float32(x):
"""Ensure array/tensor is a float32 tf.Tensor."""
if isinstance(x, tf.Tensor):
return tf.cast(x, dtype=tf.float32) # This is a no-op if x is float32.
else:
return tf.convert_to_tensor(x, tf.float32)
def make_iterable(x):
"""Ensure that x is an iterable."""
return x if isinstance(x, collections.abc.Iterable) else [x]  # collections.Iterable was removed in Python 3.10
def nested_lookup(nested_key: Text,
nested_dict: Dict[Text, Any],
delimiter: Text = '/') -> tf.Tensor:
"""Returns the value of a nested dict according to a parsed input string.
Args:
nested_key: String of the form "key/key/key...".
nested_dict: Nested dictionary.
delimiter: String that splits the nested keys.
Returns:
value: Value of the key from the nested dictionary.
"""
# Parse the input string.
keys = nested_key.split(delimiter)
# Return the nested value.
value = nested_dict
for key in keys:
value = value[key]
return value
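# Hedged sketch (not part of the library; the dictionary below is made up):
def _example_nested_lookup():
  """Fetch a value two levels deep using a 'key/key' path."""
  controls = {'additive': {'amplitudes': [0.5, 0.25]}}
  return nested_lookup('additive/amplitudes', controls)  # -> [0.5, 0.25]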
def midi_to_hz(notes: Number) -> Number:
"""TF-compatible midi_to_hz function."""
notes = tf_float32(notes)
return 440.0 * (2.0**((notes - 69.0) / 12.0))
def hz_to_midi(frequencies: Number) -> Number:
"""TF-compatible hz_to_midi function."""
frequencies = tf_float32(frequencies)
log2 = lambda x: tf.math.log(x) / tf.math.log(2.0)
notes = 12.0 * (log2(frequencies) - log2(440.0)) + 69.0
# Map 0 Hz to MIDI 0 (Replace -inf with 0.)
cond = tf.equal(notes, -np.inf)
notes = tf.where(cond, 0.0, notes)
return notes
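# Quick sanity sketch (not part of the library): the A440 convention above makes
# MIDI 69 and 440 Hz fixed points of the round trip.
def _example_midi_hz_roundtrip():
  """A4: midi_to_hz(69.0) -> 440.0 and hz_to_midi(440.0) -> 69.0."""
  return midi_to_hz(69.0), hz_to_midi(440.0)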
def unit_to_midi(unit: Number,
midi_min: Number = 20.0,
midi_max: Number = 90.0,
clip: bool = False) -> Number:
"""Map the unit interval [0, 1] to MIDI notes."""
unit = tf.clip_by_value(unit, 0.0, 1.0) if clip else unit
return midi_min + (midi_max - midi_min) * unit
def midi_to_unit(midi: Number,
midi_min: Number = 20.0,
midi_max: Number = 90.0,
clip: bool = False) -> Number:
"""Map MIDI notes to the unit interval [0, 1]."""
unit = (midi - midi_min) / (midi_max - midi_min)
return tf.clip_by_value(unit, 0.0, 1.0) if clip else unit
def unit_to_hz(unit: Number,
hz_min: Number,
hz_max: Number,
clip: bool = False) -> Number:
"""Map unit interval [0, 1] to [hz_min, hz_max], scaling logarithmically."""
midi = unit_to_midi(unit,
midi_min=hz_to_midi(hz_min),
midi_max=hz_to_midi(hz_max),
clip=clip)
return midi_to_hz(midi)
def hz_to_unit(hz: Number,
hz_min: Number,
hz_max: Number,
clip: bool = False) -> Number:
"""Map [hz_min, hz_max] to unit interval [0, 1], scaling logarithmically."""
midi = hz_to_midi(hz)
return midi_to_unit(midi,
midi_min=hz_to_midi(hz_min),
midi_max=hz_to_midi(hz_max),
clip=clip)
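# Hedged example (values are illustrative): because the unit <-> Hz mapping goes
# through MIDI space, 0.5 lands on the geometric (not arithmetic) middle of the range.
def _example_unit_to_hz_midpoint():
  """unit_to_hz(0.5, 20.0, 2000.0) -> 200.0, the geometric mean of 20 and 2000."""
  return unit_to_hz(0.5, hz_min=20.0, hz_max=2000.0)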
def resample(inputs: tf.Tensor,
n_timesteps: int,
method: Text = 'linear',
add_endpoint: bool = True) -> tf.Tensor:
"""Interpolates a tensor from n_frames to n_timesteps.
Args:
inputs: Framewise 1-D, 2-D, 3-D, or 4-D Tensor. Shape [n_frames],
[batch_size, n_frames], [batch_size, n_frames, channels], or
[batch_size, n_frames, n_freq, channels].
n_timesteps: Time resolution of the output signal.
method: Type of resampling, must be in ['linear', 'cubic', 'window']. Linear
and cubic are typical bilinear, bicubic interpolation. Window uses
overlapping windows (only for upsampling) which is smoother for amplitude
envelopes.
add_endpoint: Hold the last timestep for an additional step as the endpoint.
Then, n_timesteps is divided evenly into n_frames segments. If false, use
the last timestep as the endpoint, producing (n_frames - 1) segments with
each having a length of n_timesteps / (n_frames - 1).
Returns:
Interpolated 1-D, 2-D, 3-D, or 4-D Tensor. Shape [n_timesteps],
[batch_size, n_timesteps], [batch_size, n_timesteps, channels], or
[batch_size, n_timesteps, n_freqs, channels].
Raises:
ValueError: If method is 'window' and input is 4-D.
ValueError: If method is not one of 'linear', 'cubic', or 'window'.
"""
inputs = tf_float32(inputs)
is_1d = len(inputs.shape) == 1
is_2d = len(inputs.shape) == 2
is_4d = len(inputs.shape) == 4
# Ensure inputs are at least 3d.
if is_1d:
inputs = inputs[tf.newaxis, :, tf.newaxis]
elif is_2d:
inputs = inputs[:, :, tf.newaxis]
def _image_resize(method):
"""Closure around tf.image.resize."""
# Image resize needs 4-D input. Add/remove extra axis if not 4-D.
outputs = inputs[:, :, tf.newaxis, :] if not is_4d else inputs
outputs = tf.compat.v1.image.resize(outputs,
[n_timesteps, outputs.shape[2]],
method=method,
align_corners=not add_endpoint)
return outputs[:, :, 0, :] if not is_4d else outputs
# Perform resampling.
if method == 'linear':
outputs = _image_resize(tf.compat.v1.image.ResizeMethod.BILINEAR)
elif method == 'cubic':
outputs = _image_resize(tf.compat.v1.image.ResizeMethod.BICUBIC)
elif method == 'window':
outputs = upsample_with_windows(inputs, n_timesteps, add_endpoint)
else:
raise ValueError('Method ({}) is invalid. Must be one of {}.'.format(
method, "['linear', 'cubic', 'window']"))
# Return outputs to the same dimensionality of the inputs.
if is_1d:
outputs = outputs[0, :, 0]
elif is_2d:
outputs = outputs[:, :, 0]
return outputs
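# Hedged shape sketch (not from the original tests): a 4-frame amplitude envelope
# upsampled to 1000 timesteps with the default and the window method.
def _example_resample_shapes():
  """Returns two tensors of shape [1, 1000, 1] from a [1, 4, 1] framewise input."""
  frames = tf.ones([1, 4, 1])
  linear = resample(frames, n_timesteps=1000)                   # bilinear
  smooth = resample(frames, n_timesteps=1000, method='window')  # overlap-add
  return linear, smooth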
def upsample_with_windows(inputs: tf.Tensor,
n_timesteps: int,
add_endpoint: bool = True) -> tf.Tensor:
"""Upsample a series of frames using using overlapping hann windows.
Good for amplitude envelopes.
Args:
inputs: Framewise 3-D tensor. Shape [batch_size, n_frames, n_channels].
n_timesteps: The time resolution of the output signal.
add_endpoint: Hold the last timestep for an additional step as the endpoint.
Then, n_timesteps is divided evenly into n_frames segments. If false, use
the last timestep as the endpoint, producing (n_frames - 1) segments with
each having a length of n_timesteps / (n_frames - 1).
Returns:
Upsampled 3-D tensor. Shape [batch_size, n_timesteps, n_channels].
Raises:
ValueError: If input does not have 3 dimensions.
ValueError: If attempting to use function for downsampling.
ValueError: If n_timesteps is not divisible by n_frames (if add_endpoint is
true) or n_frames - 1 (if add_endpoint is false).
"""
inputs = tf_float32(inputs)
if len(inputs.shape) != 3:
raise ValueError('Upsample_with_windows() only supports 3 dimensions, '
'not {}.'.format(inputs.shape))
# Mimic behavior of tf.image.resize.
# For forward (not endpointed), hold value for last interval.
if add_endpoint:
inputs = tf.concat([inputs, inputs[:, -1:, :]], axis=1)
n_frames = int(inputs.shape[1])
n_intervals = (n_frames - 1)
if n_frames >= n_timesteps:
raise ValueError('Upsample with windows cannot be used for downsampling. '
'More input frames ({}) than output timesteps ({}).'.format(
n_frames, n_timesteps))
if n_timesteps % n_intervals != 0.0:
minus_one = '' if add_endpoint else ' - 1'
raise ValueError(
'For upsampling, the target number of timesteps must be divisible '
'by the number of input frames{}. (timesteps:{}, frames:{}, '
'add_endpoint={}).'.format(minus_one, n_timesteps, n_frames,
add_endpoint))
# Constant overlap-add, half overlapping windows.
hop_size = n_timesteps // n_intervals
window_length = 2 * hop_size
window = tf.signal.hann_window(window_length) # [window]
# Transpose for overlap_and_add.
x = tf.transpose(inputs, perm=[0, 2, 1]) # [batch_size, n_channels, n_frames]
# Broadcast multiply.
# Add dimension for windows [batch_size, n_channels, n_frames, window].
x = x[:, :, :, tf.newaxis]
window = window[tf.newaxis, tf.newaxis, tf.newaxis, :]
x_windowed = (x * window)
x = tf.signal.overlap_and_add(x_windowed, hop_size)
# Transpose back.
x = tf.transpose(x, perm=[0, 2, 1]) # [batch_size, n_timesteps, n_channels]
# Trim the rise and fall of the first and last window.
return x[:, hop_size:-hop_size, :]
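# Arithmetic sketch for the function above (numbers are illustrative): with
# n_frames=4, add_endpoint=True and n_timesteps=1000, the endpoint pad makes
# 5 frames / 4 intervals, so hop_size=250 and window_length=500; overlap-add
# produces 1500 samples, and trimming one hop from each side leaves exactly 1000.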
def log_scale(x, min_x, max_x):
"""Scales a -1 to 1 value logarithmically between min and max."""
x = tf_float32(x)
x = (x + 1.0) / 2.0 # Scale [-1, 1] to [0, 1]
return tf.exp((1.0 - x) * tf.math.log(min_x) + x * tf.math.log(max_x))
@gin.register
def exp_sigmoid(x, exponent=10.0, max_value=2.0, threshold=1e-7):
"""Exponentiated Sigmoid pointwise nonlinearity.
Bounds input to [threshold, max_value] with slope given by exponent.
Args:
x: Input tensor.
exponent: In nonlinear regime (away from x=0), the output varies by this
factor for every change of x by 1.0.
max_value: Limiting value at x=inf.
threshold: Limiting value at x=-inf. Stabilizes training when outputs are
pushed to 0.
Returns:
A tensor with pointwise nonlinearity applied.
"""
x = tf_float32(x)
return max_value * tf.nn.sigmoid(x)**tf.math.log(exponent) + threshold
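# Limit sketch (follows directly from the formula above): sigmoid(-inf) = 0 and
# sigmoid(inf) = 1, so outputs are pinned to roughly [threshold, max_value].
def _example_exp_sigmoid_limits():
  """exp_sigmoid at extreme inputs: ~1e-7 on the far left, ~2.0 on the far right."""
  lo = exp_sigmoid(tf.constant(-100.0))  # -> ~threshold (1e-7)
  hi = exp_sigmoid(tf.constant(100.0))   # -> ~max_value (2.0)
  return lo, hi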
@gin.register
def sym_exp_sigmoid(x, width=8.0):
"""Symmetrical version of exp_sigmoid centered at (0, 1e-7)."""
x = tf_float32(x)
return exp_sigmoid(width * (tf.abs(x)/2.0 - 1.0))
# Additive Synthesizer ---------------------------------------------------------
def remove_above_nyquist(frequency_envelopes: tf.Tensor,
amplitude_envelopes: tf.Tensor,
sample_rate: int = 16000) -> tf.Tensor:
"""Set amplitudes for oscillators above nyquist to 0.
Args:
frequency_envelopes: Sample-wise oscillator | |
<reponame>ArenaNetworks/dto-digitalmarketplace-api
from flask import json
import mock
from freezegun import freeze_time
from nose.tools import assert_equal, assert_not_equal, assert_in, assert_is_none
from app import db, encryption
from app.models import Address, User, Supplier, Application, Brief
import pendulum
from pendulum import create as datetime
from ..helpers import \
BaseApplicationTest, \
JSONTestMixin, \
JSONUpdateTestMixin, \
assert_api_compatible, \
assert_api_compatible_list
class BaseUserTest(BaseApplicationTest):
supplier = None
supplier_code = None
users = None
patcher_application = None
patcher_supplier = None
patcher_user = None
pub_application = None
pub_supplier = None
pub_user = None
def setup(self):
super(BaseUserTest, self).setup()
payload = self.load_example_listing("Supplier")
self.supplier = payload
self.supplier_code = payload['code']
self.users = []
self.patcher_application = mock.patch('app.tasks.publish_tasks.application')
self.patcher_supplier = mock.patch('app.tasks.publish_tasks.supplier')
self.patcher_user = mock.patch('app.tasks.publish_tasks.user')
self.pub_application = self.patcher_application.start()
self.pub_supplier = self.patcher_supplier.start()
self.pub_user = self.patcher_user.start()
def teardown(self):
super(BaseUserTest, self).teardown()
self.patcher_application.stop()
self.patcher_supplier.stop()
self.patcher_user.stop()
def _post_supplier(self):
response = self.client.post(
'/suppliers',
data=json.dumps({'supplier': self.supplier}),
content_type='application/json')
assert response.status_code == 201
def _post_user(self, user):
response = self.client.post(
'/users',
data=json.dumps({'users': user}),
content_type='application/json')
assert response.status_code == 201
assert self.pub_user.delay.called is True
self.users.append(json.loads(response.get_data())["users"])
def _return_post_login(self, auth_users=None, status_code=200):
_auth_users = {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>'
}
if auth_users is not None and isinstance(auth_users, dict):
_auth_users.update(auth_users)
response = self.client.post(
'/users/auth',
data=json.dumps({'authUsers': _auth_users}),
content_type='application/json')
assert response.status_code == status_code
return response
def _post_application(self):
response = self.client.post(
'/applications',
data=json.dumps({
'update_details': {'updated_by': '<EMAIL>'},
'application': {"name": "my company"},
}),
content_type='application/json'
)
self.application_id = json.loads(response.get_data())['application']['id']
assert response.status_code == 201
assert self.pub_application.delay.called is True
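# Illustration of the helper flow above (assumed, condensed from the tests below):
# a test typically builds its fixtures through the JSON endpoints rather than the ORM:
#   self._post_supplier()                      # POST /suppliers
#   self._post_user({...})                     # POST /users, asserts a 201
#   self._return_post_login(status_code=200)   # then exercises POST /users/auth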
class TestUsersAuth(BaseUserTest):
def create_user(self):
with self.app.app_context():
user = {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>',
'role': 'admin',
'name': 'joe bloggs'
}
self._post_user(user)
def valid_login(self):
return self._return_post_login()
def invalid_password(self):
return self._return_post_login(
auth_users={'password': '<PASSWORD>'},
status_code=403
)
def test_should_validate_credentials(self):
self.create_user()
with self.app.app_context():
response = self.valid_login()
data = json.loads(response.get_data())['users']
assert_equal(data['emailAddress'], '<EMAIL>')
def test_should_validate_mixedcase_credentials(self):
self.create_user()
with self.app.app_context():
response = self.client.post(
'/users/auth',
data=json.dumps({
'authUsers': {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>'}}),
content_type='application/json')
assert_equal(response.status_code, 200)
data = json.loads(response.get_data())['users']
assert_equal(data['emailAddress'], '<EMAIL>')
def test_should_return_404_for_no_user(self):
with self.app.app_context():
response = self.client.post(
'/users/auth',
data=json.dumps({
'authUsers': {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>'}}),
content_type='application/json')
assert_equal(response.status_code, 404)
data = json.loads(response.get_data())
assert_equal(data['authorization'], False)
def test_should_return_404_for_deleted_supplier_user(self):
with self.app.app_context():
db.session.add(
Supplier(code=1,
name=u"Supplier 1",
status="deleted",
addresses=[Address(address_line="{} Dummy Street",
suburb="Dummy",
state="ZZZ",
postal_code="0000",
country='Australia')])
)
db.session.commit()
user = {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>',
'role': 'supplier',
'supplierCode': 1,
'name': '<NAME>'
}
self._post_user(user)
response = self.client.post(
'/users/auth',
data=json.dumps({
'authUsers': {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>'}}),
content_type='application/json')
assert_equal(response.status_code, 404)
data = json.loads(response.get_data())
assert_equal(data['authorization'], False)
def test_should_return_403_for_bad_password(self):
self.create_user()
with self.app.app_context():
response = self.client.post(
'/users/auth',
data=json.dumps({
'authUsers': {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>'}}),
content_type='application/json')
assert_equal(response.status_code, 403)
data = json.loads(response.get_data())
assert_equal(data['authorization'], False)
def test_logged_in_at_is_updated_on_successful_login(self):
self.create_user()
with self.app.app_context(), freeze_time('2015-06-06'):
self.valid_login()
user = User.get_by_email_address('<EMAIL>')
assert_equal(user.logged_in_at, datetime(2015, 6, 6))
def test_logged_in_at_is_not_updated_on_failed_login(self):
self.create_user()
with self.app.app_context(), freeze_time('2015-06-06'):
self.invalid_password()
user = User.get_by_email_address('<EMAIL>')
assert_equal(user.logged_in_at, None)
def test_failed_login_should_increment_failed_login_counter(self):
self.create_user()
with self.app.app_context():
self.invalid_password()
user = User.get_by_email_address('<EMAIL>')
assert_equal(user.failed_login_count, 1)
def test_successful_login_resets_failed_login_counter(self):
self.create_user()
with self.app.app_context():
self.invalid_password()
self.valid_login()
user = User.get_by_email_address('<EMAIL>')
assert_equal(user.failed_login_count, 0)
def test_user_is_locked_after_too_many_failed_login_attempts(self):
self.create_user()
self.app.config['DM_FAILED_LOGIN_LIMIT'] = 1
with self.app.app_context():
self.invalid_password()
user = User.get_by_email_address('<EMAIL>')
assert_equal(user.locked, True)
def test_all_login_attempts_fail_for_locked_users(self):
self.create_user()
self.app.config['DM_FAILED_LOGIN_LIMIT'] = 1
with self.app.app_context():
user = User.get_by_email_address('<EMAIL>')
user.failed_login_count = 1
db.session.add(user)
db.session.commit()
self._return_post_login(status_code=403)
class TestUsersPost(BaseUserTest, JSONTestMixin):
method = "post"
endpoint = "/users"
def test_can_post_a_buyer_user(self):
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'emailAddress': '<EMAIL>',
'phoneNumber': '01234 567890',
'password': '<PASSWORD>',
'role': 'buyer',
'name': '<NAME>'}}),
content_type='application/json')
assert_equal(response.status_code, 201)
data = json.loads(response.get_data())["users"]
assert_equal(data["emailAddress"], "<EMAIL>")
assert_equal(data["phoneNumber"], "01234 567890")
def test_creating_buyer_user_with_bad_email_domain_fails(self):
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>',
'role': 'buyer',
'name': '<NAME>'}}),
content_type='application/json')
assert response.status_code == 400
assert json.loads(response.get_data())['error'] == 'invalid_buyer_domain'
def test_creating_buyer_user_with_good_email_domain_succeeds(self):
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>',
'role': 'buyer',
'name': '<NAME>'}}),
content_type='application/json')
assert response.status_code == 201
data = json.loads(response.get_data())['users']
assert data['active']
def test_creating_buyer_user_with_no_phone_number_succeeds(self):
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'emailAddress': '<EMAIL>',
'phoneNumber': '',
'password': '<PASSWORD>',
'role': 'buyer',
'name': '<NAME>'}}),
content_type='application/json')
assert response.status_code == 201
data = json.loads(response.get_data())['users']
assert data['active']
def test_creating_buyer_user_with_bad_phone_number_fails(self):
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'emailAddress': '<EMAIL>',
'phoneNumber': '1<PASSWORD>',
'password': '<PASSWORD>',
'role': 'buyer',
'name': 'joe bloggs'}}),
content_type='application/json')
assert response.status_code == 400
def test_creating_buyer_user_with_no_phone_stores_none(self):
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'emailAddress': '<EMAIL>',
'phoneNumber': '',
'password': '<PASSWORD>',
'role': 'buyer',
'name': '<NAME>'}}),
content_type='application/json')
assert response.status_code == 201
data = json.loads(response.get_data())['users']
assert_equal(data['phoneNumber'], None)
def test_can_post_an_admin_user(self):
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>',
'role': 'admin',
'name': 'joe bloggs'}}),
content_type='application/json')
assert_equal(response.status_code, 201)
data = json.loads(response.get_data())["users"]
assert_equal(data["emailAddress"], "<EMAIL>")
def test_can_post_an_admin_ccs_category_user(self):
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>',
'role': 'admin-ccs-category',
'name': 'joe bloggs'}}),
content_type='application/json')
assert_equal(response.status_code, 201)
data = json.loads(response.get_data())["users"]
assert_equal(data["emailAddress"], "<EMAIL>")
def test_can_post_an_admin_ccs_sourcing_user(self):
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>',
'role': 'admin-ccs-sourcing',
'name': 'joe bloggs'}}),
content_type='application/json')
assert_equal(response.status_code, 201)
data = json.loads(response.get_data())["users"]
assert_equal(data["emailAddress"], "<EMAIL>")
# The admin-ccs role is no longer in use
def test_can_not_post_an_admin_ccs_user(self):
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>',
'role': 'admin-ccs',
'name': '<NAME>'}}),
content_type='application/json')
assert_equal(response.status_code, 400)
error = json.loads(response.get_data())['error']
assert_in("'admin-ccs' is not one of", error)
def test_can_post_a_supplier_user(self):
with self.app.app_context():
db.session.add(
Supplier(code=1,
name=u"Supplier 1",
addresses=[Address(address_line="{} Dummy Street",
suburb="Dummy",
state="ZZZ",
postal_code="0000",
country='Australia')])
)
db.session.add(
Application(id=1, data={"name": "my company"}, status='saved', supplier_code=1)
)
db.session.commit()
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>',
'supplierCode': 1,
'role': 'supplier',
'name': 'joe bloggs'}}),
content_type='application/json')
assert_equal(response.status_code, 201)
data = json.loads(response.get_data())["users"]
assert_equal(data["emailAddress"], "<EMAIL>")
def test_can_post_an_applicant_user(self):
with self.app.app_context():
db.session.add(
Application(id=1, data={"name": "my company"}, status='saved')
)
db.session.commit()
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>',
'application_id': 1,
'role': 'applicant',
'name': '<NAME>'}}),
content_type='application/json')
assert_equal(response.status_code, 201)
data = json.loads(response.get_data())["users"]
assert_equal(data["emailAddress"], "<EMAIL>")
def test_post_a_user_creates_audit_event(self):
with self.app.app_context():
db.session.add(
Supplier(code=1, name=u"Supplier 1",
addresses=[Address(address_line="{} Dummy Street",
suburb="Dummy",
state="ZZZ",
postal_code="0000",
country='Australia')])
)
db.session.commit()
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>',
'supplierCode': 1,
'role': 'supplier',
'name': '<NAME>'}}),
content_type='application/json')
assert_equal(response.status_code, 201)
audit_response = self.client.get('/audit-events')
assert_equal(audit_response.status_code, 200)
data = json.loads(audit_response.get_data())
assert_equal(len(data['auditEvents']), 1)
assert_equal(data['auditEvents'][0]['type'], 'create_user')
assert_equal(data['auditEvents'][0]['data']['supplier_code'], 1)
def test_should_reject_a_supplier_user_with_invalid_supplier_code(self):
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>',
'supplierCode': 999,
'role': 'supplier',
'name': 'joe bloggs'}}),
content_type='application/json')
data = json.loads(response.get_data())["error"]
assert_equal(response.status_code, 400)
assert_equal(data, "Invalid supplier code or application id")
def test_should_reject_a_supplier_user_with_no_supplier_code(self):
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>',
'role': 'supplier',
'name': '<NAME>'}}),
content_type='application/json')
assert_equal(response.status_code, 400)
data = json.loads(response.get_data())["error"]
assert_equal(data, "No supplier code provided for supplier user")
def test_should_reject_non_supplier_user_with_supplier_code(self):
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>',
'role': 'admin',
'supplierCode': 1,
'name': '<NAME>'}}),
content_type='application/json')
assert_equal(response.status_code, 400)
data = json.loads(response.get_data())["error"]
assert_equal(data, "'supplier_code' is only valid for users with 'supplier' role, not 'admin'")
def test_should_reject_a_applicant_user_with_invalid_application_id(self):
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>',
'application_id': 999,
'role': 'applicant',
'name': '<NAME>'}}),
content_type='application/json')
data = json.loads(response.get_data())["error"]
assert_equal(response.status_code, 400)
assert_equal(data, "Invalid supplier code or application id")
def test_should_reject_a_applicant_with_no_application_id(self):
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>',
'role': 'applicant',
'name': '<NAME>'}}),
content_type='application/json')
assert_equal(response.status_code, 400)
data = json.loads(response.get_data())["error"]
assert_equal(data, "'application id' is required for users with 'applicant' role")
def test_should_reject_non_applicant_with_application_id(self):
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>',
'role': 'admin',
'application_id': 1,
'name': '<NAME>'}}),
content_type='application/json')
assert_equal(response.status_code, 400)
data = json.loads(response.get_data())["error"]
assert_equal(data, "'application_id' is only valid for users with 'applicant' or 'supplier' role, not 'admin'")
def test_should_reject_user_with_invalid_role(self):
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>',
'role': 'shopkeeper',
'name': '<NAME>'}}),
content_type='application/json')
assert_equal(response.status_code, 400)
def test_can_post_a_user_with_hashed_password(self):
with self.app.app_context():
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'hashpw': True,
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>',
'role': 'admin',
'name': 'joe bloggs'}}),
content_type='application/json')
assert_equal(response.status_code, 201)
user = User.query.filter(
User.email_address == '<EMAIL>') \
.first()
assert_not_equal(user.password, '<PASSWORD>')
def test_can_post_a_user_without_hashed_password(self):
with self.app.app_context():
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'hashpw': False,
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>',
'role': 'admin',
'name': '<NAME>'}}),
content_type='application/json')
assert_equal(response.status_code, 201)
user = User.query.filter(
User.email_address == '<EMAIL>') \
.first()
assert_equal(user.password, '<PASSWORD>')
def test_posting_same_email_twice_is_an_error(self):
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>',
'role': 'admin',
'name': 'joe bloggs'}}),
content_type='application/json')
assert_equal(response.status_code, 201)
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>',
'role': 'admin',
'name': '<NAME>'}}),
content_type='application/json')
assert_equal(response.status_code, 409)
def test_return_400_for_invalid_user_json(self):
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'emailAddress': '<EMAIL>',
'password': '',
'role': 'buyer',
'name': '<NAME>'}}),
content_type='application/json')
assert_equal(response.status_code, 400)
data = json.loads(response.get_data())["error"]
assert_in("JSON was not a valid format", data)
def test_return_400_for_invalid_user_role(self):
response = self.client.post(
'/users',
data=json.dumps({
'users': {
'emailAddress': '<EMAIL>',
'password': '<PASSWORD>',
'role': 'invalid',
'name': '<NAME>'}}),
content_type='application/json')
assert_equal(response.status_code, 400)
data = json.loads(response.get_data())["error"]
assert_in("JSON was not a valid format", data)
class TestUsersUpdate(BaseUserTest, JSONUpdateTestMixin):
method = | |
# Copyright 2019 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
# This file is automatically generated by mkgrokdump and should not
# be modified manually.
# List of known V8 instance types.
INSTANCE_TYPES = {
0: "INTERNALIZED_STRING_TYPE",
2: "EXTERNAL_INTERNALIZED_STRING_TYPE",
8: "ONE_BYTE_INTERNALIZED_STRING_TYPE",
10: "EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
18: "UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE",
26: "UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
32: "STRING_TYPE",
33: "CONS_STRING_TYPE",
34: "EXTERNAL_STRING_TYPE",
35: "SLICED_STRING_TYPE",
37: "THIN_STRING_TYPE",
40: "ONE_BYTE_STRING_TYPE",
41: "CONS_ONE_BYTE_STRING_TYPE",
42: "EXTERNAL_ONE_BYTE_STRING_TYPE",
43: "SLICED_ONE_BYTE_STRING_TYPE",
45: "THIN_ONE_BYTE_STRING_TYPE",
50: "UNCACHED_EXTERNAL_STRING_TYPE",
58: "UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE",
64: "SYMBOL_TYPE",
65: "BIG_INT_BASE_TYPE",
66: "HEAP_NUMBER_TYPE",
67: "ODDBALL_TYPE",
68: "ABSTRACT_INTERNAL_CLASS_SUBCLASS1_TYPE",
69: "ABSTRACT_INTERNAL_CLASS_SUBCLASS2_TYPE",
70: "FOREIGN_TYPE",
71: "WASM_FUNCTION_DATA_TYPE",
72: "WASM_CAPI_FUNCTION_DATA_TYPE",
73: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
74: "WASM_JS_FUNCTION_DATA_TYPE",
75: "WASM_TYPE_INFO_TYPE",
76: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE",
77: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
78: "CALLABLE_TASK_TYPE",
79: "CALLBACK_TASK_TYPE",
80: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE",
81: "LOAD_HANDLER_TYPE",
82: "STORE_HANDLER_TYPE",
83: "FUNCTION_TEMPLATE_INFO_TYPE",
84: "OBJECT_TEMPLATE_INFO_TYPE",
85: "ACCESS_CHECK_INFO_TYPE",
86: "ACCESSOR_INFO_TYPE",
87: "ACCESSOR_PAIR_TYPE",
88: "ALIASED_ARGUMENTS_ENTRY_TYPE",
89: "ALLOCATION_MEMENTO_TYPE",
90: "ALLOCATION_SITE_TYPE",
91: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE",
92: "ASM_WASM_DATA_TYPE",
93: "ASYNC_GENERATOR_REQUEST_TYPE",
94: "BASELINE_DATA_TYPE",
95: "BREAK_POINT_TYPE",
96: "BREAK_POINT_INFO_TYPE",
97: "CACHED_TEMPLATE_OBJECT_TYPE",
98: "CALL_HANDLER_INFO_TYPE",
99: "CLASS_POSITIONS_TYPE",
100: "DEBUG_INFO_TYPE",
101: "ENUM_CACHE_TYPE",
102: "FEEDBACK_CELL_TYPE",
103: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
104: "INTERCEPTOR_INFO_TYPE",
105: "INTERPRETER_DATA_TYPE",
106: "MODULE_REQUEST_TYPE",
107: "PROMISE_CAPABILITY_TYPE",
108: "PROMISE_REACTION_TYPE",
109: "PROPERTY_DESCRIPTOR_OBJECT_TYPE",
110: "PROTOTYPE_INFO_TYPE",
111: "REG_EXP_BOILERPLATE_DESCRIPTION_TYPE",
112: "SCRIPT_TYPE",
113: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
114: "STACK_FRAME_INFO_TYPE",
115: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
116: "TUPLE2_TYPE",
117: "WASM_EXCEPTION_TAG_TYPE",
118: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
119: "FIXED_ARRAY_TYPE",
120: "HASH_TABLE_TYPE",
121: "EPHEMERON_HASH_TABLE_TYPE",
122: "GLOBAL_DICTIONARY_TYPE",
123: "NAME_DICTIONARY_TYPE",
124: "NUMBER_DICTIONARY_TYPE",
125: "ORDERED_HASH_MAP_TYPE",
126: "ORDERED_HASH_SET_TYPE",
127: "ORDERED_NAME_DICTIONARY_TYPE",
128: "SIMPLE_NUMBER_DICTIONARY_TYPE",
129: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
130: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
131: "SCRIPT_CONTEXT_TABLE_TYPE",
132: "BYTE_ARRAY_TYPE",
133: "BYTECODE_ARRAY_TYPE",
134: "FIXED_DOUBLE_ARRAY_TYPE",
135: "INTERNAL_CLASS_WITH_SMI_ELEMENTS_TYPE",
136: "SLOPPY_ARGUMENTS_ELEMENTS_TYPE",
137: "AWAIT_CONTEXT_TYPE",
138: "BLOCK_CONTEXT_TYPE",
139: "CATCH_CONTEXT_TYPE",
140: "DEBUG_EVALUATE_CONTEXT_TYPE",
141: "EVAL_CONTEXT_TYPE",
142: "FUNCTION_CONTEXT_TYPE",
143: "MODULE_CONTEXT_TYPE",
144: "NATIVE_CONTEXT_TYPE",
145: "SCRIPT_CONTEXT_TYPE",
146: "WITH_CONTEXT_TYPE",
147: "EXPORTED_SUB_CLASS_BASE_TYPE",
148: "EXPORTED_SUB_CLASS_TYPE",
149: "EXPORTED_SUB_CLASS2_TYPE",
150: "SMALL_ORDERED_HASH_MAP_TYPE",
151: "SMALL_ORDERED_HASH_SET_TYPE",
152: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
153: "DESCRIPTOR_ARRAY_TYPE",
154: "STRONG_DESCRIPTOR_ARRAY_TYPE",
155: "SOURCE_TEXT_MODULE_TYPE",
156: "SYNTHETIC_MODULE_TYPE",
157: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
158: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
159: "WEAK_FIXED_ARRAY_TYPE",
160: "TRANSITION_ARRAY_TYPE",
161: "CELL_TYPE",
162: "CODE_TYPE",
163: "CODE_DATA_CONTAINER_TYPE",
164: "COVERAGE_INFO_TYPE",
165: "EMBEDDER_DATA_ARRAY_TYPE",
166: "FEEDBACK_METADATA_TYPE",
167: "FEEDBACK_VECTOR_TYPE",
168: "FILLER_TYPE",
169: "FREE_SPACE_TYPE",
170: "INTERNAL_CLASS_TYPE",
171: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
172: "MAP_TYPE",
173: "MEGA_DOM_HANDLER_TYPE",
174: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
175: "PREPARSE_DATA_TYPE",
176: "PROPERTY_ARRAY_TYPE",
177: "PROPERTY_CELL_TYPE",
178: "SCOPE_INFO_TYPE",
179: "SHARED_FUNCTION_INFO_TYPE",
180: "SMI_BOX_TYPE",
181: "SMI_PAIR_TYPE",
182: "SORT_STATE_TYPE",
183: "SWISS_NAME_DICTIONARY_TYPE",
184: "WEAK_ARRAY_LIST_TYPE",
185: "WEAK_CELL_TYPE",
186: "WASM_ARRAY_TYPE",
187: "WASM_STRUCT_TYPE",
188: "JS_PROXY_TYPE",
1057: "JS_OBJECT_TYPE",
189: "JS_GLOBAL_OBJECT_TYPE",
190: "JS_GLOBAL_PROXY_TYPE",
191: "JS_MODULE_NAMESPACE_TYPE",
1040: "JS_SPECIAL_API_OBJECT_TYPE",
1041: "JS_PRIMITIVE_WRAPPER_TYPE",
1042: "JS_ARRAY_ITERATOR_PROTOTYPE_TYPE",
1043: "JS_ITERATOR_PROTOTYPE_TYPE",
1044: "JS_MAP_ITERATOR_PROTOTYPE_TYPE",
1045: "JS_OBJECT_PROTOTYPE_TYPE",
1046: "JS_PROMISE_PROTOTYPE_TYPE",
1047: "JS_REG_EXP_PROTOTYPE_TYPE",
1048: "JS_SET_ITERATOR_PROTOTYPE_TYPE",
1049: "JS_SET_PROTOTYPE_TYPE",
1050: "JS_STRING_ITERATOR_PROTOTYPE_TYPE",
1051: "JS_TYPED_ARRAY_PROTOTYPE_TYPE",
1052: "JS_GENERATOR_OBJECT_TYPE",
1053: "JS_ASYNC_FUNCTION_OBJECT_TYPE",
1054: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
1055: "JS_ARGUMENTS_OBJECT_TYPE",
1056: "JS_API_OBJECT_TYPE",
1058: "JS_BOUND_FUNCTION_TYPE",
1059: "JS_FUNCTION_TYPE",
1060: "BIGINT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
1061: "BIGUINT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
1062: "FLOAT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
1063: "FLOAT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
1064: "INT16_TYPED_ARRAY_CONSTRUCTOR_TYPE",
1065: "INT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
1066: "INT8_TYPED_ARRAY_CONSTRUCTOR_TYPE",
1067: "UINT16_TYPED_ARRAY_CONSTRUCTOR_TYPE",
1068: "UINT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
1069: "UINT8_CLAMPED_TYPED_ARRAY_CONSTRUCTOR_TYPE",
1070: "UINT8_TYPED_ARRAY_CONSTRUCTOR_TYPE",
1071: "JS_ARRAY_CONSTRUCTOR_TYPE",
1072: "JS_PROMISE_CONSTRUCTOR_TYPE",
1073: "JS_REG_EXP_CONSTRUCTOR_TYPE",
1074: "JS_MAP_KEY_ITERATOR_TYPE",
1075: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
1076: "JS_MAP_VALUE_ITERATOR_TYPE",
1077: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
1078: "JS_SET_VALUE_ITERATOR_TYPE",
1079: "JS_DATA_VIEW_TYPE",
1080: "JS_TYPED_ARRAY_TYPE",
1081: "JS_MAP_TYPE",
1082: "JS_SET_TYPE",
1083: "JS_WEAK_MAP_TYPE",
1084: "JS_WEAK_SET_TYPE",
1085: "JS_ARRAY_TYPE",
1086: "JS_ARRAY_BUFFER_TYPE",
1087: "JS_ARRAY_ITERATOR_TYPE",
1088: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
1089: "JS_COLLATOR_TYPE",
1090: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
1091: "JS_DATE_TYPE",
1092: "JS_DATE_TIME_FORMAT_TYPE",
1093: "JS_DISPLAY_NAMES_TYPE",
1094: "JS_ERROR_TYPE",
1095: "JS_FINALIZATION_REGISTRY_TYPE",
1096: "JS_LIST_FORMAT_TYPE",
1097: "JS_LOCALE_TYPE",
1098: "JS_MESSAGE_OBJECT_TYPE",
1099: "JS_NUMBER_FORMAT_TYPE",
1100: "JS_PLURAL_RULES_TYPE",
1101: "JS_PROMISE_TYPE",
1102: "JS_REG_EXP_TYPE",
1103: "JS_REG_EXP_STRING_ITERATOR_TYPE",
1104: "JS_RELATIVE_TIME_FORMAT_TYPE",
1105: "JS_SEGMENT_ITERATOR_TYPE",
1106: "JS_SEGMENTER_TYPE",
1107: "JS_SEGMENTS_TYPE",
1108: "JS_STRING_ITERATOR_TYPE",
1109: "JS_V8_BREAK_ITERATOR_TYPE",
1110: "JS_WEAK_REF_TYPE",
1111: "WASM_GLOBAL_OBJECT_TYPE",
1112: "WASM_INSTANCE_OBJECT_TYPE",
1113: "WASM_MEMORY_OBJECT_TYPE",
1114: "WASM_MODULE_OBJECT_TYPE",
1115: "WASM_TABLE_OBJECT_TYPE",
1116: "WASM_TAG_OBJECT_TYPE",
1117: "WASM_VALUE_OBJECT_TYPE",
}
# List of known V8 maps.
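# Each entry maps a (heap space name, address offset) pair to a tuple of
# (instance type id, map name); the id indexes into INSTANCE_TYPES above.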
KNOWN_MAPS = {
("read_only_space", 0x02119): (172, "MetaMap"),
("read_only_space", 0x02141): (67, "NullMap"),
("read_only_space", 0x02169): (154, "StrongDescriptorArrayMap"),
("read_only_space", 0x02191): (159, "WeakFixedArrayMap"),
("read_only_space", 0x021d1): (101, "EnumCacheMap"),
("read_only_space", 0x02205): (119, "FixedArrayMap"),
("read_only_space", 0x02251): (8, "OneByteInternalizedStringMap"),
("read_only_space", 0x0229d): (169, "FreeSpaceMap"),
("read_only_space", 0x022c5): (168, "OnePointerFillerMap"),
("read_only_space", 0x022ed): (168, "TwoPointerFillerMap"),
("read_only_space", 0x02315): (67, "UninitializedMap"),
("read_only_space", 0x0238d): (67, "UndefinedMap"),
("read_only_space", 0x023d1): (66, "HeapNumberMap"),
("read_only_space", 0x02405): (67, "TheHoleMap"),
("read_only_space", 0x02465): (67, "BooleanMap"),
("read_only_space", 0x02509): (132, "ByteArrayMap"),
("read_only_space", 0x02531): (119, "FixedCOWArrayMap"),
("read_only_space", 0x02559): (120, "HashTableMap"),
("read_only_space", 0x02581): (64, "SymbolMap"),
("read_only_space", 0x025a9): (40, "OneByteStringMap"),
("read_only_space", 0x025d1): (178, "ScopeInfoMap"),
("read_only_space", 0x025f9): (179, "SharedFunctionInfoMap"),
("read_only_space", 0x02621): (162, "CodeMap"),
("read_only_space", 0x02649): (161, "CellMap"),
("read_only_space", 0x02671): (177, "GlobalPropertyCellMap"),
("read_only_space", 0x02699): (70, "ForeignMap"),
("read_only_space", 0x026c1): (160, "TransitionArrayMap"),
("read_only_space", 0x026e9): (45, "ThinOneByteStringMap"),
("read_only_space", 0x02711): (167, "FeedbackVectorMap"),
("read_only_space", 0x02749): (67, "ArgumentsMarkerMap"),
("read_only_space", 0x027a9): (67, "ExceptionMap"),
("read_only_space", 0x02805): (67, "TerminationExceptionMap"),
("read_only_space", 0x0286d): (67, "OptimizedOutMap"),
("read_only_space", 0x028cd): (67, "StaleRegisterMap"),
("read_only_space", 0x0292d): (131, "ScriptContextTableMap"),
("read_only_space", 0x02955): (129, "ClosureFeedbackCellArrayMap"),
("read_only_space", 0x0297d): (166, "FeedbackMetadataArrayMap"),
("read_only_space", 0x029a5): (119, "ArrayListMap"),
("read_only_space", 0x029cd): (65, "BigIntMap"),
("read_only_space", 0x029f5): (130, "ObjectBoilerplateDescriptionMap"),
("read_only_space", 0x02a1d): (133, "BytecodeArrayMap"),
("read_only_space", 0x02a45): (163, "CodeDataContainerMap"),
("read_only_space", 0x02a6d): (164, "CoverageInfoMap"),
("read_only_space", 0x02a95): (134, "FixedDoubleArrayMap"),
("read_only_space", 0x02abd): (122, "GlobalDictionaryMap"),
("read_only_space", 0x02ae5): (102, "ManyClosuresCellMap"),
("read_only_space", 0x02b0d): (173, "MegaDomHandlerMap"),
("read_only_space", 0x02b35): (119, "ModuleInfoMap"),
("read_only_space", 0x02b5d): (123, "NameDictionaryMap"),
("read_only_space", 0x02b85): (102, "NoClosuresCellMap"),
("read_only_space", 0x02bad): (124, "NumberDictionaryMap"),
("read_only_space", 0x02bd5): (102, "OneClosureCellMap"),
("read_only_space", 0x02bfd): (125, "OrderedHashMapMap"),
("read_only_space", 0x02c25): (126, "OrderedHashSetMap"),
("read_only_space", 0x02c4d): (127, "OrderedNameDictionaryMap"),
("read_only_space", 0x02c75): (175, "PreparseDataMap"),
("read_only_space", 0x02c9d): (176, "PropertyArrayMap"),
("read_only_space", 0x02cc5): (98, "SideEffectCallHandlerInfoMap"),
("read_only_space", 0x02ced): (98, "SideEffectFreeCallHandlerInfoMap"),
("read_only_space", 0x02d15): (98, "NextCallSideEffectFreeCallHandlerInfoMap"),
("read_only_space", 0x02d3d): (128, "SimpleNumberDictionaryMap"),
("read_only_space", 0x02d65): (150, "SmallOrderedHashMapMap"),
("read_only_space", 0x02d8d): (151, "SmallOrderedHashSetMap"),
("read_only_space", 0x02db5): (152, "SmallOrderedNameDictionaryMap"),
("read_only_space", 0x02ddd): (155, "SourceTextModuleMap"),
("read_only_space", 0x02e05): (183, "SwissNameDictionaryMap"),
("read_only_space", 0x02e2d): (156, "SyntheticModuleMap"),
("read_only_space", 0x02e55): (72, "WasmCapiFunctionDataMap"),
("read_only_space", 0x02e7d): (73, "WasmExportedFunctionDataMap"),
("read_only_space", 0x02ea5): (74, "WasmJSFunctionDataMap"),
("read_only_space", 0x02ecd): (75, "WasmTypeInfoMap"),
("read_only_space", 0x02ef5): (184, "WeakArrayListMap"),
("read_only_space", 0x02f1d): (121, "EphemeronHashTableMap"),
("read_only_space", 0x02f45): (165, "EmbedderDataArrayMap"),
("read_only_space", 0x02f6d): (185, "WeakCellMap"),
("read_only_space", 0x02f95): (32, "StringMap"),
("read_only_space", 0x02fbd): (41, "ConsOneByteStringMap"),
("read_only_space", 0x02fe5): (33, "ConsStringMap"),
("read_only_space", 0x0300d): (37, "ThinStringMap"),
("read_only_space", 0x03035): (35, "SlicedStringMap"),
("read_only_space", 0x0305d): (43, "SlicedOneByteStringMap"),
("read_only_space", 0x03085): (34, "ExternalStringMap"),
("read_only_space", 0x030ad): (42, "ExternalOneByteStringMap"),
("read_only_space", 0x030d5): (50, "UncachedExternalStringMap"),
("read_only_space", 0x030fd): (0, "InternalizedStringMap"),
("read_only_space", 0x03125): (2, "ExternalInternalizedStringMap"),
("read_only_space", 0x0314d): (10, "ExternalOneByteInternalizedStringMap"),
("read_only_space", 0x03175): (18, "UncachedExternalInternalizedStringMap"),
("read_only_space", 0x0319d): (26, "UncachedExternalOneByteInternalizedStringMap"),
("read_only_space", 0x031c5): (58, "UncachedExternalOneByteStringMap"),
("read_only_space", 0x031ed): (67, "SelfReferenceMarkerMap"),
("read_only_space", 0x03215): (67, "BasicBlockCountersMarkerMap"),
("read_only_space", 0x03259): (91, "ArrayBoilerplateDescriptionMap"),
("read_only_space", 0x03359): (104, "InterceptorInfoMap"),
("read_only_space", 0x05699): (76, "PromiseFulfillReactionJobTaskMap"),
("read_only_space", 0x056c1): (77, "PromiseRejectReactionJobTaskMap"),
("read_only_space", 0x056e9): (78, "CallableTaskMap"),
("read_only_space", 0x05711): (79, "CallbackTaskMap"),
("read_only_space", 0x05739): (80, "PromiseResolveThenableJobTaskMap"),
("read_only_space", 0x05761): (83, "FunctionTemplateInfoMap"),
("read_only_space", 0x05789): (84, "ObjectTemplateInfoMap"),
("read_only_space", 0x057b1): (85, "AccessCheckInfoMap"),
("read_only_space", 0x057d9): (86, "AccessorInfoMap"),
("read_only_space", 0x05801): (87, "AccessorPairMap"),
("read_only_space", 0x05829): (88, "AliasedArgumentsEntryMap"),
("read_only_space", 0x05851): (89, "AllocationMementoMap"),
("read_only_space", 0x05879): (92, "AsmWasmDataMap"),
("read_only_space", 0x058a1): (93, "AsyncGeneratorRequestMap"),
("read_only_space", 0x058c9): (94, "BaselineDataMap"),
("read_only_space", 0x058f1): (95, "BreakPointMap"),
("read_only_space", 0x05919): (96, "BreakPointInfoMap"),
("read_only_space", 0x05941): (97, "CachedTemplateObjectMap"),
("read_only_space", 0x05969): (99, "ClassPositionsMap"),
("read_only_space", 0x05991): (100, "DebugInfoMap"),
("read_only_space", 0x059b9): (103, "FunctionTemplateRareDataMap"),
("read_only_space", 0x059e1): (105, "InterpreterDataMap"),
("read_only_space", 0x05a09): (106, "ModuleRequestMap"),
("read_only_space", 0x05a31): (107, "PromiseCapabilityMap"),
("read_only_space", 0x05a59): (108, "PromiseReactionMap"),
("read_only_space", 0x05a81): (109, "PropertyDescriptorObjectMap"),
("read_only_space", 0x05aa9): (110, "PrototypeInfoMap"),
("read_only_space", 0x05ad1): (111, "RegExpBoilerplateDescriptionMap"),
("read_only_space", 0x05af9): (112, "ScriptMap"),
("read_only_space", 0x05b21): (113, "SourceTextModuleInfoEntryMap"),
("read_only_space", 0x05b49): (114, "StackFrameInfoMap"),
("read_only_space", 0x05b71): (115, "TemplateObjectDescriptionMap"),
("read_only_space", 0x05b99): (116, "Tuple2Map"),
("read_only_space", 0x05bc1): (117, "WasmExceptionTagMap"),
("read_only_space", 0x05be9): (118, "WasmIndirectFunctionTableMap"),
("read_only_space", 0x05c11): (136, "SloppyArgumentsElementsMap"),
("read_only_space", 0x05c39): (153, "DescriptorArrayMap"),
("read_only_space", 0x05c61): (158, "UncompiledDataWithoutPreparseDataMap"),
("read_only_space", 0x05c89): (157, "UncompiledDataWithPreparseDataMap"),
("read_only_space", 0x05cb1): (174, "OnHeapBasicBlockProfilerDataMap"),
("read_only_space", 0x05cd9): (170, "InternalClassMap"),
("read_only_space", 0x05d01): (181, "SmiPairMap"),
("read_only_space", 0x05d29): (180, "SmiBoxMap"),
("read_only_space", 0x05d51): (147, "ExportedSubClassBaseMap"),
("read_only_space", 0x05d79): (148, "ExportedSubClassMap"),
("read_only_space", 0x05da1): (68, "AbstractInternalClassSubclass1Map"),
("read_only_space", 0x05dc9): (69, "AbstractInternalClassSubclass2Map"),
("read_only_space", 0x05df1): (135, "InternalClassWithSmiElementsMap"),
("read_only_space", 0x05e19): (171, "InternalClassWithStructElementsMap"),
("read_only_space", 0x05e41): (149, "ExportedSubClass2Map"),
("read_only_space", 0x05e69): (182, "SortStateMap"),
("read_only_space", 0x05e91): (90, "AllocationSiteWithWeakNextMap"),
("read_only_space", 0x05eb9): (90, "AllocationSiteWithoutWeakNextMap"),
("read_only_space", 0x05ee1): (81, "LoadHandler1Map"),
("read_only_space", 0x05f09): (81, "LoadHandler2Map"),
("read_only_space", 0x05f31): (81, "LoadHandler3Map"),
("read_only_space", 0x05f59): (82, "StoreHandler0Map"),
("read_only_space", 0x05f81): (82, "StoreHandler1Map"),
("read_only_space", 0x05fa9): (82, "StoreHandler2Map"),
("read_only_space", 0x05fd1): (82, "StoreHandler3Map"),
("map_space", 0x02119): (1057, "ExternalMap"),
("map_space", 0x02141): (1098, "JSMessageObjectMap"),
}
# List of known V8 objects.
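# Each entry maps a (heap space name, address offset) pair to the name of a
# well-known root object in the V8 heap.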
KNOWN_OBJECTS = {
("read_only_space", 0x021b9): "EmptyWeakFixedArray",
("read_only_space", 0x021c1): "EmptyDescriptorArray",
("read_only_space", 0x021f9): "EmptyEnumCache",
("read_only_space", 0x0222d): "EmptyFixedArray",
("read_only_space", 0x02235): "NullValue",
("read_only_space", 0x0233d): "UninitializedValue",
("read_only_space", 0x023b5): "UndefinedValue",
("read_only_space", 0x023f9): "NanValue",
("read_only_space", 0x0242d): "TheHoleValue",
("read_only_space", 0x02459): "HoleNanValue",
("read_only_space", 0x0248d): "TrueValue",
("read_only_space", 0x024cd): "FalseValue",
("read_only_space", 0x024fd): "empty_string",
("read_only_space", 0x02739): "EmptyScopeInfo",
("read_only_space", 0x02771): "ArgumentsMarker",
("read_only_space", 0x027d1): "Exception",
("read_only_space", 0x0282d): "TerminationException",
("read_only_space", 0x02895): "OptimizedOut",
("read_only_space", 0x028f5): "StaleRegister",
("read_only_space", 0x0323d): "EmptyPropertyArray",
("read_only_space", 0x03245): "EmptyByteArray",
("read_only_space", 0x0324d): "EmptyObjectBoilerplateDescription",
("read_only_space", 0x03281): "EmptyArrayBoilerplateDescription",
("read_only_space", 0x0328d): "EmptyClosureFeedbackCellArray",
("read_only_space", 0x03295): "EmptySlowElementDictionary",
("read_only_space", 0x032b9): "EmptyOrderedHashMap",
("read_only_space", 0x032cd): "EmptyOrderedHashSet",
("read_only_space", 0x032e1): "EmptyFeedbackMetadata",
("read_only_space", 0x032ed): "EmptyPropertyDictionary",
("read_only_space", 0x03315): "EmptyOrderedPropertyDictionary",
("read_only_space", 0x0332d): "EmptySwissPropertyDictionary",
("read_only_space", 0x03381): "NoOpInterceptorInfo",
("read_only_space", 0x033a9): "EmptyWeakArrayList",
("read_only_space", 0x033b5): "InfinityValue",
("read_only_space", 0x033c1): "MinusZeroValue",
("read_only_space", 0x033cd): "MinusInfinityValue",
("read_only_space", 0x033d9): "SelfReferenceMarker",
("read_only_space", 0x03419): "BasicBlockCountersMarker",
("read_only_space", 0x0345d): "OffHeapTrampolineRelocationInfo",
("read_only_space", 0x03469): "TrampolineTrivialCodeDataContainer",
("read_only_space", 0x03475): "TrampolinePromiseRejectionCodeDataContainer",
("read_only_space", 0x03481): "GlobalThisBindingScopeInfo",
("read_only_space", 0x034b5): "EmptyFunctionScopeInfo",
("read_only_space", 0x034d9): "NativeScopeInfo",
("read_only_space", 0x034f1): "HashSeed",
("old_space", 0x02119): "ArgumentsIteratorAccessor",
("old_space", 0x0215d): "ArrayLengthAccessor",
("old_space", 0x021a1): "BoundFunctionLengthAccessor",
from exif_read import ExifRead
import json
import os
import urllib2
import urllib
import httplib
import datetime
import socket
import mimetypes
import random
import string
import copy
from Queue import Queue, Empty
import threading
import time
import config
import getpass
import sys
import processing
import requests
from tqdm import tqdm
from . import upload_api
from . import ipc
from .error import print_error
from .utils import force_decode
from camera_support.prepare_blackvue_videos import get_blackvue_info
from geo import get_timezone_and_utc_offset
from gpx_from_blackvue import gpx_from_blackvue, get_points_from_bv
from process_video import get_video_start_time_blackvue
from uploader_utils import set_video_as_uploaded
from utils import format_orientation
if os.getenv("AWS_S3_ENDPOINT", None) is None:
MAPILLARY_UPLOAD_URL = "https://secure-upload.mapillary.com"
else:
MAPILLARY_UPLOAD_URL = "{}/{}".format(
os.getenv("AWS_S3_ENDPOINT"), "mtf-upload-images")
MAPILLARY_DIRECT_UPLOAD_URL = "https://secure-upload.mapillary.com"
PERMISSION_HASH = "<KEY>mF<KEY>"
SIGNATURE_HASH = "Td2/WYfCc/+xWzJX7VL691StviI="
BOUNDARY_CHARS = string.digits + string.ascii_letters
NUMBER_THREADS = int(os.getenv('NUMBER_THREADS', '5'))
MAX_ATTEMPTS = int(os.getenv('MAX_ATTEMPTS', '50'))
UPLOAD_PARAMS = {"url": MAPILLARY_UPLOAD_URL, "permission": PERMISSION_HASH, # TODO: This URL is dynamic in api 2.0
"signature": SIGNATURE_HASH, "aws_key": "<KEY>"}
CLIENT_ID = os.getenv("MAPILLARY_WEB_CLIENT_ID",
"MkJKbDA0bnZuZlcxeTJHTmFqN3g1dzo1YTM0NjRkM2EyZGU5MzBh")
DRY_RUN = bool(os.getenv('DRY_RUN', False))
if os.getenv("API_PROXY_HOST", None) is None:
API_ENDPOINT = "https://a.mapillary.com"
else:
API_ENDPOINT = "http://{}".format(os.getenv("API_PROXY_HOST"))
LOGIN_URL = "{}/v2/ua/login?client_id={}".format(API_ENDPOINT, CLIENT_ID)
ORGANIZATIONS_URL = API_ENDPOINT + "/v3/users/{}/organizations?client_id={}"
USER_URL = API_ENDPOINT + "/v3/users?usernames={}&client_id={}"
ME_URL = "{}/v3/me?client_id={}".format(API_ENDPOINT, CLIENT_ID)
USER_UPLOAD_URL = API_ENDPOINT + "/v3/users/{}/upload_tokens?client_id={}"
USER_UPLOAD_SECRETS = API_ENDPOINT + "/v3/users/{}/upload_secrets?client_id={}"
UPLOAD_STATUS_PAIRS = {"upload_success": "upload_failed",
"upload_failed": "upload_success"}
GLOBAL_CONFIG_FILEPATH = os.getenv("GLOBAL_CONFIG_FILEPATH", os.path.join(os.path.expanduser('~'),
".config", "mapillary", 'configs', CLIENT_ID))
class UploadThread(threading.Thread):
def __init__(self, queue):
threading.Thread.__init__(self)
self.q = queue
self.total_task = self.q.qsize()
def run(self):
while not self.q.empty():
# fetch file from the queue and upload
try:
filepath, max_attempts, session = self.q.get(timeout=5)
            except Empty:
# If it can't get a task after 5 seconds, continue and check if
# task list is empty
continue
progress(self.total_task - self.q.qsize(), self.total_task,
'... {} images left.'.format(self.q.qsize()))
upload_file(filepath, max_attempts, session)
self.q.task_done()
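# Illustrative usage sketch (not from the original module): fill a queue with
# (filepath, max_attempts, session) tuples, start NUMBER_THREADS workers, and
# wait for the queue to drain. The session value is a placeholder here.
#
#   q = Queue()
#   for filepath in file_list:
#       q.put((filepath, MAX_ATTEMPTS, None))
#   threads = [UploadThread(q) for _ in range(NUMBER_THREADS)]
#   for t in threads:
#       t.start()
#   q.join()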
# TODO: this function has not been reviewed; it is left outside the scope of these improvements
def encode_multipart(fields, files, boundary=None):
"""
Encode dict of form fields and dict of files as multipart/form-data.
Return tuple of (body_string, headers_dict). Each value in files is a dict
with required keys 'filename' and 'content', and optional 'mimetype' (if
not specified, tries to guess mime type or uses 'application/octet-stream').
From MIT licensed recipe at
http://code.activestate.com/recipes/578668-encode-multipart-form-data-for-uploading-files-via/
"""
def escape_quote(s):
return s.replace('"', '\\"')
if boundary is None:
boundary = ''.join(random.choice(BOUNDARY_CHARS) for i in range(30))
lines = []
for name, value in fields.items():
lines.extend((
'--{0}'.format(boundary),
'Content-Disposition: form-data; name="{0}"'.format(
escape_quote(name)),
'',
str(value),
))
for name, value in files.items():
filename = value['filename']
if 'mimetype' in value:
mimetype = value['mimetype']
else:
mimetype = mimetypes.guess_type(
filename)[0] or 'application/octet-stream'
lines.extend((
'--{0}'.format(boundary),
'Content-Disposition: form-data; name="{0}"; filename="{1}"'.format(
escape_quote(name), escape_quote(filename)),
'Content-Type: {0}'.format(mimetype),
'',
value['content'],
))
lines.extend((
'--{0}--'.format(boundary),
'',
))
body = '\r\n'.join(lines)
headers = {
'Content-Type': 'multipart/form-data; boundary={0}'.format(boundary),
'Content-Length': str(len(body)),
}
return (body, headers)
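# Illustrative example of using encode_multipart (the field and file names here
# are placeholders, and image_bytes stands in for the raw file contents):
#
#   body, headers = encode_multipart(
#       {"key": "uploads/photo.jpg"},
#       {"file": {"filename": "photo.jpg", "content": image_bytes}})
#   request = urllib2.Request(MAPILLARY_UPLOAD_URL, data=body, headers=headers)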
def prompt_to_finalize(subcommand):
for i in range(3):
finalize = raw_input(
"Finalize all {} in this import? [y/n]: ".format(subcommand))
if finalize in ["y", "Y", "yes", "Yes"]:
return 1
elif finalize in ["n", "N", "no", "No"]:
return 0
else:
print('Please answer y or n. Try again.')
return 0
def flag_finalization(finalize_file_list):
for file in finalize_file_list:
finalize_flag = os.path.join(log_rootpath(file), "upload_finalized")
open(finalize_flag, 'a').close()
def get_upload_url(credentials):
'''
Returns upload URL using new upload API
'''
request_url = USER_UPLOAD_SECRETS.format(
credentials["MAPSettingsUserKey"], CLIENT_ID)
request = urllib2.Request(request_url)
request.add_header('Authorization', 'Bearer {}'.format(
credentials["user_upload_token"]))
try:
response = json.loads(urllib2.urlopen(request).read())
    except urllib2.HTTPError:
print("Error getting upload parameters, upload could not start")
sys.exit(1)
return response
def get_upload_file_list(import_path, skip_subfolders=False):
upload_file_list = []
if skip_subfolders:
upload_file_list.extend(os.path.join(os.path.abspath(import_path), file) for file in os.listdir(import_path) if file.lower().endswith(
('jpg', 'jpeg', 'tif', 'tiff', 'pgm', 'pnm', 'gif')) and preform_upload(import_path, file))
else:
for root, dir, files in os.walk(import_path):
if os.path.join(".mapillary", "logs") in root:
continue
upload_file_list.extend(os.path.join(os.path.abspath(root), file) for file in files if file.lower().endswith(
('jpg', 'jpeg', 'tif', 'tiff', 'pgm', 'pnm', 'gif')) and preform_upload(root, file))
return sorted(upload_file_list)
# get a list of video files in a directory
# TODO: create a list of supported file types instead of hard-coding these extensions
def get_video_file_list(video_file, skip_subfolders=False):
video_file_list = []
supported_files = ("mp4", "avi", "tavi", "mov", "mkv")
if skip_subfolders:
video_file_list.extend(os.path.join(os.path.abspath(video_file), file)
for file in os.listdir(video_file) if (file.lower().endswith((supported_files))))
else:
for root, dir, files in os.walk(video_file):
video_file_list.extend(os.path.join(os.path.abspath(root), file)
for file in files if (file.lower().endswith((supported_files))))
return sorted(video_file_list)
def get_total_file_list(import_path, skip_subfolders=False):
total_file_list = []
if skip_subfolders:
total_file_list.extend(os.path.join(os.path.abspath(import_path), file) for file in os.listdir(import_path) if file.lower().endswith(
('jpg', 'jpeg', 'tif', 'tiff', 'pgm', 'pnm', 'gif')))
else:
for root, dir, files in os.walk(import_path):
if os.path.join(".mapillary", "logs") in root:
continue
total_file_list.extend(os.path.join(os.path.abspath(root), file) for file in files if file.lower(
).endswith(('jpg', 'jpeg', 'tif', 'tiff', 'pgm', 'pnm', 'gif')))
return sorted(total_file_list)
def get_failed_upload_file_list(import_path, skip_subfolders=False):
failed_upload_file_list = []
if skip_subfolders:
failed_upload_file_list.extend(os.path.join(os.path.abspath(import_path), file) for file in os.listdir(import_path) if file.lower().endswith(
('jpg', 'jpeg', 'tif', 'tiff', 'pgm', 'pnm', 'gif')) and failed_upload(import_path, file))
else:
for root, dir, files in os.walk(import_path):
if os.path.join(".mapillary", "logs") in root:
continue
failed_upload_file_list.extend(os.path.join(os.path.abspath(root), file) for file in files if file.lower(
).endswith(('jpg', 'jpeg', 'tif', 'tiff', 'pgm', 'pnm', 'gif')) and failed_upload(root, file))
return sorted(failed_upload_file_list)
def get_success_upload_file_list(import_path, skip_subfolders=False):
success_upload_file_list = []
if skip_subfolders:
success_upload_file_list.extend(os.path.join(os.path.abspath(import_path), file) for file in os.listdir(import_path) if file.lower().endswith(
('jpg', 'jpeg', 'tif', 'tiff', 'pgm', 'pnm', 'gif')) and success_upload(import_path, file))
else:
for root, dir, files in os.walk(import_path):
if os.path.join(".mapillary", "logs") in root:
continue
success_upload_file_list.extend(os.path.join(os.path.abspath(root), file) for file in files if file.lower(
).endswith(('jpg', 'jpeg', 'tif', 'tiff', 'pgm', 'pnm', 'gif')) and success_upload(root, file))
return sorted(success_upload_file_list)
def success_upload(root, file):
file_path = os.path.join(root, file)
log_root = log_rootpath(file_path)
upload_success = os.path.join(log_root, "upload_success")
upload_finalization = os.path.join(log_root, "upload_finalized")
manual_upload = os.path.join(log_root, "manual_upload")
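    # An upload counts as successful if the upload_success flag file exists
    # and, for manual uploads, the upload has also been finalized.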
    success = os.path.isfile(upload_success) and (
        not os.path.isfile(manual_upload) or os.path.isfile(upload_finalization))
return success
def get_success_only_manual_upload_file_list(import_path, skip_subfolders=False):
success_only_manual_upload_file_list = []
if skip_subfolders:
success_only_manual_upload_file_list.extend(os.path.join(os.path.abspath(import_path), file) for file in os.listdir(import_path) if file.lower().endswith(
('jpg', 'jpeg', 'tif', 'tiff', 'pgm', 'pnm', 'gif')) and success_only_manual_upload(import_path, file))
else:
for root, dir, files in os.walk(import_path):
if os.path.join(".mapillary", "logs") in root:
continue
success_only_manual_upload_file_list.extend(os.path.join(os.path.abspath(root), file) for file in files if file.lower(
).endswith(('jpg', 'jpeg', 'tif', 'tiff', 'pgm', 'pnm', 'gif')) and success_only_manual_upload(root, file))
return sorted(success_only_manual_upload_file_list)
def success_only_manual_upload(root, file):
file_path = os.path.join(root, file)
log_root = log_rootpath(file_path)
upload_success = os.path.join(log_root, "upload_success")
manual_upload = os.path.join(log_root, "manual_upload")
success = os.path.isfile(upload_success) and os.path.isfile(manual_upload)
return success
def preform_upload(root, file):
file_path = os.path.join(root, file)
log_root = log_rootpath(file_path)
process_success = os.path.join(
log_root, "mapillary_image_description_success")
duplicate = os.path.join(log_root, "duplicate")
    upload_success = os.path.join(log_root, "upload_success")
    upload = not os.path.isfile(upload_success) and os.path.isfile(
        process_success) and not os.path.isfile(duplicate)
return upload
def failed_upload(root, file):
file_path = os.path.join(root, file)
log_root = log_rootpath(file_path)
process_failed = os.path.join(
log_root, "mapillary_image_description_failed")
duplicate = os.path.join(log_root, "duplicate")
upload_failed = os.path.join(log_root, "upload_failed")
failed = os.path.isfile(
upload_failed) and not os.path.isfile(process_failed) and not os.path.isfile(
duplicate)
return failed
def get_finalize_file_list(import_path, skip_subfolders=False):
finalize_file_list = []
if skip_subfolders:
finalize_file_list.extend(os.path.join(os.path.abspath(import_path), file) for file in os.listdir(import_path) if file.lower().endswith(
('jpg', 'jpeg', 'tif', 'tiff', 'pgm', 'pnm', 'gif')) and preform_finalize(import_path, file))
else:
for root, dir, files in os.walk(import_path):
if os.path.join(".mapillary", "logs") in root:
continue
finalize_file_list.extend(os.path.join(os.path.abspath(root), file) for file in files if file.lower().endswith(
('jpg', 'jpeg', 'tif', 'tiff', 'pgm', 'pnm', 'gif')) and preform_finalize(root, file))
return sorted(finalize_file_list)
def preform_finalize(root, file):
file_path = os.path.join(root, file)
log_root = log_rootpath(file_path)
    upload_success = os.path.join(log_root, "upload_success")
    upload_finalized = os.path.join(log_root, "upload_finalized")
    manual_upload = os.path.join(log_root, "manual_upload")
    finalize = os.path.isfile(upload_success) and not os.path.isfile(
        upload_finalized) and os.path.isfile(manual_upload)
return finalize
def print_summary(file_list):
    # Inform the user that the upload has finished and print a summary
    print("Done uploading {} images.".format(
        len(file_list)))  # TODO: improve the upload summary
def get_upload_token(mail, pwd):
'''
Get upload token
'''
try:
params = urllib.urlencode({"email": mail, "password": pwd})
response = urllib2.urlopen(LOGIN_URL, params)
    except Exception:
return None
resp = json.loads(response.read())
if not resp or 'token' not in resp:
return None
return resp['token']
def get_organization_key(user_key, organization_username, upload_token):
organization_key = None
call = ORGANIZATIONS_URL.format(user_key, CLIENT_ID)
req = urllib2.Request(call)
req.add_header('Authorization', 'Bearer {}'.format(upload_token))
resp = json.loads(urllib2.urlopen(req).read())
organization_usernames = []
for org in resp:
organization_usernames.append(org['name'])
if org['name'] == organization_username:
organization_key = org['key']
if not organization_key:
print("No valid organization key found for organization user name " +
organization_username)
print("Available organization user names for current user are : ")
print(organization_usernames)
sys.exit(1)
return organization_key
def validate_organization_key(user_key, organization_key, upload_token):
call = ORGANIZATIONS_URL.format(user_key, CLIENT_ID)
req = urllib2.Request(call)
req.add_header('Authorization', 'Bearer {}'.format(upload_token))
resp = json.loads(urllib2.urlopen(req).read())
for org in resp:
if org['key'] == organization_key:
return
print("Organization key does not exist.")
sys.exit(1)
def validate_organization_privacy(user_key, organization_key, private, upload_token):
call = ORGANIZATIONS_URL.format(user_key, CLIENT_ID)
req = urllib2.Request(call)
req.add_header('Authorization', 'Bearer {}'.format(upload_token))
resp = json.loads(urllib2.urlopen(req).read())
for org in resp:
if org['key'] == organization_key:
if (private and (('private_repository' not in org) or not org['private_repository'])) or (not private and (('public_repository' not in org) or not org['public_repository'])):
print(
"Organization privacy does not match provided privacy settings.")
privacy = "private" if 'private_repository' in org and org[
'private_repository'] else "public"
privacy_provided = "private" if private else "public"
print("Organization " +
org['name'] + " with key " + org['key'] + " is " + privacy + " while your import privacy | |
# mask[3] tells us whether there's a gap 2 tiles to the right - which means the next tile will be sprite 2
if previous_mid_segment == 4 and mask[3]:
return 5, None
else:
# Alternate between 3 and 4
            if previous_mid_segment is None or previous_mid_segment == 4:
sprite_x = 3
elif previous_mid_segment == 3:
sprite_x = 4
return sprite_x, sprite_x
else:
# Not a middle piece
return sprite_x, None
class Grass(Row):
def __init__(self, predecessor, index, y):
super().__init__("grass", index, y)
# In computer graphics, a mask is a series of boolean (true or false) values indicating which parts of an image
# will be transparent. Grass rows may contain hedges which block the player's movement, and we use a similar
# mechanism here. In our hedge mask, values of False mean a hedge is present, while True means there is a gap
# in the hedges. Hedges are two rows high - once hedges have been created on a row, the pattern will be
# duplicated on the next row (although the sprites will be different - e.g. there are separate sprites
# for the top-left and bottom-left corners of a hedge). Note that the upper sprites overlap with the row above.
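        # For example, a hedge_mask beginning [True, False, False, True, ...]
        # describes a gap, then two hedge tiles, then another gap, reading from
        # left to right.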
self.hedge_row_index = None # 0 or 1, or None if no hedges on this row
self.hedge_mask = None
        if not isinstance(predecessor, Grass) or predecessor.hedge_row_index is None:
# Create a brand-new set of hedges? We will only create hedges if the previous row didn't have any.
# We also only want hedges to appear on certain types of grass row, and on only a random selection
# of rows
if random() < 0.5 and index > 7 and index < 14:
self.hedge_mask = generate_hedge_mask()
self.hedge_row_index = 0
elif predecessor.hedge_row_index == 0:
self.hedge_mask = predecessor.hedge_mask
self.hedge_row_index = 1
        if self.hedge_row_index is not None:
# See comments in classify_hedge_segment for explanation of previous_mid_segment
previous_mid_segment = None
for i in range(1, 13):
sprite_x, previous_mid_segment = classify_hedge_segment(self.hedge_mask[i - 1:i + 3], previous_mid_segment)
                if sprite_x is not None:
self.children.append(Hedge(sprite_x, self.hedge_row_index, (i * 40 - 20, 0)))
def allow_movement(self, x):
# allow_movement in the base class ensures that the player can't walk off the left and right sides of the
# screen. The call to our own collide method ensures that the player can't walk through hedges. The margin of
# 8 prevents the player sprite from overlapping with the edge of a hedge.
return super().allow_movement(x) and not self.collide(x, 8)
def play_sound(self):
game.play_sound("grass", 1)
def next(self):
if self.index <= 5:
row_class, index = Grass, self.index + 8
elif self.index == 6:
row_class, index = Grass, 7
elif self.index == 7:
row_class, index = Grass, 15
elif self.index >= 8 and self.index <= 14:
row_class, index = Grass, self.index + 1
else:
row_class, index = choice((Road, Water)), 0
# Create an object of the chosen row class
return row_class(self, index, self.y - ROW_HEIGHT)
class Dirt(Row):
def __init__(self, predecessor, index, y):
super().__init__("dirt", index, y)
def play_sound(self):
game.play_sound("dirt", 1)
def next(self):
if self.index <= 5:
row_class, index = Dirt, self.index + 8
elif self.index == 6:
row_class, index = Dirt, 7
elif self.index == 7:
row_class, index = Dirt, 15
elif self.index >= 8 and self.index <= 14:
row_class, index = Dirt, self.index + 1
else:
row_class, index = choice((Road, Water)), 0
# Create an object of the chosen row class
return row_class(self, index, self.y - ROW_HEIGHT)
class Water(ActiveRow):
def __init__(self, predecessor, index, y):
# dxs contains a list of possible directions (and speeds) in which child objects (in this case, logs) on this
# row could move. We pass the lists to the constructor of the base class, which randomly chooses one of the
# directions. We want logs on alternate rows to move in opposite directions, so we take advantage of the fact
        # that in Python, multiplying a list by True or False results in either the same list or an empty list.
# So by looking at the direction of child objects on the previous row (predecessor.dx), we can decide whether
# child objects on this row should move left or right. If this is the first of a series of Water rows,
# predecessor.dx will be zero, so child objects could move in either direction.
dxs = [-2,-1]*(predecessor.dx >= 0) + [1,2]*(predecessor.dx <= 0)
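        # For example, if predecessor.dx is 1 (logs moving right), then
        # [-2,-1]*(1 >= 0) evaluates to [-2,-1] while [1,2]*(1 <= 0) evaluates
        # to [], so this row's logs can only move left.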
super().__init__(Log, dxs, "water", index, y)
def update(self):
super().update()
for log in self.children:
# Child (log) object positions are relative to the parent row. If the player exists, and the player is at the
# same Y position, and is colliding with the current log, make the log dip down into the water slightly
if game.bunner and self.y == game.bunner.y and log == self.collide(game.bunner.x, -4):
log.y = 2
else:
log.y = 0
def push(self):
# Called when the player is standing on a log on this row, so player object can be moved at the same speed and
# in the same direction as the log
return self.dx
def check_collision(self, x):
# If we're colliding with a log, that's a good thing!
# margin of -4 ensures we can't stand right on the edge of a log
if self.collide(x, -4):
return PlayerState.ALIVE, 0
else:
game.play_sound("splash")
return PlayerState.SPLASH, 0
def play_sound(self):
game.play_sound("log", 1)
def next(self):
# After 2 water rows, there's a 50-50 chance of the next row being either another water row, or a dirt row
if self.index == 7 or (self.index >= 1 and random() < 0.5):
row_class, index = Dirt, randint(4,6)
else:
row_class, index = Water, self.index + 1
# Create an object of the chosen row class
return row_class(self, index, self.y - ROW_HEIGHT)
class Road(ActiveRow):
def __init__(self, predecessor, index, y):
# Specify the possible directions and speeds from which the movement of cars on this row will be chosen
# We use Python's set data structure to specify that the car velocities on this row will be any of the numbers
# from -5 to 5, except for zero or the velocity of the cars on the previous row
dxs = list(set(range(-5, 6)) - set([0, predecessor.dx]))
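        # For example, if predecessor.dx is 3, dxs contains every value in
        # -5..5 except 0 and 3 (in arbitrary order, since sets are unordered).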
super().__init__(Car, dxs, "road", index, y)
def update(self):
super().update()
# Trigger car sound effects. The zoom effect should play when the player is on the row above or below the car,
# the honk effect should play when the player is on the same row.
for y_offset, car_sound_num in [(-ROW_HEIGHT, Car.SOUND_ZOOM), (0, Car.SOUND_HONK), (ROW_HEIGHT, Car.SOUND_ZOOM)]:
# Is the player on the appropriate row?
if game.bunner and game.bunner.y == self.y + y_offset:
for child_obj in self.children:
# The child object must be a car
if isinstance(child_obj, Car):
# The car must be within 100 pixels of the player on the x-axis, and moving towards the player
# child_obj.dx < 0 is True or False depending on whether the car is moving left or right, and
# dx < 0 is True or False depending on whether the player is to the left or right of the car.
# If the results of these two comparisons are different, the car is moving towards the player.
# Also, for the zoom sound, the car must be travelling faster than one pixel per frame
dx = child_obj.x - game.bunner.x
if abs(dx) < 100 and ((child_obj.dx < 0) != (dx < 0)) and (y_offset == 0 or abs(child_obj.dx) > 1):
child_obj.play_sound(car_sound_num)
def check_collision(self, x):
if self.collide(x):
game.play_sound("splat", 1)
return PlayerState.SPLAT, 0
else:
return PlayerState.ALIVE, 0
def play_sound(self):
game.play_sound("road", 1)
def next(self):
if self.index == 0:
row_class, index = Road, 1
elif self.index < 5:
# 80% chance of another road
r = random()
if r < 0.8:
row_class, index = Road, self.index + 1
elif r < 0.88:
row_class, index = Grass, randint(0,6)
elif r < 0.94:
row_class, index = Rail, 0
else:
row_class, index = Pavement, 0
else:
            # We've reached the maximum run of consecutive roads, so the next
            # row must be a different type
"LD B,B", 1),
0x41 : (0, [ LDrs('B', 'C'), ], [], "LD B,C", 1),
0x42 : (0, [ LDrs('B', 'D'), ], [], "LD B,D", 1),
0x43 : (0, [ LDrs('B', 'E'), ], [], "LD B,E", 1),
0x44 : (0, [ LDrs('B', 'H'), ], [], "LD B,H", 1),
0x45 : (0, [ LDrs('B', 'L'), ], [], "LD B,L", 1),
0x46 : (0, [], [ MR(indirect="HL", action=LDr("B")) ], "LD B,(HL)", 1),
0x47 : (0, [ LDrs('B', 'A'), ], [], "LD B,A", 1),
0x48 : (0, [ LDrs('C', 'B'), ], [], "LD C,B", 1),
0x49 : (0, [ LDrs('C', 'C'), ], [], "LD C,C", 1),
0x4A : (0, [ LDrs('C', 'D'), ], [], "LD C,D", 1),
0x4B : (0, [ LDrs('C', 'E'), ], [], "LD C,E", 1),
0x4C : (0, [ LDrs('C', 'H'), ], [], "LD C,H", 1),
0x4D : (0, [ LDrs('C', 'L'), ], [], "LD C,L", 1),
0x4E : (0, [], [ MR(indirect="HL", action=LDr("C")) ], "LD C,(HL)", 1),
0x4F : (0, [ LDrs('C', 'A'), ], [], "LD C,A", 1),
0x50 : (0, [ LDrs('D', 'B'), ], [], "LD D,B", 1),
0x51 : (0, [ LDrs('D', 'C'), ], [], "LD D,C", 1),
0x52 : (0, [ LDrs('D', 'D'), ], [], "LD D,D", 1),
0x53 : (0, [ LDrs('D', 'E'), ], [], "LD D,E", 1),
0x54 : (0, [ LDrs('D', 'H'), ], [], "LD D,H", 1),
0x55 : (0, [ LDrs('D', 'L'), ], [], "LD D,L", 1),
0x56 : (0, [], [ MR(indirect="HL", action=LDr("D")) ], "LD D,(HL)", 1),
0x57 : (0, [ LDrs('D', 'A'), ], [], "LD D,A", 1),
0x58 : (0, [ LDrs('E', 'B'), ], [], "LD E,B", 1),
0x59 : (0, [ LDrs('E', 'C'), ], [], "LD E,C", 1),
0x5A : (0, [ LDrs('E', 'D'), ], [], "LD E,D", 1),
0x5B : (0, [ LDrs('E', 'E'), ], [], "LD E,E", 1),
0x5C : (0, [ LDrs('E', 'H'), ], [], "LD E,H", 1),
0x5D : (0, [ LDrs('E', 'L'), ], [], "LD E,L", 1),
0x5E : (0, [], [ MR(indirect="HL", action=LDr("E")) ], "LD E,(HL)", 1),
0x5F : (0, [ LDrs('E', 'A'), ], [], "LD E,A", 1),
0x60 : (0, [ LDrs('H', 'B'), ], [], "LD H,B", 1),
0x61 : (0, [ LDrs('H', 'C'), ], [], "LD H,C", 1),
0x62 : (0, [ LDrs('H', 'D'), ], [], "LD H,D", 1),
0x63 : (0, [ LDrs('H', 'E'), ], [], "LD H,E", 1),
0x64 : (0, [ LDrs('H', 'H'), ], [], "LD H,H", 1),
0x65 : (0, [ LDrs('H', 'L'), ], [], "LD H,L", 1),
0x66 : (0, [], [ MR(indirect="HL", action=LDr("H")) ], "LD H,(HL)", 1),
0x67 : (0, [ LDrs('H', 'A'), ], [], "LD H,A", 1),
0x68 : (0, [ LDrs('L', 'B'), ], [], "LD L,B", 1),
0x69 : (0, [ LDrs('L', 'C'), ], [], "LD L,C", 1),
0x6A : (0, [ LDrs('L', 'D'), ], [], "LD L,D", 1),
0x6B : (0, [ LDrs('L', 'E'), ], [], "LD L,E", 1),
0x6C : (0, [ LDrs('L', 'H'), ], [], "LD L,H", 1),
0x6D : (0, [ LDrs('L', 'L'), ], [], "LD L,L", 1),
0x6E : (0, [], [ MR(indirect="HL", action=LDr("L")) ], "LD L,(HL)", 1),
0x6F : (0, [ LDrs('L', 'A'), ], [], "LD L,A", 1),
0x70 : (0, [], [ MW(indirect="HL", source="B") ], "LD (HL),B", 1),
0x71 : (0, [], [ MW(indirect="HL", source="C") ], "LD (HL),C", 1),
0x72 : (0, [], [ MW(indirect="HL", source="D") ], "LD (HL),D", 1),
0x73 : (0, [], [ MW(indirect="HL", source="E") ], "LD (HL),E", 1),
0x74 : (0, [], [ MW(indirect="HL", source="H") ], "LD (HL),H", 1),
0x75 : (0, [], [ MW(indirect="HL", source="L") ], "LD (HL),L", 1),
0x76 : (0, [ on_condition(lambda state : not state.cpu.int, dec("PC")) ], [], "HALT", 1),
0x77 : (0, [], [ MW(indirect="HL", source="A") ], "LD (HL),A", 1),
0x78 : (0, [ LDrs('A', 'B'), ], [], "LD A,B", 1),
0x79 : (0, [ LDrs('A', 'C'), ], [], "LD A,C", 1),
0x7A : (0, [ LDrs('A', 'D'), ], [], "LD A,D", 1),
0x7B : (0, [ LDrs('A', 'E'), ], [], "LD A,E", 1),
0x7C : (0, [ LDrs('A', 'H'), ], [], "LD A,H", 1),
0x7D : (0, [ LDrs('A', 'L'), ], [], "LD A,L", 1),
    0x7E : (0, [], [ MR(indirect="HL", action=LDr("A")) ], "LD A,(HL)", 1),
0x7F : (0, [ LDrs('A', 'A'), ], [], "LD A,A", 1),
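    # In the arithmetic entries below, the half-carry flag H is forced
    # explicitly: for ADD/ADC it is set when the low nibbles (plus the carry,
    # for ADC) sum past 0xF, and for SUB when the low-nibble difference borrows
    # below 0. The remaining flags are derived by set_flags from the full
    # 8-bit result.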
0x80 : (0, [ force_flag('H', lambda state : 1 if (((state.cpu.reg.A)&0xF)+((state.cpu.reg.B)&0xF) > 0xF) else 0),
set_flags("SZ5-3V0C", value=lambda state : state.cpu.reg.A + state.cpu.reg.B, key="value"),
LDr('A') ], [], "ADD B", 1),
0x81 : (0, [ force_flag('H', lambda state : 1 if (((state.cpu.reg.A)&0xF)+((state.cpu.reg.C)&0xF) > 0xF) else 0),
set_flags("SZ5H3V0C", value=lambda state : state.cpu.reg.A + state.cpu.reg.C, key="value"),
LDr('A') ], [], "ADD C", 1),
0x82 : (0, [ force_flag('H', lambda state : 1 if (((state.cpu.reg.A)&0xF)+((state.cpu.reg.D)&0xF) > 0xF) else 0),
set_flags("SZ5H3V0C", value=lambda state : state.cpu.reg.A + state.cpu.reg.D, key="value"),
LDr('A') ], [], "ADD D", 1),
0x83 : (0, [ force_flag('H', lambda state : 1 if (((state.cpu.reg.A)&0xF)+((state.cpu.reg.E)&0xF) > 0xF) else 0),
set_flags("SZ5H3V0C", value=lambda state : state.cpu.reg.A + state.cpu.reg.E, key="value"),
LDr('A') ], [], "ADD E", 1),
0x84 : (0, [ force_flag('H', lambda state : 1 if (((state.cpu.reg.A)&0xF)+((state.cpu.reg.H)&0xF) > 0xF) else 0),
set_flags("SZ5H3V0C", value=lambda state : state.cpu.reg.A + state.cpu.reg.H, key="value"),
LDr('A') ], [], "ADD H", 1),
0x85 : (0, [ force_flag('H', lambda state : 1 if (((state.cpu.reg.A)&0xF)+((state.cpu.reg.L)&0xF) > 0xF) else 0),
set_flags("SZ5H3V0C", value=lambda state : state.cpu.reg.A + state.cpu.reg.L, key="value"),
LDr('A') ], [], "ADD L", 1),
0x86 : (0, [], [ MR(indirect="HL",
action=do_each(
force_flag('H', lambda state,v : 1 if (((state.cpu.reg.A)&0xF)+(v&0xF) > 0xF) else 0),
set_flags("SZ5H3V0C",
value=lambda state, v : state.cpu.reg.A + v,
dest="A"))) ], "ADD (HL)", 1),
0x87 : (0, [ force_flag('H', lambda state : 1 if (((state.cpu.reg.A)&0xF)+((state.cpu.reg.A)&0xF) > 0xF) else 0),
set_flags("SZ5H3V0C", value=lambda state : state.cpu.reg.A + state.cpu.reg.A, key="value"),
LDr('A') ], [], "ADD A", 1),
0x88 : (0, [ force_flag('H', lambda state : 1 if (((state.cpu.reg.A)&0xF)+((state.cpu.reg.B)&0xF)+state.cpu.reg.getflag('C') > 0xF) else 0),
set_flags("SZ5H3V0C", value=lambda state : state.cpu.reg.A + state.cpu.reg.B + state.cpu.reg.getflag('C'), key="value"),
LDr('A') ], [], "ADC B", 1),
0x89 : (0, [ force_flag('H', lambda state : 1 if (((state.cpu.reg.A)&0xF)+((state.cpu.reg.C)&0xF)+state.cpu.reg.getflag('C') > 0xF) else 0),
set_flags("SZ5H3V0C", value=lambda state : state.cpu.reg.A + state.cpu.reg.C + state.cpu.reg.getflag('C'), key="value"),
LDr('A') ], [], "ADC C", 1),
0x8A : (0, [ force_flag('H', lambda state : 1 if (((state.cpu.reg.A)&0xF)+((state.cpu.reg.D)&0xF)+state.cpu.reg.getflag('C') > 0xF) else 0),
set_flags("SZ5H3V0C", value=lambda state : state.cpu.reg.A + state.cpu.reg.D + state.cpu.reg.getflag('C'), key="value"),
LDr('A') ], [], "ADC D", 1),
0x8B : (0, [ force_flag('H', lambda state : 1 if (((state.cpu.reg.A)&0xF)+((state.cpu.reg.E)&0xF)+state.cpu.reg.getflag('C') > 0xF) else 0),
set_flags("SZ5H3V0C", value=lambda state : state.cpu.reg.A + state.cpu.reg.E + state.cpu.reg.getflag('C'), key="value"),
LDr('A') ], [], "ADC E", 1),
0x8C : (0, [ force_flag('H', lambda state : 1 if (((state.cpu.reg.A)&0xF)+((state.cpu.reg.H)&0xF)+state.cpu.reg.getflag('C') > 0xF) else 0),
set_flags("SZ5H3V0C", value=lambda state : state.cpu.reg.A + state.cpu.reg.H + state.cpu.reg.getflag('C'), key="value"),
LDr('A') ], [], "ADC H", 1),
0x8D : (0, [ force_flag('H', lambda state : 1 if (((state.cpu.reg.A)&0xF)+((state.cpu.reg.L)&0xF)+state.cpu.reg.getflag('C') > 0xF) else 0),
set_flags("SZ5H3V0C", value=lambda state : state.cpu.reg.A + state.cpu.reg.L + state.cpu.reg.getflag('C'), key="value"),
LDr('A') ], [], "ADC L", 1),
0x8E : (0, [], [ MR(indirect="HL",
action=do_each(
force_flag('H', lambda state,v : 1 if (((state.cpu.reg.A)&0xF)+(v&0xF)+state.cpu.reg.getflag('C') > 0xF) else 0),
set_flags("SZ5H3V0C",
value=lambda state, v : state.cpu.reg.A + v + state.cpu.reg.getflag('C'),
dest="A"))) ], "ADC (HL)", 1),
0x8F : (0, [ force_flag('H', lambda state : 1 if (((state.cpu.reg.A)&0xF)+((state.cpu.reg.A)&0xF)+state.cpu.reg.getflag('C') > 0xF) else 0),
set_flags("SZ5H3V0C", value=lambda state : state.cpu.reg.A + state.cpu.reg.A + state.cpu.reg.getflag('C'), key="value"),
LDr('A') ], [], "ADC A", 1),
0x90 : (0, [ force_flag('H', lambda state : 1 if (((state.cpu.reg.A)&0xF)-((state.cpu.reg.B)&0xF) < 0x0) else 0),
set_flags("SZ5H3V1C", value=lambda state : state.cpu.reg.A - state.cpu.reg.B, key="value"),
LDr('A') ], [], "SUB B", 1),
0x91 : (0, [ force_flag('H', lambda state : 1 if (((state.cpu.reg.A)&0xF)-((state.cpu.reg.C)&0xF) < 0x0) else 0),
set_flags("SZ5H3V1C", value=lambda state : state.cpu.reg.A - state.cpu.reg.C, key="value"),
LDr('A') ], [], "SUB C", 1),
0x92 : (0, [ force_flag('H', lambda state : 1 if (((state.cpu.reg.A)&0xF)-((state.cpu.reg.D)&0xF) < 0x0) else 0),
set_flags("SZ5H3V1C", value=lambda state : state.cpu.reg.A - state.cpu.reg.D, key="value"),
LDr('A') ], [], "SUB D", 1),
    0x93 : (0, [ force_flag('H', lambda state : 1 if (((state.cpu.reg.A)&0xF)-((state.cpu.reg.E)&0xF) < 0x0) else 0),
                 set_flags("SZ5H3V1C", value=lambda state : state.cpu.reg.A - state.cpu.reg.E, key="value"),
                 LDr('A') ], [], "SUB E", 1),
# statsmodels/gam/tests/results/results_mpg_bs.py
import numpy as np
class Bunch(dict):
def __init__(self, **kw):
dict.__init__(self, kw)
self.__dict__ = self
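# Bunch provides attribute-style access to dict entries: after
# b = Bunch(a=1), both b.a and b['a'] return 1.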
mpg_bs = Bunch()
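# The attributes below are reference results for a GAM of the auto-mpg data
# with a B-spline smooth in weight (apparently exported from an R mgcv fit),
# used as regression-test fixtures.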
mpg_bs.smooth0 = Bunch()
mpg_bs.smooth0.term = 'weight'
mpg_bs.smooth0.bs_dim = 12
mpg_bs.smooth0.dim = 1
mpg_bs.smooth0.p_order = np.array([
3, 2
])
mpg_bs.smooth0.by = 'NA'
mpg_bs.smooth0.label = 's(weight)'
mpg_bs.smooth0.sp = -1
mpg_bs.smooth0.m = np.array([
3, 2
])
mpg_bs.smooth0.knots = np.array([
1488, 1488, 1488, 1488, 1953.22222222, 2118.77777778, 2275,
2383.88888889, 2515.55555556, 2757.33333333, 3016.44444444,
3208.11111111, 4066, 4066, 4066, 4066
])
mpg_bs.smooth0.rank = 10
mpg_bs.smooth0.null_space_dim = 1
mpg_bs.smooth0.df = 11
mpg_bs.smooth0.S_scale = 2.44395544177397e-06
mpg_bs.smooth0.vn = 'weight'
mpg_bs.smooth0.first_para = 5
mpg_bs.smooth0.last_para = 15
mpg_bs.smooth0.S = np.array([
0.130569121544375, -0.0191732921244136, 0.0202548028417763,
0.0393112950002946, 0.032061392663319, 0.0241651485426007,
0.0238771778381105, 0.0389406835250852, 0.0171569521441248,
0.00680570834700402, 0.00541920498010491, -0.0191732921244136,
0.0631473273216884, -0.0592997810399249, 0.0178641542128365,
0.00461438941034586, 0.00793912465273829, 0.00408821971418319,
-0.000681869049168392, 0.00213834646721613, 0.000935846814259467,
0.000163463753965317, 0.0202548028417763, -0.0592997810399249,
0.201022514824508, -0.186241322892132, 0.00284878067469136,
0.0287314367063779, 0.00263260805442764, -0.00708846456492193,
0.00065385484055723, 0.000395067932117418, -0.000586348849229599,
0.0393112950002947, 0.0178641542128365, -0.186241322892132,
0.479322071007576, -0.286380917395105, 0.0563704638942206,
0.0348887744557148, 0.0255203052273876, 0.0138979985789615,
0.00560832050247454, 0.00383270204450356, 0.032061392663319,
0.00461438941034587, 0.00284878067469137, -0.286380917395105,
0.414581586658592, -0.181289913269406, -0.00412646444222532,
-0.00354719180127285, -0.00206283566879658, -0.000685858743449883,
-0.00142523589453221, 0.0241651485426007, 0.00793912465273829,
0.0287314367063779, 0.0563704638942206, -0.181289913269406,
0.213321319885371, -0.0483506678191679, 0.0110304252072989,
0.0122299348640366, 0.00357726593043956, 0.00234383478372281,
0.0238771778381105, 0.00408821971418319, 0.00263260805442764,
0.0348887744557148, -0.00412646444222533, -0.0483506678191679,
0.0999220996092033, -0.0356092452156359, 0.00921547832469341,
0.00389860712399989, 0.00147745030481206, 0.0389406835250852,
-0.000681869049168392, -0.00708846456492192, 0.0255203052273876,
-0.00354719180127286, 0.0110304252072989, -0.0356092452156359,
0.0574427000604467, -0.0102653399490586, -0.000311188772793241,
0.00285258581655637, 0.0171569521441248, 0.00213834646721613,
0.000653854840557233, 0.0138979985789615, -0.00206283566879658,
0.0122299348640366, 0.00921547832469341, -0.0102653399490586,
0.0189789273643945, -0.0062453384832703, 0.00255839876648785,
0.00680570834700402, 0.000935846814259467, 0.000395067932117419,
0.00560832050247454, -0.000685858743449884, 0.00357726593043956,
0.00389860712399989, -0.000311188772793241, -0.0062453384832703,
0.0211496391608495, -0.0111654555301857, 0.00541920498010491,
0.000163463753965317, -0.000586348849229599, 0.00383270204450356,
-0.00142523589453221, 0.00234383478372281, 0.00147745030481206,
0.00285258581655637, 0.00255839876648785, -0.0111654555301857,
0.00794139033444708
]).reshape(11, 11, order='F')
mpg_bs.coefficients = np.array([
29.6272774569595, -6.21365498504518, 1.43986598470837, 1.01128095138012,
20.1053719083286, -1.26007449980292, -5.26871981200625,
-4.544036357677, -7.60063071956733, -5.01036711884368,
-6.96226144900638, -9.06722069409647, -8.81829781369916,
-7.9145836553663, -6.28068941724657, 5.09307848346347,
1.90848821039499, -0.646225516186639, -1.50240395085899,
-4.19244286007642, -5.72993924243941, -6.83323296859843,
-5.77088950575513, -4.29112523442438
])
mpg_bs.fitted_values = np.array([
21.9396113442978, 21.9396113442978, 18.7594768791121, 23.978775547494,
19.6605284085114, 22.5415570425591, 21.4364029092864, 21.5001397125411,
19.345544773382, 17.3284985371825, 23.0829354555269, 23.0829354555269,
20.5257074462847, 20.350418275392, 20.0920273672739, 16.3475493433762,
15.2683918200041, 14.6016645854518, 48.1414677548395, 34.6770647570973,
33.3220963193152, 34.9078179723597, 34.9078179723597, 25.7413956426768,
31.8998747894453, 31.4406608644666, 31.4406608644666, 25.4076816004152,
24.637322541799, 19.5423694752458, 43.7120662014648, 36.1794319009933,
37.8600814894106, 31.3973345386248, 30.9527603762328, 29.8740452948944,
29.6825700516209, 26.8853631419929, 26.3600803699054, 26.1773024038522,
25.3856227340742, 23.3020193188726, 24.7100852851954, 26.4172597325081,
34.6770647570973, 33.3220963193152, 23.0296498930938, 14.7476201253168,
14.7476201253168, 14.0557212409125, 34.34782143196, 33.965299405798,
33.7800699350403, 32.4689563918131, 32.329358068477, 23.1906404151376,
23.1906404151376, 23.1512875039068, 20.6941238423555, 25.5363151316018,
25.3886549365523, 25.5363151316018, 25.3886549365523, 34.4607486893619,
25.3326646650999, 20.7768158749875, 31.8441811103844, 23.0217104934054,
22.4301358955383, 23.1106217353394, 22.4200843547568, 14.7491664060278,
14.8191667947128, 14.012803756677, 13.9581303982076, 18.1760880728356,
33.3190101477328, 32.4975851853098, 31.1829311245072, 25.6555703896226,
22.6046524037776, 25.6230700315092, 19.5479517982215, 19.6165588348237,
19.6186479966108, 25.1994604891903, 24.8987688972877, 22.3628638356355,
22.3628638356355, 34.2300125601631, 39.2885271153262, 33.1621106074804,
32.5172901213345, 30.742747639841, 32.1452590079292, 30.6934785442198,
31.6517339944659, 30.5907093672281, 30.9641455088909, 24.6266167464042,
24.9022880874316, 18.78153275305, 17.1521355867311, 19.0141306741668,
18.231718904575, 16.8216593127165, 17.7202420891745, 22.0561680452922,
27.2153336488053, 20.5096211586017, 25.567299743712, 21.9831410020533,
26.7629956266796, 20.2961667790902, 25.2853678595518, 21.7669720922915,
26.7629956266796, 18.4910047689371, 33.3190101477328, 25.7413956426768,
31.8998747894453, 31.4406608644666, 29.7789375739066, 24.637322541799,
19.1142432042937, 19.2193336362723, 17.6365280620964, 17.6365280620964,
17.579806089837, 15.8168257322192, 21.8960566915, 21.7074289198647,
21.6525958338155, 21.4785952775663, 18.9756522798894, 18.9897722666541,
30.4602645168316, 29.3805961723331, 27.2238321670602, 27.9694092954432,
27.7274373142384, 24.7585135497211, 24.3629123288499, 21.0288767895242,
26.8745904429808, 24.038604652806, 24.1756501080763, 20.4312780252146,
32.4709598995115, 31.6700176166953, 31.9735144592214, 30.0023513412019,
28.4464554097632, 25.3419682258173, 30.061904278659, 29.8978667285749,
37.2387365989938, 37.2387365989938, 29.9813024611395, 29.8303048561215,
29.7392162621016, 29.1611058213974, 28.9573381148783, 23.6489155519235,
23.2515846177326, 21.6472193877583, 21.6596390100383, 21.6094248571684,
20.9701064457552, 20.8066296061343, 20.6663348110776, 25.161054576378,
33.0089371917933, 24.3712852430277, 24.3712852430277, 24.2659916432858,
18.5704777306123, 18.4763039586984, 17.9248313386425, 17.7576418795043,
38.0290768582748, 27.2115396047694, 37.9994882599177, 27.1914558802737,
26.6487573286281, 34.7866136817861, 24.6246815506878, 26.2295193582235,
26.4988154777346, 21.8800181352396, 33.5183981701922, 24.5373651901876,
20.8017272859603, 20.6222748754003, 20.8111189208126, 20.5838532565161,
18.3170655814092, 17.5023519244001, 20.8093138226564, 18.3617172827568,
19.5919656568361, 26.0100107405061, 20.4732985733418
])
mpg_bs.linear_predictors = mpg_bs.fitted_values
mpg_bs.deviance = 871.169775911354
mpg_bs.null_deviance = 8721.1724137931
mpg_bs.iter = 1
mpg_bs.weights = np.array([
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
])
mpg_bs.prior_weights = np.array([
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
])
mpg_bs.df_null = 202
mpg_bs.y = np.array([
21, 21, 19, 24, 18, 19, 19, 19, 17, 16, 23, 23, 21, 21, 20, 16, 16, 15,
47, 38, 38, 37, 31, 24, 31, 31, 31, 24, 24, 19, 49, 31, 38, 30, 30, 30,
30, 27, 27, 27, 27, 24, 25, 24, 38, 38, 24, 15, 15, 13, 30, 31, 31, 31,
31, 17, 17, 17, 16, 26, 26, 26, 26, 36, 26, 19, 31, 22, 22, 22, 22, 16,
16, 14, 14, 19, 37, 31, 31, 24, 23, 25, 19, 19, 19, 25, 25, 23, 23, 31,
45, 31, 31, 31, 31, 31, 31, 31, 31, 27, 27, 17, 17, 19, 19, 17, 19, 19,
28, 19, 25, 19, 28, 19, 25, 19, 28, 18, 37, 24, 31, 31, 31, 24, 19, 19,
17, 17, 17, 17, 21, 21, 21, 21, 19, 19, 31, 26, 26, 32, 28, 26, 24, 24,
28, 25, 23, 23, 35, 31, 31, 31, 27, 27, 30, 30, 34, 38, 38, 28, 28, 29,
29, 26, 26, 24, 24, 24, 24, 24, 24, 29, 30, 27, 27, 27, 20, 19, 20, 19,
37, 27, 37, 27, 27, 37, 26, 24, 24, 19, 33, 25, 23, 23, 24, 24, 17, 17,
23, 19, 18, 26, 19
])
mpg_bs.residuals = mpg_bs.y - mpg_bs.fitted_values
mpg_bs.sig2 = 4.70648213227865
mpg_bs.edf_all = np.array([
0.999999999999898, 1.00000000000005, 1.00000000000003, 1.00000000000002,
0.715182726327133, 0.945523457125076, 0.933952133938362,
0.910787981046657, 0.918439669310707, 0.936922915439925,
0.967620005020078, 0.978502846618072, 0.979593620228742,
0.973114724412765, 0.993928896515985, 0.206529848573726,
0.134722764346217, 0.130032558435079, 0.214058208629181,
0.428628830751172, 0.480753053928721, 0.728730191076721,
0.464984912297227, 0.858004843431828
])
mpg_bs.edf1 = np.array([
0.999999999999765, 1.00000000000012, 1.00000000000008, 1.00000000000005,
0.895328939039617, 0.985281018312391, 0.980485303917254,
0.985115307594987, 0.979973329752608, 0.991029113855046,
0.996293301947028, 0.996456521529546, 0.997881056094132,
0.998369079216076, 0.999638164901435, 0.263857404503542,
0.167489975859197, 0.241183045516588, 0.33714799512247,
0.51312433111897, 0.537892914498989, 0.811390270624644,
0.600430783273412, 0.922162336898961
])
mpg_bs.hat = np.array([
0.078074509536494, 0.078074509536494, 0.065030323239247,
0.0401345230539626, 0.168187944335173, 0.0664399958506056,
0.0736573430018038, 0.0780134136835909, 0.070154483540544,
0.180767093886445, 0.0664593954539867, 0.0664593954539867,
0.0594553974001933, 0.0675038468082751, 0.0536944450768444,
0.0970468595627894, 0.125358300385429, 0.125631704260218,
0.944302622125637, 0.0519142263263012, 0.0379831848669348,
0.0493844279965059, 0.0493844279965059, 0.0949083347321329,
0.0309819618026538, 0.029589102491272, 0.029589102491272,
0.0670447385890157, 0.0842123937662785, 0.0687733897932081,
0.422927507867707, 0.1328199698695, 0.096736475340286,
0.0419915413707043, 0.0396238084435642, 0.0346918436236485,
0.0363065585955308, 0.0491064279890273, 0.0441863897171056,
0.0388360718854439, 0.0414457600258062, 0.0451305758954266,
0.0505018876352514, 0.0744589274009074, 0.0519142263263012,
0.0379831848669348, 0.101629248471346, 0.436525380451652,
0.436525380451652, 0.410550032148353, 0.0425422548236529,
0.039274955616929, 0.0380389889031733, 0.0330421266803994,
0.0325988284653424, 0.066244431441902, 0.066244431441902,
0.0666221560337819, 0.0952529331187707, 0.046598430925228,
0.0484713995290722, 0.046598430925228, 0.0484713995290722,
0.11544865825422, 0.0480259855055657, 0.0566600271329405,
0.137824712103277, 0.112620492286817, 0.182195648532627,
0.11200306230358, 0.188335501134619, 0.169514959934583,
0.158055134563875, 0.160687809078496, 0.168998857825895,
0.0681007619372312, 0.0357408884807467, 0.0331290501952606,
0.0300353569162307, 0.0866855568546318, 0.0663539796978426,
0.0330200310685004, 0.0646249177791591, 0.0586792000782014,
0.0592011499253728, 0.0382030583658832, 0.0445521201486119,
0.067008407725231, 0.067008407725231, 0.0432898262619123,
0.12461566211325, 0.0358742808101827, 0.03362714942096,
0.0325446423810936, 0.0323846714084075, 0.0335509666038901,
0.0303353227561161, 0.0362607404205336, 0.0298241376272862,
0.0362837776795306, 0.0432688893651977, 0.0709252762463711,
0.147397446518277, 0.0723900341977973, 0.0517894283747262,
0.106665126964701, 0.0503219409131233, 0.0834357951937577,
0.100818501015755, 0.105292582127573, 0.104194456647628,
0.0762380011365815, 0.105895524421508, 0.131464943163143,
0.105727045973233, 0.0727635750299144, 0.105895524421508,
0.0437337708748914, 0.0357408884807467, 0.0949083347321329,
0.0309819618026538, 0.029589102491272, 0.0529778772840967,
0.0842123937662785, 0.0607153181824941, 0.0672240685836559,
0.144807911379215, 0.144807911379215, 0.141129878619691,
0.688420146600013, 0.0609351365077056, 0.0626297831888101,
0.0648027622574709, 0.0759997034218865, 0.0782103847541935,
0.0707477713155958, 0.0410921970518355, 0.0589697594313821,
0.152256577677227, 0.056765876958851, 0.0440206300688631,
0.0331810206918762, 0.145634415508243, 0.171438771769591,
0.045261532361592, 0.041840944187868, 0.143150260376584,
0.156242430028033, 0.0410741399980494, 0.0522187899853102,
0.0442706334210004, 0.0971923907649902, 0.165585071470526,
0.238054218005531, 0.0532738449881275, 0.0604428830039113,
0.111392476904907, 0.111392476904907, 0.057580243342699,
class FilesystemDetailsListResult(Type):
_toSchema = {'error': 'error', 'result': 'result'}
_toPy = {'error': 'error', 'result': 'result'}
def __init__(self, error=None, result=None, **unknown_fields):
'''
error : Error
result : typing.Sequence[~FilesystemDetails]
'''
error_ = Error.from_json(error) if error else None
result_ = [FilesystemDetails.from_json(o) for o in result or []]
# Validate arguments against known Juju API types.
if error_ is not None and not isinstance(error_, (dict, Error)):
raise Exception("Expected error_ to be a Error, received: {}".format(type(error_)))
if result_ is not None and not isinstance(result_, (bytes, str, list)):
raise Exception("Expected result_ to be a Sequence, received: {}".format(type(result_)))
self.error = error_
self.result = result_
self.unknown_fields = unknown_fields
class FilesystemDetailsListResults(Type):
_toSchema = {'results': 'results'}
_toPy = {'results': 'results'}
def __init__(self, results=None, **unknown_fields):
'''
results : typing.Sequence[~FilesystemDetailsListResult]
'''
results_ = [FilesystemDetailsListResult.from_json(o) for o in results or []]
# Validate arguments against known Juju API types.
if results_ is not None and not isinstance(results_, (bytes, str, list)):
raise Exception("Expected results_ to be a Sequence, received: {}".format(type(results_)))
self.results = results_
self.unknown_fields = unknown_fields
class FilesystemFilter(Type):
_toSchema = {'machines': 'machines'}
_toPy = {'machines': 'machines'}
def __init__(self, machines=None, **unknown_fields):
'''
machines : typing.Sequence[str]
'''
machines_ = machines
# Validate arguments against known Juju API types.
if machines_ is not None and not isinstance(machines_, (bytes, str, list)):
raise Exception("Expected machines_ to be a Sequence, received: {}".format(type(machines_)))
self.machines = machines_
self.unknown_fields = unknown_fields
class FilesystemFilters(Type):
_toSchema = {'filters': 'filters'}
_toPy = {'filters': 'filters'}
def __init__(self, filters=None, **unknown_fields):
'''
filters : typing.Sequence[~FilesystemFilter]
'''
filters_ = [FilesystemFilter.from_json(o) for o in filters or []]
# Validate arguments against known Juju API types.
if filters_ is not None and not isinstance(filters_, (bytes, str, list)):
raise Exception("Expected filters_ to be a Sequence, received: {}".format(type(filters_)))
self.filters = filters_
self.unknown_fields = unknown_fields
class FilesystemInfo(Type):
_toSchema = {'filesystem_id': 'filesystem-id', 'pool': 'pool', 'size': 'size'}
_toPy = {'filesystem-id': 'filesystem_id', 'pool': 'pool', 'size': 'size'}
def __init__(self, filesystem_id=None, pool=None, size=None, **unknown_fields):
'''
filesystem_id : str
pool : str
size : int
'''
filesystem_id_ = filesystem_id
pool_ = pool
size_ = size
# Validate arguments against known Juju API types.
if filesystem_id_ is not None and not isinstance(filesystem_id_, (bytes, str)):
raise Exception("Expected filesystem_id_ to be a str, received: {}".format(type(filesystem_id_)))
if pool_ is not None and not isinstance(pool_, (bytes, str)):
raise Exception("Expected pool_ to be a str, received: {}".format(type(pool_)))
if size_ is not None and not isinstance(size_, int):
raise Exception("Expected size_ to be a int, received: {}".format(type(size_)))
self.filesystem_id = filesystem_id_
self.pool = pool_
self.size = size_
self.unknown_fields = unknown_fields
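# A minimal usage sketch for the generated types above (hypothetical payload
# values; assumes the surrounding juju.client facade supplies Type.from_json,
# as used throughout this module):
#
#   info = FilesystemInfo.from_json({'filesystem-id': 'fs-0', 'pool': 'rootfs', 'size': 1024})
#   assert info.filesystem_id == 'fs-0' and info.size == 1024
#
# The _toPy map is what translates the wire name 'filesystem-id' into the
# Python attribute name filesystem_id.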
class FilesystemParams(Type):
_toSchema = {'attachment': 'attachment', 'attributes': 'attributes', 'filesystem_tag': 'filesystem-tag', 'provider': 'provider', 'size': 'size', 'tags': 'tags', 'volume_tag': 'volume-tag'}
_toPy = {'attachment': 'attachment', 'attributes': 'attributes', 'filesystem-tag': 'filesystem_tag', 'provider': 'provider', 'size': 'size', 'tags': 'tags', 'volume-tag': 'volume_tag'}
def __init__(self, attachment=None, attributes=None, filesystem_tag=None, provider=None, size=None, tags=None, volume_tag=None, **unknown_fields):
'''
attachment : FilesystemAttachmentParams
attributes : typing.Mapping[str, typing.Any]
filesystem_tag : str
provider : str
size : int
tags : typing.Mapping[str, str]
volume_tag : str
'''
attachment_ = FilesystemAttachmentParams.from_json(attachment) if attachment else None
attributes_ = attributes
filesystem_tag_ = filesystem_tag
provider_ = provider
size_ = size
tags_ = tags
volume_tag_ = volume_tag
# Validate arguments against known Juju API types.
if attachment_ is not None and not isinstance(attachment_, (dict, FilesystemAttachmentParams)):
raise Exception("Expected attachment_ to be a FilesystemAttachmentParams, received: {}".format(type(attachment_)))
if attributes_ is not None and not isinstance(attributes_, dict):
raise Exception("Expected attributes_ to be a Mapping, received: {}".format(type(attributes_)))
if filesystem_tag_ is not None and not isinstance(filesystem_tag_, (bytes, str)):
raise Exception("Expected filesystem_tag_ to be a str, received: {}".format(type(filesystem_tag_)))
if provider_ is not None and not isinstance(provider_, (bytes, str)):
raise Exception("Expected provider_ to be a str, received: {}".format(type(provider_)))
if size_ is not None and not isinstance(size_, int):
raise Exception("Expected size_ to be a int, received: {}".format(type(size_)))
if tags_ is not None and not isinstance(tags_, dict):
raise Exception("Expected tags_ to be a Mapping, received: {}".format(type(tags_)))
if volume_tag_ is not None and not isinstance(volume_tag_, (bytes, str)):
raise Exception("Expected volume_tag_ to be a str, received: {}".format(type(volume_tag_)))
self.attachment = attachment_
self.attributes = attributes_
self.filesystem_tag = filesystem_tag_
self.provider = provider_
self.size = size_
self.tags = tags_
self.volume_tag = volume_tag_
self.unknown_fields = unknown_fields
class FilesystemParamsResult(Type):
_toSchema = {'error': 'error', 'result': 'result'}
_toPy = {'error': 'error', 'result': 'result'}
def __init__(self, error=None, result=None, **unknown_fields):
'''
error : Error
result : FilesystemParams
'''
error_ = Error.from_json(error) if error else None
result_ = FilesystemParams.from_json(result) if result else None
# Validate arguments against known Juju API types.
if error_ is not None and not isinstance(error_, (dict, Error)):
raise Exception("Expected error_ to be a Error, received: {}".format(type(error_)))
if result_ is not None and not isinstance(result_, (dict, FilesystemParams)):
raise Exception("Expected result_ to be a FilesystemParams, received: {}".format(type(result_)))
self.error = error_
self.result = result_
self.unknown_fields = unknown_fields
class FilesystemParamsResults(Type):
_toSchema = {'results': 'results'}
_toPy = {'results': 'results'}
def __init__(self, results=None, **unknown_fields):
'''
results : typing.Sequence[~FilesystemParamsResult]
'''
results_ = [FilesystemParamsResult.from_json(o) for o in results or []]
# Validate arguments against known Juju API types.
if results_ is not None and not isinstance(results_, (bytes, str, list)):
raise Exception("Expected results_ to be a Sequence, received: {}".format(type(results_)))
self.results = results_
self.unknown_fields = unknown_fields
class FilesystemResult(Type):
_toSchema = {'error': 'error', 'result': 'result'}
_toPy = {'error': 'error', 'result': 'result'}
def __init__(self, error=None, result=None, **unknown_fields):
'''
error : Error
result : Filesystem
'''
error_ = Error.from_json(error) if error else None
result_ = Filesystem.from_json(result) if result else None
# Validate arguments against known Juju API types.
if error_ is not None and not isinstance(error_, (dict, Error)):
raise Exception("Expected error_ to be a Error, received: {}".format(type(error_)))
if result_ is not None and not isinstance(result_, (dict, Filesystem)):
raise Exception("Expected result_ to be a Filesystem, received: {}".format(type(result_)))
self.error = error_
self.result = result_
self.unknown_fields = unknown_fields
class FilesystemResults(Type):
_toSchema = {'results': 'results'}
_toPy = {'results': 'results'}
def __init__(self, results=None, **unknown_fields):
'''
results : typing.Sequence[~FilesystemResult]
'''
results_ = [FilesystemResult.from_json(o) for o in results or []]
# Validate arguments against known Juju API types.
if results_ is not None and not isinstance(results_, (bytes, str, list)):
raise Exception("Expected results_ to be a Sequence, received: {}".format(type(results_)))
self.results = results_
self.unknown_fields = unknown_fields
class Filesystems(Type):
_toSchema = {'filesystems': 'filesystems'}
_toPy = {'filesystems': 'filesystems'}
def __init__(self, filesystems=None, **unknown_fields):
'''
filesystems : typing.Sequence[~Filesystem]
'''
filesystems_ = [Filesystem.from_json(o) for o in filesystems or []]
# Validate arguments against known Juju API types.
if filesystems_ is not None and not isinstance(filesystems_, (bytes, str, list)):
raise Exception("Expected filesystems_ to be a Sequence, received: {}".format(type(filesystems_)))
self.filesystems = filesystems_
self.unknown_fields = unknown_fields
class FindActionsByNames(Type):
_toSchema = {'names': 'names'}
_toPy = {'names': 'names'}
def __init__(self, names=None, **unknown_fields):
'''
names : typing.Sequence[str]
'''
names_ = names
# Validate arguments against known Juju API types.
if names_ is not None and not isinstance(names_, (bytes, str, list)):
raise Exception("Expected names_ to be a Sequence, received: {}".format(type(names_)))
self.names = names_
self.unknown_fields = unknown_fields
class FindTags(Type):
_toSchema = {'prefixes': 'prefixes'}
_toPy = {'prefixes': 'prefixes'}
def __init__(self, prefixes=None, **unknown_fields):
'''
prefixes : typing.Sequence[str]
'''
prefixes_ = prefixes
# Validate arguments against known Juju API types.
if prefixes_ is not None and not isinstance(prefixes_, (bytes, str, list)):
raise Exception("Expected prefixes_ to be a Sequence, received: {}".format(type(prefixes_)))
self.prefixes = prefixes_
self.unknown_fields = unknown_fields
class FindTagsResults(Type):
_toSchema = {'matches': 'matches'}
_toPy = {'matches': 'matches'}
def __init__(self, matches=None, **unknown_fields):
'''
matches : typing.Sequence[~Entity]
'''
matches_ = [Entity.from_json(o) for o in matches or []]
# Validate arguments against known Juju API types.
if matches_ is not None and not isinstance(matches_, (bytes, str, list)):
raise Exception("Expected matches_ to be a Sequence, received: {}".format(type(matches_)))
self.matches = matches_
self.unknown_fields = unknown_fields
class FindToolsParams(Type):
_toSchema = {'agentstream': 'agentstream', 'arch': 'arch', 'major': 'major', 'minor': 'minor', 'number': 'number', 'series': 'series'}
_toPy = {'agentstream': 'agentstream', 'arch': 'arch', 'major': 'major', 'minor': 'minor', 'number': 'number', 'series': 'series'}
def __init__(self, agentstream=None, arch=None, major=None, minor=None, number=None, series=None, **unknown_fields):
'''
agentstream : str
arch : str
major : int
minor : int
number : Number
series : str
'''
agentstream_ = agentstream
arch_ = arch
major_ = major
minor_ = minor
number_ = Number.from_json(number) if number else None
series_ = series
# Validate arguments against known Juju API types.
if agentstream_ is not None and not isinstance(agentstream_, (bytes, str)):
raise Exception("Expected agentstream_ to be a str, received: {}".format(type(agentstream_)))
if arch_ is not None and not isinstance(arch_, (bytes, str)):
raise Exception("Expected arch_ to be a str, received: {}".format(type(arch_)))
<reponame>bgraedel/arcos4py
"""Module to plot different metrics generated by arcos4py functions.
Examples:
>>> # Data Plots
>>> from arcos4py.plotting import dataPlots
>>> data_plots = dataPlots(df,'time', 'meas', 'track_id')
>>> hist = data_plots.histogram()
>>> dens = data_plots.density_plot()
>>> xt_plot = data_plots.position_t_plot({'x'}, n=20)
>>> # Detrended vs original plot
>>> from arcos4py.plotting import plotOriginalDetrended
>>> plot = plotOriginalDetrended(data, 'time', 'meas', 'detrended', 'id')
>>> plot.plot_detrended()
>>> # Stats Plot
>>> from arcos4py.plotting import statsPlots
>>> coll_dur_size_scatter = statsPlots(stats).plot_events_duration('total_size','duration')
>>> # Noodle Plot
>>> from arcos4py.plotting import NoodlePlot
>>> ndl = NoodlePlot(df,"collid", 'track_id', 'time', 'x', 'y')
>>> ndl_plot = ndl.plot('x')
"""
from __future__ import annotations
from typing import Union
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
TAB20 = [
"#1f77b4",
"#aec7e8",
"#ff7f0e",
"#ffbb78",
"#2ca02c",
"#98df8a",
"#d62728",
"#ff9896",
"#9467bd",
"#c5b0d5",
"#8c564b",
"#c49c94",
"#e377c2",
"#f7b6d2",
"#7f7f7f",
"#c7c7c7",
"#bcbd22",
"#dbdb8d",
"#17becf",
"#9edae5",
]
class dataPlots:
"""Plot different metrics of input data.
Attributes:
data (Dataframe): containing ARCOS data.
frame (str): name of frame column in data.
measurement (str): name of measurement column in data.
id (str): name of track id column.
"""
def __init__(self, data: pd.DataFrame, frame: str, measurement: str, id: str):
"""Plot different metrics such as histogram, position-t and density.
Arguments:
data (Dataframe): containing ARCOS data.
frame (str): name of frame column in data.
measurement (str): name of measurement column in data.
id (str): name of track id column.
"""
self.data = data
self.id = id
self.frame = frame
self.measurement = measurement
def position_t_plot(self, posCol: set[str] = {'x'}, n: int = 20):
"""Plots X and Y over T to visualize tracklength.
Arguments:
posCol (set): containing names of position columns in data.
n (int): number of samples to plot.
Returns:
fig, axes: Matplotlib fig and axes of density plot.
"""
sample = pd.Series(self.data[self.id].unique()).sample(n)
pd_from_r_df = self.data.loc[self.data[self.id].isin(sample)]
fig, axes = plt.subplots(1, len(posCol), figsize=(6, 3))
for label, df in pd_from_r_df.groupby(self.id):
for index, value in enumerate(posCol):
if len(posCol) > 1:
df.plot(x=self.frame, y=value, ax=axes[index], legend=None)
else:
df.plot(x=self.frame, y=value, ax=axes, legend=None)
if len(posCol) > 1:
for index, value in enumerate(posCol):
axes[index].set_title(value)
else:
axes.set_title(value)
return fig, axes
def density_plot(self, *args, **kwargs):
"""Density plot of measurement.
Uses Seaborn distplot to plot measurement density.
Arguments:
measurement_col (str): name of measurement column.
*args (Any): arguments passed on to seaborn histplot function.
**kwargs (Any): keyword arguments passed on to seaborn histplot function.
Returns:
FacetGrid: Seaborn FacetGrid of density density plot.
"""
plot = sns.displot(
self.data[self.measurement], kind="kde", palette="pastel", label=self.measurement, *args, **kwargs
)
# Plot formatting
plt.legend(prop={'size': 10})
plt.title('Density Plot of Measurement')
plt.xlabel('Measurement')
plt.ylabel('Density')
return plot
def histogram(self, bins: str = 'auto', *args, **kwargs):
"""Histogram of tracklenght.
Uses seaborn histplot function to plot tracklenght histogram.
Arguments:
bins (str): number or width of bins in histogram
*args (Any): arguments passed on to seaborn histplot function.
**kwargs (Any): keyword arguments passed on to seaborn histplot function.
Returns:
AxesSubplot: Matplotlib AxesSubplot of histogram.
"""
# Draw histogram
track_length = self.data.groupby(self.id).size()
axes = sns.histplot(track_length, label="Track Length", bins=bins, *args, **kwargs)
# Plot formatting
plt.title('Track length Histogram')
axes.set_xlabel('Track Length')
axes.set_ylabel('Count')
return axes
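# A minimal usage sketch for dataPlots (hypothetical column names and values;
# any long-format DataFrame with frame, measurement and track-id columns works):
#
#   df = pd.DataFrame({'time': [0, 1, 2, 0, 1, 2],
#                      'meas': [0.1, 0.4, 0.3, 0.2, 0.5, 0.6],
#                      'track_id': [1, 1, 1, 2, 2, 2],
#                      'x': [5.0, 6.0, 7.0, 1.0, 2.0, 3.0]})
#   plots = dataPlots(df, frame='time', measurement='meas', id='track_id')
#   fig, axes = plots.position_t_plot({'x'}, n=2)
#   hist_ax = plots.histogram(bins='auto')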
class plotOriginalDetrended:
"""Plot different detrended vs original data.
Attributes:
data (Dataframe): containing ARCOS data.
frame (str): name of frame column in data.
measurement (str): name of measurement column in data.
detrended (str): name of detrended column with detrended data.
id (str): name of track id column.
"""
def __init__(self, data: pd.DataFrame, frame: str, measurement: str, detrended: str, id: str):
"""Plot detrended vs original data.
Arguments:
data (Dataframe): containing ARCOS data.
frame (str): name of frame column in data.
measurement (str): name of measurement column in data.
detrended (str): name of detrended column with detrended data.
id (str): name of track id column.
"""
self.data = data
self.measurement = measurement
self.detrended = detrended
self.id = id
self.frame = frame
def plot_detrended(
self, n_samples: int = 25, subplots: tuple = (5, 5), plotsize: tuple = (20, 10)
) -> tuple[matplotlib.figure.Figure, np.ndarray]:
"""Method to plot detrended vs original data.
Arguments:
n_samples (int): Number of tracks to plot.
subplots (tuple): Number of subplots, should be approx. one per sample.
plotsize (tuple): Size of generated plot.
Returns:
Fig, Axes: Matplotlib figure and axes2d of detrended vs original data.
"""
vals = np.random.choice(self.data[self.id].unique(), n_samples, replace=False)
self.data = self.data.set_index(self.id).loc[vals].reset_index()
grouped = self.data.groupby(self.id)
ncols = subplots[0]
nrows = subplots[1]
fig, axes2d = plt.subplots(nrows=nrows, ncols=ncols, figsize=plotsize, sharey=True)
for (key, ax) in zip(grouped.groups.keys(), axes2d.flatten()):
grouped.get_group(key).plot(x=self.frame, y=[self.measurement, self.detrended], ax=ax)
ax.get_legend().remove()
handles, labels = ax.get_legend_handles_labels()
fig.legend(handles, labels, loc="lower right")
return fig, axes2d
class statsPlots:
"""Plot data generated by the stats module.
Attributes:
data (DataFrame): containing ARCOS stats data.
"""
def __init__(self, data: pd.DataFrame):
"""Plot detrended vs original data.
Arguments:
data (DataFrame): containing ARCOS stats data.
"""
self.data = data
def plot_events_duration(self, total_size: str, duration: str, point_size: int = 40, *args, **kwargs):
"""Scatterplot of collective event duration.
Arguments:
total_size (str): name of total size column.
duration (str):, name of column with collective event duration.
point_size (int): scatterplot point size.
*args (Any): Arguments passed on to seaborn scatterplot function.
**kwargs (Any): Keyword arguments passed on to seaborn scatterplot function.
Returns:
Axes: Matplotlib Axes object of scatterplot
"""
plot = sns.scatterplot(x=self.data[total_size], y=self.data[duration], s=point_size, *args, **kwargs)
return plot
class NoodlePlot:
"""Create Noodle Plot of cell tracks, colored by collective event id.
Attributes:
df (pd.DataFrame): DataFrame containing collective events from arcos.
colev (str): Name of the collective event column in df.
trackid (str): Name of the track column in df.
frame (str): Name of the frame column in df.
posx (str): Name of the X coordinate column in df.
posy (str): Name of the Y coordinate column in df.
posz (str): Name of the Z coordinate column in df,
or None if no z column.
"""
def __init__(
self,
df: pd.DataFrame,
colev: str,
trackid: str,
frame: str,
posx: str,
posy: str,
posz: Union[str, None] = None,
):
"""Constructs class with given parameters.
Arguments:
df (pd.DataFrame): DataFrame containing collective events from arcos.
colev (str): Name of the collective event column in df.
trackid (str): Name of the track column in df.
frame (str): Name of the frame column in df.
posx (str): Name of the X coordinate column in df.
posy (str): Name of the Y coordinate column in df.
posz (str | None): Name of the Z coordinate column in df,
or None if no z column.
"""
self.df = df
self.colev = colev
self.trackid = trackid
self.frame = frame
self.posx = posx
self.posy = posy
self.posz = posz
def _prepare_data_noodleplot(
self,
df: pd.DataFrame,
color_cylce: list[str],
colev: str,
trackid: str,
frame: str,
posx: str,
posy: str,
posz: Union[str, None] = None,
):
"""From arcos collective event data,\
generates a list of numpy arrays, one for each event.
Arguments:
df (pd.DataFrame): DataFrame containing collective events from arcos.
color_cylce (list[str]): list of colors used to color trackid's
for individual collective events.
colev (str): Name of the collective event column in df.
trackid (str): Name of the track column in df.
frame: (str): Name of the frame column in df.
posx (str): Name of the X coordinate column in df.
posy (str): Name of the Y coordinate column in df.
posz (str): Name of the Z coordinate column in df,
or None if no z column.
Returns:
list[np.ndarray], np.ndarray: List of collective events data,
and colors for each collective event.
"""
# values need to be sorted to group with numpy
df = df.sort_values([colev, trackid])
if posz:
array = df[[colev, trackid, frame, posx, posy, posz]].to_numpy()
else:
array = df[[colev, trackid, frame, posx, posy]].to_numpy()
# generate groups for each unique value
grouped_array = np.split(array, np.unique(array[:, 0], axis=0, return_index=True)[1][1:])
# make collids sequential
seq_colids = np.concatenate(
[np.repeat(i, value.shape[0]) for i, value in enumerate(grouped_array)],
axis=0,
)
array_seq_colids = np.column_stack((array, seq_colids))
# split sequential collids array by trackid and collid
grouped_array = np.split(
array_seq_colids,
np.unique(array_seq_colids[:, :2], axis=0, return_index=True)[1][1:],
)
# generate colors for each collective event, wrap around the color cycle
colors = np.take(np.array(color_cylce), [i + 1 for i in np.unique(seq_colids)], mode="wrap")
return grouped_array, colors
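# The grouping idiom used above, in isolation: np.unique(..., return_index=True)[1]
# gives the first row index of each unique key in a sorted array, and np.split at
# those indices (dropping the leading 0) yields one sub-array per key:
#
#   a = np.array([[1, 10], [1, 11], [2, 12], [2, 13], [3, 14]])
#   idx = np.unique(a[:, 0], return_index=True)[1][1:]  # array([2, 4])
#   groups = np.split(a, idx)                           # three sub-arrays, one per key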
<reponame>ekwska/ffai
#!/usr/bin/env python3
import ffai
from ffai import Action, ActionType, Square, BBDieResult, Skill, PassDistance, Tile, Rules, Formation, ProcBot
import ffai.ai.pathfinding as pf
import time
class MyScriptedBot(ProcBot):
def __init__(self, name):
super().__init__(name)
self.my_team = None
self.opp_team = None
self.actions = []
self.last_turn = 0
self.last_half = 0
self.off_formation = [
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "m", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "x", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "S"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "x"],
["-", "-", "-", "-", "-", "s", "-", "-", "-", "0", "-", "-", "S"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "x"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "S"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "x", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "m", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-"]
]
self.def_formation = [
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "x", "-", "b", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "x", "-", "S", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "0"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "0"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "0"],
["-", "-", "-", "-", "-", "-", "-", "-", "x", "-", "S", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "x", "-", "b", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-"]
]
self.off_formation = Formation("Wedge offense", self.off_formation)
self.def_formation = Formation("Zone defense", self.def_formation)
self.setup_actions = []
def new_game(self, game, team):
"""
Called when a new game starts.
"""
self.my_team = team
self.opp_team = game.get_opp_team(team)
self.last_turn = 0
self.last_half = 0
def coin_toss_flip(self, game):
"""
Select heads/tails and/or kick/receive
"""
return Action(ActionType.TAILS)
# return Action(ActionType.HEADS)
def coin_toss_kick_receive(self, game):
"""
Select heads/tails and/or kick/receive
"""
return Action(ActionType.RECEIVE)
# return Action(ActionType.KICK)
def setup(self, game):
"""
Use either a Wedge offensive formation or zone defensive formation.
"""
# Update teams
self.my_team = game.get_team_by_id(self.my_team.team_id)
self.opp_team = game.get_opp_team(self.my_team)
if self.setup_actions:
action = self.setup_actions.pop(0)
return action
else:
if game.get_receiving_team() == self.my_team:
self.setup_actions = self.off_formation.actions(game, self.my_team)
self.setup_actions.append(Action(ActionType.END_SETUP))
else:
self.setup_actions = self.def_formation.actions(game, self.my_team)
self.setup_actions.append(Action(ActionType.END_SETUP))
action = self.setup_actions.pop(0)
return action
def reroll(self, game):
"""
Select between USE_REROLL and DONT_USE_REROLL
"""
reroll_proc = game.get_procedure()
context = reroll_proc.context
if type(context) == ffai.Dodge:
return Action(ActionType.USE_REROLL)
if type(context) == ffai.Pickup:
return Action(ActionType.USE_REROLL)
if type(context) == ffai.PassAction:
return Action(ActionType.USE_REROLL)
if type(context) == ffai.Catch:
return Action(ActionType.USE_REROLL)
if type(context) == ffai.GFI:
return Action(ActionType.USE_REROLL)
if type(context) == ffai.Block:
attacker = context.attacker
attackers_down = 0
for die in context.roll.dice:
if die.get_value() == BBDieResult.ATTACKER_DOWN:
attackers_down += 1
elif die.get_value() == BBDieResult.BOTH_DOWN and not attacker.has_skill(Skill.BLOCK) and not attacker.has_skill(Skill.WRESTLE):
attackers_down += 1
if attackers_down > 0 and context.favor != self.my_team:
return Action(ActionType.USE_REROLL)
if attackers_down == len(context.roll.dice) and context.favor != self.opp_team:
return Action(ActionType.USE_REROLL)
return Action(ActionType.DONT_USE_REROLL)
def place_ball(self, game):
"""
Place the ball when kicking.
"""
left_center = Square(7, 8)
right_center = Square(20, 8)
if game.is_team_side(left_center, self.opp_team):
return Action(ActionType.PLACE_BALL, position=left_center)
return Action(ActionType.PLACE_BALL, position=right_center)
def high_kick(self, game):
"""
Select player to move under the ball.
"""
ball_pos = game.get_ball_position()
if game.is_team_side(game.get_ball_position(), self.my_team) and \
game.get_player_at(game.get_ball_position()) is None:
for player in game.get_players_on_pitch(self.my_team, up=True):
if Skill.BLOCK in player.get_skills() and game.num_tackle_zones_in(player) == 0:
return Action(ActionType.SELECT_PLAYER, player=player, position=ball_pos)
return Action(ActionType.SELECT_NONE)
def touchback(self, game):
"""
Select player to give the ball to.
"""
p = None
for player in game.get_players_on_pitch(self.my_team, up=True):
if Skill.BLOCK in player.get_skills():
return Action(ActionType.SELECT_PLAYER, player=player)
p = player
return Action(ActionType.SELECT_PLAYER, player=p)
def turn(self, game):
"""
Start a new player action.
"""
# Update teams
self.my_team = game.get_team_by_id(self.my_team.team_id)
self.opp_team = game.get_opp_team(self.my_team)
# Reset actions if new turn
turn = game.get_agent_team(self).state.turn
half = game.state.half
if half > self.last_half or turn > self.last_turn:
self.actions.clear()
self.last_turn = turn
self.last_half = half
self.actions = []
#print(f"Half: {half}")
#print(f"Turn: {turn}")
# End turn if only action left
if len(game.state.available_actions) == 1:
if game.state.available_actions[0].action_type == ActionType.END_TURN:
self.actions = [Action(ActionType.END_TURN)]
# Execute planned actions if any
if len(self.actions) > 0:
action = self._get_next_action()
return action
# Split logic depending on offense, defense, and loose ball - and plan actions
ball_carrier = game.get_ball_carrier()
self._make_plan(game, ball_carrier)
action = self._get_next_action()
return action
def _get_next_action(self):
action = self.actions[0]
self.actions = self.actions[1:]
#print(f"Action: {action.to_json()}")
return action
def _make_plan(self, game, ball_carrier):
print("1. Stand up marked players")
for player in self.my_team.players:
if player.position is not None and not player.state.up and not player.state.stunned and not player.state.used:
if game.num_tackle_zones_in(player) > 0:
self.actions.append(Action(ActionType.START_MOVE, player=player))
self.actions.append(Action(ActionType.STAND_UP))
print(f"Stand up marked player {player.role.name}")
return
print("2. Move ball carrier to endzone")
if ball_carrier is not None and ball_carrier.team == self.my_team and not ball_carrier.state.used:
print("2.1 Can ball carrier score with high probability")
td_path = pf.get_safest_path_to_endzone(game, ball_carrier, allow_team_reroll=True)
if td_path is not None and td_path.prob >= 0.7:
self.actions.append(Action(ActionType.START_MOVE, player=ball_carrier))
for step in td_path.steps:
self.actions.append(Action(ActionType.MOVE, position=step))
print(f"Score with ball carrier, p={td_path.prob}")
return
print("2.2 Hand-off action to scoring player")
if game.is_handoff_available():
# Get players in scoring range
unused_teammates = []
for player in self.my_team.players:
if player.position is not None and player != ball_carrier and not player.state.used and player.state.up:
unused_teammates.append(player)
# Find other players in scoring range
handoff_p = None
handoff_path = None
handoff_player = None
for player in unused_teammates:
if game.get_distance_to_endzone(player) > player.num_moves_left():
continue
td_path = pf.get_safest_path_to_endzone(game, player, allow_team_reroll=True)
if td_path is None:
continue
path_from_ball_carrier = pf.get_safest_path_to_player(game, ball_carrier, player, allow_team_reroll=True)
if path_from_ball_carrier is None:
continue
p_catch = game.get_catch_prob(player, handoff=True, allow_catch_reroll=True, allow_team_reroll=True)
p = td_path.prob * path_from_ball_carrier.prob * p_catch
if handoff_p is None or p > handoff_p:
handoff_p = p
handoff_path = path_from_ball_carrier
handoff_player = player
# Hand-off if high probability or last turn
if handoff_path is not None and (handoff_p >= 0.7 or self.my_team.state.turn == 8):
self.actions = [Action(ActionType.START_HANDOFF, player=ball_carrier)]
for step in handoff_path.steps:
self.actions.append(Action(ActionType.MOVE, position=step))
self.actions.append(Action(ActionType.HANDOFF, position=handoff_player.position))
print(f"Hand-off to scoring player, p={handoff_p}")
return
print("2.3 Move safely towards the endzone")
if game.num_tackle_zones_in(ball_carrier) == 0:
paths = pf.get_all_paths(game, ball_carrier)
best_path = None
best_distance = 100
target_x = game.get_opp_endzone_x(self.my_team)
for path in paths:
distance_to_endzone = abs(target_x - path.steps[-1].x)
if path.prob == 1 and (best_path is None or distance_to_endzone < best_distance):
best_path = path
best_distance = distance_to_endzone
if best_path is not None:
steps = []
for step in best_path.steps:
if game.num_tackle_zones_at(ball_carrier, step) > 0:
break
if len(steps) >= ball_carrier.num_moves_left():
break
steps.append(step)
if len(steps) > 0:
self.actions.append(Action(ActionType.START_MOVE, player=ball_carrier))
for step in steps:
self.actions.append(Action(ActionType.MOVE, position=step))
print(f"Move ball carrier {ball_carrier.role.name}")
return
print("3. Safe blocks")
attacker, defender, p_self_up, p_opp_down, block_p_fumble_self, block_p_fumble_opp = self._get_safest_block(game)
if attacker is not None and p_self_up > 0.94 and block_p_fumble_self == 0:
self.actions.append(Action(ActionType.START_BLOCK, player=attacker))
self.actions.append(Action(ActionType.BLOCK, position=defender.position))
print(f"Safe block with {attacker.role.name} -> {defender.role.name}, p_self_up={p_self_up}, p_opp_down={p_opp_down}")
return
print("4. Pickup ball")
if game.get_ball_carrier() is None:
pickup_p = None
pickup_player = None
pickup_path = None
for player in self.my_team.players:
if player.position is not None and not player.state.used:
if player.position.distance(game.get_ball_position()) <= player.get_ma()
<filename>nlp.py
import requests
import xml.etree.ElementTree as xET
import csv
import random
import re
from library import HackerLibrary
from database import DataBase
class LuisAI:
NLP_REGION = 'westus'
NLP_SUBSCRIPTION_KEY = '19da1eb81e9740dd888d0eb4af6ca042'
NLP_APP_ID = 'b5365948-56b0-46bb-b58c-05d5a1ab3a59'
NLP_URL = 'https://' + NLP_REGION + '.api.cognitive.microsoft.com/luis/v2.0/apps/' + NLP_APP_ID
def think(self, talk):
headers = {
# Request headers
'Ocp-Apim-Subscription-Key': self.NLP_SUBSCRIPTION_KEY,
}
params = {
# Query parameter
'q': talk,
'timezoneOffset': '540', # 60 x 9
'verbose': 'true',
'spellCheck': 'false',
'staging': 'true',
}
try:
r = requests.get(
self.NLP_URL,
headers=headers, params=params)
return r.json()
except Exception as e:
print(e)
raise ValueError
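# Hypothetical usage of think() (requires a valid LUIS subscription; the only
# field consumed elsewhere in this module is topScoringIntent.intent):
#
#   la = LuisAI()
#   result = la.think("hello")
#   intent = result['topScoringIntent']['intent']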
def get_reply(self, intent, user_info):
hl = HackerLibrary()
db = DataBase()
username = user_info['data']['userstate']['username']
feelings = user_info['data']['userstate']['feelings']
nickname = user_info['data']['userstate']['nickname']
# BrainFuck & ๋ธ๊ฐ๋ค Start!
# Have a good time!
print("intent: " + intent)
if intent == 'Special.NewUser': # LUIS.ai ์ ์ ์๋์ง ์์์.
return "์๋
, %s! ๋๋ 45๋ผ๊ณ ํด. ์ฐ๋ฆฌ ์ฒ์ ๋ณด๋ ๊ฑฐ ๋ง์ง? ๋ ๋ฐ๊ฐ์!! ์์ผ๋ก๋ ์ ๋ถํํด!" % nickname
elif intent == 'Communication.Interrupt.QuitWordGame':
# ์๋ชป ์ธ์ (์ด ํจ์ ์์ฒด๊ฐ ํธ์ถ๋ ์ผ์ด ์๋ค)
return self.get_reply('None', user_info)
elif intent == 'Communication.Common.Bye':
random_response_string = [["์ ๊ฐ!", "์, ์ ๊ฐ", "๊ทธ๋, ์ ๊ฐ."],
["์ ๊ฐ!!! ๋ค์์๋ ๊ผญ ์์ผํด!!", "์, ๊ณ ๋ง์! ๋ค์์ ๋ค์ ๋ณด์!"],
["์์!! ๋ด์ผ๋ ๊ผญ ๋ค์๋ด์ผ๋ผ! ์ฌ๋ํดโค"],
]
# ใ1๋ฐ๋ผใใในในในในในในในในในในในในในในใใดใ๋ฃํใ๋กธใฃใ,ใใทใใ๋ฐง๋ฎคใฃใท๋ฑ3ใใใใ๋ดใใใใ๋น๋ใใฑใ
# ํธ๋ํฐ์ผ๋ก ๋ฑ์ง์น๊ณ ์ถ๋ค...
return hl.choose_reply(random_response_string, feelings)
elif intent == 'Communication.Common.Hello':
random_response_string = [["์๋! ๋ฐ๊ฐ์!"],
["์๋ %s! ์ด๋ ๊ฒ ์์ค์ ์ ๋ง ๊ธฐ๋ป!" % nickname, "์ฐ์! %s์ด๋ค! ๋ฐ๊ฐ์๐" % nickname],
["โคโค๋ ์์คฌ๋ค, %s! ๋ค์ ๋ณด๋๊น ๋๋ฌด ์ข๋ค. ์ค๋๋ ๊ฐ์ด ์ฌ๋ฐ๋ ์ผ ํ์!" % nickname],
]
return hl.choose_reply(random_response_string, feelings)
elif intent == 'Communication.Etc.Swear':
random_response_string = [["ํ์์... ", "ํธ๊ฐ๋ Low-2", "ํธ๊ฐ๋ Low-3"],
["ํ... ๊ทธ๋ฐ ๋ง ์ฐ๋ฉด ๋ฌด์์์...", "ํ์.. ๊ทธ๋ฐ ๋ง ์ฐ๋ฉด ๋ถํธํด์.."],
["๊ทธ๋ฐ ๋ง ์ฐ๋ฉด ๋ฌด์์์ใ
ใ
.. ๊ทธ๋ฐ ๋ง์ ์ฐ์ง ๋ง์์คฌ์ผ๋ฉด ์ข๊ฒ ์ด์๐ฅ"]
]
# ํธ๊ฐ๋๋ฅผ ๋ํญ ์ฐจ๊ฐํ๋ค.
db.alter_feelings(username, -5)
return hl.choose_reply(random_response_string, feelings)
elif intent == 'Communication.Etc.WhatTheFuck':
random_response_string = [["์์ง ๊ทธ๋ฐ๊ฑฐ๋ ์ซ์ด์!", "์ซ์ด์. ์์ง์ ์๋ก ์๊ฒ ๋์ง ์ค๋๋์ง ์์์์์.", "๋ค์? ์ ๊ทธ๋ฌ์ธ์?"],
["๋ค์? ๋ญ๋ผ๊ณ ์?!! ๋ถ๋๋ฌ์์! ์์ง ๊ทธ๋ฐ ๊ด๊ณ๊ฐ ์๋์์์!"],
["๋ค์ฃ? ๋ญ.. ๋ญ๋ผ๊ณ ์?!! ์ฐ๋ฆฌ ๊ทธ", "ํธ๊ฐ๋ High-2", "ํธ๊ฐ๋ High-3"],
]
return hl.choose_reply(random_response_string, feelings)
elif intent == 'Communication.Event.Ask.StartWordGame':
random_response_string = [["์... ๋๋ง์๊ธฐ? ์ข์, ๋จผ์ ์์ํด.", "์...๊ทธ๋ ํ๋ฒ ํด๋ณด์.๋จผ์ ์์ํด.", "๋๋ง์๊ธฐ ์ข์ง. ๋๊ฐ ๋จผ์ ์์ํด."],
["๋๋ง์๊ธฐ ํ์๊ณ ? ์ ์ข์, ๋จผ์ ์์ํด! ์ฒซ๋จ์ด ํ๋ฐฉ์ ์๋ผ๋๊ฑฐ ์์ง? ๋๋ด๋ ค๋ฉด '๋๋ด์'๋ผ๊ณ ๋งํด์ค!",
"๋๋ง์๊ธฐ? ์ข์! ์ฒซ๋จ์ด ํ๋ฐฉ์ ์๋ผ๋๊ฑฐ ์์ง? ๊ธฐ๊ถํ๋ ค๋ฉด ๊ทธ๋ฅ ์ก๋ค๊ณ ๋งํด์ฃผ๋ฉด ๋ผ ใ
ใ
!"
"ํํ! ๋๋ง์๊ธฐ? ์ข์! ์ด๋ฒ์๋ ๊ผญ ์ด๊ฒจ์ฃผ์ง!"
],
[""],
]
db.set_state(username, "wordgame")
db.reset_used_word(username)
return hl.choose_reply(random_response_string, feelings)
elif intent == 'Communication.Event.Ask.TellFunStory':
random_response_string = [["์ฌ๋ฐ๋ ์๊ธฐ? ์...๋ค๊ฐ ์ด๋ค ์ด์ผ๊ธฐ๋ฅผ ์ข์ํ๋์ง ์ ๋ชจ๋ฅด๊ฒ ๋๋ฐ", "ํธ๊ฐ๋ Low-2", "ํธ๊ฐ๋ Low-3"],
["ํธ๊ฐ๋ Middle-1", "ํธ๊ฐ๋ Middle-2", "ํธ๊ฐ๋ Middle-3"],
["ํธ๊ฐ๋ High-1", "ํธ๊ฐ๋ High-2", "ํธ๊ฐ๋ High-3"],
]
return hl.choose_reply(random_response_string, feelings)
elif intent == 'Communication.EveryDay.Ask.DoTogether.Eat':
random_response_string = [["์ด...์ง๊ธ์ ๋ณ๋ก ๋ฐฐ๊ณ ํ์ง ์์๋ฐ...๊ทธ๋ฅ ๊ฐ๋ณด์.", "๋จน๊ณ ์ถ์๊ฒ ๋ฑํ ์๊ธดํ๋ฐ...๊ทธ๋ ๊ฐ์ด ๋จน์.", "์...๊ทธ๋ ๊ฐ์ด ๊ทธ๋ ๊ฐ์ด ๋จน์."],
["์...๋ญ ๋จน๊ณ ์ถ์๋ฐ?", "๋ฐฅ? ๊ทธ๋ ๊ฐ์ด ๊ฐ๋ณด์", "๊ทธ๋ ์ด๋๋ก ๊ฑฐ๊ณ ์ถ์๋ฐ?"],
["๊ทธ๋ ๊ฐ์ด๊ฐ์!", "๋ง์นจ ๋ฐฐ๊ณ ํ ๋๋ฐ ์๋๋ค. ๊ฐ์ด ๊ฐ์.", "์ข์ ๊ฐ์ด ๋จน์. ๋์ด๋ฐ ํ์์ผ๋ก ์์ด์คํฌ๋ฆผ๋ ๋จน์๋?"],
]
return hl.choose_reply(random_response_string, feelings)
elif intent == 'Communication.EveryDay.Ask.TellTodayStory':
random_response_string = [["์ฌ๋ฐ๋ ์ผ์ ์์๋๋ฐ...", "๋ ์ค๋์ ๋ฑํ ์ฌ๋ฐ๋ ์ผ์ด ์์์ด.", "์...์ค๋์ ๋ญํ๋๋ผ..."],
["ํธ๊ฐ๋ Middle-1", "ํธ๊ฐ๋ Middle-2", "ํธ๊ฐ๋ Middle-3"],
["ํธ๊ฐ๋ High-1", "ํธ๊ฐ๋ High-2", "ํธ๊ฐ๋ High-3"],
]
return hl.choose_reply(random_response_string, feelings)
elif intent == 'Communication.EveryDay.Ask.TodayFeelings':
random_response_string = [["์...์ง๊ธ์ ๊ทธ๋ฅ ๊ทธ๋ฐ๋ฐ..", "์ค๋์ ๊ธฐ๋ถ์ด ์ด์ง ์ ์ข์.", "์ง๊ธ ๊ธฐ๋ถ? ๊ทธ๋ฅ๊ทธ๋."],
["์ง๊ธ ๋์์ง ์์.", "๊ทธ๋ฅ ์ข์ ํธ์ด์ผ.", "์ง๊ธ? ๊ทธ๋ฅ ๊ธฐ๋ถ ์ข์."],
["๋? ์ค๋ ๋ญ๊ฐ ๊ธฐ๋ถ ์ข์.", "์ค๋์ ๊ธฐ๋ถ์ด ๋๊ฒ ์ข์.", "๋์ผ ์ง๊ธ ๋๋ ์์ผ๋๊น ๊ธฐ๋ถ ์ข์ง"],
]
return hl.choose_reply(random_response_string, feelings)
elif intent == 'Communication.EveryDay.Ask.WhatWereYouDoing':
random_response_string = [["ํธ๊ฐ๋ Low-1", "ํธ๊ฐ๋ Low-2", "ํธ๊ฐ๋ Low-3"],
["ํธ๊ฐ๋ Middle-1", "ํธ๊ฐ๋ Middle-2", "ํธ๊ฐ๋ Middle-3"],
["ํธ๊ฐ๋ High-1", "ํธ๊ฐ๋ High-2", "ํธ๊ฐ๋ High-3"],
]
return hl.choose_reply(random_response_string, feelings)
elif intent == 'Communication.EveryDay.Feelings.UserHappy':
random_response_string = [["๊ทธ๋? ๋ฌด์จ์ผ์ด์๊ธธ๋?", "๋ฌด์จ์ผ์ด์๋๋ฐ?", "๋๋ ์ข์์ผ์์์ด?"],
["๋๋ ์ค๋ ๊ธฐ๋ถ ์ข์์ผ ์์๋๋ฐ ใ
ใ
", "์ด๋ค ์ข์์ผ์ด ์์๋๋ฐ?", "์ค๋์ ๋ค๋ฅธ ๋๋ณด๋ค ๋ง์ ๋ง์ด ํ๋๊ฑฐ ๊ฐ๋๋. ๊ธฐ๋ถ ์ข์์๋๊ฑฐ๊ตฌ๋?"],
["๋ฌด์จ ์ผ์ธ์ง๋ ๋ชจ๋ฅด๊ฒ ์ง๋ง ์ข์ ์ผ์ด์๋๋ณด๋ค.", "๋ค๊ฐ ๊ธฐ๋ถ์ด ์ข์ํ๋๊น ๋๋ ๋ฉ๋ฌ์ ๊ธฐ๋ถ์ด ์ข์์ง๋ค.", "์! ๊ธฐ๋ถ์ด ๋ง์ด ์ข์๋ณด์ด๋๋ฐ ๋ฌด์จ์ผ ์์์ด?"],
]
return hl.choose_reply(random_response_string, feelings)
elif intent == 'Communication.EveryDay.Feelings.UserSad':
random_response_string = [["์ด? ๋ฌด์จ์ผ์ธ๋ฐ?", "๋ฌด์จ์ผ ์์์ด?", "๊ด์ฐฎ์?"],
["๋ง์ด ์ฌํผ? ๊ด์ฐฎ์?", "๋๋ ๊ทธ๋ฐ์ ์์ด. ๊ด์ฐฎ์.", "๊ด์ฐฎ์. ํ ๋ฒ์ฉ ์ธ์ด๋ ๋ผ."],
["๊ด์ฐฎ์. ๋ค ์์๋ ๋ด๊ฐ ์์์.", "๋๋ ๊ทธ๋ฐ์ ์์ด. ๋ ์ ์ด๊ฒจ๋ผ์ ์์๊ฑฐ์ผ. ํ์ดํ
!", "๋๋ฌด ์ฌํ๋๋ ํ ๋ฒ์ฉ ์ธ์ด๋ ๊ด์ฐฎ์."],
]
return hl.choose_reply(random_response_string, feelings)
elif intent == 'Communication.Intent.No':
random_response_string = [["์ด...๊ทธ๋?", "์... ์ด๊ฑฐ๋ ํ๊ธฐ ์ซ์ด?", "์ ์ด๊ฑฐ๋ ๋ณ๋ก ์์ข์ํ๋๊ตฌ๋."],
["์... ๊ทธ๋ผ ๋ญํ ๊น?", "๋ ๋ญํ๊ณ ์ถ์๋๋ฐ...", "๊ทธ๋ฌ๋ฉด ๋ค๋ฅธ๊ฑฐ ๋ญํ์ง?"],
["๊ทธ๋ผ ๋ค๋ฅธ๊ฑฐ ์ฐพ์๋ณด์.", "๋๋ ํ๊ณ ์ถ์๊ฑฐ ์์ด?", "๊ทธ๋ผ ๋๋ ๋ญํ๊ณ ์ถ์๋ฐ?"],
]
return hl.choose_reply(random_response_string, feelings)
elif intent == 'Communication.Intent.Yes':
random_response_string = [["ใใ"],
["๊ตฟ!"],
["๊ทธ๋!"],
]
return hl.choose_reply(random_response_string, feelings)
elif intent == 'Communication.ParaLang.Pause':
random_response_string = [["ํธ๊ฐ๋ Low-1", "ํธ๊ฐ๋ Low-2", "ํธ๊ฐ๋ Low-3"],
["ํธ๊ฐ๋ Middle-1", "ํธ๊ฐ๋ Middle-2", "ํธ๊ฐ๋ Middle-3"],
["ํธ๊ฐ๋ High-1", "ํธ๊ฐ๋ High-2", "ํธ๊ฐ๋ High-3"],
]
return hl.choose_reply(random_response_string, feelings)
elif intent == 'Communication.RelationShip.Confession':
random_response_string = [["ํธ๊ฐ๋ Low-1", "ํธ๊ฐ๋ Low-2", "ํธ๊ฐ๋ Low-3"],
["ํธ๊ฐ๋ Middle-1", "ํธ๊ฐ๋ Middle-2", "ํธ๊ฐ๋ Middle-3"],
["ํธ๊ฐ๋ High-1", "ํธ๊ฐ๋ High-2", "ํธ๊ฐ๋ High-3"],
]
return hl.choose_reply(random_response_string, feelings)
elif intent == 'Communication.RelationShip.Feelings.HateYou':
random_response_string = [["ํธ๊ฐ๋ Low-1", "ํธ๊ฐ๋ Low-2", "ํธ๊ฐ๋ Low-3"],
["๊ทธ๋ ๊ฒ ๋งํ๋ฉด ์์ดํ์ง...", "ํธ๊ฐ๋ Middle-2", "ํธ๊ฐ๋ Middle-3"],
["ํธ๊ฐ๋ High-1", "ํธ๊ฐ๋ High-2", "ํธ๊ฐ๋ High-3"],
]
return hl.choose_reply(random_response_string, feelings)
elif intent == 'Communication.RelationShip.Feelings.LoveYou':
random_response_string = [["ํธ๊ฐ๋ Low-1", "ํธ๊ฐ๋ Low-2", "ํธ๊ฐ๋ Low-3"],
["ํธ๊ฐ๋ Middle-1", "ํธ๊ฐ๋ Middle-2", "ํธ๊ฐ๋ Middle-3"],
["ํธ๊ฐ๋ High-1", "ํธ๊ฐ๋ High-2", "ํธ๊ฐ๋ High-3"],
]
return hl.choose_reply(random_response_string, feelings)
elif intent == 'Communication.RelationShip.RequestDate':
random_response_string = [["ํธ๊ฐ๋ Low-1", "ํธ๊ฐ๋ Low-2", "ํธ๊ฐ๋ Low-3"],
["ํธ๊ฐ๋ Middle-1", "ํธ๊ฐ๋ Middle-2", "ํธ๊ฐ๋ Middle-3"],
["ํธ๊ฐ๋ High-1"],
]
return hl.choose_reply(random_response_string, feelings)
elif intent == 'None':
random_response_string = [["์? ๋ญ๋ผ๊ณ ?", "์ด? ๋ฐฉ๊ธ ๋ญ๋ผ๊ณ ๋งํ์ด?", "์ด? ๋ค์ ํ๋ฒ ๋งํด์ค."],
["์ผ.. ์ผ์? ์ ๋ชป ์์๋ค์ ๊ฒ ๊ฐ์.", "์ ๋ชป ์์๋ค์์ด. ๋ค์ ์๊ธฐํด์ค, %s!๐
" % nickname],
["์? ๋ญ๋ผ๊ณ ๋งํ์ด? ๋ฏธ์ํด ๋ด๊ฐ ๋ชป ๋ค์์ด", "ํ๋ฒ๋ง ๋ค์๋งํด์ฃผ๋ผ", "ํน์ ํ ๋ฒ๋ง ๋ค์ ๋งํด์ค์ ์์ด? ๋ฏธ์ํด ์ ๋ชป ๋ค์์ด."],
]
return hl.choose_reply(random_response_string, feelings)
else:
raise ValueError
# ์1๋ฐ
class WordGame:
opendict_url = "https://opendict.korean.go.kr/api/search"
opendict_key = "<KEY>"
def word_game(self, username, request_string, nickname):
# ์๋ก์ด ๋ธ๋ก๋ฒ์คํฐ๊ธ ์ฌ๋์ด ์ฐพ์์จ๋ค.
# '๋ ์ฝ๋์ค: ๋ ๋ด ํฌ: ๋๋ง์๊ธฐ' 2019๋ 10์ ๋ง ๋๊ฐ๋ด!
la = LuisAI()
hl = HackerLibrary()
db = DataBase()
nlp_result = la.think(request_string)['topScoringIntent']['intent']
if nlp_result == 'Communication.Interrupt.QuitWordGame':
print('WordGame Exit!')
random_response_string = [["ํ! ์ ๊ฐ ์ด๊ฒผ๋ค์."],
["ํํ! ์ ๊ฐ ์ด๊ฒผ์ด์! ์์ผ๋ก๋ ๋๋ง์๊ธฐ ๋ง์ด ํด์!"],
["์ ๊ฐ ์ด๊ฒผ์ด์! " + nickname + "๋๊ณผ ํ๋ ๊ฑฐ๋ผ ๋ ์ฌ๋ฏธ์์๋ ๊ฒ ๊ฐ์์. ์์ผ๋ก๋ ์์ฃผ ๊ฐ์ด ๋์ ์ฃผ์ธ์!"],
]
feelings_result = db.alter_feelings(username, 5)
db.set_state(username, "normal")
db.reset_used_word(username)
return hl.choose_reply(random_response_string, feelings_result['data']['userstate']['feelings'])
db.set_state(username, "wordgame")
if self.check_dict(request_string) != 0:
return "์ฌ์ ์์ ๋จ์ด๋ฅผ ์ฐพ์ ์ ์์ด์!"
add_result = db.add_used_word(username, request_string)
if add_result != 0:
if add_result == 1:
return "์ด๋ฏธ ์ฌ์ฉํ ๋ฑ๋ง์ด์์!"
else:
return "๋ฑ๋ง์ด ์ฌ๋ฐ๋ฅด์ง ์์์!"
result = self.gen_word(request_string, username)
if result == -1:
db.set_state(username, "normal")
return "์ ๊ฐ ์ก์ด์!"
else:
db.add_used_word(username, result)
return result
def check_dict(self, string):
try:
r = requests.get(
self.opendict_url + "?key=" + self.opendict_key + "&q=" + string,
)
tree = xET.fromstring(r.text)
result = tree.find('total').text
if int(result) > 0:
return 0 # ์ฌ์ ์ ์์
except Exception as e:
print(e)
raise ValueError
# read csv, and split on "," the line
csv_file = csv.reader(open('./worddb/fucking_words.csv', "r", encoding='utf8'), delimiter=",")
# loop through csv list
for row in csv_file:
for r in row:
if r == string:
print('%s๋ User Dict ์ ์์ต๋๋ค. (%s = %s)' % (r, r, string))
return 0 # User Dict ์ ์์
return 1 # ์ฌ์ ์ ์๋ ๋จ์ด์ธ ๊ฒฝ์ฐ
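# Usage sketch for check_dict (the remote lookup needs a valid opendict API key;
# the CSV fallback path is the one hard-coded above):
#
#   wg = WordGame()
#   if wg.check_dict(word) == 0:
#       pass  # word exists in the dictionary (or the user CSV)
#   else:
#       pass  # unknown word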
@staticmethod
def gen_word(string, username):
# TODO: More Words
db = DataBase()
used_words = db.get_used_words(username)
# read csv, and split on "," the line
csv_file = csv.reader(open('./worddb/fucking_words.csv', "r", encoding='utf8'), delimiter=",")
reply_arr = []
# loop through csv list
for row in csv_file:
for r in row:
if r.startswith(list(string)[-1]):
if r not in used_words:
reply_arr.append(r)
if len(reply_arr) == 0:
# ์ฐ๋ฆฌ๋ง์ AJAX API ์ฌ์ฉํ๊ธฐ (Unofficial)
print("์ฐ๋ฆฌ๋ง์ AJAX ์ง์
...")
params = {
# Query parameter
'searchTerm': list(string)[-1]
}
print(params)
try:
r = requests.post(
"https://opendict.korean.go.kr/search/autoComplete",
params=params)
if r.json()['json'][1] < 1:
return -1
print(r.json())
# ๋ฐ์ดํ ์์์๋๊ฒ๋ง ์ถ์ถ
matched_groups = re.findall(r"'(.*?)'", r.json()['json'][0], re.DOTALL)
print("BEFORE: ")
print(matched_groups)
if len(matched_groups) > 0:
for m in list(matched_groups):  # iterate over a copy since items are removed below
# ํ๊ธ์์ธ๊ฑฐ ํํฐ๋ง
if len(list(m)) < 2:
matched_groups.remove(m)
# '๋ค' ๋ก ๋๋๋๊ฑฐ ํํฐ๋ง (์์)
elif m.endswith('๋ค'):
matched_groups.remove(m)
print('Removed %s' % str(m))
print("AFTER: ")
from anytree import Node, RenderTree, PreOrderIter
import random as rnd
from matplotlib import pyplot as plt
import datetime
# Tic Tac Toe Stats and Game simulator.
# Allows us to estimate for arbitrary grid sizes whether there is a first mover advantage or not.
# Also allows us to estimate which is the best grid reference to start the game with.
# Copyright 2017 <NAME>, Bristol, UK
class Grid:
"""Tic Tac Toe Grid"""
def __init__(self, x_by_x=3):
self.GRID, self.GRID_REF = self.build_grid(x_by_x)
self.__GRID_LEN__ = x_by_x
self.__TOTAL_GRIDS__ = x_by_x * x_by_x
# Constants for 'x', 'o' and 'draw'
self.X = "x"
self.O = "o"
self.D = "draw"
def build_grid(self, x_by_x=3):
"""
:param x_by_x: Size of grid
:return: Grid and Grid Reference
Build the Tic Tac Toe Grid - provide size of the square grid.
Grid references for 3x3 tic tac toe grid:
1 | 2 | 3
---------
4 | 5 | 6
---------
7 | 8 | 9
"""
# base grid (e.g. 3x3) each space can be 'x', 'o' or ''
base = []
# base_ref is a list of grid locations (e.g. for 3x3 grid - list will have location numbers from 1 to 9)
base_ref = [i for i in range(1, (x_by_x * x_by_x + 1))]
for x in range(0, x_by_x):
row = []
for y in range(0, x_by_x):
row.append('')
base.append(row)
return base, base_ref
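# For the default x_by_x=3, build_grid returns:
#   base     -> [['', '', ''], ['', '', ''], ['', '', '']]
#   base_ref -> [1, 2, 3, 4, 5, 6, 7, 8, 9]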
def clean(self):
"""Clean the grid - remove x or o"""
self.GRID = self.build_grid(x_by_x=self.__GRID_LEN__)[0]
def build_graph(self):
"""
Build graph of possible moves this will NOT work for grid size more than 3x3 as possible number of moves
will be greater than 20 trillion!
Use this to do brute-force solving of a tic-tac-toe grid.
The graph is a set of moves - each node is a grid reference and one walk from root to a leaf is a full game
with each grid reference present once.
It can be thought of as a set of move sequences, some examples include:
[1, 3, 4, 6, 5, 7, 9, 8, 2]
[1, 2, 7, 5, 6, 4, 9, 8, 3]
Where each entry is a grid reference (or a grid entry)
Grid references for 3x3 tic tac toe grid:
1 | 2 | 3
---------
4 | 5 | 6
---------
7 | 8 | 9
We always assume 'o' is the first move and 'x' the next.
For move sequence: [1, 3, 4, 6, 5, 7, 9, 8, 2]
The final grid becomes:
o | o | x
---------
o | o | x
---------
x | x | o
Here 'o' is the winner (i.e. first mover is the winner)
"""
root = Node("Root")
if len(self.GRID_REF) > 9:
raise Exception("Cannot create a graph for Grid size greater than 3 x 3. Use 'sample_moves' instead.")
self.populate_graph(self.GRID_REF, root, 0, "r")
return root
def sample_moves(self, no_of_samples=100000):
"""
:param no_of_samples: take number of unique samples
:return: set of move sequences
This is the alternative to brute force - we sample unique move sequences instead of trying to create a full set of
moves. This is required for grid sizes greater than 3x3.
Grid size - Number of Grid Slots - total number of moves
2x2 = 4 = 24 move sequences
3x3 = 9 = 362880 move sequences
4x4 = 16 = >20 trillion move sequences
In general total number of moves = Factorial(number of grid items)
"""
move_samples = {}
current_state = [str(x) for x in self.GRID_REF]
while len(move_samples) < no_of_samples:
moves = ':'.join(current_state)
move_samples[moves] = True
current_state = self.switch(current_state)
return move_samples.keys()
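# The counts quoted in the docstring are factorials of the number of grid slots:
#
#   from math import factorial
#   factorial(4)   #             24  (2x2 grid)
#   factorial(9)   #         362880  (3x3 grid)
#   factorial(16)  # 20922789888000  (4x4 grid, ~21 trillion, hence sampling)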
def populate_graph(self, nodes_to_be_added, parent, step, root):
""" populate the moves graph to create all possible move sequences """
step += 1
for i in range(0, len(nodes_to_be_added)):
_nodes_to_be_added = [x for x in nodes_to_be_added]
tmp = _nodes_to_be_added.pop(len(nodes_to_be_added) - 1 - i)
child = parent.name + ":" + str(tmp)
node = Node(child, parent=parent)
if root is None:
self.populate_graph(_nodes_to_be_added, node, step, tmp)
else:
self.populate_graph(_nodes_to_be_added, node, step, root)
def switch(self, state):
"""
Randomly switch moves in a move set
:param state: current move sequence
:return: switched move sequence
Example:
Input > [1, 3, 4, 6, 5, 7, 9, 8, 2]
Output > [2, 3, 4, 6, 5, 7, 9, 8, 1] (first and last grid ref are switched)
"""
pick_A = rnd.randint(0, self.__TOTAL_GRIDS__ - 1)
pick_Z = rnd.randint(0, self.__TOTAL_GRIDS__ - 1)
while pick_A == pick_Z:
pick_Z = rnd.randint(0, self.__TOTAL_GRIDS__ - 1)
tmp = state[pick_A]
state[pick_A] = state[pick_Z]
state[pick_Z] = tmp
return state
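# One possible draw from switch() on a four-move sequence (the two indices are
# chosen at random, so outputs vary):
#   ['1', '2', '3', '4'] -> ['4', '2', '3', '1']  # positions 0 and 3 swapped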
def pretty_print(self):
""" pretty print the grid """
separator = ''
for i in range(0, self.__GRID_LEN__ * 16):
separator = separator + '-'
for i in range(0, self.__GRID_LEN__):
line = ''
for j in range(0, self.__GRID_LEN__):
if j > 0:
line = line + "\t|\t{}".format(self.GRID[i][j])
else:
line = line + "\t" + self.GRID[i][j]
if i != 0:
print(separator)
print(line)
def move(self, row, col, value):
"""
Mark a particular grid (given by row - column reference) with a value of 'x' or 'o'
:param row:
:param col:
:param value: ('x' or 'o')
:return:
"""
if self.GRID[row][col] == '':
self.GRID[row][col] = value
return True
return False
def set_o(self, row, col):
"""
Convenience method - set 'o' at a grid entry
:param row: grid row
:param col: grid col
:return:
"""
self.move(row, col, self.O)
def set_o_slot(self, slot):
"""
Convenience method for 'o', using slot (e.g. in a 3x3 grid slots are numbered from 1 to 9 (left to right)
:param slot: the slot number for a grid
:return:
"""
idx = self.slot_to_index(slot)
self.set_o(idx[0], idx[1])
def set_x(self, row, col):
"""
Convenience method - set 'x' at a grid entry
:param row: grid row
:param col: grid col
:return:
"""
self.move(row, col, self.X)
def slot_to_index(self, slot):
"""
Convert a slot to row, col value
:param slot: integer value - 1 to max grid ref (e.g. 3x3 max grid ref = 9)
:return:
Grid references (slots) for 3x3 tic tac toe grid:
1 | 2 | 3
---------
4 | 5 | 6
---------
7 | 8 | 9
so slot 1 = 0, 0 and slot 8 = 2, 1
"""
slot = int(slot)
for i in range(1, self.__GRID_LEN__ + 1):
slot_a = (i - 1) * self.__GRID_LEN__
slot_z = i * self.__GRID_LEN__
if slot > slot_a and slot <= slot_z:
return (i - 1, slot - (1 + slot_a))
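# Quick check of the slot arithmetic above on a 3x3 grid:
#   slot_to_index(1) -> (0, 0)
#   slot_to_index(8) -> (2, 1)
#   slot_to_index(9) -> (2, 2)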
def set_x_slot(self, slot):
"""
Convenience method for 'x', using slot (e.g. in a 3x3 grid slots are numbered from 1 to 9 (left to right)
:param slot: the slot number for a grid
:return:
"""
idx = self.slot_to_index(slot)
self.set_x(idx[0], idx[1])
def clear(self, row, col):
"""
Clear a particular grid entry
:param row:
:param col:
:return:
"""
self.GRID[row][col] = ''
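# End-to-end usage sketch for Grid on the default 3x3 board, using the
# slot-based helpers defined above:
#
#   g = Grid(3)
#   g.set_o_slot(5)         # 'o' takes the centre
#   g.set_x_slot(1)         # 'x' takes the top-left corner
#   g.pretty_print()
#   winner = g.game_over()  # None until one symbol completes a line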
def game_over(self):
"""
Check if the game is over (a straight line with same symbols)
:return: Winning mark ('x' or 'o', None otherwise)
"""
# Check Vertical
for i in range(0, self.__GRID_LEN__):
x_count = 0
o_count = 0
for j in range(0, self.__GRID_LEN__):
if self.GRID[i][j] == self.X:
x_count += 1
elif self.GRID[i][j] == self.O:
o_count += 1
if x_count == self.__GRID_LEN__:
return self.X
if o_count == self.__GRID_LEN__:
return self.O
# Check Horizontal
x_count = 0
o_count = 0
for j in range(0, self.__GRID_LEN__):
if self.GRID[j][i] == self.X:
x_count += 1
elif self.GRID[j][i] == self.O:
o_count += 1
if x_count == self.__GRID_LEN__:
return self.X
if o_count == self.__GRID_LEN__:
return self.O
# Check Cross
matches = 1
for i in range(0, self.__GRID_LEN__ - 1):
if self.GRID[i][i] != '' and self.GRID[i][i] == self.GRID[i + 1][i + 1]:
matches += 1
if matches == self.__GRID_LEN__:
return self.GRID[0][0]
# Check Cross (anti-diagonal, completed to mirror the main-diagonal check above)
matches = 1
for i in range(0, self.__GRID_LEN__ - 1):
if self.GRID[i][self.__GRID_LEN__ - 1 - i] != '' and self.GRID[i][self.__GRID_LEN__ - 1 - i] == self.GRID[i + 1][self.__GRID_LEN__ - 2 - i]:
matches += 1
if matches == self.__GRID_LEN__:
return self.GRID[0][self.__GRID_LEN__ - 1]
return None