code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
def extractAllaboutmynothingsBlogspotCom(item):
'''
Parser for 'allaboutmynothings.blogspot.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Yasashii Shinjitsu to Seiryaku Kekkon', 'Yasashii Shinjitsu to Seiryaku Kekkon', 'translated'),
('Cinderella Dropped Her Panties', 'Cinderella Dropped Her Panties', 'translated'),
('Please Be More Serious', 'Please Be More Serious', 'translated'),
('This Has Become Serious', 'This Has Become Serious', 'translated'),
('Being swayed by the Deluded Shacho', 'Being swayed by the Deluded Shacho', 'translated'),
('Woman Hating Duke', 'Women-Hating Duke Feels Lust Only For One Aristocrat Lady', 'translated'),
('True and False Young Master', 'True and False Young Master', 'translated'),
('The Love Potion', 'The Love Potion', 'translated'),
('<NAME>', '<NAME>', 'translated'),
('namjang secretary', 'namjang secretary', 'translated'),
('shameful lessons', 'The S Manager\'s Shameful Lessons', 'translated'),
('reconcile', 'Do Not Reconcile', 'translated'),
('Dark Empress', 'Dark Empress', 'translated'),
('mo yan transmigrates', 'Mo Yan\'s Transmigration Inside The Book', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | WebMirror/management/rss_parser_funcs/feed_parse_extractAllaboutmynothingsBlogspotCom.py | def extractAllaboutmynothingsBlogspotCom(item):
'''
Parser for 'allaboutmynothings.blogspot.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Yasashii Shinjitsu to Seiryaku Kekkon', 'Yasashii Shinjitsu to Seiryaku Kekkon', 'translated'),
('Cinderella Dropped Her Panties', 'Cinderella Dropped Her Panties', 'translated'),
('Please Be More Serious', 'Please Be More Serious', 'translated'),
('This Has Become Serious', 'This Has Become Serious', 'translated'),
('Being swayed by the Deluded Shacho', 'Being swayed by the Deluded Shacho', 'translated'),
('Woman Hating Duke', 'Women-Hating Duke Feels Lust Only For One Aristocrat Lady', 'translated'),
('True and False Young Master', 'True and False Young Master', 'translated'),
('The Love Potion', 'The Love Potion', 'translated'),
('<NAME>', '<NAME>', 'translated'),
('namjang secretary', 'namjang secretary', 'translated'),
('shameful lessons', 'The S Manager\'s Shameful Lessons', 'translated'),
('reconcile', 'Do Not Reconcile', 'translated'),
('Dark Empress', 'Dark Empress', 'translated'),
('mo yan transmigrates', 'Mo Yan\'s Transmigration Inside The Book', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | 0.135118 | 0.13707 |
from numpy import *
from scipy.misc import imsave
def quartic_kernel(x):
if not (-1.0 < x < 1.0):
return 0.0
return 15.0 / 16.0 * (1 - x ** 2) ** 2
class Gradient:
def __init__(self):
self.colors = []
self.steps = []
def add_color(self, color, step):
self.colors.append(array(color))
self.steps.append(step)
def value(self, at):
i = 0
while not (self.steps[i] <= at <= self.steps[i + 1]):
i += 1
d = 1 - (self.steps[i + 1] - at) / (self.steps[i + 1] - self.steps[i])
fr = self.colors[i]
to = self.colors[i + 1]
return (to - fr) * d + fr
def to_image(self, alpha=False):
img = zeros((8, 256, 4))
for x_ in range(img.shape[1]):
x = x_ / img.shape[1]
val = self.value(x)
for i in range(img.shape[0]):
img[i][x_] = val
if not alpha:
img[i][x_][3] = 1.0
return img
def to_csrc(self, name):
ret = "rgba_t " + name + "[] = {\n\t{ 0, 0, 0, 0 },\n"
for x in range(1, 256):
clr = self.value(x / 255)
ret += "\t{ % 4d, % 4d, % 4d, % 4d },\n" % \
tuple(min(int(c * 256), 255) for c in clr)
ret += "};\n"
return ret
gradient_heat = Gradient()
gradient_heat.add_color([0.0, 0.0, 0.0, 0.3], 0.00)
gradient_heat.add_color([0.0, 0.0, 1.0, 0.3], 0.15)
gradient_heat.add_color([0.0, 0.5, 1.0, 0.3], 0.25)
gradient_heat.add_color([0.0, 1.0, 0.0, 0.3], 0.40)
gradient_heat.add_color([1.0, 1.0, 0.0, 0.4], 0.60)
gradient_heat.add_color([1.0, 0.5, 0.0, 0.5], 0.80)
gradient_heat.add_color([1.0, 0.0, 0.0, 0.6], 1.00)
gradient_grayscale = Gradient()
gradient_grayscale.add_color([0.0, 0.0, 0.0, 0.3], 0.00)
gradient_grayscale.add_color([1.0, 1.0, 1.0, 0.6], 1.00)
gradients = [
("heat", gradient_heat),
("grayscale", gradient_grayscale)
]
print()
print("#include \"colormaps.h\"")
print()
print("// Generated by utils/gencolormaps.py")
print()
for name, grad in gradients:
print(grad.to_csrc("colormap_" + name))
imsave("gradient_" + name + ".png", grad.to_image()) | utils/gencolormaps.py |
from numpy import *
from scipy.misc import imsave
def quartic_kernel(x):
if not (-1.0 < x < 1.0):
return 0.0
return 15.0 / 16.0 * (1 - x ** 2) ** 2
class Gradient:
def __init__(self):
self.colors = []
self.steps = []
def add_color(self, color, step):
self.colors.append(array(color))
self.steps.append(step)
def value(self, at):
i = 0
while not (self.steps[i] <= at <= self.steps[i + 1]):
i += 1
d = 1 - (self.steps[i + 1] - at) / (self.steps[i + 1] - self.steps[i])
fr = self.colors[i]
to = self.colors[i + 1]
return (to - fr) * d + fr
def to_image(self, alpha=False):
img = zeros((8, 256, 4))
for x_ in range(img.shape[1]):
x = x_ / img.shape[1]
val = self.value(x)
for i in range(img.shape[0]):
img[i][x_] = val
if not alpha:
img[i][x_][3] = 1.0
return img
def to_csrc(self, name):
ret = "rgba_t " + name + "[] = {\n\t{ 0, 0, 0, 0 },\n"
for x in range(1, 256):
clr = self.value(x / 255)
ret += "\t{ % 4d, % 4d, % 4d, % 4d },\n" % \
tuple(min(int(c * 256), 255) for c in clr)
ret += "};\n"
return ret
gradient_heat = Gradient()
gradient_heat.add_color([0.0, 0.0, 0.0, 0.3], 0.00)
gradient_heat.add_color([0.0, 0.0, 1.0, 0.3], 0.15)
gradient_heat.add_color([0.0, 0.5, 1.0, 0.3], 0.25)
gradient_heat.add_color([0.0, 1.0, 0.0, 0.3], 0.40)
gradient_heat.add_color([1.0, 1.0, 0.0, 0.4], 0.60)
gradient_heat.add_color([1.0, 0.5, 0.0, 0.5], 0.80)
gradient_heat.add_color([1.0, 0.0, 0.0, 0.6], 1.00)
gradient_grayscale = Gradient()
gradient_grayscale.add_color([0.0, 0.0, 0.0, 0.3], 0.00)
gradient_grayscale.add_color([1.0, 1.0, 1.0, 0.6], 1.00)
gradients = [
("heat", gradient_heat),
("grayscale", gradient_grayscale)
]
print()
print("#include \"colormaps.h\"")
print()
print("// Generated by utils/gencolormaps.py")
print()
for name, grad in gradients:
print(grad.to_csrc("colormap_" + name))
imsave("gradient_" + name + ".png", grad.to_image()) | 0.583678 | 0.417717 |
import os
import codecs
import operator
import json
import itertools
import numpy as np
def load_data(dataset='unknown',
location='./data/',
maxlen=None,
seed=1234,
limit_cls=1000):
""" Loads a dataset.
Expects dataset to exist in 'location' directory as file 'dataset.npy'.
Arguments:
maxlen : integer, sequences longer than this will be skipped
seed : integer, seed for random shuffling
limit_cls : integer, limit on how many sequences is retrieved per class
Returns:
tuple of numpy arrays: (x, y)
"""
filename = dataset + ".npy"
path = os.path.join(location, filename)
data = np.load(path)
sentences, labels = data[0], data[1]
return sentences, labels
def prepare_data(classes=['neg', 'pos'],
dataset='unknown',
location='./data/',
seed=1234):
""" Loads raw strings and writes it as a a dataset.
Expects a dataset in '{location}' as files '{dataset}_{class}.txt'.
Arguments:
seed : integer, seed for random shuffling
Side-effects:
Writes files '{dataset}.npz' with the training data as numbers
and '{dataset}_index.json' as index to the meaning of numbers.
Note that sentences should already be tokenized.
"""
sentences = []
labels = []
word_counts = {}
word_index = {}
# 1. first get all sentences
for idx, class_ in enumerate(classes):
filename = "{}_{}.txt".format(dataset, class_)
path = os.path.join(location, filename)
sentences_cls = codecs.open(path, 'r', 'utf-8').readlines()
# NOTE the strings are turned to lower case, should they?
# NOTE should there be a maxlen for a sentence?
#sentences += [list(map(str.lower, s.strip())) for s in sentences_cls]
for sentence in sentences_cls:
sentence = sentence.strip().lower()
items = sentence.split()
sentences += [items]
labels += [idx] * len(sentences_cls)
# 2. count all the words
for words in sentences:
for word in words:
try:
word_counts[word] += 1
except KeyError:
word_counts[word] = 1
# 3. then give an index number to each words depending how common they are
# build in somme indices as convention:
# 0 -> padding, 1 -> start, 2 -> OOV (words that were cut out)
# NOTE consider removing these indices
for idx, (word, count) in enumerate(
sorted(word_counts.items(), key=operator.itemgetter(1), reverse=True),
start=3):
word_index[word] = idx
# 4. convert sentences with labels to numbers
encoded_sentences = []
for idx, words in enumerate(sentences):
encoded = [word_index[word] for word in words]
#encoded_data.append([1] + encoded_sentence)
encoded_sentences.append(encoded)
# 5. save everything
# training data
encoded_sentences = np.array(encoded_sentences)
labels = np.array(labels, dtype=np.int8)
data = np.array([encoded_sentences, labels])
path = os.path.join(location, dataset)
np.save(path, data)
# word indices
filename = "{}_index.txt".format(dataset)
path = os.path.join(location, filename)
with codecs.open(path, 'w', encoding="utf-8") as output:
json.dump(word_index, output, ensure_ascii=False)
# word counts
filename = "{}_words.txt".format(dataset)
path = os.path.join(location, filename)
with open(path, 'w', encoding="utf-8", errors='replace') as output:
for word, count in sorted(word_counts.items(), key=operator.itemgetter(1), reverse=True):
output.write(f"{count} {word}\n")
def get_index(dataset='unknown', location='./data/'):
"""Retrieves the dictionary mapping word to word indices.
Arguments
path: where to cache the data (relative to `~/.keras/dataset`).
Returns
The word index dictionary.
"""
filename = "{}_index.txt".format(dataset)
path = os.path.join(location, filename)
with codecs.open(path, 'r', encoding="utf-8") as output:
word_index = json.load(output)
return word_index
# Helper functions
def max_value(np_array):
""" Returns the length of the longest sentence. """
return max([max(item) for item in np_array])
def get_word_decoder(train_data, word_index):
""" Returns a function that can decode a sentence. """
reverse_word_index = dict(
[(value, key) for (key, value) in word_index.items()])
def reverse(idx):
return " ".join([reverse_word_index.get(i, '?') for i in train_data[idx]])
return reverse
def vectorize_sequence(sequences, dimension):
""" Turns sequences into vectors of 0s and 1s. """
results = np.zeros((len(sequences), dimension))
for i, seq in enumerate(sequences):
results[i, seq] = 1.
return results
if __name__ == "__main__":
prepare_data(dataset='korp_devel') | dataset.py | import os
import codecs
import operator
import json
import itertools
import numpy as np
def load_data(dataset='unknown',
location='./data/',
maxlen=None,
seed=1234,
limit_cls=1000):
""" Loads a dataset.
Expects dataset to exist in 'location' directory as file 'dataset.npy'.
Arguments:
maxlen : integer, sequences longer than this will be skipped
seed : integer, seed for random shuffling
limit_cls : integer, limit on how many sequences is retrieved per class
Returns:
tuple of numpy arrays: (x, y)
"""
filename = dataset + ".npy"
path = os.path.join(location, filename)
data = np.load(path)
sentences, labels = data[0], data[1]
return sentences, labels
def prepare_data(classes=['neg', 'pos'],
dataset='unknown',
location='./data/',
seed=1234):
""" Loads raw strings and writes it as a a dataset.
Expects a dataset in '{location}' as files '{dataset}_{class}.txt'.
Arguments:
seed : integer, seed for random shuffling
Side-effects:
Writes files '{dataset}.npz' with the training data as numbers
and '{dataset}_index.json' as index to the meaning of numbers.
Note that sentences should already be tokenized.
"""
sentences = []
labels = []
word_counts = {}
word_index = {}
# 1. first get all sentences
for idx, class_ in enumerate(classes):
filename = "{}_{}.txt".format(dataset, class_)
path = os.path.join(location, filename)
sentences_cls = codecs.open(path, 'r', 'utf-8').readlines()
# NOTE the strings are turned to lower case, should they?
# NOTE should there be a maxlen for a sentence?
#sentences += [list(map(str.lower, s.strip())) for s in sentences_cls]
for sentence in sentences_cls:
sentence = sentence.strip().lower()
items = sentence.split()
sentences += [items]
labels += [idx] * len(sentences_cls)
# 2. count all the words
for words in sentences:
for word in words:
try:
word_counts[word] += 1
except KeyError:
word_counts[word] = 1
# 3. then give an index number to each words depending how common they are
# build in somme indices as convention:
# 0 -> padding, 1 -> start, 2 -> OOV (words that were cut out)
# NOTE consider removing these indices
for idx, (word, count) in enumerate(
sorted(word_counts.items(), key=operator.itemgetter(1), reverse=True),
start=3):
word_index[word] = idx
# 4. convert sentences with labels to numbers
encoded_sentences = []
for idx, words in enumerate(sentences):
encoded = [word_index[word] for word in words]
#encoded_data.append([1] + encoded_sentence)
encoded_sentences.append(encoded)
# 5. save everything
# training data
encoded_sentences = np.array(encoded_sentences)
labels = np.array(labels, dtype=np.int8)
data = np.array([encoded_sentences, labels])
path = os.path.join(location, dataset)
np.save(path, data)
# word indices
filename = "{}_index.txt".format(dataset)
path = os.path.join(location, filename)
with codecs.open(path, 'w', encoding="utf-8") as output:
json.dump(word_index, output, ensure_ascii=False)
# word counts
filename = "{}_words.txt".format(dataset)
path = os.path.join(location, filename)
with open(path, 'w', encoding="utf-8", errors='replace') as output:
for word, count in sorted(word_counts.items(), key=operator.itemgetter(1), reverse=True):
output.write(f"{count} {word}\n")
def get_index(dataset='unknown', location='./data/'):
"""Retrieves the dictionary mapping word to word indices.
Arguments
path: where to cache the data (relative to `~/.keras/dataset`).
Returns
The word index dictionary.
"""
filename = "{}_index.txt".format(dataset)
path = os.path.join(location, filename)
with codecs.open(path, 'r', encoding="utf-8") as output:
word_index = json.load(output)
return word_index
# Helper functions
def max_value(np_array):
""" Returns the length of the longest sentence. """
return max([max(item) for item in np_array])
def get_word_decoder(train_data, word_index):
""" Returns a function that can decode a sentence. """
reverse_word_index = dict(
[(value, key) for (key, value) in word_index.items()])
def reverse(idx):
return " ".join([reverse_word_index.get(i, '?') for i in train_data[idx]])
return reverse
def vectorize_sequence(sequences, dimension):
""" Turns sequences into vectors of 0s and 1s. """
results = np.zeros((len(sequences), dimension))
for i, seq in enumerate(sequences):
results[i, seq] = 1.
return results
if __name__ == "__main__":
prepare_data(dataset='korp_devel') | 0.609059 | 0.527073 |
import numpy as np
import math
import extendedMD.dtwdist as dtwdist
def prune_motifs_with_mdl(ts, motif_dic_list, r):
"""
This function returns the most relevant motifs from the original list of motif extracted from the emd algorithm,
based on the computed MDL cost and avoiding overlapping motifs
:param ts: 1-dimensional time-series either resulting from the PCA method or the original 1-dimensional time-series
:type ts: 1d array
:param motif_dic_list: list of motif dictionaries returned from the emd algorithm
:type motif_dic_list: list of dic
:param r: maximum distance to the center of the motif
:type r: float
:return: list of dictionaries with the most relevant motifs. The list is ordered based on the MDL cost
:rtype: list of dic
"""
sorted_dic_list = sorted(motif_dic_list, key=lambda dic: dic['mdl_cost'])
pruned_motif_dic_list = prune_motifs(ts, sorted_dic_list, r)
return pruned_motif_dic_list
def prune_motifs_with_dist(ts, motif_dic_list, r, mdl_bins):
"""
:param ts: 1-dimensional time-series either resulting from the PCA method or the original 1-dimensional time-series
:type ts: 1d array
:param motif_dic_list: list of motif dictionaries returned from the emd algorithm
:type motif_dic_list: list of dic
:param r: maximum distance to the center of the motif
:type r: float
:param mdl_bins: number of bins to break the MDL cost range
:type mdl_bins: int
:return: list of dictionaries with the most relevant motifs. The list is ordered based on MDL cost and motif's compactness
:rtype: list of dic
"""
mdl_sorted_dic_list = sorted(motif_dic_list, key=lambda dic: dic['mdl_cost'])
step = math.floor(len(mdl_sorted_dic_list) / mdl_bins)
dist_sorted_dic_list = []
for i in range(mdl_bins):
temp_dic_list = mdl_sorted_dic_list[i * step:(i + 1) * step]
temp_dist_sorted_dic_list = sorted(temp_dic_list, key=lambda dic: dic['mean_dist'])
dist_sorted_dic_list += temp_dist_sorted_dic_list
if mdl_bins * step < len(mdl_sorted_dic_list):
temp_dic_list = mdl_sorted_dic_list[mdl_bins * step:]
temp_dist_sorted_dic_list = sorted(temp_dic_list, key=lambda dic: dic['mean_dist'])
dist_sorted_dic_list += temp_dist_sorted_dic_list
pruned_motif_dic_list = prune_motifs(ts, dist_sorted_dic_list, r)
return pruned_motif_dic_list
def prune_motifs(ts, sorted_dic_list, r):
"""
:param ts: 1-dimensional time-series either resulting from the PCA method or the original 1-dimensional time-series
:type ts: 1d array
:param sorted_dic_list: list of motif dictionaries returned from the emd algorithm, ordered by relevance
:type sorted_dic_list: list of dic
:param r: maximum distance to the center of the motif
:type r: float
:return: list of dictionaries with the most relevant motifs
:rtype: list of dic
"""
pruned_motif_dic_list = [sorted_dic_list[0]]
first_center_ts = extract_ts_from_pointers(ts, sorted_dic_list[0]['center_ts_pointers'])
pruned_center_ts_list = [first_center_ts]
for motif_dic in sorted_dic_list[1:]:
cur_center_ts = extract_ts_from_pointers(ts, motif_dic['center_ts_pointers'])
dist_list = dtwdist.compute_dwt_dist_between_ts_and_list(cur_center_ts, pruned_center_ts_list, 2 * r)
dist_test_list = [dist <= 2 * r for dist in dist_list]
if sum(dist_test_list) == 0:
pruned_motif_dic_list.append(motif_dic)
pruned_center_ts_list.append(cur_center_ts)
else:
continue
return pruned_motif_dic_list
def extract_ts_from_pointers(ts, pointers):
"""
:param ts: 1-dimensional time-series either resulting from the PCA method or the original 1-dimensional time-series
:type ts: 1d array
:param pointers: list of indexes related to the subsequence one wishes to extract from ts
:type pointers: list of int
:return: time-series subsequence
:rtype: 1d array
"""
ts_from_pointers = np.array([ts[i] for i in pointers])
return ts_from_pointers | extendedMD/pruning.py | import numpy as np
import math
import extendedMD.dtwdist as dtwdist
def prune_motifs_with_mdl(ts, motif_dic_list, r):
"""
This function returns the most relevant motifs from the original list of motif extracted from the emd algorithm,
based on the computed MDL cost and avoiding overlapping motifs
:param ts: 1-dimensional time-series either resulting from the PCA method or the original 1-dimensional time-series
:type ts: 1d array
:param motif_dic_list: list of motif dictionaries returned from the emd algorithm
:type motif_dic_list: list of dic
:param r: maximum distance to the center of the motif
:type r: float
:return: list of dictionaries with the most relevant motifs. The list is ordered based on the MDL cost
:rtype: list of dic
"""
sorted_dic_list = sorted(motif_dic_list, key=lambda dic: dic['mdl_cost'])
pruned_motif_dic_list = prune_motifs(ts, sorted_dic_list, r)
return pruned_motif_dic_list
def prune_motifs_with_dist(ts, motif_dic_list, r, mdl_bins):
"""
:param ts: 1-dimensional time-series either resulting from the PCA method or the original 1-dimensional time-series
:type ts: 1d array
:param motif_dic_list: list of motif dictionaries returned from the emd algorithm
:type motif_dic_list: list of dic
:param r: maximum distance to the center of the motif
:type r: float
:param mdl_bins: number of bins to break the MDL cost range
:type mdl_bins: int
:return: list of dictionaries with the most relevant motifs. The list is ordered based on MDL cost and motif's compactness
:rtype: list of dic
"""
mdl_sorted_dic_list = sorted(motif_dic_list, key=lambda dic: dic['mdl_cost'])
step = math.floor(len(mdl_sorted_dic_list) / mdl_bins)
dist_sorted_dic_list = []
for i in range(mdl_bins):
temp_dic_list = mdl_sorted_dic_list[i * step:(i + 1) * step]
temp_dist_sorted_dic_list = sorted(temp_dic_list, key=lambda dic: dic['mean_dist'])
dist_sorted_dic_list += temp_dist_sorted_dic_list
if mdl_bins * step < len(mdl_sorted_dic_list):
temp_dic_list = mdl_sorted_dic_list[mdl_bins * step:]
temp_dist_sorted_dic_list = sorted(temp_dic_list, key=lambda dic: dic['mean_dist'])
dist_sorted_dic_list += temp_dist_sorted_dic_list
pruned_motif_dic_list = prune_motifs(ts, dist_sorted_dic_list, r)
return pruned_motif_dic_list
def prune_motifs(ts, sorted_dic_list, r):
"""
:param ts: 1-dimensional time-series either resulting from the PCA method or the original 1-dimensional time-series
:type ts: 1d array
:param sorted_dic_list: list of motif dictionaries returned from the emd algorithm, ordered by relevance
:type sorted_dic_list: list of dic
:param r: maximum distance to the center of the motif
:type r: float
:return: list of dictionaries with the most relevant motifs
:rtype: list of dic
"""
pruned_motif_dic_list = [sorted_dic_list[0]]
first_center_ts = extract_ts_from_pointers(ts, sorted_dic_list[0]['center_ts_pointers'])
pruned_center_ts_list = [first_center_ts]
for motif_dic in sorted_dic_list[1:]:
cur_center_ts = extract_ts_from_pointers(ts, motif_dic['center_ts_pointers'])
dist_list = dtwdist.compute_dwt_dist_between_ts_and_list(cur_center_ts, pruned_center_ts_list, 2 * r)
dist_test_list = [dist <= 2 * r for dist in dist_list]
if sum(dist_test_list) == 0:
pruned_motif_dic_list.append(motif_dic)
pruned_center_ts_list.append(cur_center_ts)
else:
continue
return pruned_motif_dic_list
def extract_ts_from_pointers(ts, pointers):
"""
:param ts: 1-dimensional time-series either resulting from the PCA method or the original 1-dimensional time-series
:type ts: 1d array
:param pointers: list of indexes related to the subsequence one wishes to extract from ts
:type pointers: list of int
:return: time-series subsequence
:rtype: 1d array
"""
ts_from_pointers = np.array([ts[i] for i in pointers])
return ts_from_pointers | 0.723212 | 0.736211 |
from django.shortcuts import render, redirect
from proofs.models import Proposition, Proof
from .forms import MajorSubmissionForm
from .typeChecker import *
from django.urls import reverse
def home(request):
concobj = []
conclusions = Proof.objects.all()
for obj in conclusions:
concobj.append(obj.conclusion)
return render(request, 'home.html', {
'title': '<NAME>',
'conclusions': concobj,
})
def about(request):
return render(request, 'about.html')
def proposition_detail(request, id):
proofs = Proposition.objects.get(id=id).conclusion.all()[0]
conclusion = proofs.conclusion
major = proofs.major
minor = proofs.minor
return render(request, 'proposition_detail.html', {
'major': major,
'minor': minor,
'conclusion': conclusion,
'title': 'Önerme',
})
def submit(request):
form = MajorSubmissionForm()
if request.method == "POST":
form = MajorSubmissionForm(request.POST)
if form.is_valid():
major = Proposition.objects.create(
is_universal=form.cleaned_data['is_universal_major'],
subject=form.cleaned_data['subject_major'],
is_affirmative=form.cleaned_data['is_affirmative_major'],
predicate=form.cleaned_data['predicate_major'],
)
setPropositionType(major)
minor = Proposition.objects.create(
is_universal=form.cleaned_data['is_universal_minor'],
subject=form.cleaned_data['subject_minor'],
is_affirmative=form.cleaned_data['is_affirmative_minor'],
predicate=form.cleaned_data['predicate_minor'],
)
setPropositionType(minor)
conclusion = Proposition.objects.create(
is_universal=form.cleaned_data['is_universal_conclusion'],
subject=form.cleaned_data['subject_conclusion'],
is_affirmative=form.cleaned_data['is_affirmative_conclusion'],
predicate=form.cleaned_data['predicate_conclusion'],
)
setConclusionType(major,minor,conclusion)
major.save()
minor.save()
conclusion.save()
Proof.objects.create(
major=major,
minor=minor,
conclusion=conclusion
)
return redirect(reverse("proposition_detail", args=[conclusion.id]))
return render(request ,"submit.html", {'form': form}) | deductivereasoning/proofs/views.py | from django.shortcuts import render, redirect
from proofs.models import Proposition, Proof
from .forms import MajorSubmissionForm
from .typeChecker import *
from django.urls import reverse
def home(request):
concobj = []
conclusions = Proof.objects.all()
for obj in conclusions:
concobj.append(obj.conclusion)
return render(request, 'home.html', {
'title': '<NAME>',
'conclusions': concobj,
})
def about(request):
return render(request, 'about.html')
def proposition_detail(request, id):
proofs = Proposition.objects.get(id=id).conclusion.all()[0]
conclusion = proofs.conclusion
major = proofs.major
minor = proofs.minor
return render(request, 'proposition_detail.html', {
'major': major,
'minor': minor,
'conclusion': conclusion,
'title': 'Önerme',
})
def submit(request):
form = MajorSubmissionForm()
if request.method == "POST":
form = MajorSubmissionForm(request.POST)
if form.is_valid():
major = Proposition.objects.create(
is_universal=form.cleaned_data['is_universal_major'],
subject=form.cleaned_data['subject_major'],
is_affirmative=form.cleaned_data['is_affirmative_major'],
predicate=form.cleaned_data['predicate_major'],
)
setPropositionType(major)
minor = Proposition.objects.create(
is_universal=form.cleaned_data['is_universal_minor'],
subject=form.cleaned_data['subject_minor'],
is_affirmative=form.cleaned_data['is_affirmative_minor'],
predicate=form.cleaned_data['predicate_minor'],
)
setPropositionType(minor)
conclusion = Proposition.objects.create(
is_universal=form.cleaned_data['is_universal_conclusion'],
subject=form.cleaned_data['subject_conclusion'],
is_affirmative=form.cleaned_data['is_affirmative_conclusion'],
predicate=form.cleaned_data['predicate_conclusion'],
)
setConclusionType(major,minor,conclusion)
major.save()
minor.save()
conclusion.save()
Proof.objects.create(
major=major,
minor=minor,
conclusion=conclusion
)
return redirect(reverse("proposition_detail", args=[conclusion.id]))
return render(request ,"submit.html", {'form': form}) | 0.373876 | 0.153803 |
import torch
import torch.nn as nn
import numpy as np
from utils import softminus
import math
import numbers
from torch.nn import functional as F
class SubNet(nn.ModuleList):
def __init__(self, list):
super(SubNet, self).__init__(list)
def forward(self, input):
output = input
for l in self:
output = l(output)
return output
class GaussianSmoothing(nn.Module):
"""
Apply gaussian smoothing on a
1d, 2d or 3d tensor. Filtering is performed seperately for each channel
in the input using a depthwise convolution.
Arguments:
channels (int, sequence): Number of channels of the input tensors. Output will
have this number of channels as well.
kernel_size (int, sequence): Size of the gaussian kernel.
sigma (float, sequence): Standard deviation of the gaussian kernel.
dim (int, optional): The number of dimensions of the data.
Default value is 2 (spatial).
"""
def __init__(self, channels, kernel_size, sigma, dim=2):
super(GaussianSmoothing, self).__init__()
if isinstance(kernel_size, numbers.Number):
kernel_size = [kernel_size] * dim
if isinstance(sigma, numbers.Number):
sigma = [sigma] * dim
# The gaussian kernel is the product of the
# gaussian function of each dimension.
kernel = 1
meshgrids = torch.meshgrid(
[
torch.arange(size, dtype=torch.float32)
for size in kernel_size
]
)
for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
mean = (size - 1) / 2
kernel *= 1 / (std * math.sqrt(2 * math.pi)) * \
torch.exp(-((mgrid - mean) / std) ** 2 / 2)
# Make sure sum of values in gaussian kernel equals 1.
kernel = kernel / torch.sum(kernel)
# Reshape to depthwise convolutional weight
kernel = kernel.view(1, 1, *kernel.size())
kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
self.register_buffer('weight', kernel)
self.groups = channels
if dim == 1:
self.conv = F.conv1d
elif dim == 2:
self.conv = F.conv2d
elif dim == 3:
self.conv = F.conv3d
else:
raise RuntimeError(
'Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim)
)
def forward(self, input):
"""
Apply gaussian filter to input.
Arguments:
input (torch.Tensor): Input to apply gaussian filter on.
Returns:
filtered (torch.Tensor): Filtered output.
"""
return self.conv(input, weight=self.weight, groups=self.groups)
class NNMF(nn.Module):
def __init__(self, gmf_size, mlp_size, mlp_layers, threshold_layers):
super(NNMF, self).__init__()
self.gmf_size = gmf_size
self.mlp_size = mlp_size
self.threshold_layers = threshold_layers
self.mlp_layers = mlp_layers
self.embedding_activation = nn.functional.softplus
self.mlp_activation = nn.LeakyReLU
self.threshold_activation = nn.ReLU
self.threshold_activation_output = nn.ReLU
self.output_activation = nn.Sigmoid
self.neu_mf_input_size = self.mlp_layers[-1] * (self.mlp_size > 0) + self.gmf_size
self.mlp_input_size = 2 * self.mlp_size
self.threshold_mlp = None
self.mlp = None
self.neu_mf = None
self.num_pixels = None
self.num_frames = None
self.gmf_u = None
self.gmf_v = None
self.mlp_u = None
self.mlp_v = None
self.define_nn()
def define_nn(self):
self.threshold_mlp = SubNet([nn.Linear(1, self.threshold_layers[0]), self.threshold_activation()] +
[item for t in [(nn.Linear(self.threshold_layers[j],
self.threshold_layers[j + 1]),
self.threshold_activation())
for j in range(len(self.threshold_layers) - 1)] for item in t])
self.threshold_mlp[-1] = self.threshold_activation_output()
self.mlp = SubNet([nn.Linear(self.mlp_input_size, self.mlp_layers[0]), self.mlp_activation()] +
[item for t in [(nn.Linear(self.mlp_layers[j], self.mlp_layers[j + 1]),
self.mlp_activation())
for j in range(len(self.mlp_layers) - 1)] for item in t])
self.neu_mf = SubNet([nn.Linear(self.neu_mf_input_size, 1), self.output_activation()])
def set_matrix(self, matrix2d, embedding_nmf_init=None):
self.num_pixels = matrix2d.shape[0]
self.num_frames = matrix2d.shape[1]
initialize_embedding = lambda x: nn.Embedding.from_pretrained(torch.from_numpy(x).float(), freeze=False)
get_random_init = lambda size: softminus(np.random.normal(loc=0.5, scale=0.01, size=size))
if embedding_nmf_init:
self.gmf_u = initialize_embedding(softminus(embedding_nmf_init[0]))
self.gmf_v = initialize_embedding(softminus(embedding_nmf_init[1]))
else:
self.gmf_u = initialize_embedding(get_random_init((self.num_pixels, self.gmf_size)))
self.gmf_v = initialize_embedding(get_random_init((self.num_frames, self.gmf_size)))
self.mlp_u = initialize_embedding(get_random_init((self.num_pixels, self.mlp_size)))
self.mlp_v = initialize_embedding(get_random_init((self.num_frames, self.mlp_size)))
def init_params(self, gmf_net_init=False):
def init_weights(m):
if type(m) == nn.Sequential:
try:
nn.init.xavier_normal_(m.weight.data, gain=1)
nn.init.normal_(m.bias, mean=0.0, std=0.01)
except:
pass
self.apply(init_weights)
if gmf_net_init:
with torch.no_grad():
for l in self.mlp:
try:
l.weight.fill_(0.)
l.bias.fill_(0.)
except:
pass
for l in self.neu_mf:
try:
l.weight.fill_(1.)
l.bias.fill_(0.)
except:
pass
with torch.no_grad():
for l in self.threshold_mlp:
try:
nn.init.eye_(l.weight)
l.bias.fill_(0.)
except:
pass
def forward(self, pixel, frame, target):
neu_mf_input = []
if self.mlp_size != 0:
mlp_input = torch.cat([self.embedding_activation(self.mlp_u(pixel)),
self.embedding_activation(self.mlp_v(frame))], dim=1)
mlp_output = self.mlp(mlp_input)
neu_mf_input += [mlp_output]
if self.gmf_size != 0:
neu_mf_input += [torch.mul(self.embedding_activation(self.gmf_u(pixel)),
self.embedding_activation(self.gmf_v(frame)))]
neu_mf_input = torch.cat(neu_mf_input, dim=1)
neu_mf_output = self.neu_mf(neu_mf_input)
s_input = target - neu_mf_output
s_output = self.threshold_mlp(s_input)
return neu_mf_output, s_output
def embedding_parameters(self):
embedding_params = []
if self.mlp_size != 0:
embedding_params += list(self.mlp_u.parameters()) + list(self.mlp_v.parameters())
if self.gmf_size != 0:
embedding_params += list(self.gmf_u.parameters()) + list(self.gmf_v.parameters())
return embedding_params
def embedding_regularization(self, pixel, frame):
loss = 0
if self.gmf_size != 0:
loss += torch.norm(self.embedding_activation((self.gmf_u(pixel)))) + \
torch.norm(self.embedding_activation((self.gmf_v(frame))))
if self.mlp_size != 0:
loss += torch.norm(self.embedding_activation((self.mlp_u(pixel)))) + \
torch.norm(self.embedding_activation((self.mlp_v(frame))))
return loss / pixel.shape[0]
def spatial_regularization(self, device):
loss = 0
def refactor_embedding(emb):
emb_r = self.embedding_activation(emb)
emb_r = emb_r.view([1, int(np.sqrt(self.num_pixels)), int(np.sqrt(self.num_pixels)), -1])
emb_r = emb_r.permute([0, 3, 1, 2])
return emb_r
def add_loss(embedding_weight, size):
kernel_size = 15
pad = list(int((kernel_size-1)/2)*np.array([1, 1, 1, 1, 0, 0, 0, 0]))
gaussian_sm = GaussianSmoothing(channels=size, kernel_size=kernel_size, sigma=1, dim=2).to(device)
gmf_u = refactor_embedding(embedding_weight)
gmf_u_sq = torch.mul(gmf_u, gmf_u)
conv_gmf = torch.nn.functional.pad(gaussian_sm(gmf_u),
pad=pad, mode='constant', value=0.)
conv_gmf_sq = torch.nn.functional.pad(gaussian_sm(gmf_u_sq), pad=pad, mode='constant', value=0.)
return (torch.sum(gmf_u_sq.flatten()) + torch.sum(conv_gmf_sq) -
2 * torch.dot(gmf_u.flatten(), conv_gmf.flatten()))
if self.gmf_size != 0: loss += add_loss(self.gmf_u.weight, self.gmf_size) / self.num_pixels
if self.mlp_size != 0: loss += add_loss(self.mlp_u.weight, self.mlp_size) / self.num_pixels
return loss
def temporal_regularization(self, device):
loss = 0
def refactor_embedding(emb):
emb_r = self.embedding_activation(emb)
emb_r = emb_r.view([1, self.num_frames, -1])
emb_r = emb_r.permute([0, 2, 1])
return emb_r
def add_loss(embedding_weight, size):
kernel_size = 15
pad = list(int((kernel_size - 1) / 2) * np.array([1, 1, 0, 0, 0, 0]))
gaussian_sm = GaussianSmoothing(channels=size, kernel_size=kernel_size, sigma=1, dim=1).to(device)
gmf_u = refactor_embedding(embedding_weight)
gmf_u_sq = torch.mul(gmf_u, gmf_u)
conv_gmf = torch.nn.functional.pad(gaussian_sm(gmf_u), pad=pad, mode='constant', value=0.)
conv_gmf_sq = torch.nn.functional.pad(gaussian_sm(gmf_u_sq), pad=pad, mode='constant', value=0.)
return (torch.sum(gmf_u_sq.flatten()) + torch.sum(conv_gmf_sq) -
2 * torch.dot(gmf_u.flatten(), conv_gmf.flatten()))
if self.gmf_size != 0: loss += add_loss(self.gmf_v.weight, self.gmf_size) / self.num_frames
if self.mlp_size != 0: loss += add_loss(self.mlp_v.weight, self.mlp_size) / self.num_frames
return loss | source/segment/nnmf.py | import torch
import torch.nn as nn
import numpy as np
from utils import softminus
import math
import numbers
from torch.nn import functional as F
class SubNet(nn.ModuleList):
def __init__(self, list):
super(SubNet, self).__init__(list)
def forward(self, input):
output = input
for l in self:
output = l(output)
return output
class GaussianSmoothing(nn.Module):
"""
Apply gaussian smoothing on a
1d, 2d or 3d tensor. Filtering is performed seperately for each channel
in the input using a depthwise convolution.
Arguments:
channels (int, sequence): Number of channels of the input tensors. Output will
have this number of channels as well.
kernel_size (int, sequence): Size of the gaussian kernel.
sigma (float, sequence): Standard deviation of the gaussian kernel.
dim (int, optional): The number of dimensions of the data.
Default value is 2 (spatial).
"""
def __init__(self, channels, kernel_size, sigma, dim=2):
super(GaussianSmoothing, self).__init__()
if isinstance(kernel_size, numbers.Number):
kernel_size = [kernel_size] * dim
if isinstance(sigma, numbers.Number):
sigma = [sigma] * dim
# The gaussian kernel is the product of the
# gaussian function of each dimension.
kernel = 1
meshgrids = torch.meshgrid(
[
torch.arange(size, dtype=torch.float32)
for size in kernel_size
]
)
for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
mean = (size - 1) / 2
kernel *= 1 / (std * math.sqrt(2 * math.pi)) * \
torch.exp(-((mgrid - mean) / std) ** 2 / 2)
# Make sure sum of values in gaussian kernel equals 1.
kernel = kernel / torch.sum(kernel)
# Reshape to depthwise convolutional weight
kernel = kernel.view(1, 1, *kernel.size())
kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
self.register_buffer('weight', kernel)
self.groups = channels
if dim == 1:
self.conv = F.conv1d
elif dim == 2:
self.conv = F.conv2d
elif dim == 3:
self.conv = F.conv3d
else:
raise RuntimeError(
'Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim)
)
def forward(self, input):
"""
Apply gaussian filter to input.
Arguments:
input (torch.Tensor): Input to apply gaussian filter on.
Returns:
filtered (torch.Tensor): Filtered output.
"""
return self.conv(input, weight=self.weight, groups=self.groups)
class NNMF(nn.Module):
def __init__(self, gmf_size, mlp_size, mlp_layers, threshold_layers):
super(NNMF, self).__init__()
self.gmf_size = gmf_size
self.mlp_size = mlp_size
self.threshold_layers = threshold_layers
self.mlp_layers = mlp_layers
self.embedding_activation = nn.functional.softplus
self.mlp_activation = nn.LeakyReLU
self.threshold_activation = nn.ReLU
self.threshold_activation_output = nn.ReLU
self.output_activation = nn.Sigmoid
self.neu_mf_input_size = self.mlp_layers[-1] * (self.mlp_size > 0) + self.gmf_size
self.mlp_input_size = 2 * self.mlp_size
self.threshold_mlp = None
self.mlp = None
self.neu_mf = None
self.num_pixels = None
self.num_frames = None
self.gmf_u = None
self.gmf_v = None
self.mlp_u = None
self.mlp_v = None
self.define_nn()
def define_nn(self):
self.threshold_mlp = SubNet([nn.Linear(1, self.threshold_layers[0]), self.threshold_activation()] +
[item for t in [(nn.Linear(self.threshold_layers[j],
self.threshold_layers[j + 1]),
self.threshold_activation())
for j in range(len(self.threshold_layers) - 1)] for item in t])
self.threshold_mlp[-1] = self.threshold_activation_output()
self.mlp = SubNet([nn.Linear(self.mlp_input_size, self.mlp_layers[0]), self.mlp_activation()] +
[item for t in [(nn.Linear(self.mlp_layers[j], self.mlp_layers[j + 1]),
self.mlp_activation())
for j in range(len(self.mlp_layers) - 1)] for item in t])
self.neu_mf = SubNet([nn.Linear(self.neu_mf_input_size, 1), self.output_activation()])
def set_matrix(self, matrix2d, embedding_nmf_init=None):
self.num_pixels = matrix2d.shape[0]
self.num_frames = matrix2d.shape[1]
initialize_embedding = lambda x: nn.Embedding.from_pretrained(torch.from_numpy(x).float(), freeze=False)
get_random_init = lambda size: softminus(np.random.normal(loc=0.5, scale=0.01, size=size))
if embedding_nmf_init:
self.gmf_u = initialize_embedding(softminus(embedding_nmf_init[0]))
self.gmf_v = initialize_embedding(softminus(embedding_nmf_init[1]))
else:
self.gmf_u = initialize_embedding(get_random_init((self.num_pixels, self.gmf_size)))
self.gmf_v = initialize_embedding(get_random_init((self.num_frames, self.gmf_size)))
self.mlp_u = initialize_embedding(get_random_init((self.num_pixels, self.mlp_size)))
self.mlp_v = initialize_embedding(get_random_init((self.num_frames, self.mlp_size)))
def init_params(self, gmf_net_init=False):
def init_weights(m):
if type(m) == nn.Sequential:
try:
nn.init.xavier_normal_(m.weight.data, gain=1)
nn.init.normal_(m.bias, mean=0.0, std=0.01)
except:
pass
self.apply(init_weights)
if gmf_net_init:
with torch.no_grad():
for l in self.mlp:
try:
l.weight.fill_(0.)
l.bias.fill_(0.)
except:
pass
for l in self.neu_mf:
try:
l.weight.fill_(1.)
l.bias.fill_(0.)
except:
pass
with torch.no_grad():
for l in self.threshold_mlp:
try:
nn.init.eye_(l.weight)
l.bias.fill_(0.)
except:
pass
def forward(self, pixel, frame, target):
neu_mf_input = []
if self.mlp_size != 0:
mlp_input = torch.cat([self.embedding_activation(self.mlp_u(pixel)),
self.embedding_activation(self.mlp_v(frame))], dim=1)
mlp_output = self.mlp(mlp_input)
neu_mf_input += [mlp_output]
if self.gmf_size != 0:
neu_mf_input += [torch.mul(self.embedding_activation(self.gmf_u(pixel)),
self.embedding_activation(self.gmf_v(frame)))]
neu_mf_input = torch.cat(neu_mf_input, dim=1)
neu_mf_output = self.neu_mf(neu_mf_input)
s_input = target - neu_mf_output
s_output = self.threshold_mlp(s_input)
return neu_mf_output, s_output
def embedding_parameters(self):
embedding_params = []
if self.mlp_size != 0:
embedding_params += list(self.mlp_u.parameters()) + list(self.mlp_v.parameters())
if self.gmf_size != 0:
embedding_params += list(self.gmf_u.parameters()) + list(self.gmf_v.parameters())
return embedding_params
def embedding_regularization(self, pixel, frame):
loss = 0
if self.gmf_size != 0:
loss += torch.norm(self.embedding_activation((self.gmf_u(pixel)))) + \
torch.norm(self.embedding_activation((self.gmf_v(frame))))
if self.mlp_size != 0:
loss += torch.norm(self.embedding_activation((self.mlp_u(pixel)))) + \
torch.norm(self.embedding_activation((self.mlp_v(frame))))
return loss / pixel.shape[0]
def spatial_regularization(self, device):
loss = 0
def refactor_embedding(emb):
emb_r = self.embedding_activation(emb)
emb_r = emb_r.view([1, int(np.sqrt(self.num_pixels)), int(np.sqrt(self.num_pixels)), -1])
emb_r = emb_r.permute([0, 3, 1, 2])
return emb_r
def add_loss(embedding_weight, size):
kernel_size = 15
pad = list(int((kernel_size-1)/2)*np.array([1, 1, 1, 1, 0, 0, 0, 0]))
gaussian_sm = GaussianSmoothing(channels=size, kernel_size=kernel_size, sigma=1, dim=2).to(device)
gmf_u = refactor_embedding(embedding_weight)
gmf_u_sq = torch.mul(gmf_u, gmf_u)
conv_gmf = torch.nn.functional.pad(gaussian_sm(gmf_u),
pad=pad, mode='constant', value=0.)
conv_gmf_sq = torch.nn.functional.pad(gaussian_sm(gmf_u_sq), pad=pad, mode='constant', value=0.)
return (torch.sum(gmf_u_sq.flatten()) + torch.sum(conv_gmf_sq) -
2 * torch.dot(gmf_u.flatten(), conv_gmf.flatten()))
if self.gmf_size != 0: loss += add_loss(self.gmf_u.weight, self.gmf_size) / self.num_pixels
if self.mlp_size != 0: loss += add_loss(self.mlp_u.weight, self.mlp_size) / self.num_pixels
return loss
def temporal_regularization(self, device):
loss = 0
def refactor_embedding(emb):
emb_r = self.embedding_activation(emb)
emb_r = emb_r.view([1, self.num_frames, -1])
emb_r = emb_r.permute([0, 2, 1])
return emb_r
def add_loss(embedding_weight, size):
kernel_size = 15
pad = list(int((kernel_size - 1) / 2) * np.array([1, 1, 0, 0, 0, 0]))
gaussian_sm = GaussianSmoothing(channels=size, kernel_size=kernel_size, sigma=1, dim=1).to(device)
gmf_u = refactor_embedding(embedding_weight)
gmf_u_sq = torch.mul(gmf_u, gmf_u)
conv_gmf = torch.nn.functional.pad(gaussian_sm(gmf_u), pad=pad, mode='constant', value=0.)
conv_gmf_sq = torch.nn.functional.pad(gaussian_sm(gmf_u_sq), pad=pad, mode='constant', value=0.)
return (torch.sum(gmf_u_sq.flatten()) + torch.sum(conv_gmf_sq) -
2 * torch.dot(gmf_u.flatten(), conv_gmf.flatten()))
if self.gmf_size != 0: loss += add_loss(self.gmf_v.weight, self.gmf_size) / self.num_frames
if self.mlp_size != 0: loss += add_loss(self.mlp_v.weight, self.mlp_size) / self.num_frames
return loss | 0.950365 | 0.578151 |
from thetae import Forecast
from thetae.util import localized_date_to_utc
from datetime import timedelta
import requests
import pandas as pd
default_model_name = 'Climacell'
def get_climacell_forecast(stid, lat, lon, api_key, forecast_date):
# Retrieve data
api_url = 'https://api.climacell.co/v3/weather/forecast/hourly'
api_options = {
'apikey': api_key,
'lat': lat,
'lon': lon,
'unit_system': 'us',
'fields': 'precipitation,temp,dewpoint,wind_speed:knots,wind_gust:knots,baro_pressure:hPa,'
'wind_direction:degrees,cloud_cover:%,weather_code'
}
response = requests.get(api_url, params=api_options)
# Raise error for invalid HTTP response
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
print('climacell: got HTTP error when querying API')
raise
clima_data = response.json()
# Convert to pandas DataFrame and fix time, units, and columns
clima_df = pd.DataFrame(clima_data)
# Drop lat, lon and get values
clima_df.drop(['lat', 'lon'], axis=1, inplace=True)
clima_df = clima_df.apply(lambda y: y.apply(lambda x: x['value']))
column_names_dict = {
'observation_time': 'DateTime',
'temp': 'temperature',
'cloud_cover': 'cloud',
'precipitation': 'rain',
'baro_pressure': 'pressure',
'wind_speed': 'windSpeed',
'wind_gust': 'windGust',
'wind_direction': 'windDirection',
'weather_code': 'condition'
}
clima_df = clima_df.rename(columns=column_names_dict)
clima_df['DateTime'] = clima_df['DateTime'].apply(lambda x: localized_date_to_utc(pd.Timestamp(x)))
clima_df.set_index('DateTime', inplace=True)
# Calculate daily values
forecast_start = forecast_date.replace(hour=6)
forecast_end = forecast_start + timedelta(days=1)
daily_high = clima_df.loc[forecast_start:forecast_end, 'temperature'].max()
daily_low = clima_df.loc[forecast_start:forecast_end, 'temperature'].min()
daily_wind = clima_df.loc[forecast_start:forecast_end, 'windSpeed'].max()
daily_rain = clima_df.loc[forecast_start:forecast_end - timedelta(hours=1), 'rain'].sum()
# Create Forecast object
forecast = Forecast(stid, default_model_name, forecast_date)
forecast.daily.set_values(daily_high, daily_low, daily_wind, daily_rain)
forecast.timeseries.data = clima_df.reset_index()
return forecast
def main(config, model, stid, forecast_date):
"""
Produce a Forecast object from Climacell.
"""
# Get latitude and longitude from the config
try:
lat = float(config['Stations'][stid]['latitude'])
lon = float(config['Stations'][stid]['longitude'])
except KeyError:
raise (KeyError('climacell: missing or invalid latitude or longitude for station %s' % stid))
# Get the API key from the config
try:
api_key = config['Models'][model]['api_key']
except KeyError:
raise KeyError('climacell: no api_key parameter defined for model %s in config!' % model)
# Get forecast
forecast = get_climacell_forecast(stid, lat, lon, api_key, forecast_date)
return forecast | thetae/data_parsers/climacell.py | from thetae import Forecast
from thetae.util import localized_date_to_utc
from datetime import timedelta
import requests
import pandas as pd
default_model_name = 'Climacell'
def get_climacell_forecast(stid, lat, lon, api_key, forecast_date):
# Retrieve data
api_url = 'https://api.climacell.co/v3/weather/forecast/hourly'
api_options = {
'apikey': api_key,
'lat': lat,
'lon': lon,
'unit_system': 'us',
'fields': 'precipitation,temp,dewpoint,wind_speed:knots,wind_gust:knots,baro_pressure:hPa,'
'wind_direction:degrees,cloud_cover:%,weather_code'
}
response = requests.get(api_url, params=api_options)
# Raise error for invalid HTTP response
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
print('climacell: got HTTP error when querying API')
raise
clima_data = response.json()
# Convert to pandas DataFrame and fix time, units, and columns
clima_df = pd.DataFrame(clima_data)
# Drop lat, lon and get values
clima_df.drop(['lat', 'lon'], axis=1, inplace=True)
clima_df = clima_df.apply(lambda y: y.apply(lambda x: x['value']))
column_names_dict = {
'observation_time': 'DateTime',
'temp': 'temperature',
'cloud_cover': 'cloud',
'precipitation': 'rain',
'baro_pressure': 'pressure',
'wind_speed': 'windSpeed',
'wind_gust': 'windGust',
'wind_direction': 'windDirection',
'weather_code': 'condition'
}
clima_df = clima_df.rename(columns=column_names_dict)
clima_df['DateTime'] = clima_df['DateTime'].apply(lambda x: localized_date_to_utc(pd.Timestamp(x)))
clima_df.set_index('DateTime', inplace=True)
# Calculate daily values
forecast_start = forecast_date.replace(hour=6)
forecast_end = forecast_start + timedelta(days=1)
daily_high = clima_df.loc[forecast_start:forecast_end, 'temperature'].max()
daily_low = clima_df.loc[forecast_start:forecast_end, 'temperature'].min()
daily_wind = clima_df.loc[forecast_start:forecast_end, 'windSpeed'].max()
daily_rain = clima_df.loc[forecast_start:forecast_end - timedelta(hours=1), 'rain'].sum()
# Create Forecast object
forecast = Forecast(stid, default_model_name, forecast_date)
forecast.daily.set_values(daily_high, daily_low, daily_wind, daily_rain)
forecast.timeseries.data = clima_df.reset_index()
return forecast
def main(config, model, stid, forecast_date):
"""
Produce a Forecast object from Climacell.
"""
# Get latitude and longitude from the config
try:
lat = float(config['Stations'][stid]['latitude'])
lon = float(config['Stations'][stid]['longitude'])
except KeyError:
raise (KeyError('climacell: missing or invalid latitude or longitude for station %s' % stid))
# Get the API key from the config
try:
api_key = config['Models'][model]['api_key']
except KeyError:
raise KeyError('climacell: no api_key parameter defined for model %s in config!' % model)
# Get forecast
forecast = get_climacell_forecast(stid, lat, lon, api_key, forecast_date)
return forecast | 0.651577 | 0.259521 |
import telethon
from telethon import TelegramClient
from telethon.tl.functions.channels import JoinChannelRequest
from redis import Redis
import random, json, pymysql
import asyncio
from my_db import DbHelper
#实例化一个redis
redis_obj = Redis(host='localhost',port=6379,password='<PASSWORD>',decode_responses=True,charset='UTF-8', encoding='UTF-8')
# 插入数据库操作
def insertDb(item,falg=True,phone=None):
# 实例化mysql
# db = DbHelper('localhost',3306,'root','root')
db = DbHelper()
# 查询是否存在
if falg == True:
sql = "select * from tg_group_bot where link='" + item['link'] + "'"
result = db.fetchOne(sql)
if result == None:
res = db.executeSql("insert into tg_group_bot (group_name,link) values('" + pymysql.escape_string(item['title']) + "','" + pymysql.escape_string(item['link']) + "')")
if res == False:
print('数据写入失败')
else:
sql = "select * from tg_group_success where link='" + item['link'] + "'"
result = db.fetchOne(sql)
if result == None:
res = db.executeSql("insert into tg_group_success (group_name,link,phone) values('" + pymysql.escape_string(item['title']) + "','" + pymysql.escape_string(item['link']) + "','" + phone + "')")
if res == False:
print('数据写入失败')
#关闭数据库连接
db.close()
# 加群动作
async def addGroupAction(client):
# 获取队列数据
if redis_obj.llen('tg_group_list') > 0:
i = 0
while i < redis_obj.llen('tg_group_list'):
item = json.loads(redis_obj.lpop('tg_group_list'))
print(item['link'])
#将链接永久存储到bot表中
insertDb(item)
# 群组判断
try:
result = await client.get_entity(item['link'])
# print(result.stringify())
if result is not None :
if type(result) is not telethon.tl.types.User: # 判断类型是否不是用户
# print(result.stringify())
# print(result.broadcast)
if result.broadcast == False: #判断是否是群组
# 加群动作
update = await client(JoinChannelRequest(item['link']))
# print(update.stringify())
print('加群成功')
# 将群信息写入加群成功的记录表
# insertDb(item,False,update.users[0].phone)
except Exception as e:
print(e)
if hasattr(e,'seconds'):
await asyncio.sleep(e.seconds)
else:
pass
else:
# 循环间隔2-3分钟 以应对电报api请求频繁的限制
seconds = random.randint(100,300)
print(seconds)
await asyncio.sleep(seconds)
i += 1
else:
print('end ========== 队列中没有数据')
async def work(client):
async with client:
await addGroupAction(client)
async def main():
await asyncio.gather(
work(TelegramClient('+86 137 8230 8818', 1848782, 'db242eb477ce069cb76d299f562adba2')),
work(TelegramClient('+86 176 3001 3170', 1970209, '382e4d2d424a8b4dcd808e319de5ea6b')),
# work(TelegramClient('+86 173 3571 1659', 2482317, 'c7504e11a7826546dff493a2944984db')),
work(TelegramClient('+86 158 3741 1100', 2174500, '9d9758505ba7a2ac24aee0a73b622c14')),
work(TelegramClient('+86 131 0371 3118', 2436793, '814af6c036a72985b346c137cc0b23e5')),
)
asyncio.run(main()) | telegram_api/task/group/add_group.py | import telethon
from telethon import TelegramClient
from telethon.tl.functions.channels import JoinChannelRequest
from redis import Redis
import random, json, pymysql
import asyncio
from my_db import DbHelper
#实例化一个redis
redis_obj = Redis(host='localhost',port=6379,password='<PASSWORD>',decode_responses=True,charset='UTF-8', encoding='UTF-8')
# 插入数据库操作
def insertDb(item,falg=True,phone=None):
# 实例化mysql
# db = DbHelper('localhost',3306,'root','root')
db = DbHelper()
# 查询是否存在
if falg == True:
sql = "select * from tg_group_bot where link='" + item['link'] + "'"
result = db.fetchOne(sql)
if result == None:
res = db.executeSql("insert into tg_group_bot (group_name,link) values('" + pymysql.escape_string(item['title']) + "','" + pymysql.escape_string(item['link']) + "')")
if res == False:
print('数据写入失败')
else:
sql = "select * from tg_group_success where link='" + item['link'] + "'"
result = db.fetchOne(sql)
if result == None:
res = db.executeSql("insert into tg_group_success (group_name,link,phone) values('" + pymysql.escape_string(item['title']) + "','" + pymysql.escape_string(item['link']) + "','" + phone + "')")
if res == False:
print('数据写入失败')
#关闭数据库连接
db.close()
# 加群动作
async def addGroupAction(client):
# 获取队列数据
if redis_obj.llen('tg_group_list') > 0:
i = 0
while i < redis_obj.llen('tg_group_list'):
item = json.loads(redis_obj.lpop('tg_group_list'))
print(item['link'])
#将链接永久存储到bot表中
insertDb(item)
# 群组判断
try:
result = await client.get_entity(item['link'])
# print(result.stringify())
if result is not None :
if type(result) is not telethon.tl.types.User: # 判断类型是否不是用户
# print(result.stringify())
# print(result.broadcast)
if result.broadcast == False: #判断是否是群组
# 加群动作
update = await client(JoinChannelRequest(item['link']))
# print(update.stringify())
print('加群成功')
# 将群信息写入加群成功的记录表
# insertDb(item,False,update.users[0].phone)
except Exception as e:
print(e)
if hasattr(e,'seconds'):
await asyncio.sleep(e.seconds)
else:
pass
else:
# 循环间隔2-3分钟 以应对电报api请求频繁的限制
seconds = random.randint(100,300)
print(seconds)
await asyncio.sleep(seconds)
i += 1
else:
print('end ========== 队列中没有数据')
async def work(client):
async with client:
await addGroupAction(client)
async def main():
await asyncio.gather(
work(TelegramClient('+86 137 8230 8818', 1848782, 'db242eb477ce069cb76d299f562adba2')),
work(TelegramClient('+86 176 3001 3170', 1970209, '382e4d2d424a8b4dcd808e319de5ea6b')),
# work(TelegramClient('+86 173 3571 1659', 2482317, 'c7504e11a7826546dff493a2944984db')),
work(TelegramClient('+86 158 3741 1100', 2174500, '9d9758505ba7a2ac24aee0a73b622c14')),
work(TelegramClient('+86 131 0371 3118', 2436793, '814af6c036a72985b346c137cc0b23e5')),
)
asyncio.run(main()) | 0.087847 | 0.096791 |
from docopt import docopt
import numpy as np
import os
import bob.io.image
import bob.io.base
import tensorflow as tf
import sys
from datetime import datetime
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def search_landmark(landmark_path, img_path):
with open(landmark_path) as f:
next(f)
for line in f:
line = line.split(",")
if img_path in line[0]:
return np.array(
[[float(line[i + 1]), float(line[i + 2])] for i in [0, 2, 4, 6, 8]]
)
else:
return None
from bob.bio.face.preprocessor import FaceCrop
def align(image, annotations, cropped_image_size=(126, 126)):
cropped_image_height, cropped_image_width = cropped_image_size
# RIGHT_EYE_POS = (40, 46)
# LEFT_EYE_POS = (40, 80)
# cropped_positions = {"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS}
# cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
cropped_positions = {"leye": (55, 81), "reye": (55, 42)}
cropper = FaceCrop(
cropped_image_size=cropped_image_size,
cropped_positions=cropped_positions,
color_channel="rgb",
fixed_positions=None,
annotator=None,
)
return bob.io.image.to_matplotlib(
cropper.transform([image], [annotations])[0].astype("uint8")
)
def get_id_by_line(line):
return line.split("/")[0]
def generate_tfrecord(
base_path, landmark_path, file_list, output_tf_record_path, indexes
):
def write_single_line_tfrecord(writer, image, offset, user_id):
# Serializing
serialized_img = image.tobytes()
# Writing
feature = {
"data": _bytes_feature(serialized_img),
"label": _int64_feature(offset),
"key": _bytes_feature(str.encode(user_id)),
}
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
with tf.io.TFRecordWriter(output_tf_record_path) as tf_writer:
current_id = None
with open(file_list) as f:
for file_name in f.readlines():
user_id = get_id_by_line(file_name)
if user_id in indexes:
img = bob.io.base.load(
os.path.join(base_path, file_name).rstrip("\n")
)
l_name = file_name.rstrip(".jpg\n")
if current_id != user_id:
current_id = user_id
sys.stdout.write(
f"Writing user {current_id}. {str(datetime.now())} \n"
)
sys.stdout.flush()
landmarks = search_landmark(landmark_path, l_name)
if landmarks[0][0] > landmarks[1][0]:
annotations = {
"reye": (landmarks[1][1], landmarks[1][0]),
"leye": (landmarks[0][1], landmarks[0][0]),
}
else:
annotations = {
"reye": (landmarks[0][1], landmarks[0][0]),
"leye": (landmarks[1][1], landmarks[1][0]),
}
if landmarks is None:
raise ValueError(f"Landmark for {file_name} not found!")
aligned_image = align(img, annotations)
write_single_line_tfrecord(
tf_writer, aligned_image, int(indexes[user_id]), user_id
)
def map_indexes(image_path, n_chunks):
"""
Create a dictionary mapping the ID to VGG2-ID, like:
{0: 'n000001'],
1: 'n000002']}
"""
indexes = sorted(list(set([l.split("/")[0] for l in open(image_path).readlines()])))
identities_map = {indexes[i]: i for i in range(len(indexes))}
# SPLIT THE DICTIONARY IN TOTAL_CHUNKS
indexes_as_list = list(identities_map.items())
dict_as_list = np.array_split(indexes_as_list, n_chunks)
dicts = [dict(d) for d in dict_as_list]
return dicts
if __name__ == "__main__":
args = docopt(__doc__)
VGG2_PATH = args["<vgg-path>"]
LANDMARK_PATH = os.path.join(VGG2_PATH, "bb_landmark", "loose_landmark_train.csv")
if "SGE_TASK_LAST" in os.environ:
TOTAL_CHUNKS = int(os.environ["SGE_TASK_LAST"])
CURRENT_CHUNK = int(os.environ["SGE_TASK_ID"]) - 1
else:
TOTAL_CHUNKS = 1
CURRENT_CHUNK = 0
# TOTAL_CHUNKS = 140
# CURRENT_CHUNK = 0
TRAINING_LIST = os.path.join(VGG2_PATH, "train_list.txt")
# TEST_LIST = os.path.join(VGG2_PATH, "test_list.txt")
# MAP ALL INDEXES
indexes = map_indexes(TRAINING_LIST, TOTAL_CHUNKS)
generate_tfrecord(
os.path.join(VGG2_PATH, "train"),
LANDMARK_PATH,
TRAINING_LIST,
os.path.join(
args["<output-path>"], f"train_vgg2_chunk{CURRENT_CHUNK}.tfrecords"
),
indexes[CURRENT_CHUNK],
) | cnn_training/vgg2_2_tfrecords.py | from docopt import docopt
import numpy as np
import os
import bob.io.image
import bob.io.base
import tensorflow as tf
import sys
from datetime import datetime
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def search_landmark(landmark_path, img_path):
with open(landmark_path) as f:
next(f)
for line in f:
line = line.split(",")
if img_path in line[0]:
return np.array(
[[float(line[i + 1]), float(line[i + 2])] for i in [0, 2, 4, 6, 8]]
)
else:
return None
from bob.bio.face.preprocessor import FaceCrop
def align(image, annotations, cropped_image_size=(126, 126)):
cropped_image_height, cropped_image_width = cropped_image_size
# RIGHT_EYE_POS = (40, 46)
# LEFT_EYE_POS = (40, 80)
# cropped_positions = {"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS}
# cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
cropped_positions = {"leye": (55, 81), "reye": (55, 42)}
cropper = FaceCrop(
cropped_image_size=cropped_image_size,
cropped_positions=cropped_positions,
color_channel="rgb",
fixed_positions=None,
annotator=None,
)
return bob.io.image.to_matplotlib(
cropper.transform([image], [annotations])[0].astype("uint8")
)
def get_id_by_line(line):
return line.split("/")[0]
def generate_tfrecord(
base_path, landmark_path, file_list, output_tf_record_path, indexes
):
def write_single_line_tfrecord(writer, image, offset, user_id):
# Serializing
serialized_img = image.tobytes()
# Writing
feature = {
"data": _bytes_feature(serialized_img),
"label": _int64_feature(offset),
"key": _bytes_feature(str.encode(user_id)),
}
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
with tf.io.TFRecordWriter(output_tf_record_path) as tf_writer:
current_id = None
with open(file_list) as f:
for file_name in f.readlines():
user_id = get_id_by_line(file_name)
if user_id in indexes:
img = bob.io.base.load(
os.path.join(base_path, file_name).rstrip("\n")
)
l_name = file_name.rstrip(".jpg\n")
if current_id != user_id:
current_id = user_id
sys.stdout.write(
f"Writing user {current_id}. {str(datetime.now())} \n"
)
sys.stdout.flush()
landmarks = search_landmark(landmark_path, l_name)
if landmarks[0][0] > landmarks[1][0]:
annotations = {
"reye": (landmarks[1][1], landmarks[1][0]),
"leye": (landmarks[0][1], landmarks[0][0]),
}
else:
annotations = {
"reye": (landmarks[0][1], landmarks[0][0]),
"leye": (landmarks[1][1], landmarks[1][0]),
}
if landmarks is None:
raise ValueError(f"Landmark for {file_name} not found!")
aligned_image = align(img, annotations)
write_single_line_tfrecord(
tf_writer, aligned_image, int(indexes[user_id]), user_id
)
def map_indexes(image_path, n_chunks):
"""
Create a dictionary mapping the ID to VGG2-ID, like:
{0: 'n000001'],
1: 'n000002']}
"""
indexes = sorted(list(set([l.split("/")[0] for l in open(image_path).readlines()])))
identities_map = {indexes[i]: i for i in range(len(indexes))}
# SPLIT THE DICTIONARY IN TOTAL_CHUNKS
indexes_as_list = list(identities_map.items())
dict_as_list = np.array_split(indexes_as_list, n_chunks)
dicts = [dict(d) for d in dict_as_list]
return dicts
if __name__ == "__main__":
args = docopt(__doc__)
VGG2_PATH = args["<vgg-path>"]
LANDMARK_PATH = os.path.join(VGG2_PATH, "bb_landmark", "loose_landmark_train.csv")
if "SGE_TASK_LAST" in os.environ:
TOTAL_CHUNKS = int(os.environ["SGE_TASK_LAST"])
CURRENT_CHUNK = int(os.environ["SGE_TASK_ID"]) - 1
else:
TOTAL_CHUNKS = 1
CURRENT_CHUNK = 0
# TOTAL_CHUNKS = 140
# CURRENT_CHUNK = 0
TRAINING_LIST = os.path.join(VGG2_PATH, "train_list.txt")
# TEST_LIST = os.path.join(VGG2_PATH, "test_list.txt")
# MAP ALL INDEXES
indexes = map_indexes(TRAINING_LIST, TOTAL_CHUNKS)
generate_tfrecord(
os.path.join(VGG2_PATH, "train"),
LANDMARK_PATH,
TRAINING_LIST,
os.path.join(
args["<output-path>"], f"train_vgg2_chunk{CURRENT_CHUNK}.tfrecords"
),
indexes[CURRENT_CHUNK],
) | 0.459076 | 0.243597 |
from __future__ import unicode_literals
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse_lazy
CONTACT_FORM_USE_CAPTCHA = getattr(settings, 'CONTACT_FORM_USE_CAPTCHA', False)
CONTACT_FORM_USE_SIGNALS = getattr(settings, 'CONTACT_FORM_USE_SIGNALS', False)
CONTACT_FORM_SUCCESS_URL = getattr(settings, 'CONTACT_FORM_SUCCESS_URL', reverse_lazy('contact_form'))
CONTACT_FORM_USE_SITES = getattr(settings, 'CONTACT_FORM_USE_SITES', True)
CONTACT_FORM_FILTER_SENDER_NAME = getattr(settings, 'CONTACT_FORM_FILTER_SENDER_NAME', True)
CONTACT_FORM_FILTER_MESSAGE = getattr(settings, 'CONTACT_FORM_FILTER_MESSAGE', True)
CONTACT_FORM_ALLOWED_MESSAGE_TAGS = getattr(settings, 'CONTACT_FORM_ALLOWED_MESSAGE_TAGS', [])
CONTACT_FORM_STRIP_MESSAGE = getattr(settings, 'CONTACT_FORM_STRIP_MESSAGE', False)
CONTACT_FORM_VALID_MESSAGE = getattr(
settings,
'CONTACT_FORM_VALID_MESSAGE',
_('Your message is submitted.')
)
CONTACT_FORM_INVALID_MESSAGE = getattr(
settings,
'CONTACT_FORM_INVALID_MESSAGE',
_('Something went wrong, your message was not submitted!')
)
CONTACT_FORM_USE_USERNAME = getattr(settings, 'CONTACT_FORM_USE_USERNAME', True)
CONTACT_FORM_USERNAME_FIELD = getattr(settings, 'CONTACT_FORM_USERNAME_FIELD', 'username')
CONTACT_FORM_USE_USER_EMAIL = getattr(settings, 'CONTACT_FORM_USE_USER_EMAIL', True)
CONTACT_FORM_USER_EMAIL_FIELD = getattr(settings, 'CONTACT_FORM_USER_EMAIL_FIELD', 'email')
CONTACT_FORM_SENDER_NAME_MAX_LENGTH = getattr(settings, 'CONTACT_FORM_SENDER_NAME_MAX_LENGTH', 80)
CONTACT_FORM_SUBJECT_MAX_LENGTH = getattr(settings, 'CONTACT_FORM_SUBJECT_MAX_LENGTH', 80)
CONTACT_FORM_MESSAGE_MAX_LENGTH = getattr(settings, 'CONTACT_FORM_MESSAGE_MAX_LENGTH', 4096)
CONTACT_FORM_MESSAGE_MIN_LENGTH = getattr(settings, 'CONTACT_FORM_MESSAGE_MIN_LENGTH', 15)
CONTACT_FORM_DEPARTMENT_NAME_MAX_LENGTH = getattr(settings, 'CONTACT_FORM_DEPARTMENT_NAME_MAX_LENGTH', 80)
CONTACT_FORM_DEPARTMENT_PHONE_MAX_LENGTH = getattr(settings, 'CONTACT_FORM_DEPARTMENT_PHONE_MAX_LENGTH', 20) | contact_form/conf/settings.py | from __future__ import unicode_literals
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse_lazy
CONTACT_FORM_USE_CAPTCHA = getattr(settings, 'CONTACT_FORM_USE_CAPTCHA', False)
CONTACT_FORM_USE_SIGNALS = getattr(settings, 'CONTACT_FORM_USE_SIGNALS', False)
CONTACT_FORM_SUCCESS_URL = getattr(settings, 'CONTACT_FORM_SUCCESS_URL', reverse_lazy('contact_form'))
CONTACT_FORM_USE_SITES = getattr(settings, 'CONTACT_FORM_USE_SITES', True)
CONTACT_FORM_FILTER_SENDER_NAME = getattr(settings, 'CONTACT_FORM_FILTER_SENDER_NAME', True)
CONTACT_FORM_FILTER_MESSAGE = getattr(settings, 'CONTACT_FORM_FILTER_MESSAGE', True)
CONTACT_FORM_ALLOWED_MESSAGE_TAGS = getattr(settings, 'CONTACT_FORM_ALLOWED_MESSAGE_TAGS', [])
CONTACT_FORM_STRIP_MESSAGE = getattr(settings, 'CONTACT_FORM_STRIP_MESSAGE', False)
CONTACT_FORM_VALID_MESSAGE = getattr(
settings,
'CONTACT_FORM_VALID_MESSAGE',
_('Your message is submitted.')
)
CONTACT_FORM_INVALID_MESSAGE = getattr(
settings,
'CONTACT_FORM_INVALID_MESSAGE',
_('Something went wrong, your message was not submitted!')
)
CONTACT_FORM_USE_USERNAME = getattr(settings, 'CONTACT_FORM_USE_USERNAME', True)
CONTACT_FORM_USERNAME_FIELD = getattr(settings, 'CONTACT_FORM_USERNAME_FIELD', 'username')
CONTACT_FORM_USE_USER_EMAIL = getattr(settings, 'CONTACT_FORM_USE_USER_EMAIL', True)
CONTACT_FORM_USER_EMAIL_FIELD = getattr(settings, 'CONTACT_FORM_USER_EMAIL_FIELD', 'email')
CONTACT_FORM_SENDER_NAME_MAX_LENGTH = getattr(settings, 'CONTACT_FORM_SENDER_NAME_MAX_LENGTH', 80)
CONTACT_FORM_SUBJECT_MAX_LENGTH = getattr(settings, 'CONTACT_FORM_SUBJECT_MAX_LENGTH', 80)
CONTACT_FORM_MESSAGE_MAX_LENGTH = getattr(settings, 'CONTACT_FORM_MESSAGE_MAX_LENGTH', 4096)
CONTACT_FORM_MESSAGE_MIN_LENGTH = getattr(settings, 'CONTACT_FORM_MESSAGE_MIN_LENGTH', 15)
CONTACT_FORM_DEPARTMENT_NAME_MAX_LENGTH = getattr(settings, 'CONTACT_FORM_DEPARTMENT_NAME_MAX_LENGTH', 80)
CONTACT_FORM_DEPARTMENT_PHONE_MAX_LENGTH = getattr(settings, 'CONTACT_FORM_DEPARTMENT_PHONE_MAX_LENGTH', 20) | 0.292899 | 0.064418 |
import re
import json
import bs4
from michiru import modules, personalities
## Module information.
__name__ = 'uribot.fourchan'
__author__ = 'Shiz'
__license__ = 'WTFPL'
__desc__ = 'Gives URL information for 4chan links.'
__deps__ = ['uribot']
URI_REGEXP = re.compile(r'^https?://boards\.4chan\.org/([a-z0-9]+)/thread/([0-9]+)(?:/[a-z0-9_-]+/?)?(?:#p?([0-9]+))?$')
## Module.
def uri_4chan(bot, response, matches):
""" Extract 4chan thread information. """
thread = json.loads(response.text)
# Check if we want to actually have a linked post instead of the OP.
wanted = None
if matches.group(3):
try:
wanted = int(matches.group(3))
except:
pass
title = None
comment = None
# We want a given post: get its contents.
if wanted:
for post in thread['posts']:
if post['no'] == wanted:
# Found the post!
comment = post['com']
# We want just the thread: try to use thread title or OP contents.
if not comment:
op = thread['posts'][0]
if 'sub' in op:
# Use thread title as URL title.
title = op['sub']
else:
comment = op['com']
# Build title from comment.
if not title and comment:
# Use post contents as URL title, stripped from HTML and cut down.
# We need to invent our own newlines.
comment = comment.replace('<br>', '\n')
comment = comment.replace('<s>', bot.FORMAT_CODES['spoiler'])
comment = comment.replace('</s>', bot.FORMAT_CODES['/spoiler'])
comment = comment.replace('\n', ' ')
raw_title = ''.join(bs4.BeautifulSoup(comment).find_all(text=True))
# Add ... if needed and remove unnecessary whitespace.
title = raw_title[:300] + '...' * (len(raw_title) > 300)
title = re.sub(r'\s+', ' ', title)
# Gather some metadata.
board = matches.group(1)
num_replies = thread['posts'][0]['replies']
num_images = thread['posts'][0]['images']
# And format it nicely.
type = '4chan: /{}/'.format(board)
meta = '{} replies'.format(num_replies if num_replies else 'no')
if num_images:
meta += ', {} images'.format(num_images)
return type, title, meta
def load():
from michiru.modules import uribot
uribot.URI_HANDLERS[URI_REGEXP] = {
'handler': uri_4chan,
'replacement': r'https://api.4chan.org/\1/res/\2.json'
}
def unload():
from michiru.modules import uribot
del uribot.URI_HANDLERS[URI_REGEXP] | michiru/modules/uribot/fourchan.py | import re
import json
import bs4
from michiru import modules, personalities
## Module information.
__name__ = 'uribot.fourchan'
__author__ = 'Shiz'
__license__ = 'WTFPL'
__desc__ = 'Gives URL information for 4chan links.'
__deps__ = ['uribot']
URI_REGEXP = re.compile(r'^https?://boards\.4chan\.org/([a-z0-9]+)/thread/([0-9]+)(?:/[a-z0-9_-]+/?)?(?:#p?([0-9]+))?$')
## Module.
def uri_4chan(bot, response, matches):
""" Extract 4chan thread information. """
thread = json.loads(response.text)
# Check if we want to actually have a linked post instead of the OP.
wanted = None
if matches.group(3):
try:
wanted = int(matches.group(3))
except:
pass
title = None
comment = None
# We want a given post: get its contents.
if wanted:
for post in thread['posts']:
if post['no'] == wanted:
# Found the post!
comment = post['com']
# We want just the thread: try to use thread title or OP contents.
if not comment:
op = thread['posts'][0]
if 'sub' in op:
# Use thread title as URL title.
title = op['sub']
else:
comment = op['com']
# Build title from comment.
if not title and comment:
# Use post contents as URL title, stripped from HTML and cut down.
# We need to invent our own newlines.
comment = comment.replace('<br>', '\n')
comment = comment.replace('<s>', bot.FORMAT_CODES['spoiler'])
comment = comment.replace('</s>', bot.FORMAT_CODES['/spoiler'])
comment = comment.replace('\n', ' ')
raw_title = ''.join(bs4.BeautifulSoup(comment).find_all(text=True))
# Add ... if needed and remove unnecessary whitespace.
title = raw_title[:300] + '...' * (len(raw_title) > 300)
title = re.sub(r'\s+', ' ', title)
# Gather some metadata.
board = matches.group(1)
num_replies = thread['posts'][0]['replies']
num_images = thread['posts'][0]['images']
# And format it nicely.
type = '4chan: /{}/'.format(board)
meta = '{} replies'.format(num_replies if num_replies else 'no')
if num_images:
meta += ', {} images'.format(num_images)
return type, title, meta
def load():
from michiru.modules import uribot
uribot.URI_HANDLERS[URI_REGEXP] = {
'handler': uri_4chan,
'replacement': r'https://api.4chan.org/\1/res/\2.json'
}
def unload():
from michiru.modules import uribot
del uribot.URI_HANDLERS[URI_REGEXP] | 0.288369 | 0.138637 |
from math import floor
from astropy.time import Time
from sqlalchemy import Column, String, Integer, BigInteger, Text
from . import MCDeclarativeBase
class SubsystemError(MCDeclarativeBase):
"""
Definition of subsystem_error table.
Attributes
----------
id : BigInteger Column
Autoincrementing error id. Primary_key
time : BigInteger Column
GPS time of this error, floored.
subsystem : String Column
Name of subsystem.
mc_time : BigInteger Column
GPS time error was report to M&C, floored.
severity : Integer Column
Integer indicating severity level, 1 is most severe.
log : Text Column
Error message.
"""
__tablename__ = 'subsystem_error'
id = Column(BigInteger, primary_key=True, autoincrement=True) # noqa A003
time = Column(BigInteger, nullable=False)
subsystem = Column(String(32), nullable=False)
mc_time = Column(BigInteger, nullable=False)
severity = Column(Integer, nullable=False)
log = Column(Text, nullable=False)
@classmethod
def create(cls, db_time, time, subsystem, severity, log):
"""
Create a new subsystem_error object.
Parameters
----------
db_time : astropy Time object
Astropy time object based on a timestamp from the database.
Usually generated from MCSession.get_current_db_time()
time : astropy Time object
Time of this error report.
subsystem : str
Name of subsystem with error.
severity : int
Integer indicating severity level, 1 is most severe.
log : str
error message or log file name (TBD).
Returns
-------
SubsystemError object
"""
if not isinstance(db_time, Time):
raise ValueError('db_time must be an astropy Time object')
mc_time = floor(db_time.gps)
if not isinstance(time, Time):
raise ValueError('time must be an astropy Time object')
time = floor(time.gps)
return cls(time=time, subsystem=subsystem, mc_time=mc_time,
severity=severity, log=log) | hera_mc/subsystem_error.py | from math import floor
from astropy.time import Time
from sqlalchemy import Column, String, Integer, BigInteger, Text
from . import MCDeclarativeBase
class SubsystemError(MCDeclarativeBase):
"""
Definition of subsystem_error table.
Attributes
----------
id : BigInteger Column
Autoincrementing error id. Primary_key
time : BigInteger Column
GPS time of this error, floored.
subsystem : String Column
Name of subsystem.
mc_time : BigInteger Column
GPS time error was report to M&C, floored.
severity : Integer Column
Integer indicating severity level, 1 is most severe.
log : Text Column
Error message.
"""
__tablename__ = 'subsystem_error'
id = Column(BigInteger, primary_key=True, autoincrement=True) # noqa A003
time = Column(BigInteger, nullable=False)
subsystem = Column(String(32), nullable=False)
mc_time = Column(BigInteger, nullable=False)
severity = Column(Integer, nullable=False)
log = Column(Text, nullable=False)
@classmethod
def create(cls, db_time, time, subsystem, severity, log):
"""
Create a new subsystem_error object.
Parameters
----------
db_time : astropy Time object
Astropy time object based on a timestamp from the database.
Usually generated from MCSession.get_current_db_time()
time : astropy Time object
Time of this error report.
subsystem : str
Name of subsystem with error.
severity : int
Integer indicating severity level, 1 is most severe.
log : str
error message or log file name (TBD).
Returns
-------
SubsystemError object
"""
if not isinstance(db_time, Time):
raise ValueError('db_time must be an astropy Time object')
mc_time = floor(db_time.gps)
if not isinstance(time, Time):
raise ValueError('time must be an astropy Time object')
time = floor(time.gps)
return cls(time=time, subsystem=subsystem, mc_time=mc_time,
severity=severity, log=log) | 0.890425 | 0.34054 |
import datetime
from PyQt5 import QtWidgets, QtCore, QtGui
from src.utils.log_system import LogSystem
from src.widgets.syntax_highlighter import SyntaxHighlighter
from src.hack_compiler import HackAssemblyCompiler, InvalidSyntaxException, InternalException
class ActionSystem(object):
main_form = None
@classmethod
def initialize(cls, main_form):
"""
Set action for specific form, in our case we want these actions on our main form.
"""
cls.main_form = main_form
@classmethod
def new_file(cls, file_path=None):
"""
Create new file and open a new tab for that file, if file path is not provided it will only open empty tab.
"""
LogSystem.information("Creating new file")
cls.main_form.tab_bar.create_new_tab(file_path)
@classmethod
def open_file(cls):
"""
Open file dialog to select specific file to open in new tab.
"""
LogSystem.information("Open file")
try:
options = QtWidgets.QFileDialog.Option() | QtWidgets.QFileDialog.DontUseNativeDialog
file_path, ok = QtWidgets.QFileDialog.getOpenFileName(cls.main_form, "Open File", options=options)
if ok:
cls.new_file(file_path)
except Exception as e:
LogSystem.error(e)
@classmethod
def open_folder(cls):
"""
Open file dialog to select specific folder to open and will show directory view dock.
"""
LogSystem.information("Open folder")
try:
options = QtWidgets.QFileDialog.Option() | QtWidgets.QFileDialog.DontUseNativeDialog
directory_path = QtWidgets.QFileDialog.getExistingDirectory(cls.main_form, "", "./repository", options=options)
if directory_path:
LogSystem.success("Opening directory: {0}".format(directory_path))
cls.main_form.directory_view.dock.show()
cls.main_form.directory_view.filesystem.setRootPath(directory_path)
cls.main_form.directory_view.tree.setModel(cls.main_form.directory_view.filesystem)
cls.main_form.directory_view.tree.setRootIndex(cls.main_form.directory_view.filesystem.index(directory_path))
for col in range(1, 4):
cls.main_form.directory_view.tree.hideColumn(col)
cls.main_form.directory_view.cwd = directory_path
else:
LogSystem.warning("Ignoring open folder request!")
except Exception as e:
LogSystem.error(e)
@classmethod
def save_file(cls):
"""
Open file dialog for saving files.
"""
LogSystem.information("Save file")
try:
current_tab = cls.main_form.tab_bar.current
if not current_tab.file_path:
LogSystem.information("Opening save file dialog!")
options = QtWidgets.QFileDialog.Option() | QtWidgets.QFileDialog.DontUseNativeDialog
file_path, ok = QtWidgets.QFileDialog.getSaveFileName(cls.main_form, "Save file", options=options)
if ok:
for tab in cls.main_form.tab_bar.tabs:
if tab.file_path == file_path:
cls.main_form.tab_bar.remove(tab)
current_tab.file_path = file_path
current_tab.title = file_path.split("/")[-1]
current_tab.extension = file_path.split(".")[-1]
try:
if current_tab.extension == "asm":
current_tab.syntax = SyntaxHighlighter(current_tab.textarea.document(), file_path)
except Exception as e:
LogSystem.error(e)
cls.main_form.tab_bar.get.setTabText(cls.main_form.tab_bar.get.indexOf(current_tab.widget), current_tab.title)
if not current_tab.saved and current_tab.file_path:
LogSystem.success("Saving file: {0}".format(current_tab.file_path))
text_buffer = current_tab.textarea.toPlainText()
file_path = current_tab.file_path
with open(file_path, "w") as file:
file.write(text_buffer)
current_tab.saved = True
else:
LogSystem.warning("No changes made to file: {0}".format(current_tab.file_path))
except Exception as e:
LogSystem.error(e)
@classmethod
def save_file_as(cls):
"""
Open file dialog for saving files.
"""
LogSystem.information("Save file as")
try:
current_tab = cls.main_form.tab_bar.current
options = QtWidgets.QFileDialog.Option() | QtWidgets.QFileDialog.DontUseNativeDialog
file_path, ok = QtWidgets.QFileDialog.getSaveFileName(cls.main_form, "Save file", options=options)
if ok:
current_tab.file_path = file_path
current_tab.title = file_path.split("/")[-1]
current_tab.extension = file_path.split(".")[-1]
current_tab.saved = True
cls.main_form.tab_bar.get.setTabText(cls.main_form.tab_bar.get.indexOf(current_tab.widget), current_tab.title)
try:
if current_tab.extension == "asm":
current_tab.syntax = SyntaxHighlighter(current_tab.textarea.document(), file_path)
except Exception as e:
LogSystem.error(e)
text_buffer = current_tab.textarea.toPlainText()
file_path = current_tab.file_path
with open(file_path, "w") as file:
file.write(text_buffer)
LogSystem.information("File saved as: {0}".format(file_path))
except Exception as e:
LogSystem.error(e)
@classmethod
def load_comparison_file(cls):
"""
Load hack file for comparison and display comparison dock widget.
"""
LogSystem.information("Starting Action Load Comparison File!")
try:
options = QtWidgets.QFileDialog.Option() | QtWidgets.QFileDialog.DontUseNativeDialog
file_path, ok = QtWidgets.QFileDialog.getOpenFileName(cls.main_form, "Open File", "./repository", "Hack files (*.hack)", options=options)
if ok:
with open(file_path, "r") as file:
cls.main_form.comparison_dock.list.clear()
cls.main_form.comparison_dock.file = file_path
text_buffer = file.read().split("\n")
for line in text_buffer:
if line:
list_item = QtWidgets.QListWidgetItem(line)
cls.main_form.comparison_dock.list.addItem(list_item)
cls.main_form.comparison_dock.show()
except Exception as e:
LogSystem.error(e)
@classmethod
def clear_comparison_file(cls):
"""
Clear content in comparison dock widget.
"""
LogSystem.information("Starting Action Clear Comparison File!")
try:
cls.main_form.comparison_dock.list.clear()
cls.main_form.comparison_dock.file = None
except Exception as e:
LogSystem.error(e)
@classmethod
def compile(cls):
"""
Compile hack assembly code that is opened in current tab.
"""
LogSystem.information("Starting Action Compile!")
try:
# Check if file was saved
current_tab = cls.main_form.tab_bar.current
if not current_tab.saved:
cls.save_file()
if current_tab.saved == False:
return
file_path = current_tab.file_path
cls.main_form.destination_dock.pc = None
cls.main_form.destination_dock.file_path = None
cls.main_form.compilation_dock.textarea.clear()
cls.main_form.compilation_dock.show()
cls.main_form.compilation_dock.textarea.appendPlainText("Time: {0}".format(datetime.datetime.now()))
cls.main_form.destination_dock.dock.show()
cls.main_form.tab_bar.current.textarea.setExtraSelections([])
for i in range(cls.main_form.comparison_dock.list.count()):
cls.main_form.comparison_dock.list.item(i).setBackground(QtGui.QColor(255, 255, 255))
try:
cls.main_form.destination_dock.list.clear()
hack_assembly_compiler = HackAssemblyCompiler(file_path, "temp.hack")
hack_assembly_compiler.compile()
for binary in hack_assembly_compiler.binary_data:
list_item = QtWidgets.QListWidgetItem(binary)
cls.main_form.destination_dock.list.addItem(list_item)
cls.main_form.compilation_dock.textarea.appendPlainText("Compilation: Success... ✔️")
cls.main_form.destination_dock.pc = hack_assembly_compiler.program_counter_and_lines.copy()
cls.main_form.destination_dock.file_path = cls.main_form.tab_bar.current.file_path
except InvalidSyntaxException as e:
LogSystem.error("Invalid syntax error")
error_msg = str(e)
error_line = ""
error = ""
for i in range(len(error_msg)):
if error_msg[i] == ":":
error = error_msg[i+1:]
break
error_line += error_msg[i]
cls.main_form.destination_dock.pc = None
cls.main_form.compilation_dock.textarea.appendPlainText("Compilation: Error on line {0} - {1} ❌".format(error_line, error))
cls.main_form.tab_bar.current.textarea.highlightErrorLine(int(error_line) - 1)
return
except InternalException as e:
LogSystem.error("Internal error")
cls.main_form.destination_dock.pc = None
cls.main_form.compilation_dock.textarea.appendPlainText("Compilation: Error {0} ❌".format(e))
return
except Exception as e:
LogSystem.error(e)
cls.main_form.destination_dock.pc = None
cls.main_form.compilation_dock.textarea.appendPlainText("Compilation: Error {0} ❌".format(e))
return
if not cls.main_form.comparison_dock.file:
return
destination_items_counter = cls.main_form.destination_dock.list.count()
comparison_items_counter = cls.main_form.comparison_dock.list.count()
max_items = destination_items_counter if destination_items_counter > comparison_items_counter else comparison_items_counter
min_items = destination_items_counter if destination_items_counter < comparison_items_counter else comparison_items_counter
try:
for i in range(max_items):
try:
destination_item = cls.main_form.destination_dock.list.item(i).text()
except:
cls.main_form.comparison_dock.list.item(i).setBackground(QtGui.QColor(255, 255, 100))
cls.main_form.compilation_dock.textarea.appendPlainText("Comparison: Failed - There are more lines of code in comparison file! ❌")
return
try:
comparison_item = cls.main_form.comparison_dock.list.item(i).text()
except:
cls.main_form.destination_dock.list.item(i).setBackground(QtGui.QColor(255, 255, 100))
cls.main_form.compilation_dock.textarea.appendPlainText("Comparison: Failed at line {0} ❌".format(hack_assembly_compiler.program_counter_and_lines[i]))
cls.main_form.tab_bar.current.textarea.highlightComparisonLine(int(hack_assembly_compiler.program_counter_and_lines[i]) - 1)
return
if destination_item == comparison_item:
cls.main_form.destination_dock.list.item(i).setBackground(QtGui.QColor(170, 255, 170))
cls.main_form.comparison_dock.list.item(i).setBackground(QtGui.QColor(170, 255, 170))
else:
cls.main_form.destination_dock.list.item(i).setBackground(QtGui.QColor(255, 255, 100))
cls.main_form.comparison_dock.list.item(i).setBackground(QtGui.QColor(255, 255, 100))
cls.main_form.compilation_dock.textarea.appendPlainText("Comparison: Failed at line {0} ❌".format(hack_assembly_compiler.program_counter_and_lines[i]))
cls.main_form.tab_bar.current.textarea.highlightComparisonLine(int(hack_assembly_compiler.program_counter_and_lines[i]) - 1)
return
cls.main_form.compilation_dock.textarea.appendPlainText("Comparison: Success... ✔️")
except Exception as e:
LogSystem.error(e)
except Exception as e:
LogSystem.error(e)
@classmethod
def export_destination(cls):
"""
Save compiled data.
"""
try:
if cls.main_form.destination_dock.list.count() == 0:
LogSystem.warning("Nothing to export!")
dialog = QtWidgets.QMessageBox()
dialog.setIcon(QtWidgets.QMessageBox.Information)
dialog.setText("Exporting")
dialog.setInformativeText("There is nothing to export!")
dialog.setWindowTitle("Export information")
dialog.setStandardButtons(QtWidgets.QMessageBox.Ok)
dialog.exec_()
return
options = QtWidgets.QFileDialog.Option() | QtWidgets.QFileDialog.DontUseNativeDialog
file_path, ok = QtWidgets.QFileDialog.getSaveFileName(cls.main_form, "Save file", ".hack", "Hack files (*.hack)", options=options)
if ok:
with open(file_path, "w") as file:
for i in range(cls.main_form.destination_dock.list.count()):
destination_item = cls.main_form.destination_dock.list.item(i).text()
file.write(destination_item + "\n")
LogSystem.warning("Destination saved to: {0}".format(file_path))
except Exception as e:
LogSystem.error(e) | src/utils/action_system.py | import datetime
from PyQt5 import QtWidgets, QtCore, QtGui
from src.utils.log_system import LogSystem
from src.widgets.syntax_highlighter import SyntaxHighlighter
from src.hack_compiler import HackAssemblyCompiler, InvalidSyntaxException, InternalException
class ActionSystem(object):
main_form = None
@classmethod
def initialize(cls, main_form):
"""
Set action for specific form, in our case we want these actions on our main form.
"""
cls.main_form = main_form
@classmethod
def new_file(cls, file_path=None):
"""
Create new file and open a new tab for that file, if file path is not provided it will only open empty tab.
"""
LogSystem.information("Creating new file")
cls.main_form.tab_bar.create_new_tab(file_path)
@classmethod
def open_file(cls):
"""
Open file dialog to select specific file to open in new tab.
"""
LogSystem.information("Open file")
try:
options = QtWidgets.QFileDialog.Option() | QtWidgets.QFileDialog.DontUseNativeDialog
file_path, ok = QtWidgets.QFileDialog.getOpenFileName(cls.main_form, "Open File", options=options)
if ok:
cls.new_file(file_path)
except Exception as e:
LogSystem.error(e)
@classmethod
def open_folder(cls):
"""
Open file dialog to select specific folder to open and will show directory view dock.
"""
LogSystem.information("Open folder")
try:
options = QtWidgets.QFileDialog.Option() | QtWidgets.QFileDialog.DontUseNativeDialog
directory_path = QtWidgets.QFileDialog.getExistingDirectory(cls.main_form, "", "./repository", options=options)
if directory_path:
LogSystem.success("Opening directory: {0}".format(directory_path))
cls.main_form.directory_view.dock.show()
cls.main_form.directory_view.filesystem.setRootPath(directory_path)
cls.main_form.directory_view.tree.setModel(cls.main_form.directory_view.filesystem)
cls.main_form.directory_view.tree.setRootIndex(cls.main_form.directory_view.filesystem.index(directory_path))
for col in range(1, 4):
cls.main_form.directory_view.tree.hideColumn(col)
cls.main_form.directory_view.cwd = directory_path
else:
LogSystem.warning("Ignoring open folder request!")
except Exception as e:
LogSystem.error(e)
@classmethod
def save_file(cls):
"""
Open file dialog for saving files.
"""
LogSystem.information("Save file")
try:
current_tab = cls.main_form.tab_bar.current
if not current_tab.file_path:
LogSystem.information("Opening save file dialog!")
options = QtWidgets.QFileDialog.Option() | QtWidgets.QFileDialog.DontUseNativeDialog
file_path, ok = QtWidgets.QFileDialog.getSaveFileName(cls.main_form, "Save file", options=options)
if ok:
for tab in cls.main_form.tab_bar.tabs:
if tab.file_path == file_path:
cls.main_form.tab_bar.remove(tab)
current_tab.file_path = file_path
current_tab.title = file_path.split("/")[-1]
current_tab.extension = file_path.split(".")[-1]
try:
if current_tab.extension == "asm":
current_tab.syntax = SyntaxHighlighter(current_tab.textarea.document(), file_path)
except Exception as e:
LogSystem.error(e)
cls.main_form.tab_bar.get.setTabText(cls.main_form.tab_bar.get.indexOf(current_tab.widget), current_tab.title)
if not current_tab.saved and current_tab.file_path:
LogSystem.success("Saving file: {0}".format(current_tab.file_path))
text_buffer = current_tab.textarea.toPlainText()
file_path = current_tab.file_path
with open(file_path, "w") as file:
file.write(text_buffer)
current_tab.saved = True
else:
LogSystem.warning("No changes made to file: {0}".format(current_tab.file_path))
except Exception as e:
LogSystem.error(e)
@classmethod
def save_file_as(cls):
"""
Open file dialog for saving files.
"""
LogSystem.information("Save file as")
try:
current_tab = cls.main_form.tab_bar.current
options = QtWidgets.QFileDialog.Option() | QtWidgets.QFileDialog.DontUseNativeDialog
file_path, ok = QtWidgets.QFileDialog.getSaveFileName(cls.main_form, "Save file", options=options)
if ok:
current_tab.file_path = file_path
current_tab.title = file_path.split("/")[-1]
current_tab.extension = file_path.split(".")[-1]
current_tab.saved = True
cls.main_form.tab_bar.get.setTabText(cls.main_form.tab_bar.get.indexOf(current_tab.widget), current_tab.title)
try:
if current_tab.extension == "asm":
current_tab.syntax = SyntaxHighlighter(current_tab.textarea.document(), file_path)
except Exception as e:
LogSystem.error(e)
text_buffer = current_tab.textarea.toPlainText()
file_path = current_tab.file_path
with open(file_path, "w") as file:
file.write(text_buffer)
LogSystem.information("File saved as: {0}".format(file_path))
except Exception as e:
LogSystem.error(e)
@classmethod
def load_comparison_file(cls):
"""
Load hack file for comparison and display comparison dock widget.
"""
LogSystem.information("Starting Action Load Comparison File!")
try:
options = QtWidgets.QFileDialog.Option() | QtWidgets.QFileDialog.DontUseNativeDialog
file_path, ok = QtWidgets.QFileDialog.getOpenFileName(cls.main_form, "Open File", "./repository", "Hack files (*.hack)", options=options)
if ok:
with open(file_path, "r") as file:
cls.main_form.comparison_dock.list.clear()
cls.main_form.comparison_dock.file = file_path
text_buffer = file.read().split("\n")
for line in text_buffer:
if line:
list_item = QtWidgets.QListWidgetItem(line)
cls.main_form.comparison_dock.list.addItem(list_item)
cls.main_form.comparison_dock.show()
except Exception as e:
LogSystem.error(e)
@classmethod
def clear_comparison_file(cls):
"""
Clear content in comparison dock widget.
"""
LogSystem.information("Starting Action Clear Comparison File!")
try:
cls.main_form.comparison_dock.list.clear()
cls.main_form.comparison_dock.file = None
except Exception as e:
LogSystem.error(e)
@classmethod
def compile(cls):
"""
Compile hack assembly code that is opened in current tab.
"""
LogSystem.information("Starting Action Compile!")
try:
# Check if file was saved
current_tab = cls.main_form.tab_bar.current
if not current_tab.saved:
cls.save_file()
if current_tab.saved == False:
return
file_path = current_tab.file_path
cls.main_form.destination_dock.pc = None
cls.main_form.destination_dock.file_path = None
cls.main_form.compilation_dock.textarea.clear()
cls.main_form.compilation_dock.show()
cls.main_form.compilation_dock.textarea.appendPlainText("Time: {0}".format(datetime.datetime.now()))
cls.main_form.destination_dock.dock.show()
cls.main_form.tab_bar.current.textarea.setExtraSelections([])
for i in range(cls.main_form.comparison_dock.list.count()):
cls.main_form.comparison_dock.list.item(i).setBackground(QtGui.QColor(255, 255, 255))
try:
cls.main_form.destination_dock.list.clear()
hack_assembly_compiler = HackAssemblyCompiler(file_path, "temp.hack")
hack_assembly_compiler.compile()
for binary in hack_assembly_compiler.binary_data:
list_item = QtWidgets.QListWidgetItem(binary)
cls.main_form.destination_dock.list.addItem(list_item)
cls.main_form.compilation_dock.textarea.appendPlainText("Compilation: Success... ✔️")
cls.main_form.destination_dock.pc = hack_assembly_compiler.program_counter_and_lines.copy()
cls.main_form.destination_dock.file_path = cls.main_form.tab_bar.current.file_path
except InvalidSyntaxException as e:
LogSystem.error("Invalid syntax error")
error_msg = str(e)
error_line = ""
error = ""
for i in range(len(error_msg)):
if error_msg[i] == ":":
error = error_msg[i+1:]
break
error_line += error_msg[i]
cls.main_form.destination_dock.pc = None
cls.main_form.compilation_dock.textarea.appendPlainText("Compilation: Error on line {0} - {1} ❌".format(error_line, error))
cls.main_form.tab_bar.current.textarea.highlightErrorLine(int(error_line) - 1)
return
except InternalException as e:
LogSystem.error("Internal error")
cls.main_form.destination_dock.pc = None
cls.main_form.compilation_dock.textarea.appendPlainText("Compilation: Error {0} ❌".format(e))
return
except Exception as e:
LogSystem.error(e)
cls.main_form.destination_dock.pc = None
cls.main_form.compilation_dock.textarea.appendPlainText("Compilation: Error {0} ❌".format(e))
return
if not cls.main_form.comparison_dock.file:
return
destination_items_counter = cls.main_form.destination_dock.list.count()
comparison_items_counter = cls.main_form.comparison_dock.list.count()
max_items = destination_items_counter if destination_items_counter > comparison_items_counter else comparison_items_counter
min_items = destination_items_counter if destination_items_counter < comparison_items_counter else comparison_items_counter
try:
for i in range(max_items):
try:
destination_item = cls.main_form.destination_dock.list.item(i).text()
except:
cls.main_form.comparison_dock.list.item(i).setBackground(QtGui.QColor(255, 255, 100))
cls.main_form.compilation_dock.textarea.appendPlainText("Comparison: Failed - There are more lines of code in comparison file! ❌")
return
try:
comparison_item = cls.main_form.comparison_dock.list.item(i).text()
except:
cls.main_form.destination_dock.list.item(i).setBackground(QtGui.QColor(255, 255, 100))
cls.main_form.compilation_dock.textarea.appendPlainText("Comparison: Failed at line {0} ❌".format(hack_assembly_compiler.program_counter_and_lines[i]))
cls.main_form.tab_bar.current.textarea.highlightComparisonLine(int(hack_assembly_compiler.program_counter_and_lines[i]) - 1)
return
if destination_item == comparison_item:
cls.main_form.destination_dock.list.item(i).setBackground(QtGui.QColor(170, 255, 170))
cls.main_form.comparison_dock.list.item(i).setBackground(QtGui.QColor(170, 255, 170))
else:
cls.main_form.destination_dock.list.item(i).setBackground(QtGui.QColor(255, 255, 100))
cls.main_form.comparison_dock.list.item(i).setBackground(QtGui.QColor(255, 255, 100))
cls.main_form.compilation_dock.textarea.appendPlainText("Comparison: Failed at line {0} ❌".format(hack_assembly_compiler.program_counter_and_lines[i]))
cls.main_form.tab_bar.current.textarea.highlightComparisonLine(int(hack_assembly_compiler.program_counter_and_lines[i]) - 1)
return
cls.main_form.compilation_dock.textarea.appendPlainText("Comparison: Success... ✔️")
except Exception as e:
LogSystem.error(e)
except Exception as e:
LogSystem.error(e)
@classmethod
def export_destination(cls):
"""
Save compiled data.
"""
try:
if cls.main_form.destination_dock.list.count() == 0:
LogSystem.warning("Nothing to export!")
dialog = QtWidgets.QMessageBox()
dialog.setIcon(QtWidgets.QMessageBox.Information)
dialog.setText("Exporting")
dialog.setInformativeText("There is nothing to export!")
dialog.setWindowTitle("Export information")
dialog.setStandardButtons(QtWidgets.QMessageBox.Ok)
dialog.exec_()
return
options = QtWidgets.QFileDialog.Option() | QtWidgets.QFileDialog.DontUseNativeDialog
file_path, ok = QtWidgets.QFileDialog.getSaveFileName(cls.main_form, "Save file", ".hack", "Hack files (*.hack)", options=options)
if ok:
with open(file_path, "w") as file:
for i in range(cls.main_form.destination_dock.list.count()):
destination_item = cls.main_form.destination_dock.list.item(i).text()
file.write(destination_item + "\n")
LogSystem.warning("Destination saved to: {0}".format(file_path))
except Exception as e:
LogSystem.error(e) | 0.280222 | 0.072571 |
import os, copy, logging

import torch
from torch import nn
from torch.nn import functional as F

from allennlp.modules.conditional_random_field import ConditionalRandomField

from util import func as H
from . import transformer as T
class EmbeddingClfHead(T.BaseClfHead):
	"""Classification head over static embeddings (word2vec, ELMo, or both).

	`forward` embeds the raw inputs into one feature tensor per example (a pair
	of tensors for entailment / sentence-similarity tasks) and `_forward` turns
	those features into logits (when `labels is None`) or a per-sample loss.
	Subclasses insert pooling / seq2vec / seq2seq encoders between the stages.
	"""
	def __init__(self, config, lm_model, lm_config, embed_type='w2v', w2v_path=None, iactvtn='relu', oactvtn='sigmoid', fchdim=0, extfc=False, sample_weights=False, num_lbs=1, mlt_trnsfmr=False, lm_loss=False, do_drop=True, pdrop=0.2, do_norm=True, norm_type='batch', do_lastdrop=True, do_crf=False, do_thrshld=False, constraints=[], initln=False, initln_mean=0., initln_std=0.02, task_params={}, **kwargs):
		from util import config as C
		super(EmbeddingClfHead, self).__init__(config, lm_model, lm_config, sample_weights=sample_weights, num_lbs=num_lbs, mlt_trnsfmr=config.task_type in ['entlmnt', 'sentsim'] and task_params.setdefault('sentsim_func', None) is not None, task_params=task_params, **kwargs)
		# Paired-sentence tasks double the classifier input width when the two
		# sentence vectors are concatenated (sentsim_func is None or 'concat').
		self.dim_mulriple = 2 if self.task_type in ['entlmnt', 'sentsim'] and (self.task_params.setdefault('sentsim_func', None) is None or self.task_params['sentsim_func'] == 'concat') else 1
		self.embed_type = embed_type
		# Fix: test the most specific prefix first. 'elmo_w2v'.startswith('elmo')
		# is True, so the original elif ordering made the combined branch
		# unreachable and `self.w2v_model` was never loaded for 'elmo_w2v'.
		if embed_type.startswith('elmo_w2v'):
			from gensim.models import KeyedVectors
			from gensim.models.keyedvectors import Word2VecKeyedVectors
			self.w2v_model = w2v_path if type(w2v_path) is Word2VecKeyedVectors else (KeyedVectors.load(w2v_path, mmap='r') if w2v_path and os.path.isfile(w2v_path) else None)
			assert(self.w2v_model)
			self.vocab_size = 793471
			self.n_embd = self.w2v_model.syn0.shape[1] + lm_config['elmoedim'] * 2 + (self.n_embd if hasattr(self, 'n_embd') else 0)
			config.register_callback('mdl_trsfm', EmbeddingClfHead.callback_update_w2v_model(self))
			config.register_callback('mdl_trsfm', EmbeddingClfHead.callback_update_elmo_config(self))
		elif embed_type.startswith('w2v'):
			from gensim.models import KeyedVectors
			from gensim.models.keyedvectors import Word2VecKeyedVectors
			self.w2v_model = w2v_path if type(w2v_path) is Word2VecKeyedVectors else (KeyedVectors.load(w2v_path, mmap='r') if w2v_path and os.path.isfile(w2v_path) else None)
			assert(self.w2v_model)
			self.n_embd = self.w2v_model.syn0.shape[1] + (self.n_embd if hasattr(self, 'n_embd') else 0)
			config.register_callback('mdl_trsfm', EmbeddingClfHead.callback_update_w2v_model(self))
		elif embed_type.startswith('elmo'):
			self.vocab_size = 793471
			self.n_embd = lm_config['elmoedim'] * 2 + (self.n_embd if hasattr(self, 'n_embd') else 0) # two ELMo layers * ELMo embedding dimensions
			config.register_callback('mdl_trsfm', EmbeddingClfHead.callback_update_elmo_config(self))
		self.norm = C.NORM_TYPE_MAP[norm_type](self.maxlen) if self.task_type == 'nmt' else C.NORM_TYPE_MAP[norm_type](self.n_embd)
		self._int_actvtn = C.ACTVTN_MAP[iactvtn]
		self._out_actvtn = C.ACTVTN_MAP[oactvtn]
		self.fchdim = fchdim
		self.extfc = extfc
		self.hdim = self.dim_mulriple * self.n_embd if self.mlt_trnsfmr and self.task_type in ['entlmnt', 'sentsim'] else self.n_embd
		self.linear = self.__init_linear__()
		if (initln): self.linear.apply(H._weights_init(mean=initln_mean, std=initln_std))
		if self.do_extlin:
			self.extlinear = nn.Linear(self.n_embd, self.n_embd)
			if (initln): self.extlinear.apply(H._weights_init(mean=initln_mean, std=initln_std))
		self.crf = ConditionalRandomField(num_lbs) if do_crf else None
	def __init_linear__(self):
		"""Build the classifier stack; its shape depends on task type, fchdim and sentsim_func."""
		use_gpu = next(self.parameters()).is_cuda
		linear = (nn.Sequential(nn.Linear(self.hdim, self.fchdim), self._int_actvtn(), nn.Linear(self.fchdim, self.fchdim), self._int_actvtn(), *([] if self.task_params.setdefault('sentsim_func', None) and self.task_params['sentsim_func'] != 'concat' else [nn.Linear(self.fchdim, self.num_lbs), self._out_actvtn()])) if self.task_type in ['entlmnt', 'sentsim'] else nn.Sequential(nn.Linear(self.hdim, self.fchdim), self._int_actvtn(), nn.Linear(self.fchdim, self.fchdim), self._int_actvtn(), nn.Linear(self.fchdim, self.num_lbs))) if self.fchdim else (nn.Sequential(*([nn.Linear(self.hdim, self.hdim), self._int_actvtn()] if self.task_params.setdefault('sentsim_func', None) and self.task_params['sentsim_func'] != 'concat' else [nn.Linear(self.hdim, self.num_lbs), self._out_actvtn()])) if self.task_type in ['entlmnt', 'sentsim'] else nn.Linear(self.hdim, self.num_lbs))
		return linear.to('cuda') if use_gpu else linear
	def __lm_head__(self):
		# Language-model head shares this module's weights via EmbeddingHead.
		return EmbeddingHead(self)
	def _w2v(self, input_ids, use_gpu=False):
		# Static word2vec lookup; input_ids are row indices into the embedding matrix.
		wembd_tnsr = torch.tensor([self.w2v_model.syn0[s] for s in input_ids])
		if use_gpu: wembd_tnsr = wembd_tnsr.to('cuda')
		return wembd_tnsr
	def _sentvec(self, input_ids, use_gpu=False):
		# Hook for sentence-vector lookup; overridden/used by sent2vec-based subclasses.
		pass
	def forward(self, input_ids, *extra_inputs, labels=None, past=None, weights=None, embedding_mode=False, ret_mask=False):
		"""Embed the inputs.

		Returns `(clf_h,)` / `(clf_h, mask)` when labels is None, otherwise
		`(clf_h, lm_loss)` / `(clf_h, lm_loss, mask)` depending on `ret_mask`.
		For paired-sentence tasks `clf_h` is a two-element list of tensors.
		"""
		use_gpu = next(self.parameters()).is_cuda
		if self.sample_weights and len(extra_inputs) > 0:
			# Per-sample weights travel as the last extra input when enabled.
			sample_weights = extra_inputs[-1]
			extra_inputs = extra_inputs[:-1]
		else:
			sample_weights = None
		# embed_type encodes the embedding pipeline, e.g. 'elmo_w2v' -> ['elmo', 'w2v'].
		unsolved_input_keys, unsolved_inputs = self.embed_type.split('_'), [input_ids]+list(extra_inputs)
		extra_inputs_dict = dict(zip([x for x in self.input_keys if x != 'input_ids'], extra_inputs))
		mask = extra_inputs_dict['mask'] # mask of the original textual input
		clf_hs = []
		if self.task_type in ['entlmnt', 'sentsim']:
			if (self.embed_type.startswith('elmo')):
				# ELMo consumes the raw input_ids pair directly; remaining keys
				# (e.g. 'w2v') are resolved through their _<key> lookup methods.
				embeddings = (self.lm_model(input_ids[0]), self.lm_model(input_ids[1]))
				clf_hs.append((torch.cat(embeddings[0]['elmo_representations'], dim=-1), torch.cat(embeddings[1]['elmo_representations'], dim=-1)))
				del unsolved_input_keys[0]
				del unsolved_inputs[0]
			for input_key, input_tnsr in zip(unsolved_input_keys, unsolved_inputs):
				clf_hs.append([getattr(self, '_%s'%input_key)(input_tnsr[x], use_gpu=use_gpu) for x in [0,1]])
			clf_h = [torch.cat(embds, dim=-1) for embds in zip(*clf_hs)]
		else:
			if (self.embed_type.startswith('elmo')):
				embeddings = self.lm_model(input_ids)
				clf_hs.append(torch.cat(embeddings['elmo_representations'], dim=-1))
				del unsolved_input_keys[0]
				del unsolved_inputs[0]
			for input_key, input_tnsr in zip(unsolved_input_keys, unsolved_inputs):
				clf_hs.append(getattr(self, '_%s'%input_key)(input_tnsr, use_gpu=use_gpu))
			clf_h = torch.cat(clf_hs, dim=-1)
		if labels is None:
			return (clf_h, mask) if ret_mask else (clf_h,)
		# Calculate language model loss
		if (self.lm_loss):
			lm_logits, lm_target = self.lm_logit(input_ids, clf_h, extra_inputs_dict)
			lm_loss_func = nn.CrossEntropyLoss(ignore_index=-1, reduction='none')
			lm_loss = lm_loss_func(lm_logits.contiguous().view(-1, lm_logits.size(-1)), lm_target.contiguous().view(-1)).view(input_ids.size(0), -1)
			if sample_weights is not None: lm_loss *= sample_weights
		else:
			lm_loss = None
		return (clf_h, lm_loss, mask) if ret_mask else (clf_h, lm_loss)
	def _forward(self, clf_h, mask, labels=None, weights=None): # For fine-tune task
		"""Map embedded features to logits (labels is None) or a per-sample loss."""
		if self.task_type in ['entlmnt', 'sentsim']:
			if self.do_norm: clf_h = [self.norm(clf_h[x]) for x in [0,1]]
			clf_h = [self.dropout(clf_h[x]) for x in [0,1]]
			if (self.task_type == 'entlmnt' or self.task_params.setdefault('sentsim_func', None) is None or self.task_params['sentsim_func'] == 'concat'):
				# Fix: `task_params` was read without `self.` below, raising NameError.
				if self.task_params.setdefault('concat_strategy', 'normal') == 'diff':
					# InferSent-style features: [u; v; |u-v|; u*v]
					clf_h = torch.cat(clf_h+[torch.abs(clf_h[0]-clf_h[1]), clf_h[0]*clf_h[1]], dim=-1)
				elif self.task_params.setdefault('concat_strategy', 'normal') == 'flipflop':
					# Symmetrize w.r.t. sentence order.
					clf_h = (torch.cat(clf_h, dim=-1) + torch.cat(clf_h[::-1], dim=-1))
				else:
					clf_h = torch.cat(clf_h, dim=-1)
				clf_logits = self.linear(clf_h) if self.linear else clf_h
			else:
				clf_logits = clf_h = F.pairwise_distance(self.linear(clf_h[0]), self.linear(clf_h[1]), 2, eps=1e-12) if self.task_params['sentsim_func'] == 'dist' else F.cosine_similarity(self.linear(clf_h[0]), self.linear(clf_h[1]), dim=1, eps=1e-12)
		else:
			if self.do_norm: clf_h = self.norm(clf_h)
			clf_h = self.dropout(clf_h)
			clf_logits = self.linear(clf_h)
		if self.do_lastdrop: clf_logits = self.last_dropout(clf_logits)
		if (labels is None):
			if self.crf:
				# Fix: `input_ids`/`use_gpu` were referenced here but are not in
				# scope in this method; sizes come from `mask` (same batch/seq
				# dims) and the device from the logits tensor instead.
				use_gpu = clf_logits.is_cuda
				tag_seq, score = zip(*self.crf.viterbi_tags(clf_logits.view(mask.size(0), -1, self.num_lbs), torch.ones(*(mask.size()[:2])).int()))
				tag_seq = torch.tensor(tag_seq).to('cuda') if use_gpu else torch.tensor(tag_seq)
				# One-hot encode the Viterbi tag sequence as pseudo-logits.
				clf_logits = torch.zeros((*tag_seq.size(), self.num_lbs)).to('cuda') if use_gpu else torch.zeros((*tag_seq.size(), self.num_lbs))
				clf_logits = clf_logits.scatter(-1, tag_seq.unsqueeze(-1), 1)
				return clf_logits
			# Distance-valued similarity is flipped when the labels encode similarity (and vice versa).
			if (self.task_type == 'sentsim' and self.task_params.setdefault('sentsim_func', None) and self.task_params['sentsim_func'] != self.task_params.setdefault('ymode', 'sim')): return 1 - clf_logits.view(-1, self.num_lbs)
			return clf_logits.view(-1, self.num_lbs)
		if self.crf:
			# Fix: batch size recovered from `mask` (the original read the
			# out-of-scope `input_ids`).
			clf_loss = -self.crf(clf_logits.view(mask.size(0), -1, self.num_lbs), mask.long())
		elif self.task_type == 'mltc-clf' or self.task_type == 'entlmnt' or self.task_type == 'nmt':
			loss_func = nn.CrossEntropyLoss(weight=weights, reduction='none')
			clf_loss = loss_func(clf_logits.view(-1, self.num_lbs), labels.view(-1))
		elif self.task_type == 'mltl-clf':
			loss_func = nn.BCEWithLogitsLoss(pos_weight=10*weights if weights is not None else None, reduction='none')
			clf_loss = loss_func(clf_logits.view(-1, self.num_lbs), labels.view(-1, self.num_lbs).float())
		elif self.task_type == 'sentsim':
			from util import config as C
			loss_cls = C.RGRSN_LOSS_MAP[self.task_params.setdefault('loss', 'contrastive')]
			loss_func = loss_cls(reduction='none', x_mode=C.SIM_FUNC_MAP.setdefault(self.task_params['sentsim_func'], 'dist'), y_mode=self.task_params.setdefault('ymode', 'sim')) if self.task_params.setdefault('sentsim_func', None) and self.task_params['sentsim_func'] != 'concat' else nn.MSELoss(reduction='none')
			clf_loss = loss_func(clf_logits.view(-1), labels.view(-1))
		return clf_loss
	def _filter_vocab(self):
		# No vocabulary filtering needed for static embedding heads.
		pass
	@classmethod
	def callback_update_w2v_model(cls, model):
		# Propagate the loaded word2vec model into the config on model transform.
		def _callback(config):
			from util import config as C
			setattr(config, 'w2v_model', model.w2v_model)
			config.delayed_update(C.Configurable.PREDEFINED_MODEL_CONFIG_DELAYED_UPDATES[config.model])
		return _callback
	@classmethod
	def callback_update_elmo_config(cls, model):
		# Propagate the ELMo language-model config into the config on model transform.
		def _callback(config):
			from util import config as C
			setattr(config, 'lm_config', model.lm_config)
			config.delayed_update(C.Configurable.PREDEFINED_MODEL_CONFIG_DELAYED_UPDATES[config.model])
		return _callback
class EmbeddingPool(EmbeddingClfHead):
	"""Embedding head that reduces the token-embedding sequence with 2D max/avg
	pooling, or -- when no pooler is configured -- by gathering the embedding at
	the position indexed by the mask sum. Not usable for 'nmt' tasks."""
	def __init__(self, config, lm_model, lm_config, pooler=None, pool_params={'kernel_size':8, 'stride':4}, embed_type='w2v', w2v_path=None, iactvtn='relu', oactvtn='sigmoid', fchdim=0, extfc=False, sample_weights=False, num_lbs=1, mlt_trnsfmr=False, lm_loss=False, do_drop=True, pdrop=0.2, do_norm=True, norm_type='batch', do_lastdrop=True, do_crf=False, do_thrshld=False, constraints=[], initln=False, initln_mean=0., initln_std=0.02, task_params={}, **kwargs):
		assert(config.task_type != 'nmt')
		from util import config as C
		super(EmbeddingPool, self).__init__(config, lm_model, lm_config, embed_type=embed_type, w2v_path=w2v_path, iactvtn=iactvtn, oactvtn=oactvtn, fchdim=fchdim, extfc=extfc, sample_weights=sample_weights, num_lbs=num_lbs, mlt_trnsfmr=mlt_trnsfmr, lm_loss=lm_loss, do_drop=do_drop, pdrop=pdrop, do_norm=do_norm, norm_type=norm_type, do_lastdrop=do_lastdrop, do_crf=do_crf, do_thrshld=do_thrshld, constraints=constraints, initln=initln, initln_mean=initln_mean, initln_std=initln_std, task_params=task_params, **kwargs)
		self.maxlen = self.task_params.setdefault('maxlen', 128)
		if pooler:
			# 'max' selects MaxPool2d; any other truthy value selects AvgPool2d.
			self.pooler = nn.MaxPool2d(**pool_params) if pooler == 'max' else nn.AvgPool2d(**pool_params)
			# Closed-form pooling output size over the (2*maxlen, n_embd/2) view
			# built in forward(). NOTE: the setdefault calls deliberately insert
			# 'padding'/'dilation' defaults into pool_params as a side effect.
			encoder_odim = int((2 * self.maxlen + 2 * pool_params.setdefault('padding', 0) - pool_params.setdefault('dilation', 1) * (pool_params['kernel_size'] - 1) - 1) / pool_params['stride'] + 1) * int((int(0.5 * self.n_embd) + 2 * pool_params.setdefault('padding', 0) - pool_params.setdefault('dilation', 1) * (pool_params['kernel_size'] - 1) - 1) / pool_params['stride'] + 1) if pooler == 'max' else int((2 * self.maxlen + 2 * pool_params.setdefault('padding', 0) - pool_params['kernel_size']) / pool_params['stride'] + 1) * int((int(0.5 * self.n_embd) + 2 * pool_params.setdefault('padding', 0) - pool_params['kernel_size']) / pool_params['stride'] + 1)
			self.norm = C.NORM_TYPE_MAP[norm_type](encoder_odim)
			self.hdim = self.dim_mulriple * encoder_odim if self.task_type in ['entlmnt', 'sentsim'] else encoder_odim
		else:
			self.pooler = None
			self.norm = C.NORM_TYPE_MAP[norm_type](self.n_embd)
			self.hdim = self.n_embd
		# Rebuild the classifier: self.hdim may differ from the parent's value.
		self.linear = self.__init_linear__()
		if (initln): self.linear.apply(H._weights_init(mean=initln_mean, std=initln_std))
	def forward(self, input_ids, *extra_inputs, labels=None, past=None, weights=None, embedding_mode=False):
		"""Embed, pool (or gather the mask-indexed position), then classify via _forward."""
		outputs = super(EmbeddingPool, self).forward(input_ids, *extra_inputs, labels=labels, past=past, weights=weights, embedding_mode=embedding_mode, ret_mask=True)
		# Parent returns (clf_h, mask) without labels, else (clf_h, lm_loss, mask).
		if labels is None:
			clf_h, mask = outputs
		else:
			clf_h, lm_loss, mask = outputs
		pool_idx = mask.sum(1)
		if self.pooler:
			# Reshape each example to (2*seqlen, n_embd/2) so the 2D pooler can run over it.
			clf_h = [clf_h[x].view(clf_h[x].size(0), 2*clf_h[x].size(1), -1) for x in [0,1]] if self.task_type in ['entlmnt', 'sentsim'] else clf_h.view(clf_h.size(0), 2*clf_h.size(1), -1)
			clf_h = [self.pooler(clf_h[x]).view(clf_h[x].size(0), -1) for x in [0,1]] if self.task_type in ['entlmnt', 'sentsim'] else self.pooler(clf_h).view(clf_h.size(0), -1)
		else:
			# No pooler: take the embedding at the position given by the mask sum.
			clf_h = [clf_h[x].gather(1, pool_idx[x].unsqueeze(-1).unsqueeze(-1).expand(-1, 1, clf_h[x].size(2))).squeeze(1) for x in [0,1]] if self.task_type in ['entlmnt', 'sentsim'] else clf_h.gather(1, pool_idx.unsqueeze(-1).unsqueeze(-1).expand(-1, 1, clf_h.size(2))).squeeze(1)
		return (self._forward(clf_h, mask, labels=labels, weights=weights),) + (({},) if labels is None else (lm_loss, {}))
class EmbeddingSeq2Vec(EmbeddingClfHead):
	"""Embedding head that encodes the token-embedding sequence into a single
	vector with a configurable seq2vec encoder (AllenNLP-style or a wrapped
	PyTorch module), falling back to mask-indexed gathering when no encoder is
	configured. Not usable for 'nmt' tasks."""
	def __init__(self, config, lm_model, lm_config, seq2vec=None, s2v_params={'hdim':768}, embed_type='w2v', w2v_path=None, iactvtn='relu', oactvtn='sigmoid', fchdim=0, extfc=False, sample_weights=False, num_lbs=1, mlt_trnsfmr=False, lm_loss=False, do_drop=True, pdrop=0.2, do_norm=True, norm_type='batch', do_lastdrop=True, do_crf=False, do_thrshld=False, constraints=[], initln=False, initln_mean=0., initln_std=0.02, task_params={}, **kwargs):
		assert(config.task_type != 'nmt')
		from util import config as C
		super(EmbeddingSeq2Vec, self).__init__(config, lm_model, lm_config, embed_type=embed_type, w2v_path=w2v_path, iactvtn=iactvtn, oactvtn=oactvtn, fchdim=fchdim, extfc=extfc, sample_weights=sample_weights, num_lbs=num_lbs, mlt_trnsfmr=mlt_trnsfmr, lm_loss=lm_loss, do_drop=do_drop, pdrop=pdrop, do_norm=do_norm, norm_type=norm_type, do_lastdrop=do_lastdrop, do_crf=do_crf, do_thrshld=do_thrshld, constraints=constraints, initln=initln, initln_mean=initln_mean, initln_std=initln_std, task_params=task_params, **kwargs)
		if seq2vec:
			params = {}
			if seq2vec.startswith('pytorch-'):
				# 'pytorch-<mdl>' selects a raw PyTorch module wrapped for the seq2vec API.
				pth_mdl = '-'.join(seq2vec.split('-')[1:])
				# Merge model defaults with task-specific overrides, then map s2v_params keys.
				_ = [params.update(x) for x in [C.SEQ2VEC_MDL_PARAMS.setdefault('pytorch', {}).setdefault(embed_type, {}), C.SEQ2VEC_TASK_PARAMS.setdefault('pytorch', {}).setdefault(self.task_type, {})]]
				_ = [params.update({p:s2v_params[k]}) for k, p in C.SEQ2VEC_LM_PARAMS_MAP.setdefault('pytorch', []) if k in s2v_params]
				# Input width depends on which embeddings feed the encoder.
				if (embed_type == 'w2v'): params[pth_mdl]['input_size'] = self.w2v_model.syn0.shape[1]
				if (embed_type == 'elmo_w2v'): params[pth_mdl]['input_size'] = params[pth_mdl]['input_size'] + self.w2v_model.syn0.shape[1]
				self.seq2vec = H.gen_pytorch_wrapper('seq2vec', pth_mdl, **params[pth_mdl])
				encoder_odim = C.SEQ2VEC_DIM_INFER[seq2vec]([self.n_embd, self.dim_mulriple, params[pth_mdl]])
			else:
				_ = [params.update(x) for x in [C.SEQ2VEC_MDL_PARAMS.setdefault(seq2vec, {}).setdefault(embed_type, {}), C.SEQ2VEC_TASK_PARAMS.setdefault(seq2vec, {}).setdefault(self.task_type, {})]]
				_ = [params.update({p:s2v_params[k]}) for k, p in C.SEQ2VEC_LM_PARAMS_MAP.setdefault(seq2vec, []) if k in s2v_params]
				if (embed_type == 'w2v'): params['embedding_dim'] = self.w2v_model.syn0.shape[1]
				if (embed_type == 'elmo_w2v'): params['embedding_dim'] = params['embedding_dim'] + self.w2v_model.syn0.shape[1]
				self.seq2vec = C.SEQ2VEC_MAP[seq2vec](**params)
				# Prefer the encoder's self-reported output dim ('boe' misreports it).
				if hasattr(self.seq2vec, 'get_output_dim') and seq2vec != 'boe':
					encoder_odim = self.seq2vec.get_output_dim()
				else:
					encoder_odim = C.SEQ2VEC_DIM_INFER[seq2vec]([self.n_embd, self.dim_mulriple, params])
		else:
			self.seq2vec = None
			encoder_odim = self.n_embd
		self.maxlen = self.task_params.setdefault('maxlen', 128)
		self.norm = C.NORM_TYPE_MAP[norm_type](encoder_odim)
		self.hdim = self.dim_mulriple * encoder_odim if self.task_type in ['entlmnt', 'sentsim'] else encoder_odim
		# Rebuild the classifier: self.hdim may differ from the parent's value.
		self.linear = self.__init_linear__()
		if (self.linear and initln): self.linear.apply(H._weights_init(mean=initln_mean, std=initln_std))
	def forward(self, input_ids, *extra_inputs, labels=None, past=None, weights=None, embedding_mode=False):
		"""Embed, encode each sequence to a vector (or gather the mask-indexed position), then classify."""
		outputs = super(EmbeddingSeq2Vec, self).forward(input_ids, *extra_inputs, labels=labels, past=past, weights=weights, embedding_mode=embedding_mode, ret_mask=True)
		# Parent returns (clf_h, mask) without labels, else (clf_h, lm_loss, mask).
		if labels is None:
			clf_h, mask = outputs
		else:
			clf_h, lm_loss, mask = outputs
		pool_idx = mask.sum(1)
		if self.seq2vec:
			clf_h = [self.seq2vec(clf_h[x], mask=mask[x]) for x in [0,1]] if self.task_type in ['entlmnt', 'sentsim'] else self.seq2vec(clf_h, mask=mask)
		else:
			# No encoder: take the embedding at the position given by the mask sum.
			clf_h = [clf_h[x].gather(1, pool_idx[x].unsqueeze(-1).unsqueeze(-1).expand(-1, 1, clf_h[x].size(2))).squeeze(1) for x in [0,1]] if self.task_type in ['entlmnt', 'sentsim'] else clf_h.gather(1, pool_idx.unsqueeze(-1).unsqueeze(-1).expand(-1, 1, clf_h.size(2))).squeeze(1)
		return (self._forward(clf_h, mask, labels=labels, weights=weights),) + (({},) if labels is None else (lm_loss, {}))
class EmbeddingSeq2Seq(EmbeddingClfHead):
	"""Embedding head that re-encodes the token-embedding sequence with a
	seq2seq encoder (AllenNLP-style or a wrapped PyTorch module) before the
	per-token classifier; used for sequence-labelling style tasks."""
	def __init__(self, config, lm_model, lm_config, seq2seq=None, s2s_params={'hdim':768}, embed_type='w2v', w2v_path=None, iactvtn='relu', oactvtn='sigmoid', fchdim=0, extfc=False, sample_weights=False, num_lbs=1, mlt_trnsfmr=False, lm_loss=False, do_drop=True, pdrop=0.2, do_norm=True, norm_type='batch', do_lastdrop=True, do_crf=False, do_thrshld=False, constraints=[], initln=False, initln_mean=0., initln_std=0.02, task_params={}, **kwargs):
		from util import config as C
		super(EmbeddingSeq2Seq, self).__init__(config, lm_model, lm_config, embed_type=embed_type, w2v_path=w2v_path, iactvtn=iactvtn, oactvtn=oactvtn, fchdim=fchdim, extfc=extfc, sample_weights=sample_weights, num_lbs=num_lbs, mlt_trnsfmr=mlt_trnsfmr, lm_loss=lm_loss, do_drop=do_drop, pdrop=pdrop, do_norm=do_norm, norm_type=norm_type, do_lastdrop=do_lastdrop, do_crf=do_crf, do_thrshld=do_thrshld, constraints=constraints, initln=initln, initln_mean=initln_mean, initln_std=initln_std, task_params=task_params, **kwargs)
		if seq2seq:
			params = {}
			if seq2seq.startswith('pytorch-'):
				# 'pytorch-<mdl>' selects a raw PyTorch module wrapped for the seq2seq API.
				pth_mdl = '-'.join(seq2seq.split('-')[1:])
				_ = [params.update(x) for x in [C.SEQ2SEQ_MDL_PARAMS.setdefault('pytorch', {}).setdefault('elmo', {}), C.SEQ2SEQ_TASK_PARAMS.setdefault(seq2seq, {}).setdefault(self.task_type, {})]]
				self.seq2seq = H.gen_pytorch_wrapper('seq2seq', pth_mdl, **params[pth_mdl])
				encoder_odim = C.SEQ2SEQ_DIM_INFER[seq2seq]([self.n_embd, self.dim_mulriple, params[pth_mdl]])
			else:
				_ = [params.update(x) for x in [C.SEQ2SEQ_MDL_PARAMS.setdefault(seq2seq, {}).setdefault('elmo', {}), C.SEQ2SEQ_TASK_PARAMS.setdefault(seq2seq, {}).setdefault(self.task_type, {})]]
				self.seq2seq = C.SEQ2SEQ_MAP[seq2seq](**params)
				if hasattr(self.seq2seq, 'get_output_dim'):
					encoder_odim = self.seq2seq.get_output_dim()
				else:
					encoder_odim = C.SEQ2SEQ_DIM_INFER[seq2seq]([self.n_embd, self.dim_mulriple, params])
		else:
			self.seq2seq = None
			encoder_odim = self.n_embd
		self.maxlen = self.task_params.setdefault('maxlen', 128)
		# Per-token outputs: normalize over the sequence length dimension.
		self.norm = C.NORM_TYPE_MAP[norm_type](self.maxlen)
		self.hdim = encoder_odim
		self.linear = self.__init_linear__()
		if (initln): self.linear.apply(H._weights_init(mean=initln_mean, std=initln_std))
	def __init_linear__(self):
		"""Per-token classifier stack (always ends in the output activation)."""
		use_gpu = next(self.parameters()).is_cuda
		linear = nn.Sequential(nn.Linear(self.hdim, self.fchdim), self._int_actvtn(), nn.Linear(self.fchdim, self.fchdim), self._int_actvtn(), nn.Linear(self.fchdim, self.num_lbs), self._out_actvtn()) if self.fchdim else nn.Sequential(nn.Linear(self.hdim, self.num_lbs), self._out_actvtn())
		return linear.to('cuda') if use_gpu else linear
	def forward(self, input_ids, *extra_inputs, labels=None, past=None, weights=None, embedding_mode=False):
		"""Embed, run the seq2seq encoder (when configured), then classify per token."""
		# Fix: the super() result was unpacked into three names directly (which
		# fails when labels is None and only two values are returned) and the
		# subsequent branches read an undefined `outputs`; bind `outputs` first
		# and unpack by case, matching the sibling classes.
		outputs = super(EmbeddingSeq2Seq, self).forward(input_ids, *extra_inputs, labels=labels, past=past, weights=weights, embedding_mode=embedding_mode, ret_mask=True)
		if labels is None:
			clf_h, mask = outputs
		else:
			clf_h, lm_loss, mask = outputs
		if self.seq2seq:
			clf_h = self.seq2seq(clf_h, mask=mask)
		return (self._forward(clf_h, mask, labels=labels, weights=weights),) + (({},) if labels is None else (lm_loss, {}))
class SentVecEmbeddingSeq2Vec(EmbeddingSeq2Vec):
	"""EmbeddingSeq2Vec variant that concatenates a sent2vec sentence embedding
	to the encoded sequence vector before classification."""
	def __init__(self, config, lm_model, lm_config, sentvec_path=None, seq2vec=None, s2v_params={'hdim':768}, embed_type='w2v_sentvec', w2v_path=None, iactvtn='relu', oactvtn='sigmoid', fchdim=0, extfc=False, sample_weights=False, num_lbs=1, mlt_trnsfmr=False, lm_loss=False, do_drop=True, pdrop=0.2, do_norm=True, norm_type='batch', do_lastdrop=True, do_crf=False, do_thrshld=False, constraints=[], initln=False, initln_mean=0., initln_std=0.02, task_params={}, **kwargs):
		import sent2vec
		# Fix: an already-loaded model was taken from `w2v_path` instead of
		# `sentvec_path`, so passing a Sent2vecModel instance never worked.
		if type(sentvec_path) is sent2vec.Sent2vecModel:
			self.sentvec_model = sentvec_path
		elif sentvec_path and os.path.isfile(sentvec_path):
			self.sentvec_model = sent2vec.Sent2vecModel()
			self.sentvec_model.load_model(sentvec_path)
		else:
			self.sentvec_model = None
		assert(self.sentvec_model)
		# Seed n_embd with the sentence-vector width; the parent adds the
		# word/ELMo embedding dims on top of any pre-existing n_embd.
		self.n_embd = self.sentvec_model.get_emb_size()
		# NOTE(review): self.hdim set by EmbeddingSeq2Vec does not appear to
		# account for the sentence vector concatenated in forward() -- confirm
		# against C.SEQ2VEC_DIM_INFER before relying on the classifier shape.
		super(SentVecEmbeddingSeq2Vec, self).__init__(config, lm_model, lm_config, seq2vec=seq2vec, s2v_params=s2v_params, embed_type=embed_type.replace('_sentvec', ''), w2v_path=w2v_path, iactvtn=iactvtn, oactvtn=oactvtn, fchdim=fchdim, extfc=extfc, sample_weights=sample_weights, num_lbs=num_lbs, mlt_trnsfmr=mlt_trnsfmr, lm_loss=lm_loss, do_drop=do_drop, pdrop=pdrop, do_norm=do_norm, norm_type=norm_type, do_lastdrop=do_lastdrop, do_crf=do_crf, do_thrshld=do_thrshld, constraints=constraints, initln=initln, initln_mean=initln_mean, initln_std=initln_std, task_params=task_params, **kwargs)
	def forward(self, input_ids, *extra_inputs, labels=None, past=None, weights=None, embedding_mode=False):
		"""Embed and encode as EmbeddingSeq2Vec, then append the sent2vec vector(s)."""
		# Fix: the unpacked name was misspelled (`entvec_tnsr`) while the body
		# read `sentvec_tnsr`, which raised NameError at the concatenation.
		sample_weights, sentvec_tnsr, extra_inputs = (extra_inputs[0], extra_inputs[1], extra_inputs[2:]) if self.sample_weights else (None, extra_inputs[0], extra_inputs[1:])
		outputs = EmbeddingClfHead.forward(self, input_ids, *extra_inputs, labels=labels, past=past, weights=weights, embedding_mode=embedding_mode, ret_mask=True)
		if labels is None:
			clf_h, mask = outputs
		else:
			clf_h, lm_loss, mask = outputs
		pool_idx = mask.sum(1)
		if self.seq2vec:
			clf_h = [self.seq2vec(clf_h[x], mask=mask[x]) for x in [0,1]] if self.task_type in ['entlmnt', 'sentsim'] else self.seq2vec(clf_h, mask=mask)
		else:
			# No encoder: take the embedding at the position given by the mask sum.
			clf_h = [clf_h[x].gather(1, pool_idx[x].unsqueeze(-1).unsqueeze(-1).expand(-1, 1, clf_h[x].size(2))).squeeze(1) for x in [0,1]] if self.task_type in ['entlmnt', 'sentsim'] else clf_h.gather(1, pool_idx.unsqueeze(-1).unsqueeze(-1).expand(-1, 1, clf_h.size(2))).squeeze(1)
		# Append the sentence embedding(s) to the sequence representation.
		clf_h = [torch.cat([clf_h[x], sentvec_tnsr[x]], dim=-1) for x in [0,1]] if self.task_type in ['entlmnt', 'sentsim'] else torch.cat([clf_h, sentvec_tnsr], dim=-1)
		return (self._forward(clf_h, mask, labels=labels, weights=weights),) + (({},) if labels is None else (lm_loss, {}))
class EmbeddingHead(nn.Module):
	"""Auxiliary head that reuses an EmbeddingClfHead's classifier weights on
	externally supplied hidden states (see EmbeddingClfHead.__lm_head__)."""
	def __init__(self, base_model):
		super(EmbeddingHead, self).__init__()
		# Wrapped in a plain dict so nn.Module.__setattr__ does not register the
		# base model as a submodule (its parameters would otherwise be owned twice).
		self.base_model = dict(zip(['model'], [base_model]))
	def forward(self, hidden_states, mask, labels=None): # For language model task
		# NOTE(review): several names used below are not defined in this scope
		# (`input_ids`, `sample_weights`, `weights`, `lm_loss`, `self.last_dropout`)
		# -- the affected branches raise NameError/AttributeError if reached;
		# they likely need to mirror EmbeddingClfHead._forward. Confirm callers.
		use_gpu = next(self.base_model['model'].parameters()).is_cuda
		clf_h = hidden_states
		pool_idx = mask.sum(1)
		if (self.base_model['model'].task_params.setdefault('sentsim_func', None) == 'concat'):
			if self.base_model['model'].task_params.setdefault('concat_strategy', 'normal') == 'diff':
				# InferSent-style features: [u; v; |u-v|; u*v]
				clf_h = torch.cat(clf_h+[torch.abs(clf_h[0]-clf_h[1]), clf_h[0]*clf_h[1]], dim=-1)
			elif self.base_model['model'].task_params.setdefault('concat_strategy', 'normal') == 'flipflop':
				# Symmetrize w.r.t. sentence order.
				clf_h = (torch.cat(clf_h, dim=-1) + torch.cat(clf_h[::-1], dim=-1))
			else:
				clf_h = torch.cat(clf_h, dim=-1)
			clf_logits = self.base_model['model'].linear(clf_h) if self.base_model['model'].linear else clf_h
		else:
			# Similarity scoring through the shared projection layer.
			clf_logits = clf_h = F.pairwise_distance(self.base_model['model'].linear(clf_h[0]), self.base_model['model'].linear(clf_h[1]), 2, eps=1e-12) if self.base_model['model'].task_params['sentsim_func'] == 'dist' else F.cosine_similarity(self.base_model['model'].linear(clf_h[0]), self.base_model['model'].linear(clf_h[1]), dim=1, eps=1e-12)
		if self.base_model['model'].thrshlder: self.base_model['model'].thrshld = self.base_model['model'].thrshlder(clf_h)
		# NOTE(review): `self.last_dropout` does not exist on this wrapper --
		# presumably meant self.base_model['model'].last_dropout.
		if self.base_model['model'].do_lastdrop: clf_logits = self.last_dropout(clf_logits)
		if (labels is None):
			if self.base_model['model'].crf:
				# NOTE(review): `input_ids` is undefined here.
				tag_seq, score = zip(*self.base_model['model'].crf.viterbi_tags(clf_logits.view(input_ids.size()[0], -1, self.base_model['model'].num_lbs), torch.ones_like(input_ids)))
				tag_seq = torch.tensor(tag_seq).to('cuda') if use_gpu else torch.tensor(tag_seq)
				logging.debug((tag_seq.min(), tag_seq.max(), score))
				# One-hot encode the Viterbi tag sequence as pseudo-logits.
				clf_logits = torch.zeros((*tag_seq.size(), self.base_model['model'].num_lbs)).to('cuda') if use_gpu else torch.zeros((*tag_seq.size(), self.base_model['model'].num_lbs))
				clf_logits = clf_logits.scatter(-1, tag_seq.unsqueeze(-1), 1)
				return clf_logits
			for cnstrnt in self.base_model['model'].constraints: clf_logits = cnstrnt(clf_logits)
			# Distance-valued similarity is flipped when labels encode similarity (and vice versa).
			if (self.base_model['model'].mlt_trnsfmr and self.base_model['model'].task_type in ['entlmnt', 'sentsim'] and self.base_model['model'].task_params.setdefault('sentsim_func', None) is not None and self.base_model['model'].task_params['sentsim_func'] != 'concat' and self.base_model['model'].task_params['sentsim_func'] != self.base_model['model'].task_params.setdefault('ymode', 'sim')): return 1 - clf_logits.view(-1, self.base_model['model'].num_lbs)
			return clf_logits.view(-1, self.base_model['model'].num_lbs)
		if self.base_model['model'].crf:
			# NOTE(review): `input_ids` and `sample_weights` are undefined here.
			clf_loss = -self.base_model['model'].crf(clf_logits.view(input_ids.size()[0], -1, self.base_model['model'].num_lbs), pool_idx)
			if sample_weights is not None: clf_loss *= sample_weights
			return clf_loss, None
		else:
			for cnstrnt in self.base_model['model'].constraints: clf_logits = cnstrnt(clf_logits)
			# NOTE(review): `weights` (class weights) is undefined in this scope.
			if self.base_model['model'].task_type == 'mltc-clf' or (self.base_model['model'].task_type == 'entlmnt' and self.base_model['model'].num_lbs > 1) or self.base_model['model'].task_type == 'nmt':
				loss_func = nn.CrossEntropyLoss(weight=weights, reduction='none')
				clf_loss = loss_func(clf_logits.view(-1, self.base_model['model'].num_lbs), labels.view(-1))
			elif self.base_model['model'].task_type == 'mltl-clf' or (self.base_model['model'].task_type == 'entlmnt' and self.base_model['model'].num_lbs == 1):
				loss_func = nn.BCEWithLogitsLoss(pos_weight=10*weights if weights is not None else None, reduction='none')
				clf_loss = loss_func(clf_logits.view(-1, self.base_model['model'].num_lbs), labels.view(-1, self.base_model['model'].num_lbs).float())
			elif self.base_model['model'].task_type == 'sentsim':
				from util import config as C
				loss_cls = C.RGRSN_LOSS_MAP[self.base_model['model'].task_params.setdefault('loss', 'contrastive' if self.base_model['model'].task_params.setdefault('sentsim_func', None) and self.base_model['model'].task_params['sentsim_func'] != 'concat' else 'mse')]
				loss_func = loss_cls(reduction='none', x_mode=C.SIM_FUNC_MAP.setdefault(self.base_model['model'].task_params['sentsim_func'], 'dist'), y_mode=self.base_model['model'].task_params.setdefault('ymode', 'sim')) if self.base_model['model'].task_params.setdefault('sentsim_func', None) and self.base_model['model'].task_params['sentsim_func'] != 'concat' else (loss_cls(reduction='none', x_mode='sim', y_mode=self.base_model['model'].task_params.setdefault('ymode', 'sim')) if self.base_model['model'].task_params['sentsim_func'] == 'concat' else nn.MSELoss(reduction='none'))
				clf_loss = loss_func(clf_logits.view(-1), labels.view(-1))
			# NOTE(review): `sample_weights` and `lm_loss` are undefined here as well.
			if sample_weights is not None: clf_loss *= sample_weights
			return clf_loss, lm_loss
import os, copy, logging
import torch
from torch import nn
from allennlp.modules.conditional_random_field import ConditionalRandomField
from util import func as H
from . import transformer as T
class EmbeddingClfHead(T.BaseClfHead):
def __init__(self, config, lm_model, lm_config, embed_type='w2v', w2v_path=None, iactvtn='relu', oactvtn='sigmoid', fchdim=0, extfc=False, sample_weights=False, num_lbs=1, mlt_trnsfmr=False, lm_loss=False, do_drop=True, pdrop=0.2, do_norm=True, norm_type='batch', do_lastdrop=True, do_crf=False, do_thrshld=False, constraints=[], initln=False, initln_mean=0., initln_std=0.02, task_params={}, **kwargs):
from util import config as C
super(EmbeddingClfHead, self).__init__(config, lm_model, lm_config, sample_weights=sample_weights, num_lbs=num_lbs, mlt_trnsfmr=config.task_type in ['entlmnt', 'sentsim'] and task_params.setdefault('sentsim_func', None) is not None, task_params=task_params, **kwargs)
self.dim_mulriple = 2 if self.task_type in ['entlmnt', 'sentsim'] and (self.task_params.setdefault('sentsim_func', None) is None or self.task_params['sentsim_func'] == 'concat') else 1
self.embed_type = embed_type
if embed_type.startswith('w2v'):
from gensim.models import KeyedVectors
from gensim.models.keyedvectors import Word2VecKeyedVectors
self.w2v_model = w2v_path if type(w2v_path) is Word2VecKeyedVectors else (KeyedVectors.load(w2v_path, mmap='r') if w2v_path and os.path.isfile(w2v_path) else None)
assert(self.w2v_model)
self.n_embd = self.w2v_model.syn0.shape[1] + (self.n_embd if hasattr(self, 'n_embd') else 0)
config.register_callback('mdl_trsfm', EmbeddingClfHead.callback_update_w2v_model(self))
elif embed_type.startswith('elmo'):
self.vocab_size = 793471
self.n_embd = lm_config['elmoedim'] * 2 + (self.n_embd if hasattr(self, 'n_embd') else 0) # two ELMo layer * ELMo embedding dimensions
config.register_callback('mdl_trsfm', EmbeddingClfHead.callback_update_elmo_config(self))
elif embed_type.startswith('elmo_w2v'):
from gensim.models import KeyedVectors
from gensim.models.keyedvectors import Word2VecKeyedVectors
self.w2v_model = w2v_path if type(w2v_path) is Word2VecKeyedVectors else (KeyedVectors.load(w2v_path, mmap='r') if w2v_path and os.path.isfile(w2v_path) else None)
assert(self.w2v_model)
self.vocab_size = 793471
self.n_embd = self.w2v_model.syn0.shape[1] + lm_config['elmoedim'] * 2 + (self.n_embd if hasattr(self, 'n_embd') else 0)
config.register_callback('mdl_trsfm', EmbeddingClfHead.callback_update_w2v_model(self))
config.register_callback('mdl_trsfm', EmbeddingClfHead.callback_update_elmo_config(self))
self.norm = C.NORM_TYPE_MAP[norm_type](self.maxlen) if self.task_type == 'nmt' else C.NORM_TYPE_MAP[norm_type](self.n_embd)
self._int_actvtn = C.ACTVTN_MAP[iactvtn]
self._out_actvtn = C.ACTVTN_MAP[oactvtn]
self.fchdim = fchdim
self.extfc = extfc
self.hdim = self.dim_mulriple * self.n_embd if self.mlt_trnsfmr and self.task_type in ['entlmnt', 'sentsim'] else self.n_embd
self.linear = self.__init_linear__()
if (initln): self.linear.apply(H._weights_init(mean=initln_mean, std=initln_std))
if self.do_extlin:
self.extlinear = nn.Linear(self.n_embd, self.n_embd)
if (initln): self.extlinear.apply(H._weights_init(mean=initln_mean, std=initln_std))
self.crf = ConditionalRandomField(num_lbs) if do_crf else None
def __init_linear__(self):
use_gpu = next(self.parameters()).is_cuda
linear = (nn.Sequential(nn.Linear(self.hdim, self.fchdim), self._int_actvtn(), nn.Linear(self.fchdim, self.fchdim), self._int_actvtn(), *([] if self.task_params.setdefault('sentsim_func', None) and self.task_params['sentsim_func'] != 'concat' else [nn.Linear(self.fchdim, self.num_lbs), self._out_actvtn()])) if self.task_type in ['entlmnt', 'sentsim'] else nn.Sequential(nn.Linear(self.hdim, self.fchdim), self._int_actvtn(), nn.Linear(self.fchdim, self.fchdim), self._int_actvtn(), nn.Linear(self.fchdim, self.num_lbs))) if self.fchdim else (nn.Sequential(*([nn.Linear(self.hdim, self.hdim), self._int_actvtn()] if self.task_params.setdefault('sentsim_func', None) and self.task_params['sentsim_func'] != 'concat' else [nn.Linear(self.hdim, self.num_lbs), self._out_actvtn()])) if self.task_type in ['entlmnt', 'sentsim'] else nn.Linear(self.hdim, self.num_lbs))
return linear.to('cuda') if use_gpu else linear
def __lm_head__(self):
return EmbeddingHead(self)
def _w2v(self, input_ids, use_gpu=False):
wembd_tnsr = torch.tensor([self.w2v_model.syn0[s] for s in input_ids])
if use_gpu: wembd_tnsr = wembd_tnsr.to('cuda')
return wembd_tnsr
def _sentvec(self, input_ids, use_gpu=False):
pass
def forward(self, input_ids, *extra_inputs, labels=None, past=None, weights=None, embedding_mode=False, ret_mask=False):
use_gpu = next(self.parameters()).is_cuda
if self.sample_weights and len(extra_inputs) > 0:
sample_weights = extra_inputs[-1]
extra_inputs = extra_inputs[:-1]
else:
sample_weights = None
unsolved_input_keys, unsolved_inputs = self.embed_type.split('_'), [input_ids]+list(extra_inputs)
extra_inputs_dict = dict(zip([x for x in self.input_keys if x != 'input_ids'], extra_inputs))
pool_idx = extra_inputs_dict['mask'].sum(1)
mask = extra_inputs_dict['mask'] # mask of the original textual input
clf_hs = []
if self.task_type in ['entlmnt', 'sentsim']:
if (self.embed_type.startswith('elmo')):
embeddings = (self.lm_model(input_ids[0]), self.lm_model(input_ids[1]))
clf_hs.append((torch.cat(embeddings[0]['elmo_representations'], dim=-1), torch.cat(embeddings[1]['elmo_representations'], dim=-1)))
del unsolved_input_keys[0]
del unsolved_inputs[0]
for input_key, input_tnsr in zip(unsolved_input_keys, unsolved_inputs):
clf_hs.append([getattr(self, '_%s'%input_key)(input_tnsr[x], use_gpu=use_gpu) for x in [0,1]])
clf_h = [torch.cat(embds, dim=-1) for embds in zip(*clf_hs)]
else:
if (self.embed_type.startswith('elmo')):
embeddings = self.lm_model(input_ids)
clf_hs.append(torch.cat(embeddings['elmo_representations'], dim=-1))
del unsolved_input_keys[0]
del unsolved_inputs[0]
for input_key, input_tnsr in zip(unsolved_input_keys, unsolved_inputs):
clf_hs.append(getattr(self, '_%s'%input_key)(input_tnsr, use_gpu=use_gpu))
clf_h = torch.cat(clf_hs, dim=-1)
if labels is None:
return (clf_h, mask) if ret_mask else (clf_h,)
# Calculate language model loss
if (self.lm_loss):
lm_logits, lm_target = self.lm_logit(input_ids, clf_h, extra_inputs_dict)
lm_loss_func = nn.CrossEntropyLoss(ignore_index=-1, reduction='none')
lm_loss = lm_loss_func(lm_logits.contiguous().view(-1, lm_logits.size(-1)), lm_target.contiguous().view(-1)).view(input_ids.size(0), -1)
if sample_weights is not None: lm_loss *= sample_weights
else:
lm_loss = None
return (clf_h, lm_loss, mask) if ret_mask else (clf_h, lm_loss)
def _forward(self, clf_h, mask, labels=None, weights=None): # For fine-tune task
if self.task_type in ['entlmnt', 'sentsim']:
if self.do_norm: clf_h = [self.norm(clf_h[x]) for x in [0,1]]
clf_h = [self.dropout(clf_h[x]) for x in [0,1]]
if (self.task_type == 'entlmnt' or self.task_params.setdefault('sentsim_func', None) is None or self.task_params['sentsim_func'] == 'concat'):
if task_params.setdefault('concat_strategy', 'normal') == 'diff':
clf_h = torch.cat(clf_h+[torch.abs(clf_h[0]-clf_h[1]), clf_h[0]*clf_h[1]], dim=-1)
elif task_params.setdefault('concat_strategy', 'normal') == 'flipflop':
clf_h = (torch.cat(clf_h, dim=-1) + torch.cat(clf_h[::-1], dim=-1))
else:
clf_h = torch.cat(clf_h, dim=-1)
clf_logits = self.linear(clf_h) if self.linear else clf_h
else:
clf_logits = clf_h = F.pairwise_distance(self.linear(clf_h[0]), self.linear(clf_h[1]), 2, eps=1e-12) if self.task_params['sentsim_func'] == 'dist' else F.cosine_similarity(self.linear(clf_h[0]), self.linear(clf_h[1]), dim=1, eps=1e-12)
else:
if self.do_norm: clf_h = self.norm(clf_h)
clf_h = self.dropout(clf_h)
clf_logits = self.linear(clf_h)
if self.do_lastdrop: clf_logits = self.last_dropout(clf_logits)
if (labels is None):
if self.crf:
tag_seq, score = zip(*self.crf.viterbi_tags(clf_logits.view(input_ids.size()[0], -1, self.num_lbs), torch.ones(*(input_ids.size()[:2])).int()))
tag_seq = torch.tensor(tag_seq).to('cuda') if use_gpu else torch.tensor(tag_seq)
clf_logits = torch.zeros((*tag_seq.size(), self.num_lbs)).to('cuda') if use_gpu else torch.zeros((*tag_seq.size(), self.num_lbs))
clf_logits = clf_logits.scatter(-1, tag_seq.unsqueeze(-1), 1)
return clf_logits
if (self.task_type == 'sentsim' and self.task_params.setdefault('sentsim_func', None) and self.task_params['sentsim_func'] != self.task_params['ymode']): return 1 - clf_logits.view(-1, self.num_lbs)
return clf_logits.view(-1, self.num_lbs)
if self.crf:
clf_loss = -self.crf(clf_logits.view(input_ids.size()[0], -1, self.num_lbs), mask.long())
elif self.task_type == 'mltc-clf' or self.task_type == 'entlmnt' or self.task_type == 'nmt':
loss_func = nn.CrossEntropyLoss(weight=weights, reduction='none')
clf_loss = loss_func(clf_logits.view(-1, self.num_lbs), labels.view(-1))
elif self.task_type == 'mltl-clf':
loss_func = nn.BCEWithLogitsLoss(pos_weight=10*weights if weights is not None else None, reduction='none')
clf_loss = loss_func(clf_logits.view(-1, self.num_lbs), labels.view(-1, self.num_lbs).float())
elif self.task_type == 'sentsim':
from util import config as C
loss_cls = C.RGRSN_LOSS_MAP[self.task_params.setdefault('loss', 'contrastive')]
loss_func = loss_cls(reduction='none', x_mode=C.SIM_FUNC_MAP.setdefault(self.task_params['sentsim_func'], 'dist'), y_mode=self.task_params.setdefault('ymode', 'sim')) if self.task_params.setdefault('sentsim_func', None) and self.task_params['sentsim_func'] != 'concat' else nn.MSELoss(reduction='none')
clf_loss = loss_func(clf_logits.view(-1), labels.view(-1))
return clf_loss
def _filter_vocab(self):
pass
@classmethod
def callback_update_w2v_model(cls, model):
def _callback(config):
from util import config as C
setattr(config, 'w2v_model', model.w2v_model)
config.delayed_update(C.Configurable.PREDEFINED_MODEL_CONFIG_DELAYED_UPDATES[config.model])
return _callback
@classmethod
def callback_update_elmo_config(cls, model):
def _callback(config):
from util import config as C
setattr(config, 'lm_config', model.lm_config)
config.delayed_update(C.Configurable.PREDEFINED_MODEL_CONFIG_DELAYED_UPDATES[config.model])
return _callback
class EmbeddingPool(EmbeddingClfHead):
def __init__(self, config, lm_model, lm_config, pooler=None, pool_params={'kernel_size':8, 'stride':4}, embed_type='w2v', w2v_path=None, iactvtn='relu', oactvtn='sigmoid', fchdim=0, extfc=False, sample_weights=False, num_lbs=1, mlt_trnsfmr=False, lm_loss=False, do_drop=True, pdrop=0.2, do_norm=True, norm_type='batch', do_lastdrop=True, do_crf=False, do_thrshld=False, constraints=[], initln=False, initln_mean=0., initln_std=0.02, task_params={}, **kwargs):
assert(config.task_type != 'nmt')
from util import config as C
super(EmbeddingPool, self).__init__(config, lm_model, lm_config, embed_type=embed_type, w2v_path=w2v_path, iactvtn=iactvtn, oactvtn=oactvtn, fchdim=fchdim, extfc=extfc, sample_weights=sample_weights, num_lbs=num_lbs, mlt_trnsfmr=mlt_trnsfmr, lm_loss=lm_loss, do_drop=do_drop, pdrop=pdrop, do_norm=do_norm, norm_type=norm_type, do_lastdrop=do_lastdrop, do_crf=do_crf, do_thrshld=do_thrshld, constraints=constraints, initln=initln, initln_mean=initln_mean, initln_std=initln_std, task_params=task_params, **kwargs)
self.maxlen = self.task_params.setdefault('maxlen', 128)
if pooler:
self.pooler = nn.MaxPool2d(**pool_params) if pooler == 'max' else nn.AvgPool2d(**pool_params)
encoder_odim = int((2 * self.maxlen + 2 * pool_params.setdefault('padding', 0) - pool_params.setdefault('dilation', 1) * (pool_params['kernel_size'] - 1) - 1) / pool_params['stride'] + 1) * int((int(0.5 * self.n_embd) + 2 * pool_params.setdefault('padding', 0) - pool_params.setdefault('dilation', 1) * (pool_params['kernel_size'] - 1) - 1) / pool_params['stride'] + 1) if pooler == 'max' else int((2 * self.maxlen + 2 * pool_params.setdefault('padding', 0) - pool_params['kernel_size']) / pool_params['stride'] + 1) * int((int(0.5 * self.n_embd) + 2 * pool_params.setdefault('padding', 0) - pool_params['kernel_size']) / pool_params['stride'] + 1)
self.norm = C.NORM_TYPE_MAP[norm_type](encoder_odim)
self.hdim = self.dim_mulriple * encoder_odim if self.task_type in ['entlmnt', 'sentsim'] else encoder_odim
else:
self.pooler = None
self.norm = C.NORM_TYPE_MAP[norm_type](self.n_embd)
self.hdim = self.n_embd
self.linear = self.__init_linear__()
if (initln): self.linear.apply(H._weights_init(mean=initln_mean, std=initln_std))
def forward(self, input_ids, *extra_inputs, labels=None, past=None, weights=None, embedding_mode=False):
outputs = super(EmbeddingPool, self).forward(input_ids, *extra_inputs, labels=labels, past=past, weights=weights, embedding_mode=embedding_mode, ret_mask=True)
if labels is None:
clf_h, mask = outputs
else:
clf_h, lm_loss, mask = outputs
pool_idx = mask.sum(1)
if self.pooler:
clf_h = [clf_h[x].view(clf_h[x].size(0), 2*clf_h[x].size(1), -1) for x in [0,1]] if self.task_type in ['entlmnt', 'sentsim'] else clf_h.view(clf_h.size(0), 2*clf_h.size(1), -1)
clf_h = [self.pooler(clf_h[x]).view(clf_h[x].size(0), -1) for x in [0,1]] if self.task_type in ['entlmnt', 'sentsim'] else self.pooler(clf_h).view(clf_h.size(0), -1)
else:
clf_h = [clf_h[x].gather(1, pool_idx[x].unsqueeze(-1).unsqueeze(-1).expand(-1, 1, clf_h[x].size(2))).squeeze(1) for x in [0,1]] if self.task_type in ['entlmnt', 'sentsim'] else clf_h.gather(1, pool_idx.unsqueeze(-1).unsqueeze(-1).expand(-1, 1, clf_h.size(2))).squeeze(1)
return (self._forward(clf_h, mask, labels=labels, weights=weights),) + (({},) if labels is None else (lm_loss, {}))
class EmbeddingSeq2Vec(EmbeddingClfHead):
def __init__(self, config, lm_model, lm_config, seq2vec=None, s2v_params={'hdim':768}, embed_type='w2v', w2v_path=None, iactvtn='relu', oactvtn='sigmoid', fchdim=0, extfc=False, sample_weights=False, num_lbs=1, mlt_trnsfmr=False, lm_loss=False, do_drop=True, pdrop=0.2, do_norm=True, norm_type='batch', do_lastdrop=True, do_crf=False, do_thrshld=False, constraints=[], initln=False, initln_mean=0., initln_std=0.02, task_params={}, **kwargs):
assert(config.task_type != 'nmt')
from util import config as C
super(EmbeddingSeq2Vec, self).__init__(config, lm_model, lm_config, embed_type=embed_type, w2v_path=w2v_path, iactvtn=iactvtn, oactvtn=oactvtn, fchdim=fchdim, extfc=extfc, sample_weights=sample_weights, num_lbs=num_lbs, mlt_trnsfmr=mlt_trnsfmr, lm_loss=lm_loss, do_drop=do_drop, pdrop=pdrop, do_norm=do_norm, norm_type=norm_type, do_lastdrop=do_lastdrop, do_crf=do_crf, do_thrshld=do_thrshld, constraints=constraints, initln=initln, initln_mean=initln_mean, initln_std=initln_std, task_params=task_params, **kwargs)
if seq2vec:
params = {}
if seq2vec.startswith('pytorch-'):
pth_mdl = '-'.join(seq2vec.split('-')[1:])
_ = [params.update(x) for x in [C.SEQ2VEC_MDL_PARAMS.setdefault('pytorch', {}).setdefault(embed_type, {}), C.SEQ2VEC_TASK_PARAMS.setdefault('pytorch', {}).setdefault(self.task_type, {})]]
_ = [params.update({p:s2v_params[k]}) for k, p in C.SEQ2VEC_LM_PARAMS_MAP.setdefault('pytorch', []) if k in s2v_params]
if (embed_type == 'w2v'): params[pth_mdl]['input_size'] = self.w2v_model.syn0.shape[1]
if (embed_type == 'elmo_w2v'): params[pth_mdl]['input_size'] = params[pth_mdl]['input_size'] + self.w2v_model.syn0.shape[1]
self.seq2vec = H.gen_pytorch_wrapper('seq2vec', pth_mdl, **params[pth_mdl])
encoder_odim = C.SEQ2VEC_DIM_INFER[seq2vec]([self.n_embd, self.dim_mulriple, params[pth_mdl]])
else:
_ = [params.update(x) for x in [C.SEQ2VEC_MDL_PARAMS.setdefault(seq2vec, {}).setdefault(embed_type, {}), C.SEQ2VEC_TASK_PARAMS.setdefault(seq2vec, {}).setdefault(self.task_type, {})]]
_ = [params.update({p:s2v_params[k]}) for k, p in C.SEQ2VEC_LM_PARAMS_MAP.setdefault(seq2vec, []) if k in s2v_params]
if (embed_type == 'w2v'): params['embedding_dim'] = self.w2v_model.syn0.shape[1]
if (embed_type == 'elmo_w2v'): params['embedding_dim'] = params['embedding_dim'] + self.w2v_model.syn0.shape[1]
self.seq2vec = C.SEQ2VEC_MAP[seq2vec](**params)
if hasattr(self.seq2vec, 'get_output_dim') and seq2vec != 'boe':
encoder_odim = self.seq2vec.get_output_dim()
else:
encoder_odim = C.SEQ2VEC_DIM_INFER[seq2vec]([self.n_embd, self.dim_mulriple, params])
else:
self.seq2vec = None
encoder_odim = self.n_embd
self.maxlen = self.task_params.setdefault('maxlen', 128)
self.norm = C.NORM_TYPE_MAP[norm_type](encoder_odim)
self.hdim = self.dim_mulriple * encoder_odim if self.task_type in ['entlmnt', 'sentsim'] else encoder_odim
self.linear = self.__init_linear__()
if (self.linear and initln): self.linear.apply(H._weights_init(mean=initln_mean, std=initln_std))
def forward(self, input_ids, *extra_inputs, labels=None, past=None, weights=None, embedding_mode=False):
outputs = super(EmbeddingSeq2Vec, self).forward(input_ids, *extra_inputs, labels=labels, past=past, weights=weights, embedding_mode=embedding_mode, ret_mask=True)
if labels is None:
clf_h, mask = outputs
else:
clf_h, lm_loss, mask = outputs
pool_idx = mask.sum(1)
if self.seq2vec:
clf_h = [self.seq2vec(clf_h[x], mask=mask[x]) for x in [0,1]] if self.task_type in ['entlmnt', 'sentsim'] else self.seq2vec(clf_h, mask=mask)
else:
clf_h = [clf_h[x].gather(1, pool_idx[x].unsqueeze(-1).unsqueeze(-1).expand(-1, 1, clf_h[x].size(2))).squeeze(1) for x in [0,1]] if self.task_type in ['entlmnt', 'sentsim'] else clf_h.gather(1, pool_idx.unsqueeze(-1).unsqueeze(-1).expand(-1, 1, clf_h.size(2))).squeeze(1)
return (self._forward(clf_h, mask, labels=labels, weights=weights),) + (({},) if labels is None else (lm_loss, {}))
class EmbeddingSeq2Seq(EmbeddingClfHead):
def __init__(self, config, lm_model, lm_config, seq2seq=None, s2s_params={'hdim':768}, embed_type='w2v', w2v_path=None, iactvtn='relu', oactvtn='sigmoid', fchdim=0, extfc=False, sample_weights=False, num_lbs=1, mlt_trnsfmr=False, lm_loss=False, do_drop=True, pdrop=0.2, do_norm=True, norm_type='batch', do_lastdrop=True, do_crf=False, do_thrshld=False, constraints=[], initln=False, initln_mean=0., initln_std=0.02, task_params={}, **kwargs):
from util import config as C
super(EmbeddingSeq2Seq, self).__init__(config, lm_model, lm_config, embed_type=embed_type, w2v_path=w2v_path, iactvtn=iactvtn, oactvtn=oactvtn, fchdim=fchdim, extfc=extfc, sample_weights=sample_weights, num_lbs=num_lbs, mlt_trnsfmr=mlt_trnsfmr, lm_loss=lm_loss, do_drop=do_drop, pdrop=pdrop, do_norm=do_norm, norm_type=norm_type, do_lastdrop=do_lastdrop, do_crf=do_crf, do_thrshld=do_thrshld, constraints=constraints, initln=initln, initln_mean=initln_mean, initln_std=initln_std, task_params=task_params, **kwargs)
if seq2seq:
params = {}
if seq2seq.startswith('pytorch-'):
pth_mdl = '-'.join(seq2seq.split('-')[1:])
_ = [params.update(x) for x in [C.SEQ2SEQ_MDL_PARAMS.setdefault('pytorch', {}).setdefault('elmo', {}), C.SEQ2SEQ_TASK_PARAMS.setdefault(seq2seq, {}).setdefault(self.task_type, {})]]
self.seq2seq = H.gen_pytorch_wrapper('seq2seq', pth_mdl, **params[pth_mdl])
encoder_odim = C.SEQ2SEQ_DIM_INFER[seq2seq]([self.n_embd, self.dim_mulriple, params[pth_mdl]])
else:
_ = [params.update(x) for x in [C.SEQ2SEQ_MDL_PARAMS.setdefault(seq2seq, {}).setdefault('elmo', {}), C.SEQ2SEQ_TASK_PARAMS.setdefault(seq2seq, {}).setdefault(self.task_type, {})]]
self.seq2seq = C.SEQ2SEQ_MAP[seq2seq](**params)
if hasattr(self.seq2seq, 'get_output_dim'):
encoder_odim = self.seq2seq.get_output_dim()
else:
encoder_odim = C.SEQ2SEQ_DIM_INFER[seq2seq]([self.n_embd, self.dim_mulriple, params])
else:
self.seq2seq = None
encoder_odim = self.n_embd
self.maxlen = self.task_params.setdefault('maxlen', 128)
self.norm = C.NORM_TYPE_MAP[norm_type](self.maxlen)
# self.norm = nn.LayerNorm([128,2048])
self.hdim = encoder_odim
self.linear = self.__init_linear__()
if (initln): self.linear.apply(H._weights_init(mean=initln_mean, std=initln_std))
def __init_linear__(self):
use_gpu = next(self.parameters()).is_cuda
linear = nn.Sequential(nn.Linear(self.hdim, self.fchdim), self._int_actvtn(), nn.Linear(self.fchdim, self.fchdim), self._int_actvtn(), nn.Linear(self.fchdim, self.num_lbs), self._out_actvtn()) if self.fchdim else nn.Sequential(nn.Linear(self.hdim, self.num_lbs), self._out_actvtn())
return linear.to('cuda') if use_gpu else linear
def forward(self, input_ids, *extra_inputs, labels=None, past=None, weights=None, embedding_mode=False):
clf_h, lm_loss, mask = super(EmbeddingSeq2Seq, self).forward(input_ids, *extra_inputs, labels=labels, past=past, weights=weights, embedding_mode=embedding_mode, ret_mask=True)
if labels is None:
clf_h, mask = outputs
else:
clf_h, lm_loss, mask = outputs
if self.seq2seq:
clf_h = self.seq2seq(clf_h, mask=mask)
return (self._forward(clf_h, mask, labels=labels, weights=weights),) + (({},) if labels is None else (lm_loss, {}))
class SentVecEmbeddingSeq2Vec(EmbeddingSeq2Vec):
def __init__(self, config, lm_model, lm_config, sentvec_path=None, seq2vec=None, s2v_params={'hdim':768}, embed_type='w2v_sentvec', w2v_path=None, iactvtn='relu', oactvtn='sigmoid', fchdim=0, extfc=False, sample_weights=False, num_lbs=1, mlt_trnsfmr=False, lm_loss=False, do_drop=True, pdrop=0.2, do_norm=True, norm_type='batch', do_lastdrop=True, do_crf=False, do_thrshld=False, constraints=[], initln=False, initln_mean=0., initln_std=0.02, task_params={}, **kwargs):
import sent2vec
if type(sentvec_path) is sent2vec.Sent2vecModel:
self.sentvec_model = w2v_path
elif sentvec_path and os.path.isfile(sentvec_path):
self.sentvec_model = sent2vec.Sent2vecModel()
self.sentvec_model.load_model(sentvec_path)
else:
self.sentvec_model = None
assert(self.sentvec_model)
self.n_embd = self.sentvec_model.get_emb_size()
super(SentVecEmbeddingSeq2Vec, self).__init__(config, lm_model, lm_config, seq2vec=seq2vec, s2v_params=s2v_params, embed_type=embed_type.replace('_sentvec', ''), w2v_path=w2v_path, iactvtn=iactvtn, oactvtn=oactvtn, fchdim=fchdim, extfc=extfc, sample_weights=sample_weights, num_lbs=num_lbs, mlt_trnsfmr=mlt_trnsfmr, lm_loss=lm_loss, do_drop=do_drop, pdrop=pdrop, do_norm=do_norm, norm_type=norm_type, do_lastdrop=do_lastdrop, do_crf=do_crf, do_thrshld=do_thrshld, constraints=constraints, initln=initln, initln_mean=initln_mean, initln_std=initln_std, task_params=task_params, **kwargs)
def forward(self, input_ids, *extra_inputs, labels=None, past=None, weights=None, embedding_mode=False):
sample_weights, entvec_tnsr, extra_inputs = (extra_inputs[0], extra_inputs[1], extra_inputs[2:]) if self.sample_weights else (None, extra_inputs[0], extra_inputs[1:])
outputs = EmbeddingClfHead.forward(self, input_ids, *extra_inputs, labels=labels, past=past, weights=weights, embedding_mode=embedding_mode, ret_mask=True)
if labels is None:
clf_h, mask = outputs
else:
clf_h, lm_loss, mask = outputs
pool_idx = mask.sum(1)
if self.seq2vec:
clf_h = [self.seq2vec(clf_h[x], mask=mask[x]) for x in [0,1]] if self.task_type in ['entlmnt', 'sentsim'] else self.seq2vec(clf_h, mask=mask)
else:
clf_h = [clf_h[x].gather(1, pool_idx[x].unsqueeze(-1).unsqueeze(-1).expand(-1, 1, clf_h[x].size(2))).squeeze(1) for x in [0,1]] if self.task_type in ['entlmnt', 'sentsim'] else clf_h.gather(1, pool_idx.unsqueeze(-1).unsqueeze(-1).expand(-1, 1, clf_h.size(2))).squeeze(1)
clf_h = [torch.cat([clf_h[x], sentvec_tnsr[x]], dim=-1) for x in [0,1]] if self.task_type in ['entlmnt', 'sentsim'] else torch.cat([clf_h, sentvec_tnsr], dim=-1)
return (self._forward(clf_h, mask, labels=labels, weights=weights),) + (({},) if labels is None else (lm_loss, {}))
class EmbeddingHead(nn.Module):
def __init__(self, base_model):
super(EmbeddingHead, self).__init__()
self.base_model = dict(zip(['model'], [base_model]))
def forward(self, hidden_states, mask, labels=None): # For language model task
use_gpu = next(self.base_model['model'].parameters()).is_cuda
clf_h = hidden_states
pool_idx = mask.sum(1)
if (self.base_model['model'].task_params.setdefault('sentsim_func', None) == 'concat'):
if self.base_model['model'].task_params.setdefault('concat_strategy', 'normal') == 'diff':
clf_h = torch.cat(clf_h+[torch.abs(clf_h[0]-clf_h[1]), clf_h[0]*clf_h[1]], dim=-1)
elif self.base_model['model'].task_params.setdefault('concat_strategy', 'normal') == 'flipflop':
clf_h = (torch.cat(clf_h, dim=-1) + torch.cat(clf_h[::-1], dim=-1))
else:
clf_h = torch.cat(clf_h, dim=-1)
clf_logits = self.base_model['model'].linear(clf_h) if self.base_model['model'].linear else clf_h
else:
clf_logits = clf_h = F.pairwise_distance(self.base_model['model'].linear(clf_h[0]), self.base_model['model'].linear(clf_h[1]), 2, eps=1e-12) if self.base_model['model'].task_params['sentsim_func'] == 'dist' else F.cosine_similarity(self.base_model['model'].linear(clf_h[0]), self.base_model['model'].linear(clf_h[1]), dim=1, eps=1e-12)
if self.base_model['model'].thrshlder: self.base_model['model'].thrshld = self.base_model['model'].thrshlder(clf_h)
if self.base_model['model'].do_lastdrop: clf_logits = self.last_dropout(clf_logits)
if (labels is None):
if self.base_model['model'].crf:
tag_seq, score = zip(*self.base_model['model'].crf.viterbi_tags(clf_logits.view(input_ids.size()[0], -1, self.base_model['model'].num_lbs), torch.ones_like(input_ids)))
tag_seq = torch.tensor(tag_seq).to('cuda') if use_gpu else torch.tensor(tag_seq)
logging.debug((tag_seq.min(), tag_seq.max(), score))
clf_logits = torch.zeros((*tag_seq.size(), self.base_model['model'].num_lbs)).to('cuda') if use_gpu else torch.zeros((*tag_seq.size(), self.base_model['model'].num_lbs))
clf_logits = clf_logits.scatter(-1, tag_seq.unsqueeze(-1), 1)
return clf_logits
for cnstrnt in self.base_model['model'].constraints: clf_logits = cnstrnt(clf_logits)
if (self.base_model['model'].mlt_trnsfmr and self.base_model['model'].task_type in ['entlmnt', 'sentsim'] and self.base_model['model'].task_params.setdefault('sentsim_func', None) is not None and self.base_model['model'].task_params['sentsim_func'] != 'concat' and self.base_model['model'].task_params['sentsim_func'] != self.base_model['model'].task_params.setdefault('ymode', 'sim')): return 1 - clf_logits.view(-1, self.base_model['model'].num_lbs)
return clf_logits.view(-1, self.base_model['model'].num_lbs)
if self.base_model['model'].crf:
clf_loss = -self.base_model['model'].crf(clf_logits.view(input_ids.size()[0], -1, self.base_model['model'].num_lbs), pool_idx)
if sample_weights is not None: clf_loss *= sample_weights
return clf_loss, None
else:
for cnstrnt in self.base_model['model'].constraints: clf_logits = cnstrnt(clf_logits)
if self.base_model['model'].task_type == 'mltc-clf' or (self.base_model['model'].task_type == 'entlmnt' and self.base_model['model'].num_lbs > 1) or self.base_model['model'].task_type == 'nmt':
loss_func = nn.CrossEntropyLoss(weight=weights, reduction='none')
clf_loss = loss_func(clf_logits.view(-1, self.base_model['model'].num_lbs), labels.view(-1))
elif self.base_model['model'].task_type == 'mltl-clf' or (self.base_model['model'].task_type == 'entlmnt' and self.base_model['model'].num_lbs == 1):
loss_func = nn.BCEWithLogitsLoss(pos_weight=10*weights if weights is not None else None, reduction='none')
clf_loss = loss_func(clf_logits.view(-1, self.base_model['model'].num_lbs), labels.view(-1, self.base_model['model'].num_lbs).float())
elif self.base_model['model'].task_type == 'sentsim':
from util import config as C
loss_cls = C.RGRSN_LOSS_MAP[self.base_model['model'].task_params.setdefault('loss', 'contrastive' if self.base_model['model'].task_params.setdefault('sentsim_func', None) and self.base_model['model'].task_params['sentsim_func'] != 'concat' else 'mse')]
loss_func = loss_cls(reduction='none', x_mode=C.SIM_FUNC_MAP.setdefault(self.base_model['model'].task_params['sentsim_func'], 'dist'), y_mode=self.base_model['model'].task_params.setdefault('ymode', 'sim')) if self.base_model['model'].task_params.setdefault('sentsim_func', None) and self.base_model['model'].task_params['sentsim_func'] != 'concat' else (loss_cls(reduction='none', x_mode='sim', y_mode=self.base_model['model'].task_params.setdefault('ymode', 'sim')) if self.base_model['model'].task_params['sentsim_func'] == 'concat' else nn.MSELoss(reduction='none'))
clf_loss = loss_func(clf_logits.view(-1), labels.view(-1))
if self.base_model['model'].thrshlder:
num_lbs = labels.view(-1, self.base_model['model'].num_lbs).sum(1)
clf_loss = 0.8 * clf_loss + 0.2 * F.mse_loss(self.base_model['model'].thrshld, torch.sigmoid(torch.topk(clf_logits, k=num_lbs.max(), dim=1, sorted=True)[0][:,num_lbs-1]), reduction='mean')
if sample_weights is not None: clf_loss *= sample_weights
return clf_loss, lm_loss | 0.779406 | 0.209167 |
from multiprocessing import Event
import grpc
import pytest
from uuid import uuid4
from google.protobuf import json_format
from google.protobuf.empty_pb2 import Empty
from common.cryptographer import Cryptographer, hash_160
from teos.watcher import Watcher
from teos.responder import Responder
from teos.gatekeeper import UserInfo
from teos.internal_api import InternalAPI
from teos.protobuf.tower_services_pb2_grpc import TowerServicesStub
from teos.protobuf.tower_services_pb2 import GetTowerInfoResponse
from teos.protobuf.user_pb2 import (
RegisterRequest,
RegisterResponse,
GetUsersResponse,
GetUserRequest,
GetUserResponse,
GetSubscriptionInfoRequest,
)
from teos.protobuf.appointment_pb2 import (
Appointment,
AddAppointmentRequest,
AddAppointmentResponse,
GetAppointmentRequest,
GetAppointmentResponse,
GetAllAppointmentsResponse,
)
from test.teos.conftest import config
from test.teos.unit.conftest import generate_keypair, get_random_value_hex
# Address ("host:port") of the tower-side internal gRPC API, taken from the test config.
internal_api_endpoint = "{}:{}".format(config.get("INTERNAL_API_HOST"), config.get("INTERNAL_API_PORT"))
# Cap on appointments the test Watcher will accept (used by the limit-reached test).
MAX_APPOINTMENTS = 100
# Tower and user keypairs shared by every test in this module.
teos_sk, teos_pk = generate_keypair()
user_sk, user_pk = generate_keypair()
# Compressed public key acting as the application-level user identifier.
user_id = Cryptographer.get_compressed_pk(user_pk)
@pytest.fixture(scope="module")
def internal_api(db_manager, gatekeeper, carrier, block_processor):
    """Module-scoped InternalAPI backed by a fresh Watcher and Responder.

    The gRPC server is started for the duration of the module and stopped on teardown.
    """
    watcher = Watcher(
        db_manager,
        gatekeeper,
        block_processor,
        Responder(db_manager, gatekeeper, carrier, block_processor),
        teos_sk,
        MAX_APPOINTMENTS,
        config.get("LOCATOR_CACHE_SIZE"),
    )
    watcher.last_known_block = block_processor.get_best_block_hash()
    api = InternalAPI(watcher, internal_api_endpoint, config.get("INTERNAL_API_WORKERS"), Event())
    api.rpc_server.start()

    yield api

    api.rpc_server.stop(None)
@pytest.fixture()
def clear_state(internal_api, db_manager):
    """Reset the watcher/responder in-memory state and wipe the db before the test runs."""
    watcher = internal_api.watcher
    # Rebind (rather than mutate) the state maps, mirroring a freshly built tower.
    watcher.gatekeeper.registered_users = {}
    watcher.appointments = {}
    watcher.responder.trackers = {}
    # Drop every record currently persisted in the database.
    for key, _ in db_manager.db.iterator():
        db_manager.db.delete(key)
@pytest.fixture()
def stub():
    """gRPC client stub connected to the internal API over an insecure channel."""
    channel = grpc.insecure_channel(internal_api_endpoint)
    return TowerServicesStub(channel)
def send_appointment(stub, appointment, signature):
    """Submit ``appointment`` (with the user's ``signature``) via ``stub`` and return the reply."""
    proto_appointment = Appointment(
        locator=appointment.locator,
        encrypted_blob=appointment.encrypted_blob,
        to_self_delay=appointment.to_self_delay,
    )
    request = AddAppointmentRequest(appointment=proto_appointment, signature=signature)
    return stub.add_appointment(request)
def send_wrong_appointment(stub, appointment, signature):
    """Send an appointment that is expected to be rejected; return pytest's ExceptionInfo."""
    with pytest.raises(grpc.RpcError) as exc_info:
        send_appointment(stub, appointment, signature)
    return exc_info
# METHODS ACCESSIBLE BY THE CLIENT
# The following collection of tests are of methods the client can reach and, therefore, need to be properly
# authenticated at the application level as well as check for input data correctness
def test_register(internal_api, stub):
    """A registration request carrying a well-formed user id succeeds."""
    register_reply = stub.register(RegisterRequest(user_id=user_id))
    assert isinstance(register_reply, RegisterResponse)
def test_register_wrong_user_id(internal_api, stub):
    """Registering with a malformed user id yields INVALID_ARGUMENT and a descriptive message."""
    # A random 32-byte hex string is not a valid compressed public key.
    bad_user_id = get_random_value_hex(32)
    with pytest.raises(grpc.RpcError) as exc_info:
        stub.register(RegisterRequest(user_id=bad_user_id))
    assert exc_info.value.code() == grpc.StatusCode.INVALID_ARGUMENT
    assert "Provided public key does not match expected format" in exc_info.value.details()
# FIXME: 194 will do with dummy appointment
def test_add_appointment(internal_api, stub, generate_dummy_appointment):
    """A registered user can add a valid appointment."""
    stub.register(RegisterRequest(user_id=user_id))
    appointment, _ = generate_dummy_appointment()
    signature = Cryptographer.sign(appointment.serialize(), user_sk)
    reply = send_appointment(stub, appointment, signature)
    assert isinstance(reply, AddAppointmentResponse)
# FIXME: 194 will do with dummy appointment
def test_add_appointment_non_registered(internal_api, stub, generate_dummy_appointment):
    """An appointment signed by a user the tower never registered fails with UNAUTHENTICATED."""
    # Only the secret key is needed to sign; discard the unused public key.
    another_user_sk, _ = generate_keypair()

    appointment, _ = generate_dummy_appointment()
    appointment_signature = Cryptographer.sign(appointment.serialize(), another_user_sk)
    e = send_wrong_appointment(stub, appointment, appointment_signature)

    assert e.value.code() == grpc.StatusCode.UNAUTHENTICATED
    assert "Invalid signature or user does not have enough slots available" in e.value.details()
# FIXME: 194 will do with dummy appointment
def test_add_appointment_not_enough_slots(internal_api, stub, generate_dummy_appointment):
    """A registered user whose slots are exhausted gets UNAUTHENTICATED when adding."""
    stub.register(RegisterRequest(user_id=user_id))
    # Zero out the user's available slots so the very next appointment is rejected.
    internal_api.watcher.gatekeeper.registered_users[user_id].available_slots = 0

    appointment, _ = generate_dummy_appointment()
    signature = Cryptographer.sign(appointment.serialize(), user_sk)
    error = send_wrong_appointment(stub, appointment, signature)

    assert error.value.code() == grpc.StatusCode.UNAUTHENTICATED
    assert "Invalid signature or user does not have enough slots available" in error.value.details()
# FIXME: 194 will do with dummy appointment
def test_add_appointment_subscription_expired(internal_api, stub, generate_dummy_appointment):
    """Adding an appointment under an expired subscription yields UNAUTHENTICATED."""
    stub.register(RegisterRequest(user_id=user_id))
    # Force the subscription to expire at the current block height.
    current_height = internal_api.watcher.block_processor.get_block_count()
    internal_api.watcher.gatekeeper.registered_users[user_id].subscription_expiry = current_height

    appointment, _ = generate_dummy_appointment()
    signature = Cryptographer.sign(appointment.serialize(), user_sk)
    error = send_wrong_appointment(stub, appointment, signature)

    assert error.value.code() == grpc.StatusCode.UNAUTHENTICATED
    assert "Your subscription expired at" in error.value.details()
# FIXME: 194 will do with dummy appointment
def test_add_appointment_limit_reached(internal_api, stub, generate_dummy_appointment, monkeypatch):
    """Hitting the tower-wide appointment cap returns RESOURCE_EXHAUSTED."""
    # A zero cap means the very first appointment already exceeds the limit.
    monkeypatch.setattr(internal_api.watcher, "max_appointments", 0)
    stub.register(RegisterRequest(user_id=user_id))

    appointment, _ = generate_dummy_appointment()
    signature = Cryptographer.sign(appointment.serialize(), user_sk)
    error = send_wrong_appointment(stub, appointment, signature)

    assert error.value.code() == grpc.StatusCode.RESOURCE_EXHAUSTED
    assert "Appointment limit reached" in error.value.details()
# FIXME: 194 will do with dummy appointment
def test_add_appointment_already_triggered(internal_api, stub, generate_dummy_appointment):
    """Re-adding an appointment already handed to the Responder yields ALREADY_EXISTS."""
    stub.register(RegisterRequest(user_id=user_id))
    appointment, _ = generate_dummy_appointment()
    appointment_uuid = hash_160("{}{}".format(appointment.locator, user_id))
    # Plant the uuid in the Responder trackers so the Watcher believes the appointment
    # was triggered; the tracker payload itself does not matter for this check.
    internal_api.watcher.responder.trackers[appointment_uuid] = {}

    signature = Cryptographer.sign(appointment.serialize(), user_sk)
    error = send_wrong_appointment(stub, appointment, signature)

    assert error.value.code() == grpc.StatusCode.ALREADY_EXISTS
    assert "The provided appointment has already been triggered" in error.value.details()
# FIXME: 194 will do with dummy appointment
def test_get_appointment(internal_api, stub, generate_dummy_appointment):
    """A registered user can fetch back an appointment they previously added."""
    stub.register(RegisterRequest(user_id=user_id))

    # Store an appointment first.
    appointment, _ = generate_dummy_appointment()
    send_appointment(stub, appointment, Cryptographer.sign(appointment.serialize(), user_sk))

    # Then ask for it back, signing the canonical request message.
    message = f"get appointment {appointment.locator}"
    request_signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
    request = GetAppointmentRequest(locator=appointment.locator, signature=request_signature)
    assert isinstance(stub.get_appointment(request), GetAppointmentResponse)
# FIXME: 194 will do with dummy appointment
def test_get_appointment_non_registered(internal_api, stub, generate_dummy_appointment):
# If the user is not registered or the appointment does not belong to him the response should be NOT_FOUND
stub.register(RegisterRequest(user_id=user_id))
another_user_sk, another_user_pk = generate_keypair()
# Send the appointment first
appointment, _ = generate_dummy_appointment()
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
send_appointment(stub, appointment, appointment_signature)
# Request it back
message = f"get appointment {appointment.locator}"
request_signature = Cryptographer.sign(message.encode("utf-8"), another_user_sk)
with pytest.raises(grpc.RpcError) as e:
stub.get_appointment(GetAppointmentRequest(locator=appointment.locator, signature=request_signature))
assert e.value.code() == grpc.StatusCode.NOT_FOUND
assert "Appointment not found" in e.value.details()
# Notice how the request will succeed if `user` (user_id) requests it
request_signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
response = stub.get_appointment(GetAppointmentRequest(locator=appointment.locator, signature=request_signature))
assert isinstance(response, GetAppointmentResponse)
def test_get_appointment_non_existent(internal_api, stub):
# Non-existing appointment will also return NOT_FOUND
stub.register(RegisterRequest(user_id=user_id))
# Request it back
locator = get_random_value_hex(16)
message = f"get appointment {locator}"
request_signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
with pytest.raises(grpc.RpcError) as e:
stub.get_appointment(GetAppointmentRequest(locator=locator, signature=request_signature))
assert e.value.code() == grpc.StatusCode.NOT_FOUND
assert "Appointment not found" in e.value.details()
# FIXME: 194 will do with dummy appointment
def test_get_appointment_subscription_expired(internal_api, stub, generate_dummy_appointment):
# UNAUTHENTICATED is returned if the subscription has expired
stub.register(RegisterRequest(user_id=user_id))
# Send the appointment first
appointment, _ = generate_dummy_appointment()
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
send_appointment(stub, appointment, appointment_signature)
# Modify the user data so the subscription has already ended
expiry = internal_api.watcher.block_processor.get_block_count() - internal_api.watcher.gatekeeper.expiry_delta - 1
internal_api.watcher.gatekeeper.registered_users[user_id].subscription_expiry = expiry
# Request it back
message = f"get appointment {appointment.locator}"
request_signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
with pytest.raises(grpc.RpcError) as e:
stub.get_appointment(GetAppointmentRequest(locator=appointment.locator, signature=request_signature))
assert e.value.code() == grpc.StatusCode.UNAUTHENTICATED
assert "Your subscription expired at" in e.value.details()
def test_get_subscription_info(internal_api, stub):
stub.register(RegisterRequest(user_id=user_id))
# Request subscription details
message = "get subscription info"
request_signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
response = stub.get_subscription_info(GetSubscriptionInfoRequest(signature=request_signature))
assert isinstance(response, GetUserResponse)
def test_get_subscription_info_non_registered(internal_api, stub):
# Now let's try sending an invalid signature with the correct user key, but the wrong message signed.
message = "wrong message"
wrong_signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
with pytest.raises(grpc.RpcError) as e:
stub.get_subscription_info(GetSubscriptionInfoRequest(signature=wrong_signature))
assert e.value.code() == grpc.StatusCode.UNAUTHENTICATED
assert "User not found. Have you registered?" in e.value.details()
def test_get_subscription_info_expired(internal_api, stub):
stub.register(RegisterRequest(user_id=user_id))
# Modify the user data so the subscription has already ended
expiry = internal_api.watcher.block_processor.get_block_count() - internal_api.watcher.gatekeeper.expiry_delta - 1
internal_api.watcher.gatekeeper.registered_users[user_id].subscription_expiry = expiry
# Request subscription details
message = "get subscription info"
request_signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
with pytest.raises(grpc.RpcError) as e:
stub.get_subscription_info(GetSubscriptionInfoRequest(signature=request_signature))
assert e.value.code() == grpc.StatusCode.UNAUTHENTICATED
assert "Your subscription expired at" in e.value.details()
# METHODS ACCESSIBLE BY THE CLI
# The following collection of tests are for methods the CLI can reach and, therefore, have a softer security model than
# the previous set. Notice the currently there is not even authentication for the CLI (FIXME)
def test_get_all_appointments(clear_state, internal_api, stub):
response = stub.get_all_appointments(Empty())
assert isinstance(response, GetAllAppointmentsResponse)
appointments = dict(response.appointments)
assert len(appointments.get("watcher_appointments")) == 0 and len(appointments.get("responder_trackers")) == 0
# FIXME: 194 will do with dummy appointment
def test_get_all_appointments_watcher(clear_state, internal_api, generate_dummy_appointment, stub):
# Data is pulled straight from the database, so we need to feed some
appointment, _ = generate_dummy_appointment()
uuid = uuid4().hex
internal_api.watcher.db_manager.store_watcher_appointment(uuid, appointment.to_dict())
response = stub.get_all_appointments(Empty())
appointments = dict(response.appointments)
assert len(appointments.get("watcher_appointments")) == 1 and len(appointments.get("responder_trackers")) == 0
assert dict(appointments.get("watcher_appointments")[uuid]) == appointment.to_dict()
# FIXME: 194 will do with dummy tracker
def test_get_all_appointments_responder(clear_state, internal_api, generate_dummy_tracker, stub):
# Data is pulled straight from the database, so we need to feed some
tracker = generate_dummy_tracker()
uuid = uuid4().hex
internal_api.watcher.db_manager.store_responder_tracker(uuid, tracker.to_dict())
response = stub.get_all_appointments(Empty())
appointments = dict(response.appointments)
assert len(appointments.get("watcher_appointments")) == 0 and len(appointments.get("responder_trackers")) == 1
assert dict(appointments.get("responder_trackers")[uuid]) == tracker.to_dict()
# FIXME: 194 will do with dummy appointments and trackers
def test_get_all_appointments_both(clear_state, internal_api, generate_dummy_appointment, generate_dummy_tracker, stub):
# Data is pulled straight from the database, so we need to feed some
appointment, _ = generate_dummy_appointment()
uuid_appointment = uuid4().hex
internal_api.watcher.db_manager.store_watcher_appointment(uuid_appointment, appointment.to_dict())
tracker = generate_dummy_tracker()
uuid_tracker = uuid4().hex
internal_api.watcher.db_manager.store_responder_tracker(uuid_tracker, tracker.to_dict())
response = stub.get_all_appointments(Empty())
appointments = dict(response.appointments)
assert len(appointments.get("watcher_appointments")) == 1 and len(appointments.get("responder_trackers")) == 1
assert dict(appointments.get("watcher_appointments")[uuid_appointment]) == appointment.to_dict()
assert dict(appointments.get("responder_trackers")[uuid_tracker]) == tracker.to_dict()
def test_get_tower_info_empty(clear_state, internal_api, stub):
response = stub.get_tower_info(Empty())
assert isinstance(response, GetTowerInfoResponse)
assert response.tower_id == Cryptographer.get_compressed_pk(teos_pk)
assert response.n_registered_users == 0
assert response.n_watcher_appointments == 0
assert response.n_responder_trackers == 0
def test_get_tower_info(internal_api, stub, monkeypatch):
monkeypatch.setattr(internal_api.watcher.gatekeeper, "registered_users", {"uid1": {}})
monkeypatch.setattr(
internal_api.watcher,
"appointments",
{
"uid1": {"locator": "locator1", "user_id": "user_id1"},
"uid2": {"locator": "locator2", "user_id": "user_id2"},
},
)
monkeypatch.setattr(
internal_api.watcher.responder,
"trackers",
{
"uid1": {"penalty_txid": "txid1", "locator": "locator1", "user_id": "user_id1"},
"uid2": {"penalty_txid": "txid2", "locator": "locator2", "user_id": "user_id2"},
"uid3": {"penalty_txid": "txid3", "locator": "locator2", "user_id": "user_id3"},
},
)
response = stub.get_tower_info(Empty())
assert isinstance(response, GetTowerInfoResponse)
assert response.tower_id == Cryptographer.get_compressed_pk(internal_api.watcher.signing_key.public_key)
assert response.n_registered_users == 1
assert response.n_watcher_appointments == 2
assert response.n_responder_trackers == 3
def test_get_users(internal_api, stub, monkeypatch):
# it doesn't matter they are not valid user ids for the test
mock_users = ["user1", "user2", "user3"]
monkeypatch.setattr(
internal_api.watcher.gatekeeper, "registered_users", {"user1": dict(), "user2": dict(), "user3": dict()}
)
response = stub.get_users(Empty())
assert isinstance(response, GetUsersResponse)
assert response.user_ids == mock_users
def test_get_user(internal_api, stub, monkeypatch):
# it doesn't matter they are not valid user ids and user data object for this test
mock_user_id = "02c73bad28b78dd7e3bcad609d330e0d60b97fa0e08ca1cf486cb6cab8dd6140ac"
mock_available_slots = 100
mock_subscription_expiry = 1234
mock_user_info = UserInfo(mock_available_slots, mock_subscription_expiry)
def mock_get_user_info(user_id):
if user_id == mock_user_id:
return mock_user_info
else:
raise RuntimeError(f"called with an unexpected user_id: {user_id}")
monkeypatch.setattr(internal_api.watcher, "get_user_info", mock_get_user_info)
response = stub.get_user(GetUserRequest(user_id=mock_user_id))
assert isinstance(response, GetUserResponse)
# FIXME: numbers are currently returned as floats, even if they are integers
assert json_format.MessageToDict(response.user) == {
"appointments": [],
"available_slots": float(mock_available_slots),
"subscription_expiry": float(mock_subscription_expiry),
}
def test_get_user_not_found(internal_api, stub):
mock_user_id = "some_non_existing_user_id"
with pytest.raises(grpc.RpcError) as e:
stub.get_user(GetUserRequest(user_id=mock_user_id))
assert e.value.code() == grpc.StatusCode.NOT_FOUND
assert "User not found" in e.value.details()
def test_stop(internal_api, stub):
stub.stop(Empty())
assert internal_api.stop_command_event.is_set() | test/teos/unit/test_internal_api.py | from multiprocessing import Event
import grpc
import pytest
from uuid import uuid4
from google.protobuf import json_format
from google.protobuf.empty_pb2 import Empty
from common.cryptographer import Cryptographer, hash_160
from teos.watcher import Watcher
from teos.responder import Responder
from teos.gatekeeper import UserInfo
from teos.internal_api import InternalAPI
from teos.protobuf.tower_services_pb2_grpc import TowerServicesStub
from teos.protobuf.tower_services_pb2 import GetTowerInfoResponse
from teos.protobuf.user_pb2 import (
RegisterRequest,
RegisterResponse,
GetUsersResponse,
GetUserRequest,
GetUserResponse,
GetSubscriptionInfoRequest,
)
from teos.protobuf.appointment_pb2 import (
Appointment,
AddAppointmentRequest,
AddAppointmentResponse,
GetAppointmentRequest,
GetAppointmentResponse,
GetAllAppointmentsResponse,
)
from test.teos.conftest import config
from test.teos.unit.conftest import generate_keypair, get_random_value_hex
internal_api_endpoint = "{}:{}".format(config.get("INTERNAL_API_HOST"), config.get("INTERNAL_API_PORT"))
MAX_APPOINTMENTS = 100
teos_sk, teos_pk = generate_keypair()
user_sk, user_pk = generate_keypair()
user_id = Cryptographer.get_compressed_pk(user_pk)
@pytest.fixture(scope="module")
def internal_api(db_manager, gatekeeper, carrier, block_processor):
responder = Responder(db_manager, gatekeeper, carrier, block_processor)
watcher = Watcher(
db_manager, gatekeeper, block_processor, responder, teos_sk, MAX_APPOINTMENTS, config.get("LOCATOR_CACHE_SIZE")
)
watcher.last_known_block = block_processor.get_best_block_hash()
i_api = InternalAPI(watcher, internal_api_endpoint, config.get("INTERNAL_API_WORKERS"), Event())
i_api.rpc_server.start()
yield i_api
i_api.rpc_server.stop(None)
@pytest.fixture()
def clear_state(internal_api, db_manager):
"""If added to a test, it will clear the db and all the appointments in the watcher and responder before running
the test"""
internal_api.watcher.gatekeeper.registered_users = dict()
internal_api.watcher.appointments = dict()
internal_api.watcher.responder.trackers = dict()
for key, _ in db_manager.db.iterator():
db_manager.db.delete(key)
@pytest.fixture()
def stub():
return TowerServicesStub(grpc.insecure_channel(internal_api_endpoint))
def send_appointment(stub, appointment, signature):
response = stub.add_appointment(
AddAppointmentRequest(
appointment=Appointment(
locator=appointment.locator,
encrypted_blob=appointment.encrypted_blob,
to_self_delay=appointment.to_self_delay,
),
signature=signature,
)
)
return response
def send_wrong_appointment(stub, appointment, signature):
with pytest.raises(grpc.RpcError) as e:
send_appointment(stub, appointment, signature)
return e
# METHODS ACCESSIBLE BY THE CLIENT
# The following collection of tests are of methods the client can reach and, therefore, need to be properly
# authenticated at the application level as well as check for input data correctness
def test_register(internal_api, stub):
# Normal request should work just fine
response = stub.register(RegisterRequest(user_id=user_id))
assert isinstance(response, RegisterResponse)
def test_register_wrong_user_id(internal_api, stub):
# If the user id is wrong we should get INVALID_ARGUMENT with the proper message
wrong_user_id = get_random_value_hex(32)
with pytest.raises(grpc.RpcError) as e:
stub.register(RegisterRequest(user_id=wrong_user_id))
assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
assert "Provided public key does not match expected format" in e.value.details()
# FIXME: 194 will do with dummy appointment
def test_add_appointment(internal_api, stub, generate_dummy_appointment):
# Normal request should work just fine (user needs to be registered)
stub.register(RegisterRequest(user_id=user_id))
appointment, _ = generate_dummy_appointment()
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
response = send_appointment(stub, appointment, appointment_signature)
assert isinstance(response, AddAppointmentResponse)
# FIXME: 194 will do with dummy appointment
def test_add_appointment_non_registered(internal_api, stub, generate_dummy_appointment):
# If the user is not registered we should get UNAUTHENTICATED + the proper message
another_user_sk, another_user_pk = generate_keypair()
appointment, _ = generate_dummy_appointment()
appointment_signature = Cryptographer.sign(appointment.serialize(), another_user_sk)
e = send_wrong_appointment(stub, appointment, appointment_signature)
assert e.value.code() == grpc.StatusCode.UNAUTHENTICATED
assert "Invalid signature or user does not have enough slots available" in e.value.details()
# FIXME: 194 will do with dummy appointment
def test_add_appointment_not_enough_slots(internal_api, stub, generate_dummy_appointment):
# UNAUTHENTICATED should also be get if the user does not have enough appointment slots
# Register the user and set the slots to 0
stub.register(RegisterRequest(user_id=user_id))
internal_api.watcher.gatekeeper.registered_users[user_id].available_slots = 0
appointment, _ = generate_dummy_appointment()
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
e = send_wrong_appointment(stub, appointment, appointment_signature)
assert e.value.code() == grpc.StatusCode.UNAUTHENTICATED
assert "Invalid signature or user does not have enough slots available" in e.value.details()
# FIXME: 194 will do with dummy appointment
def test_add_appointment_subscription_expired(internal_api, stub, generate_dummy_appointment):
# UNAUTHENTICATED is returned if the subscription has expired
# Register the user and set the expiry to the current block
stub.register(RegisterRequest(user_id=user_id))
internal_api.watcher.gatekeeper.registered_users[
user_id
].subscription_expiry = internal_api.watcher.block_processor.get_block_count()
appointment, _ = generate_dummy_appointment()
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
e = send_wrong_appointment(stub, appointment, appointment_signature)
assert e.value.code() == grpc.StatusCode.UNAUTHENTICATED
assert "Your subscription expired at" in e.value.details()
# FIXME: 194 will do with dummy appointment
def test_add_appointment_limit_reached(internal_api, stub, generate_dummy_appointment, monkeypatch):
# If the tower appointment limit is reached RESOURCE_EXHAUSTED should be returned
monkeypatch.setattr(internal_api.watcher, "max_appointments", 0)
stub.register(RegisterRequest(user_id=user_id))
appointment, _ = generate_dummy_appointment()
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
e = send_wrong_appointment(stub, appointment, appointment_signature)
assert e.value.code() == grpc.StatusCode.RESOURCE_EXHAUSTED
assert "Appointment limit reached" in e.value.details()
# FIXME: 194 will do with dummy appointment
def test_add_appointment_already_triggered(internal_api, stub, generate_dummy_appointment):
# If the appointment has already been trigger we should get ALREADY_EXISTS
stub.register(RegisterRequest(user_id=user_id))
appointment, _ = generate_dummy_appointment()
appointment_uuid = hash_160("{}{}".format(appointment.locator, user_id))
# Adding the uuid to the Responder trackers so the Watcher thinks it is in there. The data does not actually matters
internal_api.watcher.responder.trackers[appointment_uuid] = {}
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
e = send_wrong_appointment(stub, appointment, appointment_signature)
assert e.value.code() == grpc.StatusCode.ALREADY_EXISTS
assert "The provided appointment has already been triggered" in e.value.details()
# FIXME: 194 will do with dummy appointment
def test_get_appointment(internal_api, stub, generate_dummy_appointment):
# Requests should work provided the user is registered and the appointment exists for him
stub.register(RegisterRequest(user_id=user_id))
# Send the appointment first
appointment, _ = generate_dummy_appointment()
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
send_appointment(stub, appointment, appointment_signature)
# Request it back
message = f"get appointment {appointment.locator}"
request_signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
response = stub.get_appointment(GetAppointmentRequest(locator=appointment.locator, signature=request_signature))
assert isinstance(response, GetAppointmentResponse)
# FIXME: 194 will do with dummy appointment
def test_get_appointment_non_registered(internal_api, stub, generate_dummy_appointment):
# If the user is not registered or the appointment does not belong to him the response should be NOT_FOUND
stub.register(RegisterRequest(user_id=user_id))
another_user_sk, another_user_pk = generate_keypair()
# Send the appointment first
appointment, _ = generate_dummy_appointment()
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
send_appointment(stub, appointment, appointment_signature)
# Request it back
message = f"get appointment {appointment.locator}"
request_signature = Cryptographer.sign(message.encode("utf-8"), another_user_sk)
with pytest.raises(grpc.RpcError) as e:
stub.get_appointment(GetAppointmentRequest(locator=appointment.locator, signature=request_signature))
assert e.value.code() == grpc.StatusCode.NOT_FOUND
assert "Appointment not found" in e.value.details()
# Notice how the request will succeed if `user` (user_id) requests it
request_signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
response = stub.get_appointment(GetAppointmentRequest(locator=appointment.locator, signature=request_signature))
assert isinstance(response, GetAppointmentResponse)
def test_get_appointment_non_existent(internal_api, stub):
# Non-existing appointment will also return NOT_FOUND
stub.register(RegisterRequest(user_id=user_id))
# Request it back
locator = get_random_value_hex(16)
message = f"get appointment {locator}"
request_signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
with pytest.raises(grpc.RpcError) as e:
stub.get_appointment(GetAppointmentRequest(locator=locator, signature=request_signature))
assert e.value.code() == grpc.StatusCode.NOT_FOUND
assert "Appointment not found" in e.value.details()
# FIXME: 194 will do with dummy appointment
def test_get_appointment_subscription_expired(internal_api, stub, generate_dummy_appointment):
# UNAUTHENTICATED is returned if the subscription has expired
stub.register(RegisterRequest(user_id=user_id))
# Send the appointment first
appointment, _ = generate_dummy_appointment()
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
send_appointment(stub, appointment, appointment_signature)
# Modify the user data so the subscription has already ended
expiry = internal_api.watcher.block_processor.get_block_count() - internal_api.watcher.gatekeeper.expiry_delta - 1
internal_api.watcher.gatekeeper.registered_users[user_id].subscription_expiry = expiry
# Request it back
message = f"get appointment {appointment.locator}"
request_signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
with pytest.raises(grpc.RpcError) as e:
stub.get_appointment(GetAppointmentRequest(locator=appointment.locator, signature=request_signature))
assert e.value.code() == grpc.StatusCode.UNAUTHENTICATED
assert "Your subscription expired at" in e.value.details()
def test_get_subscription_info(internal_api, stub):
stub.register(RegisterRequest(user_id=user_id))
# Request subscription details
message = "get subscription info"
request_signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
response = stub.get_subscription_info(GetSubscriptionInfoRequest(signature=request_signature))
assert isinstance(response, GetUserResponse)
def test_get_subscription_info_non_registered(internal_api, stub):
# Now let's try sending an invalid signature with the correct user key, but the wrong message signed.
message = "wrong message"
wrong_signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
with pytest.raises(grpc.RpcError) as e:
stub.get_subscription_info(GetSubscriptionInfoRequest(signature=wrong_signature))
assert e.value.code() == grpc.StatusCode.UNAUTHENTICATED
assert "User not found. Have you registered?" in e.value.details()
def test_get_subscription_info_expired(internal_api, stub):
stub.register(RegisterRequest(user_id=user_id))
# Modify the user data so the subscription has already ended
expiry = internal_api.watcher.block_processor.get_block_count() - internal_api.watcher.gatekeeper.expiry_delta - 1
internal_api.watcher.gatekeeper.registered_users[user_id].subscription_expiry = expiry
# Request subscription details
message = "get subscription info"
request_signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
with pytest.raises(grpc.RpcError) as e:
stub.get_subscription_info(GetSubscriptionInfoRequest(signature=request_signature))
assert e.value.code() == grpc.StatusCode.UNAUTHENTICATED
assert "Your subscription expired at" in e.value.details()
# METHODS ACCESSIBLE BY THE CLI
# The following collection of tests are for methods the CLI can reach and, therefore, have a softer security model than
# the previous set. Notice the currently there is not even authentication for the CLI (FIXME)
def test_get_all_appointments(clear_state, internal_api, stub):
response = stub.get_all_appointments(Empty())
assert isinstance(response, GetAllAppointmentsResponse)
appointments = dict(response.appointments)
assert len(appointments.get("watcher_appointments")) == 0 and len(appointments.get("responder_trackers")) == 0
# FIXME: 194 will do with dummy appointment
def test_get_all_appointments_watcher(clear_state, internal_api, generate_dummy_appointment, stub):
# Data is pulled straight from the database, so we need to feed some
appointment, _ = generate_dummy_appointment()
uuid = uuid4().hex
internal_api.watcher.db_manager.store_watcher_appointment(uuid, appointment.to_dict())
response = stub.get_all_appointments(Empty())
appointments = dict(response.appointments)
assert len(appointments.get("watcher_appointments")) == 1 and len(appointments.get("responder_trackers")) == 0
assert dict(appointments.get("watcher_appointments")[uuid]) == appointment.to_dict()
# FIXME: 194 will do with dummy tracker
def test_get_all_appointments_responder(clear_state, internal_api, generate_dummy_tracker, stub):
# Data is pulled straight from the database, so we need to feed some
tracker = generate_dummy_tracker()
uuid = uuid4().hex
internal_api.watcher.db_manager.store_responder_tracker(uuid, tracker.to_dict())
response = stub.get_all_appointments(Empty())
appointments = dict(response.appointments)
assert len(appointments.get("watcher_appointments")) == 0 and len(appointments.get("responder_trackers")) == 1
assert dict(appointments.get("responder_trackers")[uuid]) == tracker.to_dict()
# FIXME: 194 will do with dummy appointments and trackers
def test_get_all_appointments_both(clear_state, internal_api, generate_dummy_appointment, generate_dummy_tracker, stub):
# Data is pulled straight from the database, so we need to feed some
appointment, _ = generate_dummy_appointment()
uuid_appointment = uuid4().hex
internal_api.watcher.db_manager.store_watcher_appointment(uuid_appointment, appointment.to_dict())
tracker = generate_dummy_tracker()
uuid_tracker = uuid4().hex
internal_api.watcher.db_manager.store_responder_tracker(uuid_tracker, tracker.to_dict())
response = stub.get_all_appointments(Empty())
appointments = dict(response.appointments)
assert len(appointments.get("watcher_appointments")) == 1 and len(appointments.get("responder_trackers")) == 1
assert dict(appointments.get("watcher_appointments")[uuid_appointment]) == appointment.to_dict()
assert dict(appointments.get("responder_trackers")[uuid_tracker]) == tracker.to_dict()
def test_get_tower_info_empty(clear_state, internal_api, stub):
response = stub.get_tower_info(Empty())
assert isinstance(response, GetTowerInfoResponse)
assert response.tower_id == Cryptographer.get_compressed_pk(teos_pk)
assert response.n_registered_users == 0
assert response.n_watcher_appointments == 0
assert response.n_responder_trackers == 0
def test_get_tower_info(internal_api, stub, monkeypatch):
monkeypatch.setattr(internal_api.watcher.gatekeeper, "registered_users", {"uid1": {}})
monkeypatch.setattr(
internal_api.watcher,
"appointments",
{
"uid1": {"locator": "locator1", "user_id": "user_id1"},
"uid2": {"locator": "locator2", "user_id": "user_id2"},
},
)
monkeypatch.setattr(
internal_api.watcher.responder,
"trackers",
{
"uid1": {"penalty_txid": "txid1", "locator": "locator1", "user_id": "user_id1"},
"uid2": {"penalty_txid": "txid2", "locator": "locator2", "user_id": "user_id2"},
"uid3": {"penalty_txid": "txid3", "locator": "locator2", "user_id": "user_id3"},
},
)
response = stub.get_tower_info(Empty())
assert isinstance(response, GetTowerInfoResponse)
assert response.tower_id == Cryptographer.get_compressed_pk(internal_api.watcher.signing_key.public_key)
assert response.n_registered_users == 1
assert response.n_watcher_appointments == 2
assert response.n_responder_trackers == 3
def test_get_users(internal_api, stub, monkeypatch):
# it doesn't matter they are not valid user ids for the test
mock_users = ["user1", "user2", "user3"]
monkeypatch.setattr(
internal_api.watcher.gatekeeper, "registered_users", {"user1": dict(), "user2": dict(), "user3": dict()}
)
response = stub.get_users(Empty())
assert isinstance(response, GetUsersResponse)
assert response.user_ids == mock_users
def test_get_user(internal_api, stub, monkeypatch):
# it doesn't matter they are not valid user ids and user data object for this test
mock_user_id = "02c73bad28b78dd7e3bcad609d330e0d60b97fa0e08ca1cf486cb6cab8dd6140ac"
mock_available_slots = 100
mock_subscription_expiry = 1234
mock_user_info = UserInfo(mock_available_slots, mock_subscription_expiry)
def mock_get_user_info(user_id):
if user_id == mock_user_id:
return mock_user_info
else:
raise RuntimeError(f"called with an unexpected user_id: {user_id}")
monkeypatch.setattr(internal_api.watcher, "get_user_info", mock_get_user_info)
response = stub.get_user(GetUserRequest(user_id=mock_user_id))
assert isinstance(response, GetUserResponse)
# FIXME: numbers are currently returned as floats, even if they are integers
assert json_format.MessageToDict(response.user) == {
"appointments": [],
"available_slots": float(mock_available_slots),
"subscription_expiry": float(mock_subscription_expiry),
}
def test_get_user_not_found(internal_api, stub):
mock_user_id = "some_non_existing_user_id"
with pytest.raises(grpc.RpcError) as e:
stub.get_user(GetUserRequest(user_id=mock_user_id))
assert e.value.code() == grpc.StatusCode.NOT_FOUND
assert "User not found" in e.value.details()
def test_stop(internal_api, stub):
stub.stop(Empty())
assert internal_api.stop_command_event.is_set() | 0.396419 | 0.11427 |
import math
import zipfile
import os
import xml.etree.ElementTree as ElementTree
import copy
import urllib.request
import shutil
import tempfile
from or_datasets import Bunch
from typing import List, Tuple, Optional
def fetch_vrp_rep(name: str, instance: Optional[str] = None, return_raw: bool = True) -> Bunch:
    """
    Fetches data sets from [VRP-REP](http://www.vrp-rep.org).

    Usage for getting a VRPTW instance is:

    ```python
    bunch = fetch_vrp_rep(
        "solomon-1987-r1", instance="R101_025"
    )
    name, n, E, c, d, Q, t, a, b, x, y = bunch["instance"]
    ```

    Parameters:
        name: String identifier of the dataset. Can contain multiple instances
        instance: String identifier of the instance. If `None` the entire set is
            returned.
        return_raw: If `True` returns the raw data as a tuple

    Returns:
        Network information.

    Raises:
        NotImplementedError: if `return_raw` is `False` (model generation is
            not implemented yet).
    """
    # Archive is cached in the system temp dir so repeated calls don't
    # re-download it, e.g. http://www.vrp-rep.org/datasets/download/solomon-1987-c1.zip
    filename = os.path.join(tempfile.gettempdir(), f"{name}.zip")
    if not os.path.exists(filename):
        url = f"http://www.vrp-rep.org/datasets/download/{name}.zip"
        headers = {"Accept": "application/xml"}
        req = urllib.request.Request(url, headers=headers)
        with urllib.request.urlopen(req) as response:
            with open(filename, "wb") as out_file:
                shutil.copyfileobj(response, out_file)

    # Parse either the single requested instance or every .xml in the archive.
    # Use a context manager so the archive handle is always closed.
    trees = []
    with zipfile.ZipFile(filename, "r") as zf:
        for instancefile in zf.namelist():
            if not instancefile.endswith(".xml"):
                continue

            if instance:
                if instancefile == f"{instance}.xml":
                    with zf.open(instancefile) as f:
                        trees.append(ElementTree.parse(f))
                    break
            else:
                with zf.open(instancefile) as f:
                    trees.append(ElementTree.parse(f))

    bunch = Bunch(data=[], instance=None, DESCR="VRPTW")

    for tree in trees:
        root = tree.getroot()

        instanceName: Optional[str] = _get_name(root)

        node_list = _get_node_list(root)
        n: int = len(node_list)

        # edges, distance, time
        m, c, t, x, y = _get_distance(n, node_list)

        # vehicle profile
        fleet = root.find("fleet")
        Q, T = _get_vehicle_profile(fleet)

        # requests
        requests = root.find("requests")
        d, a, b = _get_requests(requests, n, m, t)

        # set tw for duplicate depot node: opens with the depot, closes at the
        # fleet's maximum travel time
        a[n - 1] = a[0]
        b[n - 1] = T

        if return_raw:
            data = (instanceName, n, m, c, d, Q, t, a, b, x, y)
        else:
            # TODO: generate model based on data
            # milp = mip.Model()
            # mapping = Mapping()
            # graphs: List[igraph.Graph] = []
            # data = Model(milp, mapping, graphs)
            # Previously this branch fell through with `data` unbound and
            # crashed with UnboundLocalError below; fail explicitly instead.
            raise NotImplementedError("return_raw=False is not implemented yet")

        bunch["data"].append(data)
        if instance:
            bunch["instance"] = data

    return bunch
def _get_name(root: ElementTree.Element) -> Optional[str]:
info = root.find("info")
if info:
name = info.find("name")
if name is not None and name.text:
return name.text
else:
raise KeyError("no 'name' element")
else:
raise KeyError("no 'info' element")
return None
num = 27
useNumer = False
def _get_node_list(root: ElementTree.Element):
network = root.find("network")
if network:
nodes = network.find("nodes")
if nodes:
node_list = nodes.findall("node")
else:
raise KeyError("no 'nodes' element")
else:
raise KeyError("no 'network' element")
if useNumer:
node_list = node_list[:num]
# duplicate depot node
end_node = copy.deepcopy(node_list[0])
end_node.set("id", str(len(node_list)))
node_list.append(end_node)
return node_list
def _get_distance(n, nodes: List[ElementTree.Element]):
x: List[int] = [0] * n
y: List[int] = [0] * n
m: List[Tuple[int, int]] = []
c: List[float] = []
t: List[float] = []
# calculate distance
for node in nodes:
id_attr = node.get("id")
if id_attr:
i = int(id_attr)
else:
raise KeyError("no 'id' attribute in 'node' element")
cx = node.find("cx")
if cx is not None and cx.text:
x[i] = int(float(cx.text))
else:
raise KeyError("no 'cx' element")
cy = node.find("cy")
if cy is not None and cy.text:
y[i] = int(float(cy.text))
else:
raise KeyError("no 'cy' element")
for i in range(n):
for j in range(n):
if j <= i:
continue
value = (
int(math.sqrt(math.pow(x[i] - x[j], 2) + math.pow(y[i] - y[j], 2)) * 10)
/ 10
)
if i != n - 1 and j != 0 and not (i == 0 and j == n - 1):
c.append(value)
t.append(value)
m.append((i, j))
if j != n - 1 and i != 0:
c.append(value)
t.append(value)
m.append((j, i))
return m, c, t, x, y
def _get_vehicle_profile(fleet: Optional[ElementTree.Element]):
if fleet:
vehicle = fleet.find("vehicle_profile")
else:
raise KeyError("no 'vehicle_profile' element")
if vehicle:
# capacity
capacity = vehicle.find("capacity")
if capacity is not None and capacity.text:
Q = int(float(capacity.text))
else:
raise KeyError("no 'capacity' element")
# time limit
max_travel_time = vehicle.find("max_travel_time")
if max_travel_time is not None and max_travel_time.text:
t_limit = int(float(max_travel_time.text))
T = t_limit
else:
raise KeyError("no 'max_travel_time' element")
return Q, T
def _get_requests(
requests: Optional[ElementTree.Element],
n: int,
m: List[Tuple[int, int]],
t: List[float],
):
d: List[int] = [0] * n
a: List[int] = [0] * n
b: List[int] = [0] * n
if requests:
request_list = requests.findall("request")
if useNumer:
request_list = request_list[: num - 1]
for request in request_list:
id_attr = request.get("id")
if id_attr:
i = int(id_attr)
else:
raise KeyError("no 'id' attribute in 'request' element")
# demand
quantity = request.find("quantity")
if quantity is not None and quantity.text:
d[i] = int(float(quantity.text))
else:
raise KeyError("no 'quantity' element")
# time windows
tw = request.find("tw")
_get_tw(tw, i, a, b)
service_time = request.find("service_time")
_get_service_time(service_time, t, i, m)
else:
raise KeyError("no 'requests' element")
return d, a, b
def _get_tw(tw, i, a, b):
if tw is not None:
start = tw.find("start")
if start is not None and start.text:
a[i] = int(start.text)
else:
raise KeyError("no 'start' element")
end = tw.find("end")
if end is not None and end.text:
b[i] = int(end.text)
else:
raise KeyError("no 'end' element")
else:
raise KeyError("no 'tw' element")
def _get_service_time(service_time, t, i, m):
if service_time is not None and service_time.text:
s: int = int(float(service_time.text))
else:
raise KeyError("no 'service_time' element")
for j, e in enumerate(m):
if e[0] == i:
t[j] += s | or_datasets/vrp_rep.py | import math
import zipfile
import os
import xml.etree.ElementTree as ElementTree
import copy
import urllib.request
import shutil
import tempfile
from or_datasets import Bunch
from typing import List, Tuple, Optional
def fetch_vrp_rep(name: str, instance: str = None, return_raw=True) -> Bunch:
"""
Fetches data sets from [VRP-REP](http://www.vrp-rep.org).
Usage for getting a VRPTW instance is:
```python
bunch = fetch_vrp_rep(
"solomon-1987-r1", instance="R101_025"
)
name, n, E, c, d, Q, t, a, b, x, y = bunch["instance"]
```
Parameters:
name: String identifier of the dataset. Can contain multiple instances
instance: String identifier of the instance. If `None` the entire set is
returned.
return_raw: If `True` returns the raw data as a tuple
Returns:
Network information.
"""
# http://www.vrp-rep.org/datasets/download/solomon-1987-c1.zip
filename = os.path.join(tempfile.gettempdir(), f"{name}.zip")
if not os.path.exists(filename):
url = f"http://www.vrp-rep.org/datasets/download/{name}.zip"
headers = {"Accept": "application/xml"}
req = urllib.request.Request(url, headers=headers)
with urllib.request.urlopen(req) as response:
with open(filename, "wb") as out_file:
shutil.copyfileobj(response, out_file)
zf = zipfile.ZipFile(filename, "r")
trees = []
for instancefile in zf.namelist():
if not instancefile.endswith(".xml"):
continue
if instance:
if instancefile == f"{instance}.xml":
with zf.open(instancefile) as f:
trees.append(ElementTree.parse(f))
break
else:
with zf.open(instancefile) as f:
trees.append(ElementTree.parse(f))
bunch = Bunch(data=[], instance=None, DESCR="VRPTW")
for tree in trees:
root = tree.getroot()
instanceName: Optional[str] = _get_name(root)
node_list = _get_node_list(root)
n: int = len(node_list)
# edges, distance, time
m, c, t, x, y = _get_distance(n, node_list)
# vehicle profile
fleet = root.find("fleet")
Q, T = _get_vehicle_profile(fleet)
# requests
requests = root.find("requests")
d, a, b = _get_requests(requests, n, m, t)
# set tw for duplicate depot node
a[n - 1] = a[0]
b[n - 1] = T
if return_raw:
data = (instanceName, n, m, c, d, Q, t, a, b, x, y)
else:
# TODO
# generate model based on data
# milp = mip.Model()
# mapping = Mapping()
# graphs: List[igraph.Graph] = []
# data = Model(milp, mapping, graphs)
pass
bunch["data"].append(data)
if instance:
bunch["instance"] = data
return bunch
def _get_name(root: ElementTree.Element) -> Optional[str]:
info = root.find("info")
if info:
name = info.find("name")
if name is not None and name.text:
return name.text
else:
raise KeyError("no 'name' element")
else:
raise KeyError("no 'info' element")
return None
num = 27
useNumer = False
def _get_node_list(root: ElementTree.Element):
network = root.find("network")
if network:
nodes = network.find("nodes")
if nodes:
node_list = nodes.findall("node")
else:
raise KeyError("no 'nodes' element")
else:
raise KeyError("no 'network' element")
if useNumer:
node_list = node_list[:num]
# duplicate depot node
end_node = copy.deepcopy(node_list[0])
end_node.set("id", str(len(node_list)))
node_list.append(end_node)
return node_list
def _get_distance(n, nodes: List[ElementTree.Element]):
x: List[int] = [0] * n
y: List[int] = [0] * n
m: List[Tuple[int, int]] = []
c: List[float] = []
t: List[float] = []
# calculate distance
for node in nodes:
id_attr = node.get("id")
if id_attr:
i = int(id_attr)
else:
raise KeyError("no 'id' attribute in 'node' element")
cx = node.find("cx")
if cx is not None and cx.text:
x[i] = int(float(cx.text))
else:
raise KeyError("no 'cx' element")
cy = node.find("cy")
if cy is not None and cy.text:
y[i] = int(float(cy.text))
else:
raise KeyError("no 'cy' element")
for i in range(n):
for j in range(n):
if j <= i:
continue
value = (
int(math.sqrt(math.pow(x[i] - x[j], 2) + math.pow(y[i] - y[j], 2)) * 10)
/ 10
)
if i != n - 1 and j != 0 and not (i == 0 and j == n - 1):
c.append(value)
t.append(value)
m.append((i, j))
if j != n - 1 and i != 0:
c.append(value)
t.append(value)
m.append((j, i))
return m, c, t, x, y
def _get_vehicle_profile(fleet: Optional[ElementTree.Element]):
if fleet:
vehicle = fleet.find("vehicle_profile")
else:
raise KeyError("no 'vehicle_profile' element")
if vehicle:
# capacity
capacity = vehicle.find("capacity")
if capacity is not None and capacity.text:
Q = int(float(capacity.text))
else:
raise KeyError("no 'capacity' element")
# time limit
max_travel_time = vehicle.find("max_travel_time")
if max_travel_time is not None and max_travel_time.text:
t_limit = int(float(max_travel_time.text))
T = t_limit
else:
raise KeyError("no 'max_travel_time' element")
return Q, T
def _get_requests(
requests: Optional[ElementTree.Element],
n: int,
m: List[Tuple[int, int]],
t: List[float],
):
d: List[int] = [0] * n
a: List[int] = [0] * n
b: List[int] = [0] * n
if requests:
request_list = requests.findall("request")
if useNumer:
request_list = request_list[: num - 1]
for request in request_list:
id_attr = request.get("id")
if id_attr:
i = int(id_attr)
else:
raise KeyError("no 'id' attribute in 'request' element")
# demand
quantity = request.find("quantity")
if quantity is not None and quantity.text:
d[i] = int(float(quantity.text))
else:
raise KeyError("no 'quantity' element")
# time windows
tw = request.find("tw")
_get_tw(tw, i, a, b)
service_time = request.find("service_time")
_get_service_time(service_time, t, i, m)
else:
raise KeyError("no 'requests' element")
return d, a, b
def _get_tw(tw, i, a, b):
if tw is not None:
start = tw.find("start")
if start is not None and start.text:
a[i] = int(start.text)
else:
raise KeyError("no 'start' element")
end = tw.find("end")
if end is not None and end.text:
b[i] = int(end.text)
else:
raise KeyError("no 'end' element")
else:
raise KeyError("no 'tw' element")
def _get_service_time(service_time, t, i, m):
if service_time is not None and service_time.text:
s: int = int(float(service_time.text))
else:
raise KeyError("no 'service_time' element")
for j, e in enumerate(m):
if e[0] == i:
t[j] += s | 0.648689 | 0.69022 |
import datetime, threading, time
from abc import abstractmethod
from typing import Mapping
from sqlalchemy import MetaData, Table, Column, Integer, String, ForeignKey
from sqlalchemy.orm import mapper, relationship, reconstructor
from cassiopeia.dto.common import DtoObject
metadata = MetaData()
class SQLBaseObject(object):
    """Base class for SQL-mapped cache objects.

    Subclasses are expected to declare:
      * ``_table``    -- the SQLAlchemy ``Table`` the class is mapped onto.
      * ``_dto_type`` -- the DTO class produced by :meth:`to_dto`.
      * ``_relationships`` (optional) -- dict ``name -> (child_class, rel_kwargs)``
        describing nested objects mapped via ``relationship()``.
      * ``_constants`` (optional) -- names of string attributes that are
        interned through :class:`Constant` and persisted as ``<name>Id``
        integer columns.
    """
    def __init__(self, **kwargs):
        # Build the object graph from a raw DTO-style dict: relationship
        # values become child SQL objects, constant strings become their
        # interned integer ids, everything else is assigned verbatim.
        for key, value in kwargs.items():
            if hasattr(self, "_relationships") and key in self._relationships:
                # Create a new Object for that relation, so sqlalchemy knows how to handle it
                clazz = self._relationships[key][0]
                if type(value) is list:
                    # One-to-many: each dict in the list becomes a child row.
                    setattr(self, key, [clazz(**v) for v in value])
                else:
                    setattr(self, key, clazz(**value))
            elif hasattr(self, "_constants") and key in self._constants:
                # Create constant object for sqlalchemy
                setattr(self, key + "Id", Constant.create(value).id)
            else:
                setattr(self, key, value)
    @reconstructor
    def init_on_load(self):
        # Runs after SQLAlchemy loads a row: translate each stored
        # ``<name>Id`` back into its string value via the Constant cache.
        if hasattr(self, "_constants"):
            for constant in self._constants:
                setattr(self, constant, Constant.create(None, getattr(self, constant + "Id")).value)
    def to_dto(self):
        """Convert this object, its relationships and constants to a DTO."""
        map = {}
        for column in self._table.columns:
            map[column.name] = getattr(self, column.name)
        # Go over relationships and convert them to a dto recursively
        if hasattr(self, "_relationships"):
            for rel in self._relationships:
                value = getattr(self, rel)
                if isinstance(value, list):
                    map[rel] = [v.to_dto() for v in value]
                elif hasattr(value, "to_dto"):
                    map[rel] = value.to_dto()
                else:
                    map[rel] = value
        if hasattr(self, "_constants"):
            # Replace the internal ``<name>Id`` column with the string value.
            # NOTE(review): falsy values ('' , 0) are normalized to None by
            # this truthiness test -- confirm that is intended.
            for constant in self._constants:
                value = getattr(self, constant)
                if value:
                    map[constant] = value
                    del map[constant + "Id"]
                else:
                    map[constant] = None
                    del map[constant + "Id"]
        return self._dto_type(map)
    def has_expired(self, expirations: Mapping[type, float]) -> bool:
        """Return True if this row is older than its configured TTL.

        ``expirations`` maps DTO types to TTLs in seconds; a missing or
        non-positive TTL means the row never expires.
        """
        if hasattr(self, "lastUpdate"):
            expire_seconds = expirations.get(self._dto_type, -1)
            if expire_seconds > 0:
                now = datetime.datetime.now().timestamp()
                # Treat a missing/None lastUpdate as epoch 0 (always expired).
                return now > (self.lastUpdate if self.lastUpdate else 0) + expire_seconds
        return False
    def updated(self):
        """Stamp the row with the current time, if it tracks freshness."""
        if hasattr(self, "lastUpdate"):
            self.lastUpdate = datetime.datetime.now().timestamp()
    @classmethod
    def _create_properties(cls):
        """Build the ``properties`` dict for the classical ``mapper()`` call.

        Side effect: appends an Integer ``<name>Id`` column to ``_table`` for
        each declared constant that does not already have one.
        """
        prop = {}
        if hasattr(cls, '_relationships'):
            for key, value in cls._relationships.items():
                if not "lazy" in value[1]:
                    # Default to eager (joined) loading unless overridden.
                    value[1]["lazy"] = "joined"
                prop[key] = relationship(value[0], cascade="all, delete-orphan", **value[1])
        if hasattr(cls, '_constants'):
            for key in cls._constants:
                column_name = key + "Id"
                if not column_name in cls._table.c:
                    cls._table.append_column(Column(column_name, Integer))
        return prop
    @classmethod
    def expire(cls, session, expirations: Mapping[type, float]):
        """Delete all rows of this type older than their configured TTL."""
        if "lastUpdate" in cls._table.columns:
            expire_seconds = expirations.get(cls._dto_type, -1)
            now = datetime.datetime.now().timestamp()
            session.query(cls).filter(cls.lastUpdate < now - expire_seconds).delete()
            session.commit()
    # The two "methods" below are placeholders: concrete subclasses shadow
    # them with class attributes (a Table and a DTO class respectively).
    @abstractmethod
    def _table(self):
        pass
    @abstractmethod
    def _dto_type(self):
        pass
# Registry of every mapped SQL class; used later for bulk expiration passes.
sql_classes = set()


def map_object(cls):
    """Register *cls* for expiration and map it onto its table.

    Uses SQLAlchemy classical mapping; relationship/constant properties are
    derived from the class's own declarations.
    """
    sql_classes.add(cls)
    props = cls._create_properties()
    if props:
        mapper(cls, cls._table, properties=props)
    else:
        mapper(cls, cls._table)
class ConstantDto(DtoObject):
    """DTO marker type for interned constant values."""
    pass
class Constant:
    """Process-wide interning cache mapping constant strings to row ids.

    Lookups first consult the in-memory caches; only on a miss is a database
    round-trip made (creating the row if needed). All access is serialized
    by a single lock. ``_session`` is a session factory injected elsewhere.
    """
    _session = None
    _lock = threading.Lock()
    _cache_by_value = {}
    _cache_by_id = {}

    @classmethod
    def create(cls, value=None, id=None):
        """Resolve a constant by value and/or id.

        With neither given, returns the "no constant" sentinel (id -1).
        An empty-string value with no id is rejected.
        """
        with cls._lock:
            if value == "" and not id:
                raise ValueError("Either value or id must be provided")
            if value and id:
                # Both sides known -- nothing to look up.
                return cls(value, id)
            if value:
                if value in cls._cache_by_value:
                    return cls(value, cls._cache_by_value[value])
                # Cache miss: fetch or create the row, then memoize both ways.
                session = cls._session()
                row = session.query(SQLConstant).filter_by(value=value).first()
                if not row:
                    row = SQLConstant(value)
                    session.add(row)
                    session.commit()
                cls._cache_by_value[value] = row.id
                cls._cache_by_id[row.id] = value
                return cls(row.value, row.id)
            if id:
                if id in cls._cache_by_id:
                    return cls(cls._cache_by_id[id], id)
                session = cls._session()
                row = session.query(SQLConstant).filter_by(id=id).first()
                cls._cache_by_value[row.value] = row.id
                cls._cache_by_id[row.id] = row.value
                return cls(row.value, row.id)
            # The constant is None return it with id -1
            return cls(value, -1)

    def __init__(self, value, id):
        self.value = value
        self.id = id

    def to_dto(self):
        """Constants serialize as their bare value."""
        return self.value
class SQLConstant(SQLBaseObject):
    """Backing table of interned string constants referenced by id."""
    _dto_type = ConstantDto
    _table = Table("constant", metadata,
                   Column("id", Integer, primary_key=True, autoincrement=True),
                   Column("value", String(30), unique=True))

    def __init__(self, constant, id=None):
        # id is left for the database to assign when None (autoincrement).
        self.value = constant
        self.id = id

    def to_dto(self):
        """Serialize as the bare string value, not a wrapped DTO."""
        return self.value


map_object(SQLConstant)
from abc import abstractmethod
from typing import Mapping
from sqlalchemy import MetaData, Table, Column, Integer, String, ForeignKey
from sqlalchemy.orm import mapper, relationship, reconstructor
from cassiopeia.dto.common import DtoObject
metadata = MetaData()
class SQLBaseObject(object):
def __init__(self, **kwargs):
for key, value in kwargs.items():
if hasattr(self, "_relationships") and key in self._relationships:
# Create a new Object for that relation, so sqlalchemy knows how to handle it
clazz = self._relationships[key][0]
if type(value) is list:
setattr(self, key, [clazz(**v) for v in value])
else:
setattr(self, key, clazz(**value))
elif hasattr(self, "_constants") and key in self._constants:
# Create constant object for sqlalchemy
setattr(self, key + "Id", Constant.create(value).id)
else:
setattr(self, key, value)
@reconstructor
def init_on_load(self):
if hasattr(self, "_constants"):
for constant in self._constants:
setattr(self, constant, Constant.create(None, getattr(self, constant + "Id")).value)
def to_dto(self):
map = {}
for column in self._table.columns:
map[column.name] = getattr(self, column.name)
# Go over relationships and convert them to a dto recursively
if hasattr(self, "_relationships"):
for rel in self._relationships:
value = getattr(self, rel)
if isinstance(value, list):
map[rel] = [v.to_dto() for v in value]
elif hasattr(value, "to_dto"):
map[rel] = value.to_dto()
else:
map[rel] = value
if hasattr(self, "_constants"):
for constant in self._constants:
value = getattr(self, constant)
if value:
map[constant] = value
del map[constant + "Id"]
else:
map[constant] = None
del map[constant + "Id"]
return self._dto_type(map)
def has_expired(self, expirations: Mapping[type, float]) -> bool:
if hasattr(self, "lastUpdate"):
expire_seconds = expirations.get(self._dto_type, -1)
if expire_seconds > 0:
now = datetime.datetime.now().timestamp()
return now > (self.lastUpdate if self.lastUpdate else 0) + expire_seconds
return False
def updated(self):
if hasattr(self, "lastUpdate"):
self.lastUpdate = datetime.datetime.now().timestamp()
@classmethod
def _create_properties(cls):
prop = {}
if hasattr(cls, '_relationships'):
for key, value in cls._relationships.items():
if not "lazy" in value[1]:
value[1]["lazy"] = "joined"
prop[key] = relationship(value[0], cascade="all, delete-orphan", **value[1])
if hasattr(cls, '_constants'):
for key in cls._constants:
column_name = key + "Id"
if not column_name in cls._table.c:
cls._table.append_column(Column(column_name, Integer))
return prop
@classmethod
def expire(cls, session, expirations: Mapping[type, float]):
if "lastUpdate" in cls._table.columns:
expire_seconds = expirations.get(cls._dto_type, -1)
now = datetime.datetime.now().timestamp()
session.query(cls).filter(cls.lastUpdate < now - expire_seconds).delete()
session.commit()
@abstractmethod
def _table(self):
pass
@abstractmethod
def _dto_type(self):
pass
sql_classes = set()
def map_object(cls):
# Add cls to set so they can be called to expire later on
sql_classes.add(cls)
properties = cls._create_properties()
if not properties:
mapper(cls, cls._table)
else:
mapper(cls, cls._table, properties=properties)
class ConstantDto(DtoObject):
pass
class Constant:
_session = None
_lock = threading.Lock()
_cache_by_value = {}
_cache_by_id = {}
@classmethod
def create(cls, value=None, id=None):
with cls._lock:
if value == "" and not id:
raise ValueError("Either value or id must be provided")
elif value and id:
return cls(value, id)
elif value:
if value in cls._cache_by_value:
return cls(value, cls._cache_by_value[value])
else:
session = cls._session()
const = session.query(SQLConstant).filter_by(value=value).first()
if not const:
const = SQLConstant(value)
session.add(const)
session.commit()
cls._cache_by_value[value] = const.id
cls._cache_by_id[const.id] = value
return cls(const.value, const.id)
elif id:
if id in cls._cache_by_id:
return cls(cls._cache_by_id[id], id)
else:
session = cls._session()
const = session.query(SQLConstant).filter_by(id=id).first()
cls._cache_by_value[const.value] = const.id
cls._cache_by_id[const.id] = const.value
return cls(const.value, const.id)
else:
# The constant is None return it with id -1
return cls(value, -1)
def __init__(self, value, id):
self.value = value
self.id = id
def to_dto(self):
return self.value
class SQLConstant(SQLBaseObject):
_dto_type = ConstantDto
_table = Table("constant", metadata,
Column("id", Integer, primary_key=True, autoincrement=True),
Column("value", String(30), unique=True))
def __init__(self, constant, id=None):
setattr(self, "value", constant)
setattr(self, "id", id)
def to_dto(self):
return getattr(self, "value")
map_object(SQLConstant) | 0.751739 | 0.224608 |
import logging
import os
import subprocess
from pip.basecommand import Command
from pip.commands.show import search_packages_info
from pip.status_codes import SUCCESS, ERROR
from pip._vendor import pkg_resources
import sys
class ViewCommand(Command):
    """
    Views the package source directory with the editor defined in
    $EDITOR.
    """
    name = 'view'
    usage = """
      %prog <package>"""
    summary = 'View installed package in the editor'

    def __init__(self, *args, **kw):
        super(ViewCommand, self).__init__(*args, **kw)

    def run(self, options, args):
        """Open the top-level files/dirs of each named package in $EDITOR.

        Returns pip's SUCCESS/ERROR status codes. Fails when no package is
        named, $EDITOR is unset, a package is unknown, or a top-level entry
        cannot be resolved to a directory or known module file.
        """
        if not args:
            sys.stdout.write('ERROR: Please provide a package name or names.\n')
            return ERROR
        if not os.getenv('EDITOR'):
            sys.stdout.write(
                'ERROR: Please set $EDITOR to open the package.\n')
            return ERROR

        query = args
        shell_command = os.getenv('EDITOR').split()

        results = list(search_packages_info(query))
        installed = dict(
            [(p.project_name.lower(), p) for p in pkg_resources.working_set])

        # BUGFIX: was `len(results) is 0` -- identity comparison against an
        # int only works by CPython small-int caching accident.
        if not results:
            sys.stdout.write("ERROR: Could not find package(s).\n")
            return ERROR

        for dist in results:
            pkg = installed[dist['name'].lower()]
            names = list(pkg.get_metadata_lines('top_level.txt'))
            for i in range(len(names)):
                # Resolve each top-level name to a package dir or module file.
                fullpath = os.path.join(dist['location'], names[i])
                if os.path.isdir(fullpath):
                    names[i] = fullpath
                elif os.path.isfile(fullpath + '.py'):
                    names[i] = fullpath + '.py'
                elif os.path.isfile(fullpath + '.so'):
                    names[i] = fullpath + '.so'
                elif os.path.isfile(fullpath + '.dll'):
                    names[i] = fullpath + '.dll'
                elif os.path.isfile(fullpath + '.pyd'):
                    names[i] = fullpath + '.pyd'
                else:
                    return ERROR
            status_code = subprocess.call(shell_command + names)
            # BUGFIX: was `is not SUCCESS` -- compare by value, not identity.
            if status_code != SUCCESS:
                return ERROR
        return SUCCESS
def main():
    """Console entry point: run the view command on the CLI arguments.

    Note: the original aliased ``args = sys.argv`` and popped from it, which
    mutated the global ``sys.argv``; a slice avoids that side effect while
    passing the same argument list (program name stripped).
    """
    view_cmd = ViewCommand()
    view_cmd.run({}, sys.argv[1:])
import os
import subprocess
from pip.basecommand import Command
from pip.commands.show import search_packages_info
from pip.status_codes import SUCCESS, ERROR
from pip._vendor import pkg_resources
import sys
class ViewCommand(Command):
"""
Views the package source directory with the editor defined in
$EDITOR.
"""
name = 'view'
usage = """
%prog <package>"""
summary = 'View installed package in the editor'
def __init__(self, *args, **kw):
super(ViewCommand, self).__init__(*args, **kw)
def run(self, options, args):
if not args:
sys.stdout.write('ERROR: Please provide a package name or names.\n')
return ERROR
if not os.getenv('EDITOR'):
sys.stdout.write(
'ERROR: Please set $EDITOR to open the package.\n')
return ERROR
query = args
shell_command = os.getenv('EDITOR').split()
results = list(search_packages_info(query))
installed = dict(
[(p.project_name.lower(), p) for p in pkg_resources.working_set])
if len(results) is 0:
sys.stdout.write("ERROR: Could not find package(s).\n")
return ERROR
for dist in results:
pkg = installed[dist['name'].lower()]
names = list(pkg.get_metadata_lines('top_level.txt'))
for i in range(len(names)):
fullpath = os.path.join(dist['location'], names[i])
if os.path.isdir(fullpath):
names[i] = fullpath
elif os.path.isfile(fullpath + '.py'):
names[i] = fullpath + '.py'
elif os.path.isfile(fullpath + '.so'):
names[i] = fullpath + '.so'
elif os.path.isfile(fullpath + '.dll'):
names[i] = fullpath + '.dll'
elif os.path.isfile(fullpath + '.pyd'):
names[i] = fullpath + '.pyd'
else:
return ERROR
status_code = subprocess.call(shell_command + names)
if status_code is not SUCCESS:
return ERROR
return SUCCESS
def main():
args = sys.argv
args.pop(0)
view_cmd = ViewCommand()
view_cmd.run({}, sys.argv) | 0.234933 | 0.067087 |
import torch
from kaolin.metrics import tetmesh
class TestTetMeshMetrics:
    """Unit tests for kaolin.metrics.tetmesh (volume, AMIPS, equivolume)."""

    def test_tetrahedron_volume(self):
        """Signed volume of a single tetrahedron matches the reference value."""
        tetrahedrons = torch.tensor([[[[0.5000, 0.5000, 0.4500],
                                       [0.4500, 0.5000, 0.5000],
                                       [0.4750, 0.4500, 0.4500],
                                       [0.5000, 0.5000, 0.5000]]]])
        assert torch.allclose(tetmesh.tetrahedron_volume(tetrahedrons), torch.tensor([[-2.0833e-05]]))

    def test_amips(self):
        """AMIPS energy per batch matches the reference values."""
        tetrahedrons = torch.tensor([[[
            [1.7000, 2.3000, 4.4500],
            [3.4800, 0.2000, 5.3000],
            [4.9000, 9.4500, 6.4500],
            [6.2000, 8.5000, 7.1000]],
            [[-1.3750, 1.4500, 3.2500],
             [4.9000, 1.8000, 2.7000],
             [3.6000, 1.9000, 2.3000],
             [1.5500, 1.3500, 2.9000]]],
            [[[1.7000, 2.3000, 4.4500],
              [3.4800, 0.2000, 5.3000],
              [4.9000, 9.4500, 6.4500],
              [6.2000, 8.5000, 7.1000]],
             [[-1.3750, 1.4500, 3.2500],
              [4.9000, 1.8000, 2.7000],
              [3.6000, 1.9000, 2.3000],
              [1.5500, 1.3500, 2.9000]]]])
        inverse_offset_matrix = torch.tensor([[[[-1.1561, -1.1512, -1.9049],
                                                [1.5138, 1.0108, 3.4302],
                                                [1.6538, 1.0346, 4.2223]],
                                               [[2.9020, -1.0995, -1.8744],
                                                [1.1554, 1.1519, 1.7780],
                                                [-0.0766, 1.6350, 1.1064]]],
                                              [[[-0.9969, 1.4321, -0.3075],
                                                [-1.3414, 1.5795, -1.6571],
                                                [-0.1775, -0.4349, 1.1772]],
                                               [[-1.1077, -1.2441, 1.8037],
                                                [-0.5722, 0.1755, -2.4364],
                                                [-0.5263, 1.5765, 1.5607]]]])
        # BUGFIX: the allclose result was computed but never asserted, so
        # this test could never fail.
        assert torch.allclose(tetmesh.amips(tetrahedrons, inverse_offset_matrix), torch.tensor([[13042.3408], [2376.2517]]))

    def test_equivolume(self):
        """Equivolume penalty (pow=4) per batch matches the reference values."""
        tetrahedrons = torch.tensor([[[[0.5000, 0.5000, 0.7500],
                                       [0.4500, 0.8000, 0.6000],
                                       [0.4750, 0.4500, 0.2500],
                                       [0.5000, 0.3000, 0.3000]],
                                      [[0.4750, 0.4500, 0.2500],
                                       [0.5000, 0.9000, 0.3000],
                                       [0.4500, 0.4000, 0.9000],
                                       [0.4500, 0.4500, 0.7000]]],
                                     [[[0.7000, 0.3000, 0.4500],
                                       [0.4800, 0.2000, 0.3000],
                                       [0.9000, 0.4500, 0.4500],
                                       [0.2000, 0.5000, 0.1000]],
                                      [[0.3750, 0.4500, 0.2500],
                                       [0.9000, 0.8000, 0.7000],
                                       [0.6000, 0.9000, 0.3000],
                                       [0.5500, 0.3500, 0.9000]]]])
        assert torch.allclose(tetmesh.equivolume(tetrahedrons, pow=4), torch.tensor([[2.2898e-15], [2.9661e-10]]))
import torch
from kaolin.metrics import tetmesh
class TestTetMeshMetrics:
def test_tetrahedron_volume(self):
tetrahedrons = torch.tensor([[[[0.5000, 0.5000, 0.4500],
[0.4500, 0.5000, 0.5000],
[0.4750, 0.4500, 0.4500],
[0.5000, 0.5000, 0.5000]]]])
assert torch.allclose(tetmesh.tetrahedron_volume(tetrahedrons), torch.tensor([[-2.0833e-05]]))
def test_amips(self):
tetrahedrons = torch.tensor([[[
[1.7000, 2.3000, 4.4500],
[3.4800, 0.2000, 5.3000],
[4.9000, 9.4500, 6.4500],
[6.2000, 8.5000, 7.1000]],
[[-1.3750, 1.4500, 3.2500],
[4.9000, 1.8000, 2.7000],
[3.6000, 1.9000, 2.3000],
[1.5500, 1.3500, 2.9000]]],
[[[1.7000, 2.3000, 4.4500],
[3.4800, 0.2000, 5.3000],
[4.9000, 9.4500, 6.4500],
[6.2000, 8.5000, 7.1000]],
[[-1.3750, 1.4500, 3.2500],
[4.9000, 1.8000, 2.7000],
[3.6000, 1.9000, 2.3000],
[1.5500, 1.3500, 2.9000]]]])
inverse_offset_matrix = torch.tensor([[[[-1.1561, -1.1512, -1.9049],
[1.5138, 1.0108, 3.4302],
[1.6538, 1.0346, 4.2223]],
[[2.9020, -1.0995, -1.8744],
[1.1554, 1.1519, 1.7780],
[-0.0766, 1.6350, 1.1064]]],
[[[-0.9969, 1.4321, -0.3075],
[-1.3414, 1.5795, -1.6571],
[-0.1775, -0.4349, 1.1772]],
[[-1.1077, -1.2441, 1.8037],
[-0.5722, 0.1755, -2.4364],
[-0.5263, 1.5765, 1.5607]]]])
torch.allclose(tetmesh.amips(tetrahedrons, inverse_offset_matrix), torch.tensor([[13042.3408], [2376.2517]]))
def test_equivolume(self):
tetrahedrons = torch.tensor([[[[0.5000, 0.5000, 0.7500],
[0.4500, 0.8000, 0.6000],
[0.4750, 0.4500, 0.2500],
[0.5000, 0.3000, 0.3000]],
[[0.4750, 0.4500, 0.2500],
[0.5000, 0.9000, 0.3000],
[0.4500, 0.4000, 0.9000],
[0.4500, 0.4500, 0.7000]]],
[[[0.7000, 0.3000, 0.4500],
[0.4800, 0.2000, 0.3000],
[0.9000, 0.4500, 0.4500],
[0.2000, 0.5000, 0.1000]],
[[0.3750, 0.4500, 0.2500],
[0.9000, 0.8000, 0.7000],
[0.6000, 0.9000, 0.3000],
[0.5500, 0.3500, 0.9000]]]])
assert torch.allclose(tetmesh.equivolume(tetrahedrons, pow=4), torch.tensor([[2.2898e-15], [2.9661e-10]])) | 0.652906 | 0.680877 |
import os
import requests
from django import forms
from utilities.exceptions import CloudBoltException
from resourcehandlers.forms import (
BaseResourceHandlerCredentialsForm, BaseResourceHandlerSettingsForm,
)
from .models import RhevResourceHandler
from infrastructure.models import Environment
from ovirtsdk4 import Connection, AuthError, ConnectionError, Error
class RhevCredentialsForm(BaseResourceHandlerCredentialsForm):
    """Credentials form for RHEV-M; validates by opening a test connection."""

    class Meta(BaseResourceHandlerCredentialsForm.Meta):
        model = RhevResourceHandler
        fields = ('protocol',) + BaseResourceHandlerCredentialsForm.Meta.fields

    protocol = forms.ChoiceField(
        label='Protocol',
        choices=(('https', 'HTTPS'), ('http', 'HTTP'),),
        required=True,
    )

    def clean(self):
        """Validate the credentials by connecting to the RHEV-M API.

        Requires the CA certificate for the host to already exist on disk;
        raises ValidationError when it is missing or the connection fails.
        """
        super(RhevCredentialsForm, self).clean()
        ip = self.cleaned_data.get('ip')
        protocol = self.cleaned_data.get('protocol')
        port = self.cleaned_data.get('port')
        serviceaccount = self.cleaned_data.get('serviceaccount')
        servicepasswd = self.cleaned_data.get('servicepasswd')
        # NOTE: If either of these is not set, the form will display
        # errors, because they have "required" set to True.
        if serviceaccount and servicepasswd:
            api_url = RhevResourceHandler.get_api_url(protocol, ip, port)
            cert_filename = RhevResourceHandler.get_cert_filename(ip, port)
            # Locate the cert file
            if not os.path.exists(cert_filename):
                raise forms.ValidationError("CA certificate for "
                                            "{0} does not exist. ({1})".format(ip,
                                                                               cert_filename))
            try:
                # BUGFIX: the password kwarg held a redacted placeholder
                # (`<PASSWORD>`, a syntax error); pass the cleaned value.
                Connection(
                    url=api_url,
                    username=serviceaccount,
                    password=servicepasswd,
                    ca_file=cert_filename)
            except (Error, ConnectionError,
                    AuthError):
                raise forms.ValidationError("Unable to connect to RHEV-M with"
                                            " the information provided.")
        return self.cleaned_data
class RhevSettingsForm(BaseResourceHandlerSettingsForm):
    """Settings form for a RHEV handler, including its environment links."""

    class Meta(BaseResourceHandlerSettingsForm.Meta):
        model = RhevResourceHandler
        fields = (BaseResourceHandlerSettingsForm.Meta.fields
                  + ("clusterName",))

    clusterName = forms.CharField(label="Cluster name")
    environments = forms.ModelMultipleChoiceField(
        queryset=Environment.objects.exclude(name="Unassigned"),
        required=False,
    )

    def __init__(self, *args, **kwargs):
        """Pre-select the environments already attached to the handler."""
        handler = kwargs.get("instance")
        super(RhevSettingsForm, self).__init__(*args, **kwargs)
        if handler:
            self.fields["environments"].initial = handler.environment_set.all()

    def save(self, *args, **kwargs):
        """Save the handler and sync its environment set to the selection."""
        selected_envs = self.cleaned_data["environments"]
        handler = super(RhevSettingsForm, self).save()
        handler.environment_set = selected_envs
        return handler
class RhevQuickSetupSettingsForm(RhevSettingsForm):
    """Quick-setup variant of the settings form; hides custom fields."""
    class Meta(RhevSettingsForm.Meta):
        model = RhevResourceHandler
        exclude = ('custom_fields', )
import requests
from django import forms
from utilities.exceptions import CloudBoltException
from resourcehandlers.forms import (
BaseResourceHandlerCredentialsForm, BaseResourceHandlerSettingsForm,
)
from .models import RhevResourceHandler
from infrastructure.models import Environment
from ovirtsdk4 import Connection, AuthError, ConnectionError, Error
class RhevCredentialsForm(BaseResourceHandlerCredentialsForm):
class Meta(BaseResourceHandlerCredentialsForm.Meta):
model = RhevResourceHandler
fields = ('protocol',) + BaseResourceHandlerCredentialsForm.Meta.fields
protocol = forms.ChoiceField(
label='Protocol',
choices=(('https', 'HTTPS'), ('http', 'HTTP'),),
required=True,
)
def clean(self):
super(RhevCredentialsForm, self).clean()
ip = self.cleaned_data.get('ip')
protocol = self.cleaned_data.get('protocol')
port = self.cleaned_data.get('port')
serviceaccount = self.cleaned_data.get('serviceaccount')
servicepasswd = self.cleaned_data.get('servicepasswd')
# NOTE: If either of these is not set, the form will display
# errors, because they have "required" set to True.
if serviceaccount and servicepasswd:
api_url = RhevResourceHandler.get_api_url(protocol, ip, port)
cert_filename = RhevResourceHandler.get_cert_filename(ip, port)
# Locate the cert file
if not os.path.exists(cert_filename):
raise forms.ValidationError("CA certificate for "
"{0} does not exist. ({1})".format(ip,
cert_filename))
try:
Connection(
url=api_url,
username=serviceaccount,
password=<PASSWORD>,
ca_file=cert_filename)
except (Error, ConnectionError,
AuthError):
raise forms.ValidationError("Unable to connect to RHEV-M with"
" the information provided.")
return self.cleaned_data
class RhevSettingsForm(BaseResourceHandlerSettingsForm):
class Meta(BaseResourceHandlerSettingsForm.Meta):
model = RhevResourceHandler
fields = (BaseResourceHandlerSettingsForm.Meta.fields
+ ("clusterName",))
clusterName = forms.CharField(label="Cluster name")
environments = forms.ModelMultipleChoiceField(
queryset=Environment.objects.exclude(name="Unassigned"),
required=False,
)
def __init__(self, *args, **kwargs):
rh = kwargs.get("instance")
super(RhevSettingsForm, self).__init__(*args, **kwargs)
if rh:
self.fields["environments"].initial = rh.environment_set.all()
def save(self, *args, **kwargs):
new_envs = self.cleaned_data["environments"]
rh = super(RhevSettingsForm, self).save()
rh.environment_set = new_envs
return rh
class RhevQuickSetupSettingsForm(RhevSettingsForm):
class Meta(RhevSettingsForm.Meta):
model = RhevResourceHandler
exclude = ('custom_fields', ) | 0.560493 | 0.043937 |
__all__ = ['DirectScrolledWindowFrame']
from panda3d.core import *
from direct.gui import DirectGuiGlobals as DGG
from direct.gui.DirectFrame import DirectFrame
from direct.gui.DirectButton import DirectButton
from direct.gui.DirectScrolledFrame import DirectScrolledFrame
class DirectScrolledWindowFrame(DirectScrolledFrame):
"""
A moveable window with a scrolled content frame
"""
def __init__(self, parent = None, **kw):
optiondefs = (
# Define type of DirectGuiWidget
# The height of the area to drag the widget around
('dragAreaHeight', 0.1, None),
('resortOnDrag', True, None),
('showClose', True, None),
('closeButtonPosition', 'Right', None),
('closeButtonScale', 0.05, None)
)
# Merge keyword options with default options
self.defineoptions(kw, optiondefs)
# Initialize superclasses
DirectScrolledFrame.__init__(self, parent)
# Call option initialization functions
self.initialiseoptions(DirectScrolledWindowFrame)
self.dragDropTask = None
b = self.bounds
c = self.createcomponent(
'dragFrame', (), 'dragFrame',
DirectFrame,
# set the parent of the frame to this class
(self,),
state=DGG.NORMAL,
suppressMouse=True,
frameColor=(0.5,0.5,0.5,1),
relief=1,
pos=(0,0,b[3]),
# set the size
frameSize=(b[0],b[1],0, self['dragAreaHeight']))
c.bind(DGG.B1PRESS, self.dragStart)
c.bind(DGG.B1RELEASE, self.dragStop)
scale = self['closeButtonScale']
pos = (0,0,self['dragAreaHeight']*0.5)
if self['closeButtonPosition'] == 'Right':
pos = (b[1]-scale*0.5,0,self['dragAreaHeight']*0.5)
elif self['closeButtonPosition'] == 'Left':
pos = (b[0]+scale*0.5,0,self['dragAreaHeight']*0.5)
closeBtn = self.createcomponent(
'closeButton', (), 'closeButton',
DirectButton,
(c,),
text='x',
scale=scale,
pos=pos,
command=self.destroy)
def dragStart(self, event):
"""
Start dragging the window around
"""
if self.dragDropTask is not None:
# remove any existing tasks
taskMgr.remove(self.dragDropTask)
if self['resortOnDrag']:
self.reparentTo(self.parent, 0)
# get the windows position as seen from render2d
vWidget2render2d = self.getPos(render2d)
# get the mouse position as seen from render2d
vMouse2render2d = Point3(event.getMouse()[0], 0, event.getMouse()[1])
# calculate the vector between the mosue and the window
editVec = Vec3(vWidget2render2d - vMouse2render2d)
# create the task and store the values in it, so we can use it in there
self.dragDropTask = taskMgr.add(self.dragTask, self.taskName("dragDropTask"))
self.dragDropTask.editVec = editVec
self.dragDropTask.mouseVec = vMouse2render2d
def dragTask(self, t):
"""
Calculate the new window position ever frame
"""
# chec if we have the mouse
mwn = base.mouseWatcherNode
if mwn.hasMouse():
# get the mouse position
vMouse2render2d = Point3(mwn.getMouse()[0], 0, mwn.getMouse()[1])
# calculate the new position using the mouse position and the start
# vector of the window
newPos = vMouse2render2d + t.editVec
# Now set the new windows new position
self.setPos(render2d, newPos)
return t.cont
def dragStop(self, event):
"""
Stop dragging the window around
"""
# kill the drag and drop task
taskMgr.remove(self.dragDropTask) | DirectGuiExtension/DirectScrolledWindowFrame.py | __all__ = ['DirectScrolledWindowFrame']
from panda3d.core import *
from direct.gui import DirectGuiGlobals as DGG
from direct.gui.DirectFrame import DirectFrame
from direct.gui.DirectButton import DirectButton
from direct.gui.DirectScrolledFrame import DirectScrolledFrame
class DirectScrolledWindowFrame(DirectScrolledFrame):
"""
A moveable window with a scrolled content frame
"""
def __init__(self, parent = None, **kw):
optiondefs = (
# Define type of DirectGuiWidget
# The height of the area to drag the widget around
('dragAreaHeight', 0.1, None),
('resortOnDrag', True, None),
('showClose', True, None),
('closeButtonPosition', 'Right', None),
('closeButtonScale', 0.05, None)
)
# Merge keyword options with default options
self.defineoptions(kw, optiondefs)
# Initialize superclasses
DirectScrolledFrame.__init__(self, parent)
# Call option initialization functions
self.initialiseoptions(DirectScrolledWindowFrame)
self.dragDropTask = None
b = self.bounds
c = self.createcomponent(
'dragFrame', (), 'dragFrame',
DirectFrame,
# set the parent of the frame to this class
(self,),
state=DGG.NORMAL,
suppressMouse=True,
frameColor=(0.5,0.5,0.5,1),
relief=1,
pos=(0,0,b[3]),
# set the size
frameSize=(b[0],b[1],0, self['dragAreaHeight']))
c.bind(DGG.B1PRESS, self.dragStart)
c.bind(DGG.B1RELEASE, self.dragStop)
scale = self['closeButtonScale']
pos = (0,0,self['dragAreaHeight']*0.5)
if self['closeButtonPosition'] == 'Right':
pos = (b[1]-scale*0.5,0,self['dragAreaHeight']*0.5)
elif self['closeButtonPosition'] == 'Left':
pos = (b[0]+scale*0.5,0,self['dragAreaHeight']*0.5)
closeBtn = self.createcomponent(
'closeButton', (), 'closeButton',
DirectButton,
(c,),
text='x',
scale=scale,
pos=pos,
command=self.destroy)
def dragStart(self, event):
"""
Start dragging the window around
"""
if self.dragDropTask is not None:
# remove any existing tasks
taskMgr.remove(self.dragDropTask)
if self['resortOnDrag']:
self.reparentTo(self.parent, 0)
# get the windows position as seen from render2d
vWidget2render2d = self.getPos(render2d)
# get the mouse position as seen from render2d
vMouse2render2d = Point3(event.getMouse()[0], 0, event.getMouse()[1])
# calculate the vector between the mosue and the window
editVec = Vec3(vWidget2render2d - vMouse2render2d)
# create the task and store the values in it, so we can use it in there
self.dragDropTask = taskMgr.add(self.dragTask, self.taskName("dragDropTask"))
self.dragDropTask.editVec = editVec
self.dragDropTask.mouseVec = vMouse2render2d
def dragTask(self, t):
"""
Calculate the new window position ever frame
"""
# chec if we have the mouse
mwn = base.mouseWatcherNode
if mwn.hasMouse():
# get the mouse position
vMouse2render2d = Point3(mwn.getMouse()[0], 0, mwn.getMouse()[1])
# calculate the new position using the mouse position and the start
# vector of the window
newPos = vMouse2render2d + t.editVec
# Now set the new windows new position
self.setPos(render2d, newPos)
return t.cont
def dragStop(self, event):
"""
Stop dragging the window around
"""
# kill the drag and drop task
taskMgr.remove(self.dragDropTask) | 0.565299 | 0.296158 |
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sara_flexbe_states.SetKey import SetKey
from flexbe_states.log_key_state import LogKeyState
from sara_flexbe_states.sara_set_head_angle import SaraSetHeadAngle
from sara_flexbe_states.list_entities_by_name import list_entities_by_name
from flexbe_states.flexible_calculation_state import FlexibleCalculationState
from flexbe_states.wait_state import WaitState
from sara_flexbe_states.sara_say import SaraSay
from sara_flexbe_states.for_loop import ForLoop
from sara_flexbe_behaviors.action_turn_sm import action_turnSM
from sara_flexbe_states.SetRosParam import SetRosParam
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Sat Jun 1 2018
@author: <NAME>
'''
class Action_countSM(Behavior):
'''
Count instances of entity class around sara (will only rotate, won't move).
'''
def __init__(self):
super(Action_countSM, self).__init__()
self.name = 'Action_count'
# parameters of this behavior
# references to used behaviors
self.add_behavior(action_turnSM, 'action_turn')
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
def create(self):
# x:475 y:412, x:73 y:374
_state_machine = OperatableStateMachine(outcomes=['done', 'failed'], input_keys=['className'], output_keys=['Count'])
_state_machine.userdata.className = "bottle"
_state_machine.userdata.Count = 0
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
# x:756 y:397
_sm_move_head_0 = OperatableStateMachine(outcomes=['finished'], input_keys=['className', 'Count'], output_keys=['Count'])
with _sm_move_head_0:
# x:19 y:95
OperatableStateMachine.add('set left',
SaraSetHeadAngle(pitch=-0.6, yaw=1.2),
transitions={'done': 'wait1'},
autonomy={'done': Autonomy.Off})
# x:5 y:229
OperatableStateMachine.add('count',
list_entities_by_name(frontality_level=0, distance_max=2),
transitions={'found': 'add', 'none_found': 'add'},
autonomy={'found': Autonomy.Off, 'none_found': Autonomy.Off},
remapping={'name': 'className', 'entity_list': 'entity_list', 'number': 'number'})
# x:10 y:326
OperatableStateMachine.add('add',
FlexibleCalculationState(calculation=lambda x: x[0]+x[1], input_keys=["Count", "number"]),
transitions={'done': 'gen text'},
autonomy={'done': Autonomy.Off},
remapping={'Count': 'Count', 'number': 'number', 'output_value': 'Count'})
# x:241 y:88
OperatableStateMachine.add('set center',
SaraSetHeadAngle(pitch=-0.6, yaw=0),
transitions={'done': 'wait 2'},
autonomy={'done': Autonomy.Off})
# x:266 y:154
OperatableStateMachine.add('wait 2',
WaitState(wait_time=10),
transitions={'done': 'count2'},
autonomy={'done': Autonomy.Off})
# x:245 y:224
OperatableStateMachine.add('count2',
list_entities_by_name(frontality_level=0, distance_max=2),
transitions={'found': 'add2', 'none_found': 'add2'},
autonomy={'found': Autonomy.Off, 'none_found': Autonomy.Off},
remapping={'name': 'className', 'entity_list': 'entity_list', 'number': 'number'})
# x:252 y:321
OperatableStateMachine.add('add2',
FlexibleCalculationState(calculation=lambda x: x[0]+x[1], input_keys=["Count", "number"]),
transitions={'done': 'geb text 2'},
autonomy={'done': Autonomy.Off},
remapping={'Count': 'Count', 'number': 'number', 'output_value': 'Count'})
# x:24 y:162
OperatableStateMachine.add('wait1',
WaitState(wait_time=12),
transitions={'done': 'count'},
autonomy={'done': Autonomy.Off})
# x:445 y:90
OperatableStateMachine.add('set right',
SaraSetHeadAngle(pitch=-0.6, yaw=-1.2),
transitions={'done': 'wait 3'},
autonomy={'done': Autonomy.Off})
# x:464 y:164
OperatableStateMachine.add('wait 3',
WaitState(wait_time=10),
transitions={'done': 'count3'},
autonomy={'done': Autonomy.Off})
# x:443 y:237
OperatableStateMachine.add('count3',
list_entities_by_name(frontality_level=0, distance_max=2),
transitions={'found': 'add3', 'none_found': 'add3'},
autonomy={'found': Autonomy.Off, 'none_found': Autonomy.Off},
remapping={'name': 'className', 'entity_list': 'entity_list', 'number': 'number'})
# x:457 y:334
OperatableStateMachine.add('add3',
FlexibleCalculationState(calculation=lambda x: x[0]+x[1], input_keys=["Count", "number"]),
transitions={'done': 'gen text3'},
autonomy={'done': Autonomy.Off},
remapping={'Count': 'Count', 'number': 'number', 'output_value': 'Count'})
# x:30 y:412
OperatableStateMachine.add('gen text',
FlexibleCalculationState(calculation=lambda x: "I see "+ str(x[0])+ " "+ str(x[1]), input_keys=["number", "classname"]),
transitions={'done': 'say_1'},
autonomy={'done': Autonomy.Off},
remapping={'number': 'number', 'classname': 'className', 'output_value': 'text'})
# x:253 y:392
OperatableStateMachine.add('geb text 2',
FlexibleCalculationState(calculation=lambda x: "I see "+ str(x[0])+ " "+ str(x[1]), input_keys=["number", "classname"]),
transitions={'done': 'sara_2'},
autonomy={'done': Autonomy.Off},
remapping={'number': 'number', 'classname': 'className', 'output_value': 'text'})
# x:461 y:405
OperatableStateMachine.add('gen text3',
FlexibleCalculationState(calculation=lambda x: "I see "+ str(x[0])+ " "+ str(x[1]), input_keys=["number", "classname"]),
transitions={'done': 'Say_3'},
autonomy={'done': Autonomy.Off},
remapping={'number': 'number', 'classname': 'className', 'output_value': 'text'})
# x:53 y:492
OperatableStateMachine.add('say_1',
SaraSay(sentence=lambda x: x, input_keys=[], emotion=0, block=True),
transitions={'done': 'set center'},
autonomy={'done': Autonomy.Off})
# x:264 y:471
OperatableStateMachine.add('sara_2',
SaraSay(sentence=lambda x: x, input_keys=[], emotion=0, block=True),
transitions={'done': 'set right'},
autonomy={'done': Autonomy.Off})
# x:486 y:485
OperatableStateMachine.add('Say_3',
SaraSay(sentence=lambda x: x, input_keys=[], emotion=0, block=True),
transitions={'done': 'finished'},
autonomy={'done': Autonomy.Off})
with _state_machine:
# x:55 y:34
OperatableStateMachine.add('init count',
SetKey(Value=0),
transitions={'done': 'set angle'},
autonomy={'done': Autonomy.Off},
remapping={'Key': 'Count'})
# x:444 y:326
OperatableStateMachine.add('Log Count',
LogKeyState(text="Found: {} objects", severity=Logger.REPORT_HINT),
transitions={'done': 'done'},
autonomy={'done': Autonomy.Off},
remapping={'data': 'Count'})
# x:40 y:183
OperatableStateMachine.add('Move head',
_sm_move_head_0,
transitions={'finished': 'for 1'},
autonomy={'finished': Autonomy.Inherit},
remapping={'className': 'className', 'Count': 'Count'})
# x:419 y:254
OperatableStateMachine.add('Look Center Found',
SaraSetHeadAngle(pitch=-0.4, yaw=0),
transitions={'done': 'Log Count'},
autonomy={'done': Autonomy.Off})
# x:234 y:227
OperatableStateMachine.add('for 1',
ForLoop(repeat=0),
transitions={'do': 'action_turn', 'end': 'Log Count'},
autonomy={'do': Autonomy.Off, 'end': Autonomy.Off},
remapping={'index': 'index'})
# x:38 y:275
OperatableStateMachine.add('action_turn',
self.use_behavior(action_turnSM, 'action_turn'),
transitions={'finished': 'Move head', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'rotation': 'rotation'})
# x:56 y:102
OperatableStateMachine.add('set angle',
SetKey(Value=3.14159),
transitions={'done': 'Move head'},
autonomy={'done': Autonomy.Off},
remapping={'Key': 'rotation'})
# x:417 y:37
OperatableStateMachine.add('store count',
SetRosParam(ParamName="behavior/Count/CountedObjets"),
transitions={'done': 'concat'},
autonomy={'done': Autonomy.Off},
remapping={'Value': 'Count'})
# x:400 y:114
OperatableStateMachine.add('concat',
FlexibleCalculationState(calculation=lambda x: "I counted "+str(x[0])+" "+str(x[1])+".", input_keys=["Count", "className"]),
transitions={'done': 'say_count'},
autonomy={'done': Autonomy.Off},
remapping={'Count': 'Count', 'className': 'className', 'output_value': 'Text'})
# x:419 y:186
OperatableStateMachine.add('say_count',
SaraSay(sentence=lambda x: x, input_keys=[], emotion=1, block=True),
transitions={'done': 'Look Center Found'},
autonomy={'done': Autonomy.Off})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC] | sara_flexbe_behaviors/src/sara_flexbe_behaviors/action_count_sm.py |
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sara_flexbe_states.SetKey import SetKey
from flexbe_states.log_key_state import LogKeyState
from sara_flexbe_states.sara_set_head_angle import SaraSetHeadAngle
from sara_flexbe_states.list_entities_by_name import list_entities_by_name
from flexbe_states.flexible_calculation_state import FlexibleCalculationState
from flexbe_states.wait_state import WaitState
from sara_flexbe_states.sara_say import SaraSay
from sara_flexbe_states.for_loop import ForLoop
from sara_flexbe_behaviors.action_turn_sm import action_turnSM
from sara_flexbe_states.SetRosParam import SetRosParam
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Sat Jun 1 2018
@author: <NAME>
'''
class Action_countSM(Behavior):
'''
Count instances of entity class around sara (will only rotate, won't move).
'''
def __init__(self):
super(Action_countSM, self).__init__()
self.name = 'Action_count'
# parameters of this behavior
# references to used behaviors
self.add_behavior(action_turnSM, 'action_turn')
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
def create(self):
# x:475 y:412, x:73 y:374
_state_machine = OperatableStateMachine(outcomes=['done', 'failed'], input_keys=['className'], output_keys=['Count'])
_state_machine.userdata.className = "bottle"
_state_machine.userdata.Count = 0
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
# x:756 y:397
_sm_move_head_0 = OperatableStateMachine(outcomes=['finished'], input_keys=['className', 'Count'], output_keys=['Count'])
with _sm_move_head_0:
# x:19 y:95
OperatableStateMachine.add('set left',
SaraSetHeadAngle(pitch=-0.6, yaw=1.2),
transitions={'done': 'wait1'},
autonomy={'done': Autonomy.Off})
# x:5 y:229
OperatableStateMachine.add('count',
list_entities_by_name(frontality_level=0, distance_max=2),
transitions={'found': 'add', 'none_found': 'add'},
autonomy={'found': Autonomy.Off, 'none_found': Autonomy.Off},
remapping={'name': 'className', 'entity_list': 'entity_list', 'number': 'number'})
# x:10 y:326
OperatableStateMachine.add('add',
FlexibleCalculationState(calculation=lambda x: x[0]+x[1], input_keys=["Count", "number"]),
transitions={'done': 'gen text'},
autonomy={'done': Autonomy.Off},
remapping={'Count': 'Count', 'number': 'number', 'output_value': 'Count'})
# x:241 y:88
OperatableStateMachine.add('set center',
SaraSetHeadAngle(pitch=-0.6, yaw=0),
transitions={'done': 'wait 2'},
autonomy={'done': Autonomy.Off})
# x:266 y:154
OperatableStateMachine.add('wait 2',
WaitState(wait_time=10),
transitions={'done': 'count2'},
autonomy={'done': Autonomy.Off})
# x:245 y:224
OperatableStateMachine.add('count2',
list_entities_by_name(frontality_level=0, distance_max=2),
transitions={'found': 'add2', 'none_found': 'add2'},
autonomy={'found': Autonomy.Off, 'none_found': Autonomy.Off},
remapping={'name': 'className', 'entity_list': 'entity_list', 'number': 'number'})
# x:252 y:321
OperatableStateMachine.add('add2',
FlexibleCalculationState(calculation=lambda x: x[0]+x[1], input_keys=["Count", "number"]),
transitions={'done': 'geb text 2'},
autonomy={'done': Autonomy.Off},
remapping={'Count': 'Count', 'number': 'number', 'output_value': 'Count'})
# x:24 y:162
OperatableStateMachine.add('wait1',
WaitState(wait_time=12),
transitions={'done': 'count'},
autonomy={'done': Autonomy.Off})
# x:445 y:90
OperatableStateMachine.add('set right',
SaraSetHeadAngle(pitch=-0.6, yaw=-1.2),
transitions={'done': 'wait 3'},
autonomy={'done': Autonomy.Off})
# x:464 y:164
OperatableStateMachine.add('wait 3',
WaitState(wait_time=10),
transitions={'done': 'count3'},
autonomy={'done': Autonomy.Off})
# x:443 y:237
OperatableStateMachine.add('count3',
list_entities_by_name(frontality_level=0, distance_max=2),
transitions={'found': 'add3', 'none_found': 'add3'},
autonomy={'found': Autonomy.Off, 'none_found': Autonomy.Off},
remapping={'name': 'className', 'entity_list': 'entity_list', 'number': 'number'})
# x:457 y:334
OperatableStateMachine.add('add3',
FlexibleCalculationState(calculation=lambda x: x[0]+x[1], input_keys=["Count", "number"]),
transitions={'done': 'gen text3'},
autonomy={'done': Autonomy.Off},
remapping={'Count': 'Count', 'number': 'number', 'output_value': 'Count'})
# x:30 y:412
OperatableStateMachine.add('gen text',
FlexibleCalculationState(calculation=lambda x: "I see "+ str(x[0])+ " "+ str(x[1]), input_keys=["number", "classname"]),
transitions={'done': 'say_1'},
autonomy={'done': Autonomy.Off},
remapping={'number': 'number', 'classname': 'className', 'output_value': 'text'})
# x:253 y:392
OperatableStateMachine.add('geb text 2',
FlexibleCalculationState(calculation=lambda x: "I see "+ str(x[0])+ " "+ str(x[1]), input_keys=["number", "classname"]),
transitions={'done': 'sara_2'},
autonomy={'done': Autonomy.Off},
remapping={'number': 'number', 'classname': 'className', 'output_value': 'text'})
# x:461 y:405
OperatableStateMachine.add('gen text3',
FlexibleCalculationState(calculation=lambda x: "I see "+ str(x[0])+ " "+ str(x[1]), input_keys=["number", "classname"]),
transitions={'done': 'Say_3'},
autonomy={'done': Autonomy.Off},
remapping={'number': 'number', 'classname': 'className', 'output_value': 'text'})
# x:53 y:492
OperatableStateMachine.add('say_1',
SaraSay(sentence=lambda x: x, input_keys=[], emotion=0, block=True),
transitions={'done': 'set center'},
autonomy={'done': Autonomy.Off})
# x:264 y:471
OperatableStateMachine.add('sara_2',
SaraSay(sentence=lambda x: x, input_keys=[], emotion=0, block=True),
transitions={'done': 'set right'},
autonomy={'done': Autonomy.Off})
# x:486 y:485
OperatableStateMachine.add('Say_3',
SaraSay(sentence=lambda x: x, input_keys=[], emotion=0, block=True),
transitions={'done': 'finished'},
autonomy={'done': Autonomy.Off})
with _state_machine:
# x:55 y:34
OperatableStateMachine.add('init count',
SetKey(Value=0),
transitions={'done': 'set angle'},
autonomy={'done': Autonomy.Off},
remapping={'Key': 'Count'})
# x:444 y:326
OperatableStateMachine.add('Log Count',
LogKeyState(text="Found: {} objects", severity=Logger.REPORT_HINT),
transitions={'done': 'done'},
autonomy={'done': Autonomy.Off},
remapping={'data': 'Count'})
# x:40 y:183
OperatableStateMachine.add('Move head',
_sm_move_head_0,
transitions={'finished': 'for 1'},
autonomy={'finished': Autonomy.Inherit},
remapping={'className': 'className', 'Count': 'Count'})
# x:419 y:254
OperatableStateMachine.add('Look Center Found',
SaraSetHeadAngle(pitch=-0.4, yaw=0),
transitions={'done': 'Log Count'},
autonomy={'done': Autonomy.Off})
# x:234 y:227
OperatableStateMachine.add('for 1',
ForLoop(repeat=0),
transitions={'do': 'action_turn', 'end': 'Log Count'},
autonomy={'do': Autonomy.Off, 'end': Autonomy.Off},
remapping={'index': 'index'})
# x:38 y:275
OperatableStateMachine.add('action_turn',
self.use_behavior(action_turnSM, 'action_turn'),
transitions={'finished': 'Move head', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'rotation': 'rotation'})
# x:56 y:102
OperatableStateMachine.add('set angle',
SetKey(Value=3.14159),
transitions={'done': 'Move head'},
autonomy={'done': Autonomy.Off},
remapping={'Key': 'rotation'})
# x:417 y:37
OperatableStateMachine.add('store count',
SetRosParam(ParamName="behavior/Count/CountedObjets"),
transitions={'done': 'concat'},
autonomy={'done': Autonomy.Off},
remapping={'Value': 'Count'})
# x:400 y:114
OperatableStateMachine.add('concat',
FlexibleCalculationState(calculation=lambda x: "I counted "+str(x[0])+" "+str(x[1])+".", input_keys=["Count", "className"]),
transitions={'done': 'say_count'},
autonomy={'done': Autonomy.Off},
remapping={'Count': 'Count', 'className': 'className', 'output_value': 'Text'})
# x:419 y:186
OperatableStateMachine.add('say_count',
SaraSay(sentence=lambda x: x, input_keys=[], emotion=1, block=True),
transitions={'done': 'Look Center Found'},
autonomy={'done': Autonomy.Off})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC] | 0.311532 | 0.241808 |
STATS = [
{
"num_node_expansions": 0,
"search_time": 0.0127327,
"total_time": 0.0637178,
"plan_length": 64,
"plan_cost": 64,
"objects_used": 264,
"objects_total": 374,
"neural_net_time": 0.09185624122619629,
"num_replanning_steps": 14,
"wall_time": 3.7494454383850098
},
{
"num_node_expansions": 0,
"search_time": 0.0639766,
"total_time": 0.424378,
"plan_length": 53,
"plan_cost": 53,
"objects_used": 315,
"objects_total": 374,
"neural_net_time": 0.04388070106506348,
"num_replanning_steps": 19,
"wall_time": 14.684161901473999
},
{
"num_node_expansions": 0,
"search_time": 0.0234142,
"total_time": 0.148142,
"plan_length": 49,
"plan_cost": 49,
"objects_used": 260,
"objects_total": 374,
"neural_net_time": 0.0436246395111084,
"num_replanning_steps": 8,
"wall_time": 3.5716323852539062
},
{
"num_node_expansions": 0,
"search_time": 0.0237469,
"total_time": 0.126784,
"plan_length": 59,
"plan_cost": 59,
"objects_used": 229,
"objects_total": 374,
"neural_net_time": 0.04356646537780762,
"num_replanning_steps": 5,
"wall_time": 2.0004935264587402
},
{
"num_node_expansions": 0,
"search_time": 0.0533077,
"total_time": 0.427103,
"plan_length": 53,
"plan_cost": 53,
"objects_used": 315,
"objects_total": 374,
"neural_net_time": 0.04310035705566406,
"num_replanning_steps": 15,
"wall_time": 11.804622888565063
},
{
"num_node_expansions": 0,
"search_time": 0.012068,
"total_time": 0.0532375,
"plan_length": 69,
"plan_cost": 69,
"objects_used": 134,
"objects_total": 212,
"neural_net_time": 0.023810863494873047,
"num_replanning_steps": 4,
"wall_time": 1.4087488651275635
},
{
"num_node_expansions": 0,
"search_time": 0.0196328,
"total_time": 0.116857,
"plan_length": 63,
"plan_cost": 63,
"objects_used": 146,
"objects_total": 212,
"neural_net_time": 0.023676156997680664,
"num_replanning_steps": 4,
"wall_time": 2.050039052963257
},
{
"num_node_expansions": 0,
"search_time": 0.0101841,
"total_time": 0.038334,
"plan_length": 62,
"plan_cost": 62,
"objects_used": 106,
"objects_total": 212,
"neural_net_time": 0.02423882484436035,
"num_replanning_steps": 1,
"wall_time": 0.6713206768035889
},
{
"num_node_expansions": 0,
"search_time": 0.689695,
"total_time": 0.732443,
"plan_length": 103,
"plan_cost": 103,
"objects_used": 133,
"objects_total": 212,
"neural_net_time": 0.023630619049072266,
"num_replanning_steps": 6,
"wall_time": 2.553912401199341
},
{
"num_node_expansions": 0,
"search_time": 0.0103352,
"total_time": 0.0815693,
"plan_length": 53,
"plan_cost": 53,
"objects_used": 128,
"objects_total": 212,
"neural_net_time": 0.02439117431640625,
"num_replanning_steps": 1,
"wall_time": 1.0330884456634521
},
{
"num_node_expansions": 0,
"search_time": 0.0117348,
"total_time": 0.0572425,
"plan_length": 55,
"plan_cost": 55,
"objects_used": 158,
"objects_total": 315,
"neural_net_time": 0.03613710403442383,
"num_replanning_steps": 2,
"wall_time": 1.115511417388916
},
{
"num_node_expansions": 0,
"search_time": 0.580413,
"total_time": 0.607457,
"plan_length": 97,
"plan_cost": 97,
"objects_used": 140,
"objects_total": 315,
"neural_net_time": 0.03647589683532715,
"num_replanning_steps": 2,
"wall_time": 1.4513070583343506
},
{
"num_node_expansions": 0,
"search_time": 0.00837245,
"total_time": 0.0391559,
"plan_length": 58,
"plan_cost": 58,
"objects_used": 133,
"objects_total": 315,
"neural_net_time": 0.03521132469177246,
"num_replanning_steps": 1,
"wall_time": 0.6604447364807129
},
{
"num_node_expansions": 0,
"search_time": 0.0499121,
"total_time": 0.245917,
"plan_length": 84,
"plan_cost": 84,
"objects_used": 213,
"objects_total": 315,
"neural_net_time": 0.036103248596191406,
"num_replanning_steps": 6,
"wall_time": 3.635627031326294
},
{
"num_node_expansions": 0,
"search_time": 0.0277287,
"total_time": 0.0718385,
"plan_length": 102,
"plan_cost": 102,
"objects_used": 171,
"objects_total": 315,
"neural_net_time": 0.036237478256225586,
"num_replanning_steps": 4,
"wall_time": 1.551818609237671
},
{
"num_node_expansions": 0,
"search_time": 0.0844936,
"total_time": 0.238564,
"plan_length": 89,
"plan_cost": 89,
"objects_used": 205,
"objects_total": 300,
"neural_net_time": 0.03444242477416992,
"num_replanning_steps": 6,
"wall_time": 3.9205574989318848
},
{
"num_node_expansions": 0,
"search_time": 0.00977649,
"total_time": 0.0545079,
"plan_length": 61,
"plan_cost": 61,
"objects_used": 186,
"objects_total": 300,
"neural_net_time": 0.03467679023742676,
"num_replanning_steps": 2,
"wall_time": 1.1136159896850586
},
{
"num_node_expansions": 0,
"search_time": 0.0114418,
"total_time": 0.0551799,
"plan_length": 57,
"plan_cost": 57,
"objects_used": 181,
"objects_total": 300,
"neural_net_time": 0.03484320640563965,
"num_replanning_steps": 9,
"wall_time": 2.7118818759918213
},
{
"num_node_expansions": 0,
"search_time": 0.0302738,
"total_time": 0.063326,
"plan_length": 115,
"plan_cost": 115,
"objects_used": 160,
"objects_total": 300,
"neural_net_time": 0.03459954261779785,
"num_replanning_steps": 1,
"wall_time": 0.7651896476745605
},
{
"num_node_expansions": 0,
"search_time": 0.0400562,
"total_time": 0.0797025,
"plan_length": 118,
"plan_cost": 118,
"objects_used": 179,
"objects_total": 300,
"neural_net_time": 0.03562045097351074,
"num_replanning_steps": 2,
"wall_time": 1.0804712772369385
},
{
"num_node_expansions": 0,
"search_time": 0.0422136,
"total_time": 0.0737499,
"plan_length": 106,
"plan_cost": 106,
"objects_used": 111,
"objects_total": 207,
"neural_net_time": 0.024649620056152344,
"num_replanning_steps": 1,
"wall_time": 0.7231485843658447
},
{
"num_node_expansions": 0,
"search_time": 0.589862,
"total_time": 0.616981,
"plan_length": 144,
"plan_cost": 144,
"objects_used": 108,
"objects_total": 207,
"neural_net_time": 0.022114992141723633,
"num_replanning_steps": 1,
"wall_time": 1.2148189544677734
},
{
"num_node_expansions": 0,
"search_time": 0.00757273,
"total_time": 0.0402687,
"plan_length": 77,
"plan_cost": 77,
"objects_used": 143,
"objects_total": 360,
"neural_net_time": 0.041933536529541016,
"num_replanning_steps": 1,
"wall_time": 0.6827218532562256
},
{
"num_node_expansions": 0,
"search_time": 0.0236235,
"total_time": 0.0868747,
"plan_length": 87,
"plan_cost": 87,
"objects_used": 215,
"objects_total": 360,
"neural_net_time": 0.04205775260925293,
"num_replanning_steps": 3,
"wall_time": 1.6703770160675049
},
{
"num_node_expansions": 0,
"search_time": 0.0138744,
"total_time": 0.0579201,
"plan_length": 86,
"plan_cost": 86,
"objects_used": 196,
"objects_total": 360,
"neural_net_time": 0.04246044158935547,
"num_replanning_steps": 3,
"wall_time": 1.3284046649932861
},
{
"num_node_expansions": 0,
"search_time": 0.00703885,
"total_time": 0.0352536,
"plan_length": 55,
"plan_cost": 55,
"objects_used": 147,
"objects_total": 360,
"neural_net_time": 0.04299211502075195,
"num_replanning_steps": 1,
"wall_time": 0.6966879367828369
},
{
"num_node_expansions": 0,
"search_time": 0.0274147,
"total_time": 0.06448,
"plan_length": 53,
"plan_cost": 53,
"objects_used": 153,
"objects_total": 297,
"neural_net_time": 0.035511016845703125,
"num_replanning_steps": 2,
"wall_time": 0.977226972579956
},
{
"num_node_expansions": 0,
"search_time": 0.0151724,
"total_time": 0.055616,
"plan_length": 71,
"plan_cost": 71,
"objects_used": 179,
"objects_total": 297,
"neural_net_time": 0.03351855278015137,
"num_replanning_steps": 4,
"wall_time": 1.496020793914795
},
{
"num_node_expansions": 0,
"search_time": 0.0227,
"total_time": 0.0402083,
"plan_length": 120,
"plan_cost": 120,
"objects_used": 124,
"objects_total": 297,
"neural_net_time": 0.033219337463378906,
"num_replanning_steps": 1,
"wall_time": 0.5845835208892822
},
{
"num_node_expansions": 0,
"search_time": 0.0136751,
"total_time": 0.104908,
"plan_length": 59,
"plan_cost": 59,
"objects_used": 209,
"objects_total": 360,
"neural_net_time": 0.04411458969116211,
"num_replanning_steps": 5,
"wall_time": 2.6313624382019043
},
{
"num_node_expansions": 0,
"search_time": 0.0156716,
"total_time": 0.0544133,
"plan_length": 71,
"plan_cost": 71,
"objects_used": 194,
"objects_total": 360,
"neural_net_time": 0.042566537857055664,
"num_replanning_steps": 3,
"wall_time": 1.2898650169372559
},
{
"num_node_expansions": 0,
"search_time": 0.0424693,
"total_time": 0.0888787,
"plan_length": 70,
"plan_cost": 70,
"objects_used": 202,
"objects_total": 360,
"neural_net_time": 0.04498171806335449,
"num_replanning_steps": 5,
"wall_time": 1.9762516021728516
},
{
"num_node_expansions": 0,
"search_time": 0.0132157,
"total_time": 0.0615721,
"plan_length": 64,
"plan_cost": 64,
"objects_used": 194,
"objects_total": 360,
"neural_net_time": 0.041929006576538086,
"num_replanning_steps": 4,
"wall_time": 1.5290262699127197
},
{
"num_node_expansions": 0,
"search_time": 0.0138784,
"total_time": 0.0427355,
"plan_length": 70,
"plan_cost": 70,
"objects_used": 166,
"objects_total": 360,
"neural_net_time": 0.04323077201843262,
"num_replanning_steps": 2,
"wall_time": 0.9426064491271973
},
{
"num_node_expansions": 0,
"search_time": 0.0117299,
"total_time": 0.0797448,
"plan_length": 66,
"plan_cost": 66,
"objects_used": 216,
"objects_total": 357,
"neural_net_time": 0.0737154483795166,
"num_replanning_steps": 4,
"wall_time": 1.8599905967712402
},
{
"num_node_expansions": 0,
"search_time": 0.0111837,
"total_time": 0.0440161,
"plan_length": 103,
"plan_cost": 103,
"objects_used": 177,
"objects_total": 357,
"neural_net_time": 0.04400944709777832,
"num_replanning_steps": 3,
"wall_time": 1.124758005142212
},
{
"num_node_expansions": 0,
"search_time": 0.00743944,
"total_time": 0.0370168,
"plan_length": 70,
"plan_cost": 70,
"objects_used": 179,
"objects_total": 357,
"neural_net_time": 0.043679237365722656,
"num_replanning_steps": 3,
"wall_time": 1.139833927154541
},
{
"num_node_expansions": 0,
"search_time": 0.0390421,
"total_time": 0.19221,
"plan_length": 69,
"plan_cost": 69,
"objects_used": 210,
"objects_total": 317,
"neural_net_time": 0.03583574295043945,
"num_replanning_steps": 2,
"wall_time": 1.9653725624084473
},
{
"num_node_expansions": 0,
"search_time": 0.0233793,
"total_time": 0.0993436,
"plan_length": 104,
"plan_cost": 104,
"objects_used": 222,
"objects_total": 436,
"neural_net_time": 0.056069374084472656,
"num_replanning_steps": 3,
"wall_time": 1.9668478965759277
},
{
"num_node_expansions": 0,
"search_time": 0.739835,
"total_time": 0.777773,
"plan_length": 114,
"plan_cost": 114,
"objects_used": 202,
"objects_total": 436,
"neural_net_time": 0.07914900779724121,
"num_replanning_steps": 2,
"wall_time": 1.8835902214050293
},
{
"num_node_expansions": 0,
"search_time": 0.0343597,
"total_time": 0.091895,
"plan_length": 126,
"plan_cost": 126,
"objects_used": 231,
"objects_total": 436,
"neural_net_time": 0.05430316925048828,
"num_replanning_steps": 4,
"wall_time": 2.178868532180786
},
{
"num_node_expansions": 0,
"search_time": 0.0228558,
"total_time": 0.233691,
"plan_length": 72,
"plan_cost": 72,
"objects_used": 260,
"objects_total": 436,
"neural_net_time": 0.05440783500671387,
"num_replanning_steps": 4,
"wall_time": 3.665740489959717
},
{
"num_node_expansions": 0,
"search_time": 0.0283988,
"total_time": 0.106641,
"plan_length": 103,
"plan_cost": 103,
"objects_used": 250,
"objects_total": 436,
"neural_net_time": 0.05553555488586426,
"num_replanning_steps": 4,
"wall_time": 2.4536619186401367
},
{
"num_node_expansions": 0,
"search_time": 0.00626628,
"total_time": 0.0364009,
"plan_length": 64,
"plan_cost": 64,
"objects_used": 199,
"objects_total": 412,
"neural_net_time": 0.049780845642089844,
"num_replanning_steps": 3,
"wall_time": 1.2885041236877441
},
{
"num_node_expansions": 0,
"search_time": 0.00982321,
"total_time": 0.0573175,
"plan_length": 79,
"plan_cost": 79,
"objects_used": 206,
"objects_total": 412,
"neural_net_time": 0.06677055358886719,
"num_replanning_steps": 3,
"wall_time": 1.551220417022705
},
{
"num_node_expansions": 0,
"search_time": 0.0255965,
"total_time": 0.372836,
"plan_length": 47,
"plan_cost": 47,
"objects_used": 264,
"objects_total": 412,
"neural_net_time": 0.049848079681396484,
"num_replanning_steps": 8,
"wall_time": 7.183395624160767
},
{
"num_node_expansions": 0,
"search_time": 0.0464217,
"total_time": 0.11506,
"plan_length": 84,
"plan_cost": 84,
"objects_used": 222,
"objects_total": 412,
"neural_net_time": 0.05061841011047363,
"num_replanning_steps": 4,
"wall_time": 2.077465295791626
},
{
"num_node_expansions": 0,
"search_time": 0.0160679,
"total_time": 0.0791312,
"plan_length": 76,
"plan_cost": 76,
"objects_used": 214,
"objects_total": 412,
"neural_net_time": 0.04934334754943848,
"num_replanning_steps": 3,
"wall_time": 1.5904521942138672
},
{
"num_node_expansions": 0,
"search_time": 0.0089066,
"total_time": 0.0379467,
"plan_length": 74,
"plan_cost": 74,
"objects_used": 113,
"objects_total": 227,
"neural_net_time": 0.025095462799072266,
"num_replanning_steps": 3,
"wall_time": 1.0743298530578613
},
{
"num_node_expansions": 0,
"search_time": 0.00439902,
"total_time": 0.0242393,
"plan_length": 50,
"plan_cost": 50,
"objects_used": 96,
"objects_total": 227,
"neural_net_time": 0.026160240173339844,
"num_replanning_steps": 1,
"wall_time": 0.5640134811401367
},
{
"num_node_expansions": 0,
"search_time": 0.00855509,
"total_time": 0.0305782,
"plan_length": 58,
"plan_cost": 58,
"objects_used": 92,
"objects_total": 227,
"neural_net_time": 0.0270841121673584,
"num_replanning_steps": 1,
"wall_time": 0.5972237586975098
},
{
"num_node_expansions": 0,
"search_time": 0.00407841,
"total_time": 0.0155321,
"plan_length": 57,
"plan_cost": 57,
"objects_used": 83,
"objects_total": 227,
"neural_net_time": 0.02671051025390625,
"num_replanning_steps": 1,
"wall_time": 0.49387478828430176
},
{
"num_node_expansions": 0,
"search_time": 0.00905457,
"total_time": 0.0574119,
"plan_length": 57,
"plan_cost": 57,
"objects_used": 125,
"objects_total": 207,
"neural_net_time": 0.02355790138244629,
"num_replanning_steps": 1,
"wall_time": 0.823401927947998
},
{
"num_node_expansions": 0,
"search_time": 0.0117871,
"total_time": 0.0727514,
"plan_length": 80,
"plan_cost": 80,
"objects_used": 127,
"objects_total": 207,
"neural_net_time": 0.0237581729888916,
"num_replanning_steps": 1,
"wall_time": 0.9516494274139404
},
{
"num_node_expansions": 0,
"search_time": 0.00989917,
"total_time": 0.0448104,
"plan_length": 79,
"plan_cost": 79,
"objects_used": 115,
"objects_total": 207,
"neural_net_time": 0.02336907386779785,
"num_replanning_steps": 1,
"wall_time": 0.7254390716552734
},
{
"num_node_expansions": 0,
"search_time": 0.00955956,
"total_time": 0.0553541,
"plan_length": 66,
"plan_cost": 66,
"objects_used": 120,
"objects_total": 207,
"neural_net_time": 0.02523517608642578,
"num_replanning_steps": 1,
"wall_time": 0.8302793502807617
}
] | scenegraph/exp-official/taskographyv4tiny5_hierarchical/hierarchical_test_stats.py | STATS = [
{
"num_node_expansions": 0,
"search_time": 0.0127327,
"total_time": 0.0637178,
"plan_length": 64,
"plan_cost": 64,
"objects_used": 264,
"objects_total": 374,
"neural_net_time": 0.09185624122619629,
"num_replanning_steps": 14,
"wall_time": 3.7494454383850098
},
{
"num_node_expansions": 0,
"search_time": 0.0639766,
"total_time": 0.424378,
"plan_length": 53,
"plan_cost": 53,
"objects_used": 315,
"objects_total": 374,
"neural_net_time": 0.04388070106506348,
"num_replanning_steps": 19,
"wall_time": 14.684161901473999
},
{
"num_node_expansions": 0,
"search_time": 0.0234142,
"total_time": 0.148142,
"plan_length": 49,
"plan_cost": 49,
"objects_used": 260,
"objects_total": 374,
"neural_net_time": 0.0436246395111084,
"num_replanning_steps": 8,
"wall_time": 3.5716323852539062
},
{
"num_node_expansions": 0,
"search_time": 0.0237469,
"total_time": 0.126784,
"plan_length": 59,
"plan_cost": 59,
"objects_used": 229,
"objects_total": 374,
"neural_net_time": 0.04356646537780762,
"num_replanning_steps": 5,
"wall_time": 2.0004935264587402
},
{
"num_node_expansions": 0,
"search_time": 0.0533077,
"total_time": 0.427103,
"plan_length": 53,
"plan_cost": 53,
"objects_used": 315,
"objects_total": 374,
"neural_net_time": 0.04310035705566406,
"num_replanning_steps": 15,
"wall_time": 11.804622888565063
},
{
"num_node_expansions": 0,
"search_time": 0.012068,
"total_time": 0.0532375,
"plan_length": 69,
"plan_cost": 69,
"objects_used": 134,
"objects_total": 212,
"neural_net_time": 0.023810863494873047,
"num_replanning_steps": 4,
"wall_time": 1.4087488651275635
},
{
"num_node_expansions": 0,
"search_time": 0.0196328,
"total_time": 0.116857,
"plan_length": 63,
"plan_cost": 63,
"objects_used": 146,
"objects_total": 212,
"neural_net_time": 0.023676156997680664,
"num_replanning_steps": 4,
"wall_time": 2.050039052963257
},
{
"num_node_expansions": 0,
"search_time": 0.0101841,
"total_time": 0.038334,
"plan_length": 62,
"plan_cost": 62,
"objects_used": 106,
"objects_total": 212,
"neural_net_time": 0.02423882484436035,
"num_replanning_steps": 1,
"wall_time": 0.6713206768035889
},
{
"num_node_expansions": 0,
"search_time": 0.689695,
"total_time": 0.732443,
"plan_length": 103,
"plan_cost": 103,
"objects_used": 133,
"objects_total": 212,
"neural_net_time": 0.023630619049072266,
"num_replanning_steps": 6,
"wall_time": 2.553912401199341
},
{
"num_node_expansions": 0,
"search_time": 0.0103352,
"total_time": 0.0815693,
"plan_length": 53,
"plan_cost": 53,
"objects_used": 128,
"objects_total": 212,
"neural_net_time": 0.02439117431640625,
"num_replanning_steps": 1,
"wall_time": 1.0330884456634521
},
{
"num_node_expansions": 0,
"search_time": 0.0117348,
"total_time": 0.0572425,
"plan_length": 55,
"plan_cost": 55,
"objects_used": 158,
"objects_total": 315,
"neural_net_time": 0.03613710403442383,
"num_replanning_steps": 2,
"wall_time": 1.115511417388916
},
{
"num_node_expansions": 0,
"search_time": 0.580413,
"total_time": 0.607457,
"plan_length": 97,
"plan_cost": 97,
"objects_used": 140,
"objects_total": 315,
"neural_net_time": 0.03647589683532715,
"num_replanning_steps": 2,
"wall_time": 1.4513070583343506
},
{
"num_node_expansions": 0,
"search_time": 0.00837245,
"total_time": 0.0391559,
"plan_length": 58,
"plan_cost": 58,
"objects_used": 133,
"objects_total": 315,
"neural_net_time": 0.03521132469177246,
"num_replanning_steps": 1,
"wall_time": 0.6604447364807129
},
{
"num_node_expansions": 0,
"search_time": 0.0499121,
"total_time": 0.245917,
"plan_length": 84,
"plan_cost": 84,
"objects_used": 213,
"objects_total": 315,
"neural_net_time": 0.036103248596191406,
"num_replanning_steps": 6,
"wall_time": 3.635627031326294
},
{
"num_node_expansions": 0,
"search_time": 0.0277287,
"total_time": 0.0718385,
"plan_length": 102,
"plan_cost": 102,
"objects_used": 171,
"objects_total": 315,
"neural_net_time": 0.036237478256225586,
"num_replanning_steps": 4,
"wall_time": 1.551818609237671
},
{
"num_node_expansions": 0,
"search_time": 0.0844936,
"total_time": 0.238564,
"plan_length": 89,
"plan_cost": 89,
"objects_used": 205,
"objects_total": 300,
"neural_net_time": 0.03444242477416992,
"num_replanning_steps": 6,
"wall_time": 3.9205574989318848
},
{
"num_node_expansions": 0,
"search_time": 0.00977649,
"total_time": 0.0545079,
"plan_length": 61,
"plan_cost": 61,
"objects_used": 186,
"objects_total": 300,
"neural_net_time": 0.03467679023742676,
"num_replanning_steps": 2,
"wall_time": 1.1136159896850586
},
{
"num_node_expansions": 0,
"search_time": 0.0114418,
"total_time": 0.0551799,
"plan_length": 57,
"plan_cost": 57,
"objects_used": 181,
"objects_total": 300,
"neural_net_time": 0.03484320640563965,
"num_replanning_steps": 9,
"wall_time": 2.7118818759918213
},
{
"num_node_expansions": 0,
"search_time": 0.0302738,
"total_time": 0.063326,
"plan_length": 115,
"plan_cost": 115,
"objects_used": 160,
"objects_total": 300,
"neural_net_time": 0.03459954261779785,
"num_replanning_steps": 1,
"wall_time": 0.7651896476745605
},
{
"num_node_expansions": 0,
"search_time": 0.0400562,
"total_time": 0.0797025,
"plan_length": 118,
"plan_cost": 118,
"objects_used": 179,
"objects_total": 300,
"neural_net_time": 0.03562045097351074,
"num_replanning_steps": 2,
"wall_time": 1.0804712772369385
},
{
"num_node_expansions": 0,
"search_time": 0.0422136,
"total_time": 0.0737499,
"plan_length": 106,
"plan_cost": 106,
"objects_used": 111,
"objects_total": 207,
"neural_net_time": 0.024649620056152344,
"num_replanning_steps": 1,
"wall_time": 0.7231485843658447
},
{
"num_node_expansions": 0,
"search_time": 0.589862,
"total_time": 0.616981,
"plan_length": 144,
"plan_cost": 144,
"objects_used": 108,
"objects_total": 207,
"neural_net_time": 0.022114992141723633,
"num_replanning_steps": 1,
"wall_time": 1.2148189544677734
},
{
"num_node_expansions": 0,
"search_time": 0.00757273,
"total_time": 0.0402687,
"plan_length": 77,
"plan_cost": 77,
"objects_used": 143,
"objects_total": 360,
"neural_net_time": 0.041933536529541016,
"num_replanning_steps": 1,
"wall_time": 0.6827218532562256
},
{
"num_node_expansions": 0,
"search_time": 0.0236235,
"total_time": 0.0868747,
"plan_length": 87,
"plan_cost": 87,
"objects_used": 215,
"objects_total": 360,
"neural_net_time": 0.04205775260925293,
"num_replanning_steps": 3,
"wall_time": 1.6703770160675049
},
{
"num_node_expansions": 0,
"search_time": 0.0138744,
"total_time": 0.0579201,
"plan_length": 86,
"plan_cost": 86,
"objects_used": 196,
"objects_total": 360,
"neural_net_time": 0.04246044158935547,
"num_replanning_steps": 3,
"wall_time": 1.3284046649932861
},
{
"num_node_expansions": 0,
"search_time": 0.00703885,
"total_time": 0.0352536,
"plan_length": 55,
"plan_cost": 55,
"objects_used": 147,
"objects_total": 360,
"neural_net_time": 0.04299211502075195,
"num_replanning_steps": 1,
"wall_time": 0.6966879367828369
},
{
"num_node_expansions": 0,
"search_time": 0.0274147,
"total_time": 0.06448,
"plan_length": 53,
"plan_cost": 53,
"objects_used": 153,
"objects_total": 297,
"neural_net_time": 0.035511016845703125,
"num_replanning_steps": 2,
"wall_time": 0.977226972579956
},
{
"num_node_expansions": 0,
"search_time": 0.0151724,
"total_time": 0.055616,
"plan_length": 71,
"plan_cost": 71,
"objects_used": 179,
"objects_total": 297,
"neural_net_time": 0.03351855278015137,
"num_replanning_steps": 4,
"wall_time": 1.496020793914795
},
{
"num_node_expansions": 0,
"search_time": 0.0227,
"total_time": 0.0402083,
"plan_length": 120,
"plan_cost": 120,
"objects_used": 124,
"objects_total": 297,
"neural_net_time": 0.033219337463378906,
"num_replanning_steps": 1,
"wall_time": 0.5845835208892822
},
{
"num_node_expansions": 0,
"search_time": 0.0136751,
"total_time": 0.104908,
"plan_length": 59,
"plan_cost": 59,
"objects_used": 209,
"objects_total": 360,
"neural_net_time": 0.04411458969116211,
"num_replanning_steps": 5,
"wall_time": 2.6313624382019043
},
{
"num_node_expansions": 0,
"search_time": 0.0156716,
"total_time": 0.0544133,
"plan_length": 71,
"plan_cost": 71,
"objects_used": 194,
"objects_total": 360,
"neural_net_time": 0.042566537857055664,
"num_replanning_steps": 3,
"wall_time": 1.2898650169372559
},
{
"num_node_expansions": 0,
"search_time": 0.0424693,
"total_time": 0.0888787,
"plan_length": 70,
"plan_cost": 70,
"objects_used": 202,
"objects_total": 360,
"neural_net_time": 0.04498171806335449,
"num_replanning_steps": 5,
"wall_time": 1.9762516021728516
},
{
"num_node_expansions": 0,
"search_time": 0.0132157,
"total_time": 0.0615721,
"plan_length": 64,
"plan_cost": 64,
"objects_used": 194,
"objects_total": 360,
"neural_net_time": 0.041929006576538086,
"num_replanning_steps": 4,
"wall_time": 1.5290262699127197
},
{
"num_node_expansions": 0,
"search_time": 0.0138784,
"total_time": 0.0427355,
"plan_length": 70,
"plan_cost": 70,
"objects_used": 166,
"objects_total": 360,
"neural_net_time": 0.04323077201843262,
"num_replanning_steps": 2,
"wall_time": 0.9426064491271973
},
{
"num_node_expansions": 0,
"search_time": 0.0117299,
"total_time": 0.0797448,
"plan_length": 66,
"plan_cost": 66,
"objects_used": 216,
"objects_total": 357,
"neural_net_time": 0.0737154483795166,
"num_replanning_steps": 4,
"wall_time": 1.8599905967712402
},
{
"num_node_expansions": 0,
"search_time": 0.0111837,
"total_time": 0.0440161,
"plan_length": 103,
"plan_cost": 103,
"objects_used": 177,
"objects_total": 357,
"neural_net_time": 0.04400944709777832,
"num_replanning_steps": 3,
"wall_time": 1.124758005142212
},
{
"num_node_expansions": 0,
"search_time": 0.00743944,
"total_time": 0.0370168,
"plan_length": 70,
"plan_cost": 70,
"objects_used": 179,
"objects_total": 357,
"neural_net_time": 0.043679237365722656,
"num_replanning_steps": 3,
"wall_time": 1.139833927154541
},
{
"num_node_expansions": 0,
"search_time": 0.0390421,
"total_time": 0.19221,
"plan_length": 69,
"plan_cost": 69,
"objects_used": 210,
"objects_total": 317,
"neural_net_time": 0.03583574295043945,
"num_replanning_steps": 2,
"wall_time": 1.9653725624084473
},
{
"num_node_expansions": 0,
"search_time": 0.0233793,
"total_time": 0.0993436,
"plan_length": 104,
"plan_cost": 104,
"objects_used": 222,
"objects_total": 436,
"neural_net_time": 0.056069374084472656,
"num_replanning_steps": 3,
"wall_time": 1.9668478965759277
},
{
"num_node_expansions": 0,
"search_time": 0.739835,
"total_time": 0.777773,
"plan_length": 114,
"plan_cost": 114,
"objects_used": 202,
"objects_total": 436,
"neural_net_time": 0.07914900779724121,
"num_replanning_steps": 2,
"wall_time": 1.8835902214050293
},
{
"num_node_expansions": 0,
"search_time": 0.0343597,
"total_time": 0.091895,
"plan_length": 126,
"plan_cost": 126,
"objects_used": 231,
"objects_total": 436,
"neural_net_time": 0.05430316925048828,
"num_replanning_steps": 4,
"wall_time": 2.178868532180786
},
{
"num_node_expansions": 0,
"search_time": 0.0228558,
"total_time": 0.233691,
"plan_length": 72,
"plan_cost": 72,
"objects_used": 260,
"objects_total": 436,
"neural_net_time": 0.05440783500671387,
"num_replanning_steps": 4,
"wall_time": 3.665740489959717
},
{
"num_node_expansions": 0,
"search_time": 0.0283988,
"total_time": 0.106641,
"plan_length": 103,
"plan_cost": 103,
"objects_used": 250,
"objects_total": 436,
"neural_net_time": 0.05553555488586426,
"num_replanning_steps": 4,
"wall_time": 2.4536619186401367
},
{
"num_node_expansions": 0,
"search_time": 0.00626628,
"total_time": 0.0364009,
"plan_length": 64,
"plan_cost": 64,
"objects_used": 199,
"objects_total": 412,
"neural_net_time": 0.049780845642089844,
"num_replanning_steps": 3,
"wall_time": 1.2885041236877441
},
{
"num_node_expansions": 0,
"search_time": 0.00982321,
"total_time": 0.0573175,
"plan_length": 79,
"plan_cost": 79,
"objects_used": 206,
"objects_total": 412,
"neural_net_time": 0.06677055358886719,
"num_replanning_steps": 3,
"wall_time": 1.551220417022705
},
{
"num_node_expansions": 0,
"search_time": 0.0255965,
"total_time": 0.372836,
"plan_length": 47,
"plan_cost": 47,
"objects_used": 264,
"objects_total": 412,
"neural_net_time": 0.049848079681396484,
"num_replanning_steps": 8,
"wall_time": 7.183395624160767
},
{
"num_node_expansions": 0,
"search_time": 0.0464217,
"total_time": 0.11506,
"plan_length": 84,
"plan_cost": 84,
"objects_used": 222,
"objects_total": 412,
"neural_net_time": 0.05061841011047363,
"num_replanning_steps": 4,
"wall_time": 2.077465295791626
},
{
"num_node_expansions": 0,
"search_time": 0.0160679,
"total_time": 0.0791312,
"plan_length": 76,
"plan_cost": 76,
"objects_used": 214,
"objects_total": 412,
"neural_net_time": 0.04934334754943848,
"num_replanning_steps": 3,
"wall_time": 1.5904521942138672
},
{
"num_node_expansions": 0,
"search_time": 0.0089066,
"total_time": 0.0379467,
"plan_length": 74,
"plan_cost": 74,
"objects_used": 113,
"objects_total": 227,
"neural_net_time": 0.025095462799072266,
"num_replanning_steps": 3,
"wall_time": 1.0743298530578613
},
{
"num_node_expansions": 0,
"search_time": 0.00439902,
"total_time": 0.0242393,
"plan_length": 50,
"plan_cost": 50,
"objects_used": 96,
"objects_total": 227,
"neural_net_time": 0.026160240173339844,
"num_replanning_steps": 1,
"wall_time": 0.5640134811401367
},
{
"num_node_expansions": 0,
"search_time": 0.00855509,
"total_time": 0.0305782,
"plan_length": 58,
"plan_cost": 58,
"objects_used": 92,
"objects_total": 227,
"neural_net_time": 0.0270841121673584,
"num_replanning_steps": 1,
"wall_time": 0.5972237586975098
},
{
"num_node_expansions": 0,
"search_time": 0.00407841,
"total_time": 0.0155321,
"plan_length": 57,
"plan_cost": 57,
"objects_used": 83,
"objects_total": 227,
"neural_net_time": 0.02671051025390625,
"num_replanning_steps": 1,
"wall_time": 0.49387478828430176
},
{
"num_node_expansions": 0,
"search_time": 0.00905457,
"total_time": 0.0574119,
"plan_length": 57,
"plan_cost": 57,
"objects_used": 125,
"objects_total": 207,
"neural_net_time": 0.02355790138244629,
"num_replanning_steps": 1,
"wall_time": 0.823401927947998
},
{
"num_node_expansions": 0,
"search_time": 0.0117871,
"total_time": 0.0727514,
"plan_length": 80,
"plan_cost": 80,
"objects_used": 127,
"objects_total": 207,
"neural_net_time": 0.0237581729888916,
"num_replanning_steps": 1,
"wall_time": 0.9516494274139404
},
{
"num_node_expansions": 0,
"search_time": 0.00989917,
"total_time": 0.0448104,
"plan_length": 79,
"plan_cost": 79,
"objects_used": 115,
"objects_total": 207,
"neural_net_time": 0.02336907386779785,
"num_replanning_steps": 1,
"wall_time": 0.7254390716552734
},
{
"num_node_expansions": 0,
"search_time": 0.00955956,
"total_time": 0.0553541,
"plan_length": 66,
"plan_cost": 66,
"objects_used": 120,
"objects_total": 207,
"neural_net_time": 0.02523517608642578,
"num_replanning_steps": 1,
"wall_time": 0.8302793502807617
}
] | 0.276886 | 0.516535 |
import os
import numpy as np
import pandas as pd
from keras.optimizers import Adam
class WordEmbedder:
"""
WordEmbedder is a helper class for every embedding algorithms. It
does extract all possible words, adjacency matrix, corpus from
the given sequences. It is parent class of SkipGram, Freq2Vec, GensimWord2Vec.
Parameters
----------
sequences : numpy ndarray, list, or DataFrame
sequences of data like protein sequences
word_length : integer
The length of each word in sequences to be separated from each other.
window_size: integer
Size of window for counting the number of neighbors.
emb_dim: integer
Number of embedding vector dimensions.
loss: basestring
The loss function is going to be used on training phase.
epochs: integer
Number of epochs for training the embedding.
See also
--------
SkipGram : Skipgram Embedding
Freq2Vec : Freq2Vec Embedding
GensimWord2Vec : Word2Vec Embedding
Sent2Vec : Sent2Vec Embedding
"""
def __init__(self, sequences, word_length, window_size, emb_dim, loss, epochs):
self.sequences = sequences
self.word_length = word_length
self.window_size = window_size
self.emb_dim = emb_dim
self.loss = loss
self.optimizer = Adam(lr=0.1, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
self.epochs = epochs
self.adj_matrix = None
self.corpus = []
self.vocab = set()
self.vocabulary = pd.Series()
self.sentences = []
self.embedding_layer = None
self.__corpus_maker()
self.__adj_matrix_maker()
def __seq_splitter(self, seq):
words = list(map(lambda x: seq[x:(x + self.word_length)], range(0, (len(seq) - self.word_length + 1))))
self.vocab |= set(words)
list(map(lambda s: self.corpus.append(words[s::self.word_length]), range(self.word_length)))
self.sentences.append(words)
def __freq_calc(self):
def adder(idx):
self.frequency[idx] += 1
self.frequency = dict.fromkeys(range(len(self.vocab)), 0)
list(map(lambda sent: list(map(lambda word: adder(word), sent)), self.sentences))
os.makedirs("./aux/", exist_ok=True)
with open('./aux/' + self.embedding + "_" + str(self.word_length) + '_vocab.txt', 'w') as out:
out.write(",".join(self.vocab))
self.frequency = {k: v / total for total in (sum(self.frequency.values()),) for k, v in self.frequency.items()}
self.frequency = self.frequency.values()
def __corpus_maker(self):
list(map(lambda seq: self.__seq_splitter(seq), self.sequences))
self.input = self.sentences
self.vocab = dict(list(enumerate(self.vocab)))
self.vocab_aux = self.vocab
self.vocab_indices = list(k for k, v in self.vocab.items())
self.vocab = dict((v, k) for k, v in self.vocab.items())
self.corpus = list(map(lambda x: list(map(lambda y: self.vocab.get(y, -1), x)), self.corpus))
self.sentences = list(map(lambda x: list(map(lambda y: self.vocab.get(y, -1), x)), self.sentences))
self.__freq_calc()
def __neighbor_counter(self, idx, word_list):
def __adder(idx1, idx2):
self.adj_matrix[idx1, idx2] += 1
s = idx - self.window_size
e = idx + self.window_size + 1
rng = range(max(s, 0), min(e, (len(word_list) - 1)))
word = word_list[idx]
list(map(lambda i: __adder(word, word_list[i]), rng))
def __adj_matrix_maker(self):
self.adj_matrix = pd.read_csv("../data/20amineMat", header=None, delimiter= "\t").values
# self.adj_matrix = np.zeros(((len(self.vocab)), (len(self.vocab))))
# list(map(lambda words: list(map(lambda idx: self.__neighbor_counter(idx, words), range(len(words)))),
# self.corpus))
# np.fill_diagonal(self.adj_matrix, 0)
# self.adj_matrix = (self.adj_matrix.T / self.adj_matrix.sum(axis=1)).T
# self.adj_matrix = np.nan_to_num(self.adj_matrix) | seqlearner/WordEmbedder.py | import os
import numpy as np
import pandas as pd
from keras.optimizers import Adam
class WordEmbedder:
"""
WordEmbedder is a helper class for every embedding algorithms. It
does extract all possible words, adjacency matrix, corpus from
the given sequences. It is parent class of SkipGram, Freq2Vec, GensimWord2Vec.
Parameters
----------
sequences : numpy ndarray, list, or DataFrame
sequences of data like protein sequences
word_length : integer
The length of each word in sequences to be separated from each other.
window_size: integer
Size of window for counting the number of neighbors.
emb_dim: integer
Number of embedding vector dimensions.
loss: basestring
The loss function is going to be used on training phase.
epochs: integer
Number of epochs for training the embedding.
See also
--------
SkipGram : Skipgram Embedding
Freq2Vec : Freq2Vec Embedding
GensimWord2Vec : Word2Vec Embedding
Sent2Vec : Sent2Vec Embedding
"""
def __init__(self, sequences, word_length, window_size, emb_dim, loss, epochs):
self.sequences = sequences
self.word_length = word_length
self.window_size = window_size
self.emb_dim = emb_dim
self.loss = loss
self.optimizer = Adam(lr=0.1, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
self.epochs = epochs
self.adj_matrix = None
self.corpus = []
self.vocab = set()
self.vocabulary = pd.Series()
self.sentences = []
self.embedding_layer = None
self.__corpus_maker()
self.__adj_matrix_maker()
def __seq_splitter(self, seq):
words = list(map(lambda x: seq[x:(x + self.word_length)], range(0, (len(seq) - self.word_length + 1))))
self.vocab |= set(words)
list(map(lambda s: self.corpus.append(words[s::self.word_length]), range(self.word_length)))
self.sentences.append(words)
def __freq_calc(self):
def adder(idx):
self.frequency[idx] += 1
self.frequency = dict.fromkeys(range(len(self.vocab)), 0)
list(map(lambda sent: list(map(lambda word: adder(word), sent)), self.sentences))
os.makedirs("./aux/", exist_ok=True)
with open('./aux/' + self.embedding + "_" + str(self.word_length) + '_vocab.txt', 'w') as out:
out.write(",".join(self.vocab))
self.frequency = {k: v / total for total in (sum(self.frequency.values()),) for k, v in self.frequency.items()}
self.frequency = self.frequency.values()
def __corpus_maker(self):
list(map(lambda seq: self.__seq_splitter(seq), self.sequences))
self.input = self.sentences
self.vocab = dict(list(enumerate(self.vocab)))
self.vocab_aux = self.vocab
self.vocab_indices = list(k for k, v in self.vocab.items())
self.vocab = dict((v, k) for k, v in self.vocab.items())
self.corpus = list(map(lambda x: list(map(lambda y: self.vocab.get(y, -1), x)), self.corpus))
self.sentences = list(map(lambda x: list(map(lambda y: self.vocab.get(y, -1), x)), self.sentences))
self.__freq_calc()
def __neighbor_counter(self, idx, word_list):
def __adder(idx1, idx2):
self.adj_matrix[idx1, idx2] += 1
s = idx - self.window_size
e = idx + self.window_size + 1
rng = range(max(s, 0), min(e, (len(word_list) - 1)))
word = word_list[idx]
list(map(lambda i: __adder(word, word_list[i]), rng))
def __adj_matrix_maker(self):
self.adj_matrix = pd.read_csv("../data/20amineMat", header=None, delimiter= "\t").values
# self.adj_matrix = np.zeros(((len(self.vocab)), (len(self.vocab))))
# list(map(lambda words: list(map(lambda idx: self.__neighbor_counter(idx, words), range(len(words)))),
# self.corpus))
# np.fill_diagonal(self.adj_matrix, 0)
# self.adj_matrix = (self.adj_matrix.T / self.adj_matrix.sum(axis=1)).T
# self.adj_matrix = np.nan_to_num(self.adj_matrix) | 0.769167 | 0.501343 |
import time
import numpy as np
from tensorflow.keras import Input, layers
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
from tensorflow.keras.utils import plot_model
from implementation import read_mat
from preprocess import prepro
file_dict = read_mat('../data/12k/0HP')
# train_data, test_data = train_test_split(file_dict)
# train_x, train_y = one_hot_label(train_data)
# test_x, test_y = one_hot_label(test_data)
train_x, train_y, valid_x, valid_y, test_x, test_y = prepro(d_path='../data/48k/0HP',
length=1024,
number=1000,
normal=False,
rate=[0.5, 0.25, 0.25],
enc=False)
train_x = np.expand_dims(train_x, -1)
valid_x = np.expand_dims(valid_x, -1)
test_x = np.expand_dims(test_x, -1)
input_size = train_x.shape[1:]
output_size = train_y.shape[-1]
model = Sequential([
Input(shape=input_size),
layers.AveragePooling1D(pool_size=2, strides=2),
layers.Conv1D(filters=8, kernel_size=3, strides=1, kernel_regularizer=l2(1e-4)),
layers.AveragePooling1D(pool_size=2, strides=2),
layers.Conv1D(filters=16, kernel_size=3, strides=1, kernel_regularizer=l2(1e-4)),
layers.Flatten(),
layers.Dense(units=400, activation='relu'),
layers.Dense(units=output_size, activation='softmax', kernel_regularizer=l2(1e-4)),
])
model.summary()
opt = Adam(learning_rate=0.05)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
localtime = time.strftime("%Y%m%d_%H%M", time.localtime())
tb_cb = TensorBoard(log_dir=rf'logs\without_stft_cnn_fault_diagnosis-{localtime}')
model.fit(x=train_x, y=train_y, batch_size=128, epochs=30,
validation_data=(valid_x, valid_y),
verbose=1, shuffle=True, callbacks=[tb_cb])
score = model.evaluate(x=test_x, y=test_y)
print("测试集上的损失率:", score[0])
print("测试集上的准确率:", score[1])
plot_model(model=model, to_file='images/without_stft_cnn_fault_diagnosis.png', show_shapes=True) | stft-cnn-fault-diagnosis/without_stft.py | import time
import numpy as np
from tensorflow.keras import Input, layers
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
from tensorflow.keras.utils import plot_model
from implementation import read_mat
from preprocess import prepro
file_dict = read_mat('../data/12k/0HP')
# train_data, test_data = train_test_split(file_dict)
# train_x, train_y = one_hot_label(train_data)
# test_x, test_y = one_hot_label(test_data)
train_x, train_y, valid_x, valid_y, test_x, test_y = prepro(d_path='../data/48k/0HP',
length=1024,
number=1000,
normal=False,
rate=[0.5, 0.25, 0.25],
enc=False)
train_x = np.expand_dims(train_x, -1)
valid_x = np.expand_dims(valid_x, -1)
test_x = np.expand_dims(test_x, -1)
input_size = train_x.shape[1:]
output_size = train_y.shape[-1]
model = Sequential([
Input(shape=input_size),
layers.AveragePooling1D(pool_size=2, strides=2),
layers.Conv1D(filters=8, kernel_size=3, strides=1, kernel_regularizer=l2(1e-4)),
layers.AveragePooling1D(pool_size=2, strides=2),
layers.Conv1D(filters=16, kernel_size=3, strides=1, kernel_regularizer=l2(1e-4)),
layers.Flatten(),
layers.Dense(units=400, activation='relu'),
layers.Dense(units=output_size, activation='softmax', kernel_regularizer=l2(1e-4)),
])
model.summary()
opt = Adam(learning_rate=0.05)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
localtime = time.strftime("%Y%m%d_%H%M", time.localtime())
tb_cb = TensorBoard(log_dir=rf'logs\without_stft_cnn_fault_diagnosis-{localtime}')
model.fit(x=train_x, y=train_y, batch_size=128, epochs=30,
validation_data=(valid_x, valid_y),
verbose=1, shuffle=True, callbacks=[tb_cb])
score = model.evaluate(x=test_x, y=test_y)
print("测试集上的损失率:", score[0])
print("测试集上的准确率:", score[1])
plot_model(model=model, to_file='images/without_stft_cnn_fault_diagnosis.png', show_shapes=True) | 0.648244 | 0.342049 |
import math
from jsonargparse import ArgumentParser, ActionParser
import torch
from .attack_factory import AttackFactory as AF
class RandomAttackFactory(object):
    """Factory that builds adversarial attacks with randomly sampled parameters.

    Each call to :meth:`sample_attack` draws one of ``attack_types`` at random
    and samples its hyper-parameters from the configured ranges (log-uniform
    for scale-like quantities such as eps/alpha, uniform otherwise), then
    delegates construction to ``AttackFactory``.
    """

    def __init__(
        self,
        attack_types,
        min_eps=1e-5,
        max_eps=0.1,
        min_snr=30,
        max_snr=60,
        min_alpha=1e-5,
        max_alpha=0.02,
        norms=None,
        random_eps=False,
        min_num_random_init=0,
        max_num_random_init=3,
        min_confidence=0,
        max_confidence=1,
        min_lr=1e-3,
        max_lr=1e-2,
        min_binary_search_steps=9,
        max_binary_search_steps=9,
        min_iter=5,
        max_iter=10,
        abort_early=True,
        min_c=1e-3,
        max_c=1e-2,
        reduce_c=False,
        c_incr_factor=2,
        tau_decr_factor=0.9,
        indep_channels=False,
        norm_time=False,
        time_dim=None,
        use_snr=False,
        loss=None,
        targeted=False,
        range_min=None,
        range_max=None,
        eps_scale=1,
    ):
        """
        Args:
          attack_types: attack names to sample from (e.g. ["fgsm", "pgd"]).
          min_eps/max_eps: log-uniform range for the perturbation-norm bound.
          min_snr/max_snr: range for the SNR upper bound of the perturbed signal.
          min_alpha/max_alpha: log-uniform range for the iter/rand-FGSM step.
          norms: candidate perturbation norms; defaults to [inf].
            (Fixed: was a shared mutable default argument ``[float("inf")]``.)
          random_eps: use a random epsilon in the PGD attack.
          min_num_random_init/max_num_random_init: range for the number of
            random restarts in PGD.
          min_confidence/max_confidence: range for Carlini-Wagner confidence.
          min_lr/max_lr: range for the attack optimizer learning rate.
          min_binary_search_steps/max_binary_search_steps: range for the
            number of binary-search steps in Carlini-Wagner-L2.
          min_iter/max_iter: range for the maximum optimizer iterations.
          abort_early: stop the optimization early when progress stalls.
          min_c/max_c: range for the initial Carlini-Wagner constraint weight.
          reduce_c: allow c to be reduced in CW-L0/Linf.
          c_incr_factor: factor used to increase c in CW-L0/Linf.
          tau_decr_factor: factor used to reduce tau in CW-Linf.
          indep_channels: treat input channels independently in CW-L0.
          norm_time: normalize the norm by the number of time samples.
          time_dim: index of the time dimension, if any.
          use_snr: maximize SNR instead of minimizing the perturbation norm.
          loss: loss function passed through to the attack.
          targeted: build targeted attacks.
          range_min/range_max: valid range of the input signal.
          eps_scale: scale applied to eps.
        """
        self.attack_types = attack_types
        self.min_eps = min_eps
        self.max_eps = max_eps
        self.min_snr = min_snr
        self.max_snr = max_snr
        self.min_alpha = min_alpha
        self.max_alpha = max_alpha
        # Build the default per instance instead of sharing a mutable default.
        self.norms = [float("inf")] if norms is None else norms
        self.random_eps = random_eps
        self.min_num_random_init = min_num_random_init
        self.max_num_random_init = max_num_random_init
        self.min_confidence = min_confidence
        self.max_confidence = max_confidence
        self.min_lr = min_lr
        self.max_lr = max_lr
        self.min_binary_search_steps = min_binary_search_steps
        self.max_binary_search_steps = max_binary_search_steps
        self.abort_early = abort_early
        self.min_iter = min_iter
        self.max_iter = max_iter
        self.min_c = min_c
        self.max_c = max_c
        self.reduce_c = reduce_c
        self.c_incr_factor = c_incr_factor
        self.tau_decr_factor = tau_decr_factor
        self.indep_channels = indep_channels
        self.norm_time = norm_time
        self.time_dim = time_dim
        self.use_snr = use_snr
        self.loss = loss
        self.targeted = targeted
        self.range_min = range_min
        self.range_max = range_max
        self.eps_scale = eps_scale

    @staticmethod
    def _choice(n):
        """Return a random integer in [0, n)."""
        return torch.randint(low=0, high=n, size=(1,)).item()

    @staticmethod
    def _randint(min_val, max_val):
        """Return a random integer in [min_val, max_val] (inclusive)."""
        return torch.randint(low=min_val, high=max_val + 1, size=(1,)).item()

    @staticmethod
    def _uniform(min_val, max_val):
        """Return a float drawn uniformly from [min_val, max_val)."""
        return (max_val - min_val) * torch.rand(size=(1,)).item() + min_val

    @staticmethod
    def _log_uniform(min_val, max_val):
        """Return a float drawn log-uniformly from [min_val, max_val)."""
        log_x = (math.log(max_val) - math.log(min_val)) * torch.rand(
            size=(1,)
        ).item() + math.log(min_val)
        return math.exp(log_x)

    def _sample_attack_args(self):
        """Draw a random attack type and a complete set of attack arguments."""
        attack_args = {}
        attack_idx = self._choice(len(self.attack_types))
        attack_args["attack_type"] = self.attack_types[attack_idx]
        eps = self._log_uniform(self.min_eps, self.max_eps)
        attack_args["eps"] = eps
        # Clamp the alpha range to eps so the step never exceeds the bound.
        attack_args["alpha"] = self._log_uniform(
            min(eps, self.min_alpha), min(eps, self.max_alpha)
        )
        attack_args["norm"] = self.norms[self._choice(len(self.norms))]
        attack_args["random_eps"] = self.random_eps
        attack_args["num_random_init"] = self._randint(
            self.min_num_random_init, self.max_num_random_init
        )
        attack_args["confidence"] = self._uniform(
            self.min_confidence, self.max_confidence
        )
        attack_args["lr"] = self._uniform(self.min_lr, self.max_lr)
        attack_args["binary_search_steps"] = self._randint(
            self.min_binary_search_steps, self.max_binary_search_steps
        )
        attack_args["max_iter"] = self._randint(self.min_iter, self.max_iter)
        attack_args["abort_early"] = self.abort_early
        attack_args["c"] = self._uniform(self.min_c, self.max_c)
        attack_args["reduce_c"] = self.reduce_c
        attack_args["c_incr_factor"] = self.c_incr_factor
        attack_args["tau_decr_factor"] = self.tau_decr_factor
        attack_args["indep_channels"] = self.indep_channels
        attack_args["norm_time"] = self.norm_time
        attack_args["time_dim"] = self.time_dim
        attack_args["use_snr"] = self.use_snr
        attack_args["targeted"] = self.targeted
        attack_args["range_min"] = self.range_min
        attack_args["range_max"] = self.range_max
        attack_args["eps_scale"] = self.eps_scale
        attack_args["loss"] = self.loss
        return attack_args

    def sample_attack(self, model=None):
        """Sample random attack arguments and build the attack for ``model``."""
        attack_args = self._sample_attack_args()
        attack_args["model"] = model
        return AF.create(**attack_args)

    @staticmethod
    def filter_args(**kwargs):
        """Return the subset of ``kwargs`` understood by this factory.

        Translates the command-line ``no_abort`` flag into ``abort_early``
        and casts norm strings (e.g. "inf") to float.
        """
        if "no_abort" in kwargs:
            kwargs["abort_early"] = not kwargs["no_abort"]
        if "norms" in kwargs:
            kwargs["norms"] = [float(a) for a in kwargs["norms"]]
        valid_args = (
            "attack_types",
            "min_eps",
            "max_eps",
            "min_snr",
            "max_snr",
            "norms",
            "random_eps",
            "min_num_random_init",
            "max_num_random_init",
            "min_alpha",
            "max_alpha",
            "min_confidence",
            "max_confidence",
            "min_lr",
            "max_lr",
            "min_binary_search_steps",
            "max_binary_search_steps",
            "min_iter",
            "max_iter",
            "abort_early",
            "min_c",
            "max_c",
            "reduce_c",
            "c_incr_factor",
            "tau_decr_factor",
            "indep_channels",
            "use_snr",
            "norm_time",
            "targeted",
        )
        args = dict((k, kwargs[k]) for k in valid_args if k in kwargs)
        return args

    @staticmethod
    def add_class_args(parser, prefix=None):
        """Add this factory's options to a jsonargparse parser.

        If ``prefix`` is given, the options are nested under ``--<prefix>``.

        NOTE(review): some parser defaults (--min-num-random-init=1,
        --max-num-random-init=5, --tau-decr-factor=0.75) differ from the
        ``__init__`` defaults (0, 3, 0.9) — confirm which values are intended.
        """
        if prefix is not None:
            outer_parser = parser
            parser = ArgumentParser(prog="")
        parser.add_argument(
            "--attack-types",
            type=str.lower,
            default=["fgsm"],
            nargs="+",
            choices=[
                "fgsm",
                "snr-fgsm",
                "rand-fgsm",
                "iter-fgsm",
                "cw-l0",
                "cw-l2",
                "cw-linf",
                "pgd",
            ],
            help=("Attack types"),
        )
        parser.add_argument(
            "--norms",
            type=float,
            default=[float("inf")],
            nargs="+",
            choices=[float("inf"), 1, 2],
            help=("Attack perturbation norms"),
        )
        parser.add_argument(
            "--min-eps",
            default=1e-5,
            type=float,
            help=("attack min epsilon, upper bound for the perturbation norm"),
        )
        parser.add_argument(
            "--max-eps",
            default=0.1,
            type=float,
            help=("attack max epsilon, upper bound for the perturbation norm"),
        )
        parser.add_argument(
            "--min-snr",
            default=30,
            type=float,
            help=(
                "min upper bound for the signal-to-noise ratio of the "
                "perturbed signal"
            ),
        )
        parser.add_argument(
            "--max-snr",
            default=60,
            type=float,
            help=(
                "max upper bound for the signal-to-noise ratio of the "
                "perturbed signal"
            ),
        )
        parser.add_argument(
            "--min-alpha",
            default=1e-5,
            type=float,
            help=("min alpha for iter and rand fgsm attack"),
        )
        parser.add_argument(
            "--max-alpha",
            default=0.02,
            type=float,
            help=("max alpha for iter and rand fgsm attack"),
        )
        parser.add_argument(
            "--random-eps",
            default=False,
            action="store_true",
            help=("use random epsilon in PGD attack"),
        )
        parser.add_argument(
            "--min-confidence",
            default=0,
            type=float,
            help=("min confidence for carlini-wagner attack"),
        )
        parser.add_argument(
            "--max-confidence",
            default=1,
            type=float,
            help=("max confidence for carlini-wagner attack"),
        )
        parser.add_argument(
            "--min-lr",
            default=1e-3,
            type=float,
            help=("min learning rate for attack optimizers"),
        )
        parser.add_argument(
            "--max-lr",
            default=1e-2,
            type=float,
            help=("max learning rate for attack optimizers"),
        )
        parser.add_argument(
            "--min-binary-search-steps",
            default=9,
            type=int,
            help=("min num bin. search steps in carlini-wagner-l2 attack"),
        )
        parser.add_argument(
            "--max-binary-search-steps",
            default=9,
            type=int,
            help=("max num bin. search steps in carlini-wagner-l2 attack"),
        )
        parser.add_argument(
            "--min-iter",
            default=5,
            type=int,
            help=("min maximum. num. of optim iters in attack"),
        )
        parser.add_argument(
            "--max-iter",
            default=10,
            type=int,
            help=("max maximum num. of optim iters in attack"),
        )
        parser.add_argument(
            "--min-c",
            default=1e-3,
            type=float,
            help=(
                "min initial weight of constraint function f "
                "in carlini-wagner attack"
            ),
        )
        parser.add_argument(
            "--max-c",
            default=1e-2,
            type=float,
            help=(
                "max initial weight of constraint function f "
                "in carlini-wagner attack"
            ),
        )
        parser.add_argument(
            "--reduce-c",
            default=False,
            action="store_true",
            help=("allow to reduce c in carline-wagner-l0/inf attack"),
        )
        parser.add_argument(
            "--c-incr-factor",
            default=2,
            type=float,
            help=("factor to increment c in carline-wagner-l0/inf attack"),
        )
        parser.add_argument(
            "--tau-decr-factor",
            default=0.75,
            type=float,
            help=("factor to reduce tau in carline-wagner-linf attack"),
        )
        parser.add_argument(
            "--indep-channels",
            default=False,
            action="store_true",
            help=("consider independent input channels in " "carlini-wagner-l0 attack"),
        )
        parser.add_argument(
            "--no-abort",
            default=False,
            action="store_true",
            help=("do not abort early in optimizer iterations"),
        )
        parser.add_argument(
            "--min-num-random-init",
            default=1,
            type=int,
            help=("min number of random initializations in PGD attack"),
        )
        parser.add_argument(
            "--max-num-random-init",
            default=5,
            type=int,
            help=("max number of random initializations in PGD attack"),
        )
        parser.add_argument(
            "--targeted",
            default=False,
            action="store_true",
            help="use targeted attack intead of non-targeted",
        )
        parser.add_argument(
            "--use-snr",
            default=False,
            action="store_true",
            help=(
                "In carlini-wagner attack maximize SNR instead of "
                "minimize perturbation norm"
            ),
        )
        parser.add_argument(
            "--norm-time",
            default=False,
            action="store_true",
            help=("normalize norm by number of samples in time dimension"),
        )
        if prefix is not None:
            outer_parser.add_argument("--" + prefix, action=ActionParser(parser=parser))
            # help='adversarial attack options')

    add_argparse_args = add_class_args
from jsonargparse import ArgumentParser, ActionParser
import torch
from .attack_factory import AttackFactory as AF
class RandomAttackFactory(object):
def __init__(
self,
attack_types,
min_eps=1e-5,
max_eps=0.1,
min_snr=30,
max_snr=60,
min_alpha=1e-5,
max_alpha=0.02,
norms=[float("inf")],
random_eps=False,
min_num_random_init=0,
max_num_random_init=3,
min_confidence=0,
max_confidence=1,
min_lr=1e-3,
max_lr=1e-2,
min_binary_search_steps=9,
max_binary_search_steps=9,
min_iter=5,
max_iter=10,
abort_early=True,
min_c=1e-3,
max_c=1e-2,
reduce_c=False,
c_incr_factor=2,
tau_decr_factor=0.9,
indep_channels=False,
norm_time=False,
time_dim=None,
use_snr=False,
loss=None,
targeted=False,
range_min=None,
range_max=None,
eps_scale=1,
):
self.attack_types = attack_types
self.min_eps = min_eps
self.max_eps = max_eps
self.min_snr = min_snr
self.max_snr = max_snr
self.min_alpha = min_alpha
self.max_alpha = max_alpha
self.norms = norms
self.random_eps = random_eps
self.min_num_random_init = min_num_random_init
self.max_num_random_init = max_num_random_init
self.min_confidence = min_confidence
self.max_confidence = max_confidence
self.min_lr = min_lr
self.max_lr = max_lr
self.min_binary_search_steps = min_binary_search_steps
self.max_binary_search_steps = max_binary_search_steps
self.abort_early = abort_early
self.min_iter = min_iter
self.max_iter = max_iter
self.min_c = min_c
self.max_c = max_c
self.reduce_c = reduce_c
self.c_incr_factor = c_incr_factor
self.tau_decr_factor = tau_decr_factor
self.indep_channels = indep_channels
self.norm_time = norm_time
self.time_dim = time_dim
self.use_snr = use_snr
self.loss = loss
self.targeted = targeted
self.range_min = range_min
self.range_max = range_max
self.eps_scale = eps_scale
@staticmethod
def _choice(n):
return torch.randint(low=0, high=n, size=(1,)).item()
@staticmethod
def _randint(min_val, max_val):
return torch.randint(low=min_val, high=max_val + 1, size=(1,)).item()
@staticmethod
def _uniform(min_val, max_val):
return (max_val - min_val) * torch.rand(size=(1,)).item() + min_val
@staticmethod
def _log_uniform(min_val, max_val):
log_x = (math.log(max_val) - math.log(min_val)) * torch.rand(
size=(1,)
).item() + math.log(min_val)
return math.exp(log_x)
def _sample_attack_args(self):
attack_args = {}
attack_idx = self._choice(len(self.attack_types))
attack_args["attack_type"] = self.attack_types[attack_idx]
eps = self._log_uniform(self.min_eps, self.max_eps)
attack_args["eps"] = eps
attack_args["alpha"] = self._log_uniform(
min(eps, self.min_alpha), min(eps, self.max_alpha)
)
attack_args["norm"] = self.norms[self._choice(len(self.norms))]
attack_args["random_eps"] = self.random_eps
attack_args["num_random_init"] = self._randint(
self.min_num_random_init, self.max_num_random_init
)
attack_args["confidence"] = self._uniform(
self.min_confidence, self.max_confidence
)
attack_args["lr"] = self._uniform(self.min_lr, self.max_lr)
attack_args["binary_search_steps"] = self._randint(
self.min_binary_search_steps, self.max_binary_search_steps
)
attack_args["max_iter"] = self._randint(self.min_iter, self.max_iter)
attack_args["abort_early"] = self.abort_early
attack_args["c"] = self._uniform(self.min_c, self.max_c)
attack_args["reduce_c"] = self.reduce_c
attack_args["c_incr_factor"] = self.c_incr_factor
attack_args["tau_decr_factor"] = self.tau_decr_factor
attack_args["indep_channels"] = self.indep_channels
attack_args["norm_time"] = self.norm_time
attack_args["time_dim"] = self.time_dim
attack_args["use_snr"] = self.use_snr
attack_args["targeted"] = self.targeted
attack_args["range_min"] = self.range_min
attack_args["range_max"] = self.range_max
attack_args["eps_scale"] = self.eps_scale
attack_args["loss"] = self.loss
return attack_args
def sample_attack(self, model=None):
attack_args = self._sample_attack_args()
attack_args["model"] = model
return AF.create(**attack_args)
@staticmethod
def filter_args(**kwargs):
if "no_abort" in kwargs:
kwargs["abort_early"] = not kwargs["no_abort"]
if "norms" in kwargs:
kwargs["norms"] = [float(a) for a in kwargs["norms"]]
valid_args = (
"attack_types",
"min_eps",
"max_eps",
"min_snr",
"max_snr",
"norms",
"random_eps",
"min_num_random_init",
"max_num_random_init",
"min_alpha",
"max_alpha",
"min_confidence",
"max_confidence",
"min_lr",
"max_lr",
"min_binary_search_steps",
"max_binary_search_steps",
"min_iter",
"max_iter",
"abort_early",
"min_c",
"max_c",
"reduce_c",
"c_incr_factor",
"tau_decr_factor",
"indep_channels",
"use_snr",
"norm_time",
"targeted",
)
args = dict((k, kwargs[k]) for k in valid_args if k in kwargs)
return args
@staticmethod
def add_class_args(parser, prefix=None):
if prefix is not None:
outer_parser = parser
parser = ArgumentParser(prog="")
parser.add_argument(
"--attack-types",
type=str.lower,
default=["fgsm"],
nargs="+",
choices=[
"fgsm",
"snr-fgsm",
"rand-fgsm",
"iter-fgsm",
"cw-l0",
"cw-l2",
"cw-linf",
"pgd",
],
help=("Attack types"),
)
parser.add_argument(
"--norms",
type=float,
default=[float("inf")],
nargs="+",
choices=[float("inf"), 1, 2],
help=("Attack perturbation norms"),
)
parser.add_argument(
"--min-eps",
default=1e-5,
type=float,
help=("attack min epsilon, upper bound for the perturbation norm"),
)
parser.add_argument(
"--max-eps",
default=0.1,
type=float,
help=("attack max epsilon, upper bound for the perturbation norm"),
)
parser.add_argument(
"--min-snr",
default=30,
type=float,
help=(
"min upper bound for the signal-to-noise ratio of the "
"perturbed signal"
),
)
parser.add_argument(
"--max-snr",
default=60,
type=float,
help=(
"max upper bound for the signal-to-noise ratio of the "
"perturbed signal"
),
)
parser.add_argument(
"--min-alpha",
default=1e-5,
type=float,
help=("min alpha for iter and rand fgsm attack"),
)
parser.add_argument(
"--max-alpha",
default=0.02,
type=float,
help=("max alpha for iter and rand fgsm attack"),
)
parser.add_argument(
"--random-eps",
default=False,
action="store_true",
help=("use random epsilon in PGD attack"),
)
parser.add_argument(
"--min-confidence",
default=0,
type=float,
help=("min confidence for carlini-wagner attack"),
)
parser.add_argument(
"--max-confidence",
default=1,
type=float,
help=("max confidence for carlini-wagner attack"),
)
parser.add_argument(
"--min-lr",
default=1e-3,
type=float,
help=("min learning rate for attack optimizers"),
)
parser.add_argument(
"--max-lr",
default=1e-2,
type=float,
help=("max learning rate for attack optimizers"),
)
parser.add_argument(
"--min-binary-search-steps",
default=9,
type=int,
help=("min num bin. search steps in carlini-wagner-l2 attack"),
)
parser.add_argument(
"--max-binary-search-steps",
default=9,
type=int,
help=("max num bin. search steps in carlini-wagner-l2 attack"),
)
parser.add_argument(
"--min-iter",
default=5,
type=int,
help=("min maximum. num. of optim iters in attack"),
)
parser.add_argument(
"--max-iter",
default=10,
type=int,
help=("max maximum num. of optim iters in attack"),
)
parser.add_argument(
"--min-c",
default=1e-3,
type=float,
help=(
"min initial weight of constraint function f "
"in carlini-wagner attack"
),
)
parser.add_argument(
"--max-c",
default=1e-2,
type=float,
help=(
"max initial weight of constraint function f "
"in carlini-wagner attack"
),
)
parser.add_argument(
"--reduce-c",
default=False,
action="store_true",
help=("allow to reduce c in carline-wagner-l0/inf attack"),
)
parser.add_argument(
"--c-incr-factor",
default=2,
type=float,
help=("factor to increment c in carline-wagner-l0/inf attack"),
)
parser.add_argument(
"--tau-decr-factor",
default=0.75,
type=float,
help=("factor to reduce tau in carline-wagner-linf attack"),
)
parser.add_argument(
"--indep-channels",
default=False,
action="store_true",
help=("consider independent input channels in " "carlini-wagner-l0 attack"),
)
parser.add_argument(
"--no-abort",
default=False,
action="store_true",
help=("do not abort early in optimizer iterations"),
)
parser.add_argument(
"--min-num-random-init",
default=1,
type=int,
help=("min number of random initializations in PGD attack"),
)
parser.add_argument(
"--max-num-random-init",
default=5,
type=int,
help=("max number of random initializations in PGD attack"),
)
parser.add_argument(
"--targeted",
default=False,
action="store_true",
help="use targeted attack intead of non-targeted",
)
parser.add_argument(
"--use-snr",
default=False,
action="store_true",
help=(
"In carlini-wagner attack maximize SNR instead of "
"minimize perturbation norm"
),
)
parser.add_argument(
"--norm-time",
default=False,
action="store_true",
help=("normalize norm by number of samples in time dimension"),
)
if prefix is not None:
outer_parser.add_argument("--" + prefix, action=ActionParser(parser=parser))
# help='adversarial attack options')
add_argparse_args = add_class_args | 0.7797 | 0.133839 |
# Validate the output of a GCD stress test: check that DOMs on the bad-DOM
# lists produced no hits, and that every good DOM produced pulses, launches
# and calibrated waveforms with a sane total charge.
from I3Tray import *
from icecube import icetray, dataclasses, dataio, simclasses
from os.path import expandvars
import sys  # fix: sys.exit() is used below but sys was only available via the star import
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import pylab
from optparse import OptionParser

parser = OptionParser()
parser.add_option("-i","--infile", dest="INFILE",
                  help="Input file to read.")
parser.add_option("-m","--nhits_per_DOM", type = "int",
                  dest="nhits_per_DOM", default=20,
                  help="Number of hits per DOM")
parser.add_option("-p","--plots", action="store_true",
                  dest="GENERATE_PLOTS", default = False,
                  # fix: help text was a copy-paste of the -m option's
                  help="Generate waveform plots")
(options, args) = parser.parse_args()

# Two handles on the same file: 'infile' is consumed here to locate the
# detector-status frame; 'f' is iterated later over the DAQ frames.
f = dataio.I3File(options.INFILE)
infile = dataio.I3File(options.INFILE)
status_frame = infile.pop_frame()
while not status_frame.Has('I3DetectorStatus'):
    status_frame = infile.pop_frame()
status = status_frame.Get('I3DetectorStatus')

# Prefer the bad-DOM lists stored in the file; fall back to the static list.
badDOMList = list()
badDOMListSLC = list()
if "BadDomsList" in status_frame :
    print("Found a BadDomsList in the frame.")
    print("Using this one instead.")
    badDOMList = status_frame.Get("BadDomsList")
    badDOMListSLC = status_frame.Get("BadDomsListSLC")
    print("len(badDOMList) = ",len(badDOMList))
    print("len(badDOMListSLC) = ",len(badDOMListSLC))
else:
    print(status_frame)
    try :
        from icecube.BadDomList import bad_dom_list_static
        badDOMList = bad_dom_list_static.IC86_static_bad_dom_list()
    except ImportError :
        print("ERROR : BadDomsList wasn't found in the frame")
        print("and either the BadDomList doesn't exist or")
        print("there's no static_bad_dom_list.")
        sys.exit(1)

from icecube.sim_services.sim_utils.gcd_utils import get_omgeo, get_domcal, get_domstatus
omgeo = get_omgeo( dataio.I3File(options.INFILE) )
domcal = get_domcal( dataio.I3File(options.INFILE) )
domstat = get_domstatus( dataio.I3File(options.INFILE) )

# Good DOMs are real (string > 0) DOMs not on the bad-DOM list.
goodDOMList = [omkey for omkey,g in omgeo \
               if omkey not in badDOMList and omkey.string > 0]

counter = 0
bad_doms_with_hits = list()
# NOTE(review): indentation was lost in this dump; the nesting below was
# reconstructed from the message structure — confirm against the original.
while f.more():
    counter += 1
    frame = f.pop_frame()
    if frame.Stop != icetray.I3Frame.DAQ : continue
    print("[ Frame %d ]" % (counter))
    print(frame)
    pulsemap = frame.Get("I3MCPulseSeriesMap")
    dlmap = frame.Get("I3DOMLaunchSeriesMap")
    calwfmap = frame.Get("CalibratedWaveforms")
    rpmap = frame.Get("WavedeformPulses")
    nhits_per_DOM = options.nhits_per_DOM
    if 'NHitsPerDOM' in frame.keys():
        print('Found `NHitsPerDOM` in frame. Override options.nhits_per_DOM')
        print()
        nhits_per_DOM = int(frame['NHitsPerDOM'].value)
    # make sure this DOM is not in the bad DOM list
    for omkey, rpseries in rpmap :
        charge = sum([rp.charge for rp in rpseries])
        if len(rpseries) == 0 :
            print("%s : this DOM has an empty I3RecoPulseSeries" % str(omkey))
            print(" beacon baseline ATWD0a = %f" % domcal[omkey].atwd_beacon_baseline[0,0])
            print(" beacon baseline ATWD0b = %f" % domcal[omkey].atwd_beacon_baseline[0,1])
            # how do the calibrated waveforms look?
            if options.GENERATE_PLOTS:
                atwd0 = calwfmap[omkey][0]
                fig = plt.figure()
                plt.plot(range(len(atwd0.waveform)), [v/I3Units.mV for v in atwd0.waveform])
                fig.savefig("calibrated_ATWD0_%s_%s.png" % (omkey.string, omkey.om))
                plt.clf()
                domlaunch = dlmap[omkey][0]
                fig = plt.figure()
                pylab.plot(range(len(domlaunch.raw_atwd[0])), [v for v in domlaunch.raw_atwd[0]])
                pylab.title("N_launches = %d LC_Bit = %s" % (len(dlmap[omkey]),domlaunch.lc_bit))
                fig.savefig("launch_ATWD0_%s_%s.png" % (omkey.string, omkey.om))
                plt.clf()
        # DOMs in the badDOMListSLC should have no waveforms at all
        if omkey in badDOMListSLC :
            print("%s : this DOM is in the BAD DOM List!!!" % str(omkey))
            print(" number of recopulses = ",len(rpseries))
            print(" charge = %.2f" % charge)
            print(" number of launches = ",len(dlmap[omkey]))
            print(" lc_bit = ",dlmap[omkey][0].lc_bit)
            print(" trigger_type = ",dlmap[omkey][0].trigger_type)
            print(" trigger_mode = ",dlmap[omkey][0].trigger_mode)
            if omkey not in bad_doms_with_hits:
                bad_doms_with_hits.append(omkey)
        # Flag DOMs whose total charge is far from the expected hit count.
        if(charge/float(nhits_per_DOM) < 0.2 or \
           charge/float(nhits_per_DOM) > 2.0 ) :
            print("%s : what do you think about this (%f) charge and this (%f) charge ratio? " % \
                  (str(omkey),charge,charge/float(nhits_per_DOM)))
        # The BadDOMListSLC are DOMs that are off and should not contain any hits
        # The BadDOMList are DOMs that do not participate in HLC launches
        if omkey in badDOMListSLC and omkey not in badDOMList:
            # these are SLC-only DOMs
            for dl in dlmap[omkey] :
                if dl.lc_bit :
                    print("ERROR: This %s is an SLC-only DOM with LCBit set to True." % omkey)
    # make sure every DOM in the good DOM list has a hit
    for omkey in goodDOMList :
        if omkey not in rpmap:
            print("%s : this DOM is good but produced no hits!!!" % str(omkey))
            print(" this is an %s DOM." % str(omgeo[omkey].omtype))
            if str(omgeo[omkey].omtype) == 'Scintillator':
                print(" No PEs were created to test the Scintillators. Skip this DOM.")
                continue
            if omkey not in pulsemap :
                print(" %s : ERROR this DOM has no PMT waveform!!!" % str(omkey))
            else:
                charge = sum([pulse.charge for pulse in pulsemap[omkey]])
                print(" %s : OK this DOM has a PMT waveform with charge %f" % (str(omkey), charge))
            if omkey not in dlmap :
                print(" %s : ERROR this DOM has no DOM launches!!!" % str(omkey))
            else:
                # fix: the format string has two placeholders but only
                # len(dlmap[omkey]) was supplied, raising TypeError at runtime
                print(" %s : OK this DOM has %s launches." % (str(omkey), len(dlmap[omkey])))
            if omkey not in calwfmap :
                print(" %s : ERROR this DOM has no calibrated waveforms!!!" % str(omkey))
            else:
                # fix: same missing-argument bug as the launches message above
                print(" %s : OK this DOM has %d calibrated waveforms." % (str(omkey), len(calwfmap[omkey])))
            if omkey not in domcal :
                print(" %s : this DOM has no domcal entry!!!" % str(omkey))
            else:
                print(" impedance = %f ohms" % ( (domcal[omkey].front_end_impedance)/I3Units.ohm))
            if omkey not in domstat :
                print(" %s : this DOM has no domstat entry!!!" % str(omkey))
            else:
                print(" voltage = %f V" % ( (domstat[omkey].pmt_hv)/I3Units.V))
                print(" statusATWDa = %s" % domstat[omkey].status_atwd_a)
                print(" statusATWDb = %s" % domstat[omkey].status_atwd_b)
                print(" lcWindowPost = %s ns" % domstat[omkey].lc_window_post)
            if omkey in domcal and omkey in domstat :
                print(" gain = %f " % ( dataclasses.pmt_gain(domstat[omkey],domcal[omkey]) ))
                print(" ttime = %f ns " % ( dataclasses.transit_time(domstat[omkey],domcal[omkey])/I3Units.ns ))

print("number of bad DOMs with hits = ",len(bad_doms_with_hits))
print("len(badDOMList) = ",len(badDOMList))
print("len(badDOMListSLC) = ",len(badDOMListSLC))
for d in bad_doms_with_hits:
    print(d)
from I3Tray import *
from icecube import icetray, dataclasses, dataio, simclasses
from os.path import expandvars
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import pylab
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-i","--infile", dest="INFILE",
help="Input file to read.")
parser.add_option("-m","--nhits_per_DOM", type = "int",
dest="nhits_per_DOM", default=20,
help="Number of hits per DOM")
parser.add_option("-p","--plots", action="store_true",
dest="GENERATE_PLOTS", default = False,
help="Number of hits per DOM")
(options, args) = parser.parse_args()
f = dataio.I3File(options.INFILE)
infile = dataio.I3File(options.INFILE)
status_frame = infile.pop_frame()
while not status_frame.Has('I3DetectorStatus'):
status_frame = infile.pop_frame()
status = status_frame.Get('I3DetectorStatus')
badDOMList = list()
badDOMListSLC = list()
if "BadDomsList" in status_frame :
print("Found a BadDomsList in the frame.")
print("Using this one instead.")
badDOMList = status_frame.Get("BadDomsList")
badDOMListSLC = status_frame.Get("BadDomsListSLC")
print("len(badDOMList) = ",len(badDOMList))
print("len(badDOMListSLC) = ",len(badDOMListSLC))
else:
print(status_frame)
try :
from icecube.BadDomList import bad_dom_list_static
badDOMList = bad_dom_list_static.IC86_static_bad_dom_list()
except ImportError :
print("ERROR : BadDomsList wasn't found in the frame")
print("and either the BadDomList doesn't exist or")
print("there's no static_bad_dom_list.")
sys.exit(1)
from icecube.sim_services.sim_utils.gcd_utils import get_omgeo, get_domcal, get_domstatus
omgeo = get_omgeo( dataio.I3File(options.INFILE) )
domcal = get_domcal( dataio.I3File(options.INFILE) )
domstat = get_domstatus( dataio.I3File(options.INFILE) )
goodDOMList = [omkey for omkey,g in omgeo \
if omkey not in badDOMList and omkey.string > 0]
counter = 0
bad_doms_with_hits = list()
while f.more():
counter += 1
frame = f.pop_frame()
if frame.Stop != icetray.I3Frame.DAQ : continue
print("[ Frame %d ]" % (counter))
print(frame)
pulsemap = frame.Get("I3MCPulseSeriesMap")
dlmap = frame.Get("I3DOMLaunchSeriesMap")
calwfmap = frame.Get("CalibratedWaveforms")
rpmap = frame.Get("WavedeformPulses")
nhits_per_DOM = options.nhits_per_DOM
if 'NHitsPerDOM' in frame.keys():
print('Found `NHitsPerDOM` in frame. Override options.nhits_per_DOM')
print()
nhits_per_DOM = int(frame['NHitsPerDOM'].value)
# make sure this DOM is not in the bad DOM list
for omkey, rpseries in rpmap :
charge = sum([rp.charge for rp in rpseries])
if len(rpseries) == 0 :
print("%s : this DOM has an empty I3RecoPulseSeries" % str(omkey))
print(" beacon baseline ATWD0a = %f" % domcal[omkey].atwd_beacon_baseline[0,0])
print(" beacon baseline ATWD0b = %f" % domcal[omkey].atwd_beacon_baseline[0,1])
# how do the calibrated waveforms look?
if options.GENERATE_PLOTS:
atwd0 = calwfmap[omkey][0]
fig = plt.figure()
plt.plot(range(len(atwd0.waveform)), [v/I3Units.mV for v in atwd0.waveform])
fig.savefig("calibrated_ATWD0_%s_%s.png" % (omkey.string, omkey.om))
plt.clf()
domlaunch = dlmap[omkey][0]
fig = plt.figure()
pylab.plot(range(len(domlaunch.raw_atwd[0])), [v for v in domlaunch.raw_atwd[0]])
pylab.title("N_launches = %d LC_Bit = %s" % (len(dlmap[omkey]),domlaunch.lc_bit))
fig.savefig("launch_ATWD0_%s_%s.png" % (omkey.string, omkey.om))
plt.clf()
# DOMs in the badDOMListSLC should have no waveforms at all
if omkey in badDOMListSLC :
print("%s : this DOM is in the BAD DOM List!!!" % str(omkey))
print(" number of recopulses = ",len(rpseries))
print(" charge = %.2f" % charge)
print(" number of launches = ",len(dlmap[omkey]))
print(" lc_bit = ",dlmap[omkey][0].lc_bit)
print(" trigger_type = ",dlmap[omkey][0].trigger_type)
print(" trigger_mode = ",dlmap[omkey][0].trigger_mode)
if omkey not in bad_doms_with_hits:
bad_doms_with_hits.append(omkey)
if(charge/float(nhits_per_DOM) < 0.2 or \
charge/float(nhits_per_DOM) > 2.0 ) :
print("%s : what do you think about this (%f) charge and this (%f) charge ratio? " % \
(str(omkey),charge,charge/float(nhits_per_DOM)))
# The BadDOMListSLC are DOMs that are off and should not contain any hits
# The BadDOMList are DOMs that do not participate in HLC launches
if omkey in badDOMListSLC and omkey not in badDOMList:
# these are SLC-only DOMs
for dl in dlmap[omkey] :
if dl.lc_bit :
print("ERROR: This %s is an SLC-only DOM with LCBit set to True." % omkey)
# make sure every DOM in the good DOM list has a hit
for omkey in goodDOMList :
if omkey not in rpmap:
print("%s : this DOM is good but produced no hits!!!" % str(omkey))
print(" this is an %s DOM." % str(omgeo[omkey].omtype))
if str(omgeo[omkey].omtype) == 'Scintillator':
print(" No PEs were created to test the Scintillators. Skip this DOM.")
continue
if omkey not in pulsemap :
print(" %s : ERROR this DOM has no PMT waveform!!!" % str(omkey))
else:
charge = sum([pulse.charge for pulse in pulsemap[omkey]])
print(" %s : OK this DOM has a PMT waveform with charge %f" % (str(omkey), charge))
if omkey not in dlmap :
print(" %s : ERROR this DOM has no DOM launches!!!" % str(omkey))
else:
print(" %s : OK this DOM has %s launches." % len(dlmap[omkey]))
if omkey not in calwfmap :
print(" %s : ERROR this DOM has no calibrated waveforms!!!" % str(omkey))
else:
print(" %s : OK this DOM has %d calibrated waveforms." % len(calwfmap[omkey]))
if omkey not in domcal :
print(" %s : this DOM has no domcal entry!!!" % str(omkey))
else:
print(" impedance = %f ohms" % ( (domcal[omkey].front_end_impedance)/I3Units.ohm))
if omkey not in domstat :
print(" %s : this DOM has no domstat entry!!!" % str(omkey))
else:
print(" voltage = %f V" % ( (domstat[omkey].pmt_hv)/I3Units.V))
print(" statusATWDa = %s" % domstat[omkey].status_atwd_a)
print(" statusATWDb = %s" % domstat[omkey].status_atwd_b)
print(" lcWindowPost = %s ns" % domstat[omkey].lc_window_post)
if omkey in domcal and omkey in domstat :
print(" gain = %f " % ( dataclasses.pmt_gain(domstat[omkey],domcal[omkey]) ))
print(" ttime = %f ns " % ( dataclasses.transit_time(domstat[omkey],domcal[omkey])/I3Units.ns ))
print("number of bad DOMs with hits = ",len(bad_doms_with_hits))
print("len(badDOMList) = ",len(badDOMList))
print("len(badDOMListSLC) = ",len(badDOMListSLC))
for d in bad_doms_with_hits:
print(d) | 0.281504 | 0.120957 |
import dash
import dash_core_components as dcc
import dash_html_components as html
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import plotly.graph_objs as go
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix, roc_auc_score, roc_curve, f1_score
# Load per-campaign metadata (duration, funding Goal, URL, ...) indexed by row.
campaigns = pd.read_csv('../data/1-community_campaigns.csv',index_col=0)
# Cs: one normalized capital curve per campaign; Ss: 1 if funded, else 0.
Cs = [];
Ss = [];
for i in range(1,201):
    # Read one campaign's transaction CSV (files are zero-padded: campaign0001.csv)
    df = pd.read_csv('../data/campaign'+'{0:0=4d}'.format(i)+'.csv',index_col=0)
    # Use date to create a datetime index
    df['Datetime'] = pd.to_datetime(df['date'])
    df = df.set_index('Datetime')
    # Remove unnecessary "date" and "supporter name" columns
    # and count one backer per transaction row
    df = df.drop(['date','supporter name'], axis=1)
    df['backers'] = 1
    # Resample dataframe by day to get daily transaction data
    df = df.resample('D').sum()
    df['day_number'] = range(1,1+len(df))
    # Add columns with cumulative sums of pledges and number
    # of backers
    df['cumsum_pledges'] = df['pledge'].cumsum()
    df['cumsum_backers'] = df['backers'].cumsum()
    # Normalize time by campaign duration and capital by the funding goal
    df['norm_time'] = df['day_number']/(1+campaigns.iloc[i-1]['duration'])
    df['norm_capital'] = df['cumsum_pledges']/campaigns.iloc[i-1]['Goal']
    t = df['norm_time'].tolist()
    m = df['norm_capital'].tolist()
    # Add point (t,M) = (0,0) so every curve starts at the origin
    t.insert(0,0)
    m.insert(0,0)
    # Resample each curve onto a common 29-point grid over [0, 1]
    ts = np.linspace(0,1,29)
    ms = np.interp(ts,t,m)
    # Funded iff the final normalized capital reached the goal (>= 1)
    state = 1 if ms[-1]>=1 else 0
    Cs.append(ms)
    Ss.append(state)
# Stratified 80/20 split of the capital curves and funded/failed labels.
X_train, X_test, y_train, y_test = train_test_split(Cs, Ss, stratify=Ss, test_size=0.2, random_state=42)
error = []
# Calculating error for K values between 1 and 100
# NOTE(review): K is selected on the same test set used for the final
# evaluation below — this leaks test data into model selection; a
# validation split or cross-validation would be cleaner. Confirm intent.
for i in range(1, 100):
    knn = KNeighborsClassifier(n_neighbors=i,weights='distance')
    knn.fit(X_train, y_train)
    pred_i = knn.predict(X_test)
    error.append(np.mean(pred_i != y_test))
# Plot the error-rate curve used to pick K.
plt.figure(figsize=(12, 6))
plt.plot(range(1, 100), error, color='red', linestyle='dashed', marker='o',
         markerfacecolor='blue', markersize=10)
plt.title('Error Rate K Value');
plt.xlabel('K Value');
plt.ylabel('Mean Error');
# Best K = position of the smallest error (list is 0-based, K starts at 1).
best_k = 1+error.index(min(error))
# Refit with the chosen K and report test-set metrics.
kNN = KNeighborsClassifier(n_neighbors=best_k,weights='distance')
kNN.fit(X_train, y_train);
y_pred = kNN.predict(X_test)
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
print('Used',best_k,'Neighbors')
print('Accuracy of kNN classifier on test set: {:.2f}'.format(kNN.score(X_test, y_test)))
print('F1 score of kNN classifier on test set: {:.2f}'.format(f1_score(y_test,y_pred)))
# Dash application: a campaign dropdown plus a time-series graph.
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# Exposed for WSGI servers (e.g. gunicorn on Heroku).
server = app.server
colors = {
    'background': '#111111',
    'text': '#7FDBFF'
}
app.layout = html.Div([
    html.H1('Crowdfunding-Prophet',
            style={'textAlign':'center', 'color': colors['text']}
    ),
    html.Div(children='Select a Campaign'),
    # Dropdown options come from the campaigns metadata loaded above;
    # its value feeds the 'time-series-plot' callback.
    dcc.Dropdown(
        id='campaigns-dropdown-menu',
        style={'width': '48%','text-align':'center'},
        options=[{'label': i, 'value': i} for i in campaigns['URL']],
        placeholder='Select a Campaign'
    ),
    html.Div([
        dcc.Graph(id='time-series-plot')],
        style={'display': 'inline-block', 'width': '48%','text-align':'center'})
    ],
    style={'backgroundColor': colors['background']})
def create_time_series(df):
    """Build a plotly figure dict of cumulative pledged capital vs. day number.

    Args:
      df: DataFrame with 'day_number' and 'cumsum_pledges' columns.

    Returns:
      A dict with 'data' (one line+marker Scatter trace) and 'layout' keys,
      suitable for a dcc.Graph 'figure' property.
    """
    return {
        'data': [go.Scatter(
            x=df['day_number'],
            y=df['cumsum_pledges'],
            mode='lines+markers'
        )],
        'layout': {
            'height': 450,
            # fix: key was misspelled 'widht', so the width was never applied
            'width': 450,
            'margin': {'l': 40, 'b': 40, 'r': 10, 't': 10},
            'annotations': [{
                'x': 0, 'y': 0.85, 'xanchor': 'left', 'yanchor': 'bottom',
                'xref': 'paper', 'yref': 'paper', 'showarrow': False,
                'align': 'center', 'bgcolor': 'rgba(255, 255, 255, 0.5)',
                'text': 'Time Series data'
            }],
            'yaxis': {'type': 'linear', 'title': 'Capital'},
            'xaxis': {'showgrid': False, 'title': 'day number'}
        }
    }
@app.callback(
    dash.dependencies.Output('time-series-plot', 'figure'),
    [dash.dependencies.Input('campaigns-dropdown-menu', 'value')])
def update_graph(url):
    """Dash callback: rebuild the time-series figure for the chosen campaign.

    :param url: campaign URL selected in the dropdown.
    :returns: plotly figure dict produced by create_time_series().
    """
    # Campaign CSV files are numbered from 1; the dataframe index is 0-based.
    campaign_idx = 1+campaigns[campaigns['URL'] == url].index.values.astype(int)[0]
    df = pd.read_csv('../data/campaign'+'{0:0=4d}'.format(campaign_idx)+'.csv',index_col=0)
    # Use the transaction date as a DatetimeIndex.
    df['Datetime'] = pd.to_datetime(df['date'])
    df = df.set_index('Datetime')
    # Remove unnecessary "date" and "supporter name" columns
    # and add a column counting one backer per transaction.
    df = df.drop(['date','supporter name'], axis=1)
    df['backers'] = 1
    # Resample dataframe by day to get daily transaction data.
    df = df.resample('D').sum()
    df['day_number'] = range(1,1+len(df))
    # Add columns with cumulative sums of pledges and number of backers.
    df['cumsum_pledges'] = df['pledge'].cumsum()
    df['cumsum_backers'] = df['backers'].cumsum()
    # Normalize against the *selected* campaign's duration and goal.
    # BUG FIX: previously hard-coded campaigns.iloc[5-1] (campaign #5,
    # a debugging leftover), so the normalized columns were wrong for
    # every other campaign. Mirrors the iloc[i-1] pattern used in the
    # preprocessing loop.
    df['norm_time'] = df['day_number']/(1+campaigns.iloc[campaign_idx-1]['duration'])
    df['norm_capital'] = df['cumsum_pledges']/campaigns.iloc[campaign_idx-1]['Goal']
    df = df[['day_number','norm_time','pledge','cumsum_pledges','norm_capital','backers','cumsum_backers']]
    return create_time_series(df)
if __name__ == '__main__':
app.run_server(debug=True) | heroku/app.py | import dash
import dash_core_components as dcc
import dash_html_components as html
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import plotly.graph_objs as go
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix, roc_auc_score, roc_curve, f1_score
# Master table with one row per campaign (URL, duration, Goal, ...).
campaigns = pd.read_csv('../data/1-community_campaigns.csv',index_col=0)
# Cs: resampled normalized capital curves; Ss: 1 if the campaign reached
# its goal, else 0.
Cs = [];
Ss = [];
for i in range(1,201):
    # Read CSV file
    df = pd.read_csv('../data/campaign'+'{0:0=4d}'.format(i)+'.csv',index_col=0)
    # Use date to create a datetime_index
    df['Datetime'] = pd.to_datetime(df['date'])
    df = df.set_index('Datetime')
    # Remove unnecessary "date" and "supporter name" columns
    # and add a column counting one backer per transaction
    df = df.drop(['date','supporter name'], axis=1)
    df['backers'] = 1
    # Resample dataframe by day to get daily transaction data
    df = df.resample('D').sum()
    df['day_number'] = range(1,1+len(df))
    # Add columns with cumulative sums of pledges and number
    # of backers
    df['cumsum_pledges'] = df['pledge'].cumsum()
    df['cumsum_backers'] = df['backers'].cumsum()
    # Normalizations: time by campaign duration, capital by funding goal
    df['norm_time'] = df['day_number']/(1+campaigns.iloc[i-1]['duration'])
    df['norm_capital'] = df['cumsum_pledges']/campaigns.iloc[i-1]['Goal']
    t = df['norm_time'].tolist()
    m = df['norm_capital'].tolist()
    # Add point (t,M) = (0,0)
    t.insert(0,0)
    m.insert(0,0)
    # Resample every curve onto the same 29-point [0, 1] time grid so
    # campaigns of different lengths become comparable feature vectors
    ts = np.linspace(0,1,29)
    ms = np.interp(ts,t,m)
    # Success label: final normalized capital at or above the goal
    state = 1 if ms[-1]>=1 else 0
    Cs.append(ms)
    Ss.append(state)
# Hold out 20% of the curves for testing, stratified on the success label.
X_train, X_test, y_train, y_test = train_test_split(Cs, Ss, stratify=Ss, test_size=0.2, random_state=42)
error = []
# Calculating error for K values between 1 and 100
for i in range(1, 100):
    knn = KNeighborsClassifier(n_neighbors=i,weights='distance')
    knn.fit(X_train, y_train)
    pred_i = knn.predict(X_test)
    error.append(np.mean(pred_i != y_test))
# Visualize mean test-set error as a function of k.
plt.figure(figsize=(12, 6))
plt.plot(range(1, 100), error, color='red', linestyle='dashed', marker='o',
         markerfacecolor='blue', markersize=10)
plt.title('Error Rate K Value');
plt.xlabel('K Value');
plt.ylabel('Mean Error');
# error[j] holds the error for k = j + 1, so add 1 to recover the best k.
# NOTE(review): best_k is chosen on the same test set used for the final
# scores below, so these metrics are optimistically biased — confirm intent.
best_k = 1+error.index(min(error))
kNN = KNeighborsClassifier(n_neighbors=best_k,weights='distance')
kNN.fit(X_train, y_train);
y_pred = kNN.predict(X_test)
# Report confusion matrix, per-class metrics and summary scores.
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
print('Used',best_k,'Neighbors')
print('Accuracy of kNN classifier on test set: {:.2f}'.format(kNN.score(X_test, y_test)))
print('F1 score of kNN classifier on test set: {:.2f}'.format(f1_score(y_test,y_pred)))
# External CSS theme for the Dash app.
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# Expose the underlying Flask server (used by WSGI hosts such as Heroku).
server = app.server
# Shared color palette for the page.
colors = {
    'background': '#111111',
    'text': '#7FDBFF'
}
# Page layout: title, campaign selector dropdown, and the time-series graph
# that the callback below redraws on selection.
app.layout = html.Div([
    html.H1('Crowdfunding-Prophet',
            style={'textAlign':'center', 'color': colors['text']}
            ),
    html.Div(children='Select a Campaign'),
    dcc.Dropdown(
        id='campaigns-dropdown-menu',
        style={'width': '48%','text-align':'center'},
        # One dropdown entry per campaign URL from the campaigns table.
        options=[{'label': i, 'value': i} for i in campaigns['URL']],
        placeholder='Select a Campaign'
    ),
    html.Div([
        dcc.Graph(id='time-series-plot')],
        style={'display': 'inline-block', 'width': '48%','text-align':'center'})
    ],
    style={'backgroundColor': colors['background']})
def create_time_series(df):
    """Build a plotly figure dict of cumulative pledged capital over time.

    :param df: per-campaign daily dataframe; only the 'day_number' and
        'cumsum_pledges' columns are read here.
    :returns: dict with 'data' (one lines+markers scatter trace) and
        'layout', suitable as a dcc.Graph figure.
    """
    return {
        'data': [go.Scatter(
            x=df['day_number'],
            y=df['cumsum_pledges'],
            mode='lines+markers'
        )],
        'layout': {
            'height': 450,
            # BUG FIX: key was misspelled 'widht', so the intended
            # 450px width was silently ignored by plotly.
            'width': 450,
            'margin': {'l': 40, 'b': 40, 'r': 10, 't': 10},
            'annotations': [{
                'x': 0, 'y': 0.85, 'xanchor': 'left', 'yanchor': 'bottom',
                'xref': 'paper', 'yref': 'paper', 'showarrow': False,
                'align': 'center', 'bgcolor': 'rgba(255, 255, 255, 0.5)',
                'text': 'Time Series data'
            }],
            'yaxis': {'type': 'linear', 'title': 'Capital'},
            'xaxis': {'showgrid': False, 'title': 'day number'}
        }
    }
@app.callback(
    dash.dependencies.Output('time-series-plot', 'figure'),
    [dash.dependencies.Input('campaigns-dropdown-menu', 'value')])
def update_graph(url):
    """Dash callback: rebuild the time-series figure for the chosen campaign.

    :param url: campaign URL selected in the dropdown.
    :returns: plotly figure dict produced by create_time_series().
    """
    # Campaign CSV files are numbered from 1; the dataframe index is 0-based.
    campaign_idx = 1+campaigns[campaigns['URL'] == url].index.values.astype(int)[0]
    df = pd.read_csv('../data/campaign'+'{0:0=4d}'.format(campaign_idx)+'.csv',index_col=0)
    # Use the transaction date as a DatetimeIndex.
    df['Datetime'] = pd.to_datetime(df['date'])
    df = df.set_index('Datetime')
    # Remove unnecessary "date" and "supporter name" columns
    # and add a column counting one backer per transaction.
    df = df.drop(['date','supporter name'], axis=1)
    df['backers'] = 1
    # Resample dataframe by day to get daily transaction data.
    df = df.resample('D').sum()
    df['day_number'] = range(1,1+len(df))
    # Add columns with cumulative sums of pledges and number of backers.
    df['cumsum_pledges'] = df['pledge'].cumsum()
    df['cumsum_backers'] = df['backers'].cumsum()
    # Normalize against the *selected* campaign's duration and goal.
    # BUG FIX: previously hard-coded campaigns.iloc[5-1] (campaign #5,
    # a debugging leftover), so the normalized columns were wrong for
    # every other campaign. Mirrors the iloc[i-1] pattern used in the
    # preprocessing loop.
    df['norm_time'] = df['day_number']/(1+campaigns.iloc[campaign_idx-1]['duration'])
    df['norm_capital'] = df['cumsum_pledges']/campaigns.iloc[campaign_idx-1]['Goal']
    df = df[['day_number','norm_time','pledge','cumsum_pledges','norm_capital','backers','cumsum_backers']]
    return create_time_series(df)
# Start the Dash development server when run as a script.
if __name__ == '__main__':
    app.run_server(debug=True)
import sys
import array
import struct
from . import errors
from . import wire_format
class OutputStream(object):
    """Accumulating byte sink for protobuf wire data; tostring() yields the result."""

    def __init__(self):
        # Growable buffer of unsigned bytes holding everything written so far.
        self._buffer = array.array('B')

    # array.fromstring() was renamed frombytes() in modern Pythons; bind the
    # right implementation once, at class-definition time.
    if sys.version_info < (3, 3):
        def append_raw_bytes(self, raw_bytes):
            """Copy raw_bytes verbatim onto the end of the buffer."""
            self._buffer.fromstring(raw_bytes)
    else:
        def append_raw_bytes(self, raw_bytes):
            """Copy raw_bytes verbatim onto the end of the buffer."""
            self._buffer.frombytes(raw_bytes)

    def append_little_endian32(self, unsigned_value):
        """Write unsigned_value as exactly four little-endian bytes.

        Raises errors.EncodeError when the value does not fit in 32 bits.
        """
        if not 0 <= unsigned_value <= wire_format.UINT32_MAX:
            raise errors.EncodeError(
                'Unsigned 32-bit out of range: %d' % unsigned_value)
        packed = struct.pack(
            wire_format.FORMAT_UINT32_LITTLE_ENDIAN, unsigned_value)
        self.append_raw_bytes(packed)

    def append_little_endian64(self, unsigned_value):
        """Write unsigned_value as exactly eight little-endian bytes.

        Raises errors.EncodeError when the value does not fit in 64 bits.
        """
        if not 0 <= unsigned_value <= wire_format.UINT64_MAX:
            raise errors.EncodeError(
                'Unsigned 64-bit out of range: %d' % unsigned_value)
        packed = struct.pack(
            wire_format.FORMAT_UINT64_LITTLE_ENDIAN, unsigned_value)
        self.append_raw_bytes(packed)

    def append_varint32(self, value):
        """Write a signed 32-bit integer as a varint.

        A negative value is encoded via its 64-bit two's complement, so it
        always occupies ten bytes on the wire.
        """
        if not wire_format.INT32_MIN <= value <= wire_format.INT32_MAX:
            raise errors.EncodeError('Value out of range: %d' % value)
        self.append_varint64(value)

    def append_var_uint32(self, value):
        """Write an unsigned 32-bit integer as a varint."""
        if not 0 <= value <= wire_format.UINT32_MAX:
            raise errors.EncodeError('Value out of range: %d' % value)
        self.append_var_uint64(value)

    def append_varint64(self, value):
        """Write a signed 64-bit integer as a varint."""
        if not wire_format.INT64_MIN <= value <= wire_format.INT64_MAX:
            raise errors.EncodeError('Value out of range: %d' % value)
        # Negative numbers go out as their unsigned 64-bit equivalent.
        self.append_var_uint64(value + (1 << 64) if value < 0 else value)

    def append_var_uint64(self, unsigned_value):
        """Write an unsigned 64-bit integer in base-128 varint form."""
        if not 0 <= unsigned_value <= wire_format.UINT64_MAX:
            raise errors.EncodeError('Value out of range: %d' % unsigned_value)
        remaining = unsigned_value
        # Emit 7 bits per byte, least-significant group first; the 0x80
        # bit flags that more bytes follow.
        while remaining > 0x7f:
            self._buffer.append(0x80 | (remaining & 0x7f))
            remaining >>= 7
        self._buffer.append(remaining)

    if sys.version_info < (3, 3):
        def tostring(self):
            """Return everything written so far as a byte string."""
            return self._buffer.tostring()
    else:
        def tostring(self):
            """Return everything written so far as a byte string."""
            return self._buffer.tobytes()

    def __len__(self):
        """Number of bytes written so far."""
        return len(self._buffer)
import array
import struct
from . import errors
from . import wire_format
class OutputStream(object):
    """Contains all logic for writing bits, and ToString() to get the result."""

    def __init__(self):
        # Growable unsigned-byte buffer holding all bytes written so far.
        self._buffer = array.array('B')

    # array.fromstring()/tostring() were renamed frombytes()/tobytes() in
    # modern Pythons; pick the right spelling once, at class-definition time.
    if sys.version_info < (3, 3):
        def append_raw_bytes(self, raw_bytes):
            """Appends raw_bytes to our internal buffer."""
            self._buffer.fromstring(raw_bytes)
    else:
        def append_raw_bytes(self, raw_bytes):
            """Appends raw_bytes to our internal buffer."""
            self._buffer.frombytes(raw_bytes)

    def append_little_endian32(self, unsigned_value):
        """Appends an unsigned 32-bit integer to the internal buffer,
        in little-endian byte order.

        Raises errors.EncodeError if the value does not fit in 32 bits.
        """
        if not 0 <= unsigned_value <= wire_format.UINT32_MAX:
            raise errors.EncodeError(
                'Unsigned 32-bit out of range: %d' % unsigned_value)
        self.append_raw_bytes(struct.pack(
            wire_format.FORMAT_UINT32_LITTLE_ENDIAN, unsigned_value))

    def append_little_endian64(self, unsigned_value):
        """Appends an unsigned 64-bit integer to the internal buffer,
        in little-endian byte order.

        Raises errors.EncodeError if the value does not fit in 64 bits.
        """
        if not 0 <= unsigned_value <= wire_format.UINT64_MAX:
            raise errors.EncodeError(
                'Unsigned 64-bit out of range: %d' % unsigned_value)
        self.append_raw_bytes(struct.pack(
            wire_format.FORMAT_UINT64_LITTLE_ENDIAN, unsigned_value))

    def append_varint32(self, value):
        """Appends a signed 32-bit integer to the internal buffer,
        encoded as a varint. (Note that a negative varint32 will
        always require 10 bytes of space.)
        """
        if not wire_format.INT32_MIN <= value <= wire_format.INT32_MAX:
            raise errors.EncodeError('Value out of range: %d' % value)
        self.append_varint64(value)

    def append_var_uint32(self, value):
        """Appends an unsigned 32-bit integer to the internal buffer,
        encoded as a varint.
        """
        if not 0 <= value <= wire_format.UINT32_MAX:
            raise errors.EncodeError('Value out of range: %d' % value)
        self.append_var_uint64(value)

    def append_varint64(self, value):
        """Appends a signed 64-bit integer to the internal buffer,
        encoded as a varint.
        """
        if not wire_format.INT64_MIN <= value <= wire_format.INT64_MAX:
            raise errors.EncodeError('Value out of range: %d' % value)
        if value < 0:
            # Two's complement: encode negatives as their unsigned
            # 64-bit equivalent.
            value += (1 << 64)
        self.append_var_uint64(value)

    def append_var_uint64(self, unsigned_value):
        """Appends an unsigned 64-bit integer to the internal buffer,
        encoded as a varint.
        """
        if not 0 <= unsigned_value <= wire_format.UINT64_MAX:
            raise errors.EncodeError('Value out of range: %d' % unsigned_value)
        while True:
            # Emit the low 7 bits; set the 0x80 continuation flag when
            # more significant bits remain.
            bits = unsigned_value & 0x7f
            unsigned_value >>= 7
            if unsigned_value:
                bits |= 0x80
            self._buffer.append(bits)
            if not unsigned_value:
                break

    if sys.version_info < (3, 3):
        def tostring(self):
            """Returns a string containing the bytes in our internal buffer."""
            return self._buffer.tostring()
    else:
        def tostring(self):
            """Returns a string containing the bytes in our internal buffer."""
            return self._buffer.tobytes()

    def __len__(self):
        # Number of bytes written so far.
        return len(self._buffer)
import datetime
import testtools
from mock import patch
import oslo_messaging as messaging
from oslo_config import cfg
from oslo_log import log as logging
from designate import exceptions
from designate.central import service as central_service
from designate.tests.test_api.test_v1 import ApiV1Test
LOG = logging.getLogger(__name__)
class ApiV1zonesTest(ApiV1Test):
    """Functional tests for the v1 'domains' REST API.

    Exercises schema retrieval, create/read/update/delete of zones
    (v1 calls them domains), input validation (name/email/ttl/description),
    error mapping (400/404/409/504), and the rule that SECONDARY zones are
    hidden from the v1 API.
    """

    def test_get_zone_schema(self):
        response = self.get('schemas/domain')
        self.assertIn('description', response.json)
        self.assertIn('links', response.json)
        self.assertIn('title', response.json)
        self.assertIn('id', response.json)
        self.assertIn('additionalProperties', response.json)
        self.assertIn('properties', response.json)
        self.assertIn('description', response.json['properties'])
        self.assertIn('created_at', response.json['properties'])
        self.assertIn('updated_at', response.json['properties'])
        self.assertIn('name', response.json['properties'])
        self.assertIn('email', response.json['properties'])
        self.assertIn('ttl', response.json['properties'])
        self.assertIn('serial', response.json['properties'])

    def test_get_zones_schema(self):
        response = self.get('schemas/domains')
        self.assertIn('description', response.json)
        self.assertIn('additionalProperties', response.json)
        self.assertIn('properties', response.json)
        self.assertIn('title', response.json)
        self.assertIn('id', response.json)

    def test_create_zone(self):
        # Create a zone
        fixture = self.get_zone_fixture(0)
        # V1 doesn't have these
        del fixture['type']
        response = self.post('domains', data=fixture)
        self.assertIn('id', response.json)
        self.assertIn('name', response.json)
        self.assertEqual(response.json['name'], fixture['name'])

    def test_create_zone_junk(self):
        # Create a zone
        fixture = self.get_zone_fixture(0)
        # Add a junk property
        fixture['junk'] = 'Junk Field'
        # Ensure it fails with a 400
        self.post('domains', data=fixture, status_code=400)

    # Central timing out must surface as HTTP 504 (gateway timeout).
    @patch.object(central_service.Service, 'create_zone',
                  side_effect=messaging.MessagingTimeout())
    def test_create_zone_timeout(self, _):
        # Create a zone
        fixture = self.get_zone_fixture(0)
        # V1 doesn't have these
        del fixture['type']
        self.post('domains', data=fixture, status_code=504)

    # A duplicate zone must surface as HTTP 409 (conflict).
    @patch.object(central_service.Service, 'create_zone',
                  side_effect=exceptions.DuplicateZone())
    def test_create_zone_duplicate(self, _):
        # Create a zone
        fixture = self.get_zone_fixture(0)
        # V1 doesn't have these
        del fixture['type']
        self.post('domains', data=fixture, status_code=409)

    def test_create_zone_null_ttl(self):
        # Create a zone
        fixture = self.get_zone_fixture(0)
        fixture['ttl'] = None
        self.post('domains', data=fixture, status_code=400)

    def test_create_zone_negative_ttl(self):
        # Create a zone
        fixture = self.get_zone_fixture(0)
        fixture['ttl'] = -1
        self.post('domains', data=fixture, status_code=400)

    def test_create_zone_zero_ttl(self):
        # Create a zone
        fixture = self.get_zone_fixture(0)
        fixture['ttl'] = 0
        self.post('domains', data=fixture, status_code=400)

    def test_create_zone_invalid_ttl(self):
        # Create a zone
        fixture = self.get_zone_fixture(0)
        fixture['ttl'] = "$?>&"
        self.post('domains', data=fixture, status_code=400)

    def test_create_zone_ttl_greater_than_max(self):
        # 2**31 is one past the largest valid signed 32-bit TTL.
        fixture = self.get_zone_fixture(0)
        fixture['ttl'] = 2147483648
        self.post('domains', data=fixture, status_code=400)

    def test_create_zone_utf_description(self):
        # Create a zone
        fixture = self.get_zone_fixture(0)
        # V1 doesn't have type
        del fixture['type']
        # Give it a UTF-8 filled description
        fixture['description'] = "utf-8:2H₂+O₂⇌2H₂O,R=4.7kΩ,⌀200mm∮E⋅da=Q,n" \
                                 ",∑f(i)=∏g(i),∀x∈ℝ:⌈x⌉"
        # Create the zone, ensuring it succeeds, thus UTF-8 is supported
        self.post('domains', data=fixture)

    def test_create_zone_description_too_long(self):
        # Create a zone
        fixture = self.get_zone_fixture(0)
        fixture['description'] = "x" * 161
        # Create the zone, ensuring it fails with a 400
        self.post('domains', data=fixture, status_code=400)

    def test_create_zone_with_unwanted_attributes(self):
        # Server-managed fields must be rejected on create.
        zone_id = "2d1d1d1d-1324-4a80-aa32-1f69a91bf2c8"
        created_at = datetime.datetime(2014, 6, 22, 21, 50, 0)
        updated_at = datetime.datetime(2014, 6, 22, 21, 50, 0)
        serial = 1234567
        # Create a zone
        fixture = self.get_zone_fixture(0)
        fixture['id'] = zone_id
        fixture['created_at'] = created_at
        fixture['updated_at'] = updated_at
        fixture['serial'] = serial
        self.post('domains', data=fixture, status_code=400)

    def test_create_invalid_name(self):
        # Prepare a zone
        fixture = self.get_zone_fixture(0)
        invalid_names = [
            'org',
            'example.org',
            'example.321',
        ]
        for invalid_name in invalid_names:
            fixture['name'] = invalid_name
            # Create a record
            response = self.post('domains', data=fixture, status_code=400)
            self.assertNotIn('id', response.json)

    def test_create_zone_name_too_long(self):
        fixture = self.get_zone_fixture(0)
        long_name = 'a' * 255 + ".org."
        fixture['name'] = long_name
        response = self.post('domains', data=fixture, status_code=400)
        self.assertNotIn('id', response.json)

    def test_create_zone_name_is_not_present(self):
        fixture = self.get_zone_fixture(0)
        del fixture['name']
        self.post('domains', data=fixture, status_code=400)

    def test_create_invalid_email(self):
        # Prepare a zone
        fixture = self.get_zone_fixture(0)
        invalid_emails = [
            'org',
            'example.org',
            'bla.example.org',
            'org.',
            'example.org.',
            'bla.example.org.',
            'bla.example.org.',
        ]
        for invalid_email in invalid_emails:
            fixture['email'] = invalid_email
            # Create a record
            response = self.post('domains', data=fixture, status_code=400)
            self.assertNotIn('id', response.json)

    def test_create_zone_email_too_long(self):
        fixture = self.get_zone_fixture(0)
        long_email = 'a' * 255 + "@org.com"
        fixture['email'] = long_email
        response = self.post('domains', data=fixture, status_code=400)
        self.assertNotIn('id', response.json)

    def test_create_zone_email_not_present(self):
        fixture = self.get_zone_fixture(0)
        del fixture['email']
        self.post('domains', data=fixture, status_code=400)

    def test_create_zone_twice(self):
        self.create_zone()
        with testtools.ExpectedException(exceptions.DuplicateZone):
            self.create_zone()

    def test_create_zone_pending_deletion(self):
        # A zone awaiting backend deletion still blocks re-creation.
        zone = self.create_zone()
        self.delete('domains/%s' % zone['id'])
        with testtools.ExpectedException(exceptions.DuplicateZone):
            self.create_zone()

    def test_get_zones(self):
        response = self.get('domains')
        self.assertIn('domains', response.json)
        self.assertEqual(0, len(response.json['domains']))
        # Create a zone
        self.create_zone()
        response = self.get('domains')
        self.assertIn('domains', response.json)
        self.assertEqual(1, len(response.json['domains']))
        # Create a second zone
        self.create_zone(fixture=1)
        response = self.get('domains')
        self.assertIn('domains', response.json)
        self.assertEqual(2, len(response.json['domains']))

    def test_get_zone_servers(self):
        # Create a zone
        zone = self.create_zone()
        response = self.get('domains/%s/servers' % zone['id'])
        # Verify length of zone servers
        self.assertEqual(1, len(response.json['servers']))

    @patch.object(central_service.Service, 'find_zones',
                  side_effect=messaging.MessagingTimeout())
    def test_get_zones_timeout(self, _):
        self.get('domains', status_code=504)

    def test_get_zone(self):
        # Create a zone
        zone = self.create_zone()
        response = self.get('domains/%s' % zone['id'])
        self.assertIn('id', response.json)
        self.assertEqual(response.json['id'], zone['id'])

    @patch.object(central_service.Service, 'find_zone',
                  side_effect=messaging.MessagingTimeout())
    def test_get_zone_timeout(self, _):
        # Create a zone
        zone = self.create_zone()
        self.get('domains/%s' % zone['id'], status_code=504)

    def test_get_zone_missing(self):
        self.get('domains/2fdadfb1-cf96-4259-ac6b-bb7b6d2ff980',
                 status_code=404)

    def test_get_zone_invalid_id(self):
        # The letter "G" is not valid in a UUID
        self.get('domains/2fdadfb1-cf96-4259-ac6b-bb7b6d2ff9GG',
                 status_code=404)
        self.get('domains/2fdadfb1cf964259ac6bbb7b6d2ff980', status_code=404)

    def test_update_zone(self):
        # Create a zone
        zone = self.create_zone()
        data = {'email': 'prefix-%s' % zone['email']}
        response = self.put('domains/%s' % zone['id'], data=data)
        self.assertIn('id', response.json)
        self.assertEqual(response.json['id'], zone['id'])
        self.assertIn('email', response.json)
        self.assertEqual('prefix-%s' % zone['email'], response.json['email'])

    def test_update_zone_junk(self):
        # Create a zone
        zone = self.create_zone()
        data = {'email': 'prefix-%s' % zone['email'], 'junk': 'Junk Field'}
        self.put('domains/%s' % zone['id'], data=data, status_code=400)

    def test_update_zone_name_fail(self):
        # Renaming an existing zone is not allowed.
        zone = self.create_zone()
        data = {'name': 'renamed.com.'}
        self.put('domains/%s' % zone['id'], data=data, status_code=400)

    def test_update_zone_null_ttl(self):
        # Create a zone
        zone = self.create_zone()
        data = {'ttl': None}
        self.put('domains/%s' % zone['id'], data=data, status_code=400)

    def test_update_zone_negative_ttl(self):
        # Create a zone
        zone = self.create_zone()
        data = {'ttl': -1}
        self.put('domains/%s' % zone['id'], data=data, status_code=400)

    def test_update_zone_zero_ttl(self):
        # Create a zone
        zone = self.create_zone()
        data = {'ttl': 0}
        self.put('domains/%s' % zone['id'], data=data, status_code=400)

    @patch.object(central_service.Service, 'update_zone',
                  side_effect=messaging.MessagingTimeout())
    def test_update_zone_timeout(self, _):
        # Create a zone
        zone = self.create_zone()
        data = {'email': 'prefix-%s' % zone['email']}
        self.put('domains/%s' % zone['id'], data=data, status_code=504)

    @patch.object(central_service.Service, 'update_zone',
                  side_effect=exceptions.DuplicateZone())
    def test_update_zone_duplicate(self, _):
        # Create a zone
        zone = self.create_zone()
        data = {'email': 'prefix-%s' % zone['email']}
        self.put('domains/%s' % zone['id'], data=data, status_code=409)

    def test_update_zone_missing(self):
        data = {'email': '<EMAIL>'}
        self.put('domains/2fdadfb1-cf96-4259-ac6b-bb7b6d2ff980', data=data,
                 status_code=404)

    def test_update_zone_invalid_id(self):
        data = {'email': '<EMAIL>'}
        # The letter "G" is not valid in a UUID
        self.put('domains/2fdadfb1-cf96-4259-ac6b-bb7b6d2ff9GG', data=data,
                 status_code=404)
        self.put('domains/2fdadfb1cf964259ac6bbb7b6d2ff980', data=data,
                 status_code=404)

    # NOTE: method name keeps the historical 'greter' typo; renaming a test
    # is a behavior-visible change to test selection, so it is left as-is.
    def test_update_zone_ttl_greter_than_max(self):
        # Create a zone
        zone = self.create_zone()
        data = {'ttl': 2147483648}
        self.put('domains/%s' % zone['id'], data=data, status_code=400)

    def test_update_zone_invalid_email(self):
        # Create a zone
        zone = self.create_zone()
        invalid_emails = [
            'org',
            'example.org',
            'bla.example.org',
            'org.',
            'example.org.',
            'bla.example.org.',
            'bla.example.org.',
            'a' * 255 + "@com",
            ''
        ]
        for invalid_email in invalid_emails:
            data = {'email': invalid_email}
            self.put('domains/%s' % zone['id'], data=data, status_code=400)

    def test_update_zone_description_too_long(self):
        # Create a zone
        zone = self.create_zone()
        invalid_des = 'a' * 165
        data = {'description': invalid_des}
        self.put('domains/%s' % zone['id'], data=data, status_code=400)

    def test_update_zone_in_pending_deletion(self):
        # A zone awaiting backend deletion is no longer updatable.
        zone = self.create_zone()
        self.delete('domains/%s' % zone['id'])
        self.put('domains/%s' % zone['id'], data={}, status_code=404)

    def test_delete_zone(self):
        # Create a zone
        zone = self.create_zone()
        self.delete('domains/%s' % zone['id'])
        # Simulate the zone having been deleted on the backend
        zone_serial = self.central_service.get_zone(
            self.admin_context, zone['id']).serial
        self.central_service.update_status(
            self.admin_context, zone['id'], "SUCCESS", zone_serial)
        # Ensure we can no longer fetch the zone
        self.get('domains/%s' % zone['id'], status_code=404)

    def test_zone_in_pending_deletion(self):
        zone1 = self.create_zone()
        self.create_zone(fixture=1)
        response = self.get('domains')
        self.assertEqual(2, len(response.json['domains']))
        # Delete zone1
        self.delete('domains/%s' % zone1['id'])
        # Ensure we can no longer list nor fetch the deleted zone
        response = self.get('domains')
        self.assertEqual(1, len(response.json['domains']))
        self.get('domains/%s' % zone1['id'], status_code=404)

    @patch.object(central_service.Service, 'delete_zone',
                  side_effect=messaging.MessagingTimeout())
    def test_delete_zone_timeout(self, _):
        # Create a zone
        zone = self.create_zone()
        self.delete('domains/%s' % zone['id'], status_code=504)

    def test_delete_zone_missing(self):
        self.delete('domains/2fdadfb1-cf96-4259-ac6b-bb7b6d2ff980',
                    status_code=404)

    def test_delete_zone_invalid_id(self):
        # The letter "G" is not valid in a UUID
        self.delete('domains/2fdadfb1-cf96-4259-ac6b-bb7b6d2ff9GG',
                    status_code=404)
        self.delete('domains/2fdadfb1cf964259ac6bbb7b6d2ff980',
                    status_code=404)

    # SECONDARY zones are not exposed through the v1 API: all verbs 404.
    def test_get_secondary_missing(self):
        fixture = self.get_zone_fixture('SECONDARY', 0)
        fixture['email'] = cfg.CONF['service:central'].managed_resource_email
        zone = self.create_zone(**fixture)
        self.get('domains/%s' % zone.id, status_code=404)

    def test_update_secondary_missing(self):
        fixture = self.get_zone_fixture('SECONDARY', 0)
        fixture['email'] = cfg.CONF['service:central'].managed_resource_email
        zone = self.create_zone(**fixture)
        self.put('domains/%s' % zone.id, {}, status_code=404)

    def test_delete_secondary_missing(self):
        fixture = self.get_zone_fixture('SECONDARY', 0)
        fixture['email'] = cfg.CONF['service:central'].managed_resource_email
        zone = self.create_zone(**fixture)
        self.delete('domains/%s' % zone.id, status_code=404)

    def test_get_zone_servers_from_secondary(self):
        fixture = self.get_zone_fixture('SECONDARY', 0)
        fixture['email'] = cfg.CONF['service:central'].managed_resource_email
        zone = self.create_zone(**fixture)
        self.get('domains/%s/servers' % zone.id, status_code=404)
import testtools
from mock import patch
import oslo_messaging as messaging
from oslo_config import cfg
from oslo_log import log as logging
from designate import exceptions
from designate.central import service as central_service
from designate.tests.test_api.test_v1 import ApiV1Test
LOG = logging.getLogger(__name__)
class ApiV1zonesTest(ApiV1Test):
def test_get_zone_schema(self):
response = self.get('schemas/domain')
self.assertIn('description', response.json)
self.assertIn('links', response.json)
self.assertIn('title', response.json)
self.assertIn('id', response.json)
self.assertIn('additionalProperties', response.json)
self.assertIn('properties', response.json)
self.assertIn('description', response.json['properties'])
self.assertIn('created_at', response.json['properties'])
self.assertIn('updated_at', response.json['properties'])
self.assertIn('name', response.json['properties'])
self.assertIn('email', response.json['properties'])
self.assertIn('ttl', response.json['properties'])
self.assertIn('serial', response.json['properties'])
def test_get_zones_schema(self):
response = self.get('schemas/domains')
self.assertIn('description', response.json)
self.assertIn('additionalProperties', response.json)
self.assertIn('properties', response.json)
self.assertIn('title', response.json)
self.assertIn('id', response.json)
def test_create_zone(self):
# Create a zone
fixture = self.get_zone_fixture(0)
# V1 doesn't have these
del fixture['type']
response = self.post('domains', data=fixture)
self.assertIn('id', response.json)
self.assertIn('name', response.json)
self.assertEqual(response.json['name'], fixture['name'])
def test_create_zone_junk(self):
# Create a zone
fixture = self.get_zone_fixture(0)
# Add a junk property
fixture['junk'] = 'Junk Field'
# Ensure it fails with a 400
self.post('domains', data=fixture, status_code=400)
@patch.object(central_service.Service, 'create_zone',
side_effect=messaging.MessagingTimeout())
def test_create_zone_timeout(self, _):
# Create a zone
fixture = self.get_zone_fixture(0)
# V1 doesn't have these
del fixture['type']
self.post('domains', data=fixture, status_code=504)
@patch.object(central_service.Service, 'create_zone',
side_effect=exceptions.DuplicateZone())
def test_create_zone_duplicate(self, _):
# Create a zone
fixture = self.get_zone_fixture(0)
# V1 doesn't have these
del fixture['type']
self.post('domains', data=fixture, status_code=409)
def test_create_zone_null_ttl(self):
# Create a zone
fixture = self.get_zone_fixture(0)
fixture['ttl'] = None
self.post('domains', data=fixture, status_code=400)
def test_create_zone_negative_ttl(self):
# Create a zone
fixture = self.get_zone_fixture(0)
fixture['ttl'] = -1
self.post('domains', data=fixture, status_code=400)
def test_create_zone_zero_ttl(self):
# Create a zone
fixture = self.get_zone_fixture(0)
fixture['ttl'] = 0
self.post('domains', data=fixture, status_code=400)
def test_create_zone_invalid_ttl(self):
# Create a zone
fixture = self.get_zone_fixture(0)
fixture['ttl'] = "$?>&"
self.post('domains', data=fixture, status_code=400)
def test_create_zone_ttl_greater_than_max(self):
fixture = self.get_zone_fixture(0)
fixture['ttl'] = 2147483648
self.post('domains', data=fixture, status_code=400)
def test_create_zone_utf_description(self):
# Create a zone
fixture = self.get_zone_fixture(0)
# V1 doesn't have type
del fixture['type']
# Give it a UTF-8 filled description
fixture['description'] = "utf-8:2H₂+O₂⇌2H₂O,R=4.7kΩ,⌀200mm∮E⋅da=Q,n" \
",∑f(i)=∏g(i),∀x∈ℝ:⌈x⌉"
# Create the zone, ensuring it succeeds, thus UTF-8 is supported
self.post('domains', data=fixture)
def test_create_zone_description_too_long(self):
# Create a zone
fixture = self.get_zone_fixture(0)
fixture['description'] = "x" * 161
# Create the zone, ensuring it fails with a 400
self.post('domains', data=fixture, status_code=400)
def test_create_zone_with_unwanted_attributes(self):
zone_id = "2d1d1d1d-1324-4a80-aa32-1f69a91bf2c8"
created_at = datetime.datetime(2014, 6, 22, 21, 50, 0)
updated_at = datetime.datetime(2014, 6, 22, 21, 50, 0)
serial = 1234567
# Create a zone
fixture = self.get_zone_fixture(0)
fixture['id'] = zone_id
fixture['created_at'] = created_at
fixture['updated_at'] = updated_at
fixture['serial'] = serial
self.post('domains', data=fixture, status_code=400)
def test_create_invalid_name(self):
# Prepare a zone
fixture = self.get_zone_fixture(0)
invalid_names = [
'org',
'example.org',
'example.321',
]
for invalid_name in invalid_names:
fixture['name'] = invalid_name
# Create a record
response = self.post('domains', data=fixture, status_code=400)
self.assertNotIn('id', response.json)
def test_create_zone_name_too_long(self):
fixture = self.get_zone_fixture(0)
long_name = 'a' * 255 + ".org."
fixture['name'] = long_name
response = self.post('domains', data=fixture, status_code=400)
self.assertNotIn('id', response.json)
def test_create_zone_name_is_not_present(self):
fixture = self.get_zone_fixture(0)
del fixture['name']
self.post('domains', data=fixture, status_code=400)
def test_create_invalid_email(self):
# Prepare a zone
fixture = self.get_zone_fixture(0)
invalid_emails = [
'org',
'example.org',
'bla.example.org',
'org.',
'example.org.',
'bla.example.org.',
'bla.example.org.',
]
for invalid_email in invalid_emails:
fixture['email'] = invalid_email
# Create a record
response = self.post('domains', data=fixture, status_code=400)
self.assertNotIn('id', response.json)
def test_create_zone_email_too_long(self):
fixture = self.get_zone_fixture(0)
long_email = 'a' * 255 + "@org.com"
fixture['email'] = long_email
response = self.post('domains', data=fixture, status_code=400)
self.assertNotIn('id', response.json)
def test_create_zone_email_not_present(self):
fixture = self.get_zone_fixture(0)
del fixture['email']
self.post('domains', data=fixture, status_code=400)
def test_create_zone_twice(self):
self.create_zone()
with testtools.ExpectedException(exceptions.DuplicateZone):
self.create_zone()
def test_create_zone_pending_deletion(self):
zone = self.create_zone()
self.delete('domains/%s' % zone['id'])
with testtools.ExpectedException(exceptions.DuplicateZone):
self.create_zone()
def test_get_zones(self):
response = self.get('domains')
self.assertIn('domains', response.json)
self.assertEqual(0, len(response.json['domains']))
# Create a zone
self.create_zone()
response = self.get('domains')
self.assertIn('domains', response.json)
self.assertEqual(1, len(response.json['domains']))
# Create a second zone
self.create_zone(fixture=1)
response = self.get('domains')
self.assertIn('domains', response.json)
self.assertEqual(2, len(response.json['domains']))
def test_get_zone_servers(self):
# Create a zone
zone = self.create_zone()
response = self.get('domains/%s/servers' % zone['id'])
# Verify length of zone servers
self.assertEqual(1, len(response.json['servers']))
@patch.object(central_service.Service, 'find_zones',
side_effect=messaging.MessagingTimeout())
def test_get_zones_timeout(self, _):
self.get('domains', status_code=504)
def test_get_zone(self):
# Create a zone
zone = self.create_zone()
response = self.get('domains/%s' % zone['id'])
self.assertIn('id', response.json)
self.assertEqual(response.json['id'], zone['id'])
@patch.object(central_service.Service, 'find_zone',
side_effect=messaging.MessagingTimeout())
def test_get_zone_timeout(self, _):
# Create a zone
zone = self.create_zone()
self.get('domains/%s' % zone['id'], status_code=504)
def test_get_zone_missing(self):
self.get('domains/2fdadfb1-cf96-4259-ac6b-bb7b6d2ff980',
status_code=404)
def test_get_zone_invalid_id(self):
# The letter "G" is not valid in a UUID
self.get('domains/2fdadfb1-cf96-4259-ac6b-bb7b6d2ff9GG',
status_code=404)
self.get('domains/2fdadfb1cf964259ac6bbb7b6d2ff980', status_code=404)
def test_update_zone(self):
# Create a zone
zone = self.create_zone()
data = {'email': 'prefix-%s' % zone['email']}
response = self.put('domains/%s' % zone['id'], data=data)
self.assertIn('id', response.json)
self.assertEqual(response.json['id'], zone['id'])
self.assertIn('email', response.json)
self.assertEqual('prefix-%s' % zone['email'], response.json['email'])
def test_update_zone_junk(self):
# Create a zone
zone = self.create_zone()
data = {'email': 'prefix-%s' % zone['email'], 'junk': 'Junk Field'}
self.put('domains/%s' % zone['id'], data=data, status_code=400)
def test_update_zone_name_fail(self):
# Create a zone
zone = self.create_zone()
data = {'name': 'renamed.com.'}
self.put('domains/%s' % zone['id'], data=data, status_code=400)
def test_update_zone_null_ttl(self):
# Create a zone
zone = self.create_zone()
data = {'ttl': None}
self.put('domains/%s' % zone['id'], data=data, status_code=400)
def test_update_zone_negative_ttl(self):
# Create a zone
zone = self.create_zone()
data = {'ttl': -1}
self.put('domains/%s' % zone['id'], data=data, status_code=400)
def test_update_zone_zero_ttl(self):
# Create a zone
zone = self.create_zone()
data = {'ttl': 0}
self.put('domains/%s' % zone['id'], data=data, status_code=400)
@patch.object(central_service.Service, 'update_zone',
side_effect=messaging.MessagingTimeout())
def test_update_zone_timeout(self, _):
# Create a zone
zone = self.create_zone()
data = {'email': 'prefix-%s' % zone['email']}
self.put('domains/%s' % zone['id'], data=data, status_code=504)
@patch.object(central_service.Service, 'update_zone',
side_effect=exceptions.DuplicateZone())
def test_update_zone_duplicate(self, _):
# Create a zone
zone = self.create_zone()
data = {'email': 'prefix-%s' % zone['email']}
self.put('domains/%s' % zone['id'], data=data, status_code=409)
def test_update_zone_missing(self):
data = {'email': '<EMAIL>'}
self.put('domains/2fdadfb1-cf96-4259-ac6b-bb7b6d2ff980', data=data,
status_code=404)
def test_update_zone_invalid_id(self):
data = {'email': '<EMAIL>'}
# The letter "G" is not valid in a UUID
self.put('domains/2fdadfb1-cf96-4259-ac6b-bb7b6d2ff9GG', data=data,
status_code=404)
self.put('domains/2fdadfb1cf964259ac6bbb7b6d2ff980', data=data,
status_code=404)
def test_update_zone_ttl_greter_than_max(self):
# Create a zone
zone = self.create_zone()
data = {'ttl': 2147483648}
self.put('domains/%s' % zone['id'], data=data, status_code=400)
def test_update_zone_invalid_email(self):
# Create a zone
zone = self.create_zone()
invalid_emails = [
'org',
'example.org',
'bla.example.org',
'org.',
'example.org.',
'bla.example.org.',
'bla.example.org.',
'a' * 255 + "@com",
''
]
for invalid_email in invalid_emails:
data = {'email': invalid_email}
self.put('domains/%s' % zone['id'], data=data, status_code=400)
def test_update_zone_description_too_long(self):
# Create a zone
zone = self.create_zone()
invalid_des = 'a' * 165
data = {'description': invalid_des}
self.put('domains/%s' % zone['id'], data=data, status_code=400)
def test_update_zone_in_pending_deletion(self):
zone = self.create_zone()
self.delete('domains/%s' % zone['id'])
self.put('domains/%s' % zone['id'], data={}, status_code=404)
def test_delete_zone(self):
# Create a zone
zone = self.create_zone()
self.delete('domains/%s' % zone['id'])
# Simulate the zone having been deleted on the backend
zone_serial = self.central_service.get_zone(
self.admin_context, zone['id']).serial
self.central_service.update_status(
self.admin_context, zone['id'], "SUCCESS", zone_serial)
# Ensure we can no longer fetch the zone
self.get('domains/%s' % zone['id'], status_code=404)
def test_zone_in_pending_deletion(self):
zone1 = self.create_zone()
self.create_zone(fixture=1)
response = self.get('domains')
self.assertEqual(2, len(response.json['domains']))
# Delete zone1
self.delete('domains/%s' % zone1['id'])
# Ensure we can no longer list nor fetch the deleted zone
response = self.get('domains')
self.assertEqual(1, len(response.json['domains']))
self.get('domains/%s' % zone1['id'], status_code=404)
@patch.object(central_service.Service, 'delete_zone',
side_effect=messaging.MessagingTimeout())
def test_delete_zone_timeout(self, _):
# Create a zone
zone = self.create_zone()
self.delete('domains/%s' % zone['id'], status_code=504)
def test_delete_zone_missing(self):
self.delete('domains/2fdadfb1-cf96-4259-ac6b-bb7b6d2ff980',
status_code=404)
def test_delete_zone_invalid_id(self):
# The letter "G" is not valid in a UUID
self.delete('domains/2fdadfb1-cf96-4259-ac6b-bb7b6d2ff9GG',
status_code=404)
self.delete('domains/2fdadfb1cf964259ac6bbb7b6d2ff980',
status_code=404)
def test_get_secondary_missing(self):
fixture = self.get_zone_fixture('SECONDARY', 0)
fixture['email'] = cfg.CONF['service:central'].managed_resource_email
zone = self.create_zone(**fixture)
self.get('domains/%s' % zone.id, status_code=404)
def test_update_secondary_missing(self):
fixture = self.get_zone_fixture('SECONDARY', 0)
fixture['email'] = cfg.CONF['service:central'].managed_resource_email
zone = self.create_zone(**fixture)
self.put('domains/%s' % zone.id, {}, status_code=404)
def test_delete_secondary_missing(self):
fixture = self.get_zone_fixture('SECONDARY', 0)
fixture['email'] = cfg.CONF['service:central'].managed_resource_email
zone = self.create_zone(**fixture)
self.delete('domains/%s' % zone.id, status_code=404)
def test_get_zone_servers_from_secondary(self):
fixture = self.get_zone_fixture('SECONDARY', 0)
fixture['email'] = cfg.CONF['service:central'].managed_resource_email
zone = self.create_zone(**fixture)
self.get('domains/%s/servers' % zone.id, status_code=404) | 0.496094 | 0.42937 |
import pytest
from crummycm.validation.validation import validate
from example_templates.component.config_dict.a import (
cd_outer,
no_cd_single,
no_cd_single_nested,
)
ex_config = {
"cd_outer": (
(
{
"my_mixed": {
"kd_num": 0,
"my_str": "Jack",
"my_num": 11,
"wild_card": 2.3,
}
},
cd_outer,
),
{"my_mixed": {"kd_num": 0, "my_str": "Jack", "my_num": 11, "wild_card": 2.3}},
),
"no_cd_single": (
(
{
"my_mixed": {
"kd_num": 0,
"my_str": "Jack",
"my_num": 11,
"wild_card": 2.3,
}
},
no_cd_single,
),
{"my_mixed": {"kd_num": 0, "my_str": "Jack", "my_num": 11, "wild_card": 2.3}},
),
"no_cd_single_nested": (
(
{
"my_mixed": {
"kd_num": 0,
"my_str": "Jack",
"my_num": 11,
"wild_card": 2.3,
"nested_md": {
"kd_num": 0,
"my_str": "Jack",
"my_num": 11,
"wild_card": 2.3,
},
}
},
no_cd_single_nested,
),
{
"my_mixed": {
"kd_num": 0,
"my_str": "Jack",
"my_num": 11,
"wild_card": 2.3,
"nested_md": {
"kd_num": 0,
"my_str": "Jack",
"my_num": 11,
"wild_card": 2.3,
},
}
},
),
}
def call(config):
raw_dict = validate(config[0], config[1])
return raw_dict
@pytest.mark.parametrize(
"config,expected", ex_config.values(), ids=list(ex_config.keys())
)
def test_basic_parse(config, expected):
"""test whether the user input can be parsed to a dict"""
if isinstance(expected, dict):
raw_dict = call(config)
assert expected == raw_dict
elif issubclass(expected, ValueError):
with pytest.raises(ValueError):
raw_dict = call(config)
elif issubclass(expected, FileNotFoundError):
with pytest.raises(FileNotFoundError):
raw_dict = call(config)
elif issubclass(expected, TypeError):
with pytest.raises(TypeError):
raw_dict = call(config)
elif issubclass(expected, KeyError):
with pytest.raises(KeyError):
raw_dict = call(config)
else:
raise ValueError(f"expected {expected} not accounted for") | tests/unit/validate/test_dict_cd.py | import pytest
from crummycm.validation.validation import validate
from example_templates.component.config_dict.a import (
cd_outer,
no_cd_single,
no_cd_single_nested,
)
ex_config = {
"cd_outer": (
(
{
"my_mixed": {
"kd_num": 0,
"my_str": "Jack",
"my_num": 11,
"wild_card": 2.3,
}
},
cd_outer,
),
{"my_mixed": {"kd_num": 0, "my_str": "Jack", "my_num": 11, "wild_card": 2.3}},
),
"no_cd_single": (
(
{
"my_mixed": {
"kd_num": 0,
"my_str": "Jack",
"my_num": 11,
"wild_card": 2.3,
}
},
no_cd_single,
),
{"my_mixed": {"kd_num": 0, "my_str": "Jack", "my_num": 11, "wild_card": 2.3}},
),
"no_cd_single_nested": (
(
{
"my_mixed": {
"kd_num": 0,
"my_str": "Jack",
"my_num": 11,
"wild_card": 2.3,
"nested_md": {
"kd_num": 0,
"my_str": "Jack",
"my_num": 11,
"wild_card": 2.3,
},
}
},
no_cd_single_nested,
),
{
"my_mixed": {
"kd_num": 0,
"my_str": "Jack",
"my_num": 11,
"wild_card": 2.3,
"nested_md": {
"kd_num": 0,
"my_str": "Jack",
"my_num": 11,
"wild_card": 2.3,
},
}
},
),
}
def call(config):
raw_dict = validate(config[0], config[1])
return raw_dict
@pytest.mark.parametrize(
"config,expected", ex_config.values(), ids=list(ex_config.keys())
)
def test_basic_parse(config, expected):
"""test whether the user input can be parsed to a dict"""
if isinstance(expected, dict):
raw_dict = call(config)
assert expected == raw_dict
elif issubclass(expected, ValueError):
with pytest.raises(ValueError):
raw_dict = call(config)
elif issubclass(expected, FileNotFoundError):
with pytest.raises(FileNotFoundError):
raw_dict = call(config)
elif issubclass(expected, TypeError):
with pytest.raises(TypeError):
raw_dict = call(config)
elif issubclass(expected, KeyError):
with pytest.raises(KeyError):
raw_dict = call(config)
else:
raise ValueError(f"expected {expected} not accounted for") | 0.541894 | 0.413359 |
from pathlib import Path
from statistics import median
from typing import Optional
POINTS = {")": 3, "]": 57, "}": 1197, ">": 25137}
AUTOCOMPLETE_POINTS = {")": 1, "]": 2, "}": 3, ">": 4}
def read_syntax_file(path: Path) -> list[str]:
with open(path, "r") as file:
return file.read().split("\n")
def check_syntax(syntax, pairs: dict) -> tuple[bool, str]:
combinations = [key + val for key, val in pairs.items()]
found_something = True
while found_something:
found_something = False
for combination in combinations:
if combination in syntax:
syntax = syntax.replace(combination, "")
found_something = True
first = find_first_closing(syntax)
if first is not None:
return False, syntax[first]
else:
return True, syntax
def find_first_closing(syntax: str) -> Optional[int]:
first = float("inf")
for closing in [")", "]", "}", ">"]:
if closing in syntax:
idx = syntax.find(closing)
if idx < first:
first = idx
return None if first == float("inf") else first
def compute_autocomplete_score(completion_string: str) -> int:
score = 0
for character in completion_string:
score *= 5
score += AUTOCOMPLETE_POINTS[character]
return score
def main():
pairs = {"[": "]", "(": ")", "{": "}", "<": ">"}
lines = read_syntax_file(Path("./data/day_10_data.txt"))
points_total = 0
for line in lines:
valid, illegal = check_syntax(line, pairs)
if not valid:
points_total += POINTS[illegal]
print(points_total)
autocompletion_scores = []
for line in lines:
valid, result = check_syntax(line, pairs)
if not valid:
continue
completion = ""
for character in reversed(result):
completion += pairs[character]
autocompletion_scores.append(compute_autocomplete_score(completion))
print(median(autocompletion_scores)) | advent/day_10.py | from pathlib import Path
from statistics import median
from typing import Optional
POINTS = {")": 3, "]": 57, "}": 1197, ">": 25137}
AUTOCOMPLETE_POINTS = {")": 1, "]": 2, "}": 3, ">": 4}
def read_syntax_file(path: Path) -> list[str]:
with open(path, "r") as file:
return file.read().split("\n")
def check_syntax(syntax, pairs: dict) -> tuple[bool, str]:
combinations = [key + val for key, val in pairs.items()]
found_something = True
while found_something:
found_something = False
for combination in combinations:
if combination in syntax:
syntax = syntax.replace(combination, "")
found_something = True
first = find_first_closing(syntax)
if first is not None:
return False, syntax[first]
else:
return True, syntax
def find_first_closing(syntax: str) -> Optional[int]:
first = float("inf")
for closing in [")", "]", "}", ">"]:
if closing in syntax:
idx = syntax.find(closing)
if idx < first:
first = idx
return None if first == float("inf") else first
def compute_autocomplete_score(completion_string: str) -> int:
score = 0
for character in completion_string:
score *= 5
score += AUTOCOMPLETE_POINTS[character]
return score
def main():
pairs = {"[": "]", "(": ")", "{": "}", "<": ">"}
lines = read_syntax_file(Path("./data/day_10_data.txt"))
points_total = 0
for line in lines:
valid, illegal = check_syntax(line, pairs)
if not valid:
points_total += POINTS[illegal]
print(points_total)
autocompletion_scores = []
for line in lines:
valid, result = check_syntax(line, pairs)
if not valid:
continue
completion = ""
for character in reversed(result):
completion += pairs[character]
autocompletion_scores.append(compute_autocomplete_score(completion))
print(median(autocompletion_scores)) | 0.763484 | 0.374991 |
from .GoogleTokenSpan import GoogleTokenSpan
from .GoogleSentiment import GoogleSentiment
class GoogleMention(GoogleTokenSpan):
def __init__(self, dictionary, document, entity):
text = dictionary.pop('text')
content = text.pop('content')
begin = text.pop('begin_offset')
end = begin + len(content)
super().__init__(dictionary=dictionary, document=document, begin=begin, end=end)
self._text = content
self._index = None
self._entity = entity
@property
def entity(self):
"""
:rtype: GoogleEntity
"""
return self._entity
@property
def id(self):
return self.document.id, 'mention', self.entity._index, self._index
class GoogleEntity:
def __init__(self, dictionary, document):
self._dictionary = dictionary
self._document = document
self._name = self._dictionary.pop('name')
self._type = self._dictionary.pop('type')
self._metadata = self._dictionary.pop('metadata')
self._wikipedia_url = self._metadata.pop('wikipedia_url', None)
self._salience = self._dictionary.pop('salience')
sentiment = self._dictionary.pop('sentiment')
self._sentiment = GoogleSentiment(score=sentiment.pop('score'), magnitude=sentiment.pop('magnitude'))
self._mentions = [
GoogleMention(dictionary=mention, document=self.document, entity=self)
for mention in self._dictionary.pop('mentions')
]
for index, mention in enumerate(self.mentions):
mention._index = index
self._index = None
def graph_str(self):
return f'{self.name}\n({str(self._type).replace("_", " ")})'
@property
def id(self):
return self.document.id, 'entity', self._index
def __str__(self):
return f'{self.name} ({self._type})'
def __repr__(self):
return str(self)
@property
def document(self):
"""
:type: .GoogleDocument.GoogleDocument
"""
return self._document
@property
def mentions(self):
"""
:rtype: list[GoogleMention]
"""
return self._mentions
@property
def dictionary(self):
"""
:rtype: dict
"""
return self._dictionary
@property
def name(self):
return self._name
@property
def type(self):
return self._type
@property
def salience(self):
return self._salience
@property
def sentiment(self):
"""
:rtype: GoogleSentiment
"""
return self._sentiment
@property
def metadata(self):
"""
:rtype: dict or NoneType
"""
return self._metadata
@property
def wikipedia_url(self):
"""
:rtype: str or NoneType
"""
return self._wikipedia_url
@property
def tokens(self):
return [token for mention in self.mentions for token in mention.tokens] | linguistics/google/GoogleEntity.py | from .GoogleTokenSpan import GoogleTokenSpan
from .GoogleSentiment import GoogleSentiment
class GoogleMention(GoogleTokenSpan):
def __init__(self, dictionary, document, entity):
text = dictionary.pop('text')
content = text.pop('content')
begin = text.pop('begin_offset')
end = begin + len(content)
super().__init__(dictionary=dictionary, document=document, begin=begin, end=end)
self._text = content
self._index = None
self._entity = entity
@property
def entity(self):
"""
:rtype: GoogleEntity
"""
return self._entity
@property
def id(self):
return self.document.id, 'mention', self.entity._index, self._index
class GoogleEntity:
def __init__(self, dictionary, document):
self._dictionary = dictionary
self._document = document
self._name = self._dictionary.pop('name')
self._type = self._dictionary.pop('type')
self._metadata = self._dictionary.pop('metadata')
self._wikipedia_url = self._metadata.pop('wikipedia_url', None)
self._salience = self._dictionary.pop('salience')
sentiment = self._dictionary.pop('sentiment')
self._sentiment = GoogleSentiment(score=sentiment.pop('score'), magnitude=sentiment.pop('magnitude'))
self._mentions = [
GoogleMention(dictionary=mention, document=self.document, entity=self)
for mention in self._dictionary.pop('mentions')
]
for index, mention in enumerate(self.mentions):
mention._index = index
self._index = None
def graph_str(self):
return f'{self.name}\n({str(self._type).replace("_", " ")})'
@property
def id(self):
return self.document.id, 'entity', self._index
def __str__(self):
return f'{self.name} ({self._type})'
def __repr__(self):
return str(self)
@property
def document(self):
"""
:type: .GoogleDocument.GoogleDocument
"""
return self._document
@property
def mentions(self):
"""
:rtype: list[GoogleMention]
"""
return self._mentions
@property
def dictionary(self):
"""
:rtype: dict
"""
return self._dictionary
@property
def name(self):
return self._name
@property
def type(self):
return self._type
@property
def salience(self):
return self._salience
@property
def sentiment(self):
"""
:rtype: GoogleSentiment
"""
return self._sentiment
@property
def metadata(self):
"""
:rtype: dict or NoneType
"""
return self._metadata
@property
def wikipedia_url(self):
"""
:rtype: str or NoneType
"""
return self._wikipedia_url
@property
def tokens(self):
return [token for mention in self.mentions for token in mention.tokens] | 0.767777 | 0.205954 |
from unittest import TestCase, mock
import unittest
from buf import libraries
import os
import sys
import tempfile
class TestMakeDir(TestCase):
"""Tests buf.libraries.make_library."""
def test_already_exists(self):
"""Tests that the function raises an error if the directory it is trying to create already exists."""
with mock.patch("buf.libraries.os.path.exists", return_value = True):
with self.assertRaises(IsADirectoryError):
libraries.make_library_dir()
def test_proper_directory_creation(self):
"""Tests that the function properly creates a directory if none exists."""
with mock.patch("buf.libraries.os.path.exists", return_value = False):
with mock.patch("buf.libraries.os.mkdir") as mock_make_dir:
libraries.make_library_dir()
mock_make_dir.assert_called_with(libraries.library_dir)
class TestEnsureLibraryDirExists(TestCase):
"""Tests buf.libraries.ensure_library_dir_exists."""
def test_existence_check(self):
"""Tests that the function checks whether library_dir exists."""
with mock.patch("buf.libraries.os.path.exists", side_effect = SystemExit) as mock_check:
with self.assertRaises(SystemExit):
libraries.ensure_library_dir_exists()
mock_check.assert_called_with(libraries.library_dir)
def test_directory_creation(self):
"""Tests that the function actually makes library_dir if it doesn't exist."""
with mock.patch("buf.libraries.os.path.exists", return_value = False):
with mock.patch("buf.libraries.os.mkdir") as mock_make_dir:
libraries.ensure_library_dir_exists()
mock_make_dir.assert_called_with(libraries.library_dir)
class TestAddLibraryFile(TestCase):
"""Tests buf.libraries.add_library_file."""
def test_library_dir_existence_check(self):
"""Tests that the function ensures that library_dir has already been created."""
with mock.patch("buf.libraries.ensure_library_dir_exists", side_effect = SystemExit) as mock_check:
with self.assertRaises(SystemExit):
libraries.add_library_file("file.txt")
mock_check.assert_called()
def test_file_already_exists_check(self):
"""Tests that the function raises an error if the file it is trying to create already exists."""
with mock.patch("buf.libraries.os.path.exists", return_value = True):
with self.assertRaises(FileExistsError):
libraries.add_library_file("file.txt")
def test_proper_file_creation(self):
"""Tests that the function properly creates a directory if none exists."""
test_file_name = "file.txt"
test_file_path = os.path.join(sys.prefix, libraries.library_dir, test_file_name)
with mock.patch("buf.libraries.os.path.exists", return_value = False):
with mock.patch("buf.libraries.ensure_library_dir_exists"):
with mock.patch("buf.libraries.open") as mock_open:
libraries.add_library_file(test_file_name)
mock_open.assert_called_with(test_file_path, "w")
if __name__ == '__main__':
unittest.main() | tests/test_libraries.py | from unittest import TestCase, mock
import unittest
from buf import libraries
import os
import sys
import tempfile
class TestMakeDir(TestCase):
"""Tests buf.libraries.make_library."""
def test_already_exists(self):
"""Tests that the function raises an error if the directory it is trying to create already exists."""
with mock.patch("buf.libraries.os.path.exists", return_value = True):
with self.assertRaises(IsADirectoryError):
libraries.make_library_dir()
def test_proper_directory_creation(self):
"""Tests that the function properly creates a directory if none exists."""
with mock.patch("buf.libraries.os.path.exists", return_value = False):
with mock.patch("buf.libraries.os.mkdir") as mock_make_dir:
libraries.make_library_dir()
mock_make_dir.assert_called_with(libraries.library_dir)
class TestEnsureLibraryDirExists(TestCase):
"""Tests buf.libraries.ensure_library_dir_exists."""
def test_existence_check(self):
"""Tests that the function checks whether library_dir exists."""
with mock.patch("buf.libraries.os.path.exists", side_effect = SystemExit) as mock_check:
with self.assertRaises(SystemExit):
libraries.ensure_library_dir_exists()
mock_check.assert_called_with(libraries.library_dir)
def test_directory_creation(self):
"""Tests that the function actually makes library_dir if it doesn't exist."""
with mock.patch("buf.libraries.os.path.exists", return_value = False):
with mock.patch("buf.libraries.os.mkdir") as mock_make_dir:
libraries.ensure_library_dir_exists()
mock_make_dir.assert_called_with(libraries.library_dir)
class TestAddLibraryFile(TestCase):
"""Tests buf.libraries.add_library_file."""
def test_library_dir_existence_check(self):
"""Tests that the function ensures that library_dir has already been created."""
with mock.patch("buf.libraries.ensure_library_dir_exists", side_effect = SystemExit) as mock_check:
with self.assertRaises(SystemExit):
libraries.add_library_file("file.txt")
mock_check.assert_called()
def test_file_already_exists_check(self):
"""Tests that the function raises an error if the file it is trying to create already exists."""
with mock.patch("buf.libraries.os.path.exists", return_value = True):
with self.assertRaises(FileExistsError):
libraries.add_library_file("file.txt")
def test_proper_file_creation(self):
"""Tests that the function properly creates a directory if none exists."""
test_file_name = "file.txt"
test_file_path = os.path.join(sys.prefix, libraries.library_dir, test_file_name)
with mock.patch("buf.libraries.os.path.exists", return_value = False):
with mock.patch("buf.libraries.ensure_library_dir_exists"):
with mock.patch("buf.libraries.open") as mock_open:
libraries.add_library_file(test_file_name)
mock_open.assert_called_with(test_file_path, "w")
if __name__ == '__main__':
unittest.main() | 0.563858 | 0.503662 |
import fnmatch
import re
import collections
from zlib import adler32
from typing import ByteString, Iterable, Callable, Union
from .. import arg, Unit
from ...lib.tools import isbuffer
def pathspec(expression):
"""
Normalizes a path which is separated by backward or forward slashes to be
separated by forward slashes.
"""
return '/'.join(re.split(R'[\\\/]', expression))
class UnpackResult:
def get_data(self) -> ByteString:
if callable(self.data):
self.data = self.data()
return self.data
def __init__(self, path: str, data: Union[ByteString, Callable[[], ByteString]], **meta):
self.path = path
self.data = data
self.meta = meta
class EndOfStringNotFound(ValueError):
def __init__(self):
super().__init__('end of string could not be determined')
class PathPattern:
def __init__(self, pp, regex=False):
if isinstance(pp, re.Pattern):
self.stops = []
self.pattern = pp
return
elif not regex:
if not pp.startswith('*') and not pp.endswith('*'):
pp = F'*{pp}*'
self.stops = [stop for stop in re.split(R'(.*?[/*?])', pp) if stop]
pp = fnmatch.translate(pp)
self.pattern = re.compile(pp)
def reach(self, path):
if not any(self.stops):
return True
for stop in self.stops:
if fnmatch.fnmatch(path, stop):
return True
return False
def check(self, path):
return self.pattern.fullmatch(path)
def __repr__(self):
return F'<PathPattern:{"//".join(self.stops) or "RE"}>'
class PathExtractorUnit(Unit, abstract=True):
def __init__(self, *paths: arg(
metavar='path', nargs='*', default=(), type=pathspec, help=(
'Wildcard pattern for the name of the item to be extracted. Each item is returned'
' as a separate output of this unit. Paths may contain wildcards. The default is '
'a single wildcard, which means that every item will be extracted.')),
list : arg.switch('-l', help='Return all matching paths as UTF8-encoded output chunks.') = False,
join : arg.switch('-j', help='Join path names from container with previous path names.') = False,
regex: arg.switch('-r', help='Use regular expressions instead of wildcard patterns.') = False,
meta: arg('-m', metavar='NAME',
help='Name of the meta variable to receive the extracted path. The default value is "{default}".') = b'path',
**keywords
):
paths = paths or (['.*'] if regex else ['*'])
super().__init__(
patterns=[
PathPattern(p, regex)
for p in paths
],
list=list,
join=join,
meta=meta,
**keywords
)
def _check_reachable(self, path: str) -> bool:
return any(p.reach(path) for p in self.args.patterns)
def _check_data(self, item: UnpackResult) -> bool:
if not isbuffer(item.get_data()):
self.log_warn('discarding item with invalid contents.')
return False
return True
def _check_path(self, item: UnpackResult) -> bool:
if not isinstance(item.path, str):
if not self._check_data(item):
return False
else:
from ...lib.mime import file_extension_from_data
self.__unknown += 1
self.log_warn('received an attachment without file name!')
ext = file_extension_from_data(item.data)
item.path = F'UNKNOWN{self.__unknown:02d}.{ext}'
if not any(p.check(item.path) for p in self.args.patterns):
return False
elif self.args.list:
return True
return self._check_data(item)
def unpack(self, data: ByteString) -> Iterable[UnpackResult]:
raise NotImplementedError
def process(self, data: ByteString) -> ByteString:
results = []
metavar = self.args.meta.decode(self.codec)
paths = collections.defaultdict(set)
self.__unknown = 0
try:
root = data[metavar]
except KeyError:
root = ''
for result in self.unpack(data):
if self._check_path(result):
results.append(result)
for p in self.args.patterns:
for result in results:
path = result.path
if '\\' in path:
path = '/'.join(path.split('\\'))
if not p.check(path):
continue
if not self.args.list:
csum = adler32(result.get_data())
if path in paths:
if csum in paths[path]:
continue
self.log_warn('duplicate path with different contents:', path)
paths[path].add(csum)
if self.args.join and root:
if '\\' in root:
root = '/'.join(root.split('\\'))
path = F'{root}/{path}'
if self.args.list:
yield path.encode(self.codec)
continue
else:
self.log_info(path)
result.meta[metavar] = path
yield self.labelled(result.get_data(), **result.meta) | refinery/units/formats/__init__.py | import fnmatch
import re
import collections
from zlib import adler32
from typing import ByteString, Iterable, Callable, Union
from .. import arg, Unit
from ...lib.tools import isbuffer
def pathspec(expression):
"""
Normalizes a path which is separated by backward or forward slashes to be
separated by forward slashes.
"""
return '/'.join(re.split(R'[\\\/]', expression))
class UnpackResult:
def get_data(self) -> ByteString:
if callable(self.data):
self.data = self.data()
return self.data
def __init__(self, path: str, data: Union[ByteString, Callable[[], ByteString]], **meta):
self.path = path
self.data = data
self.meta = meta
class EndOfStringNotFound(ValueError):
def __init__(self):
super().__init__('end of string could not be determined')
class PathPattern:
def __init__(self, pp, regex=False):
if isinstance(pp, re.Pattern):
self.stops = []
self.pattern = pp
return
elif not regex:
if not pp.startswith('*') and not pp.endswith('*'):
pp = F'*{pp}*'
self.stops = [stop for stop in re.split(R'(.*?[/*?])', pp) if stop]
pp = fnmatch.translate(pp)
self.pattern = re.compile(pp)
def reach(self, path):
if not any(self.stops):
return True
for stop in self.stops:
if fnmatch.fnmatch(path, stop):
return True
return False
def check(self, path):
return self.pattern.fullmatch(path)
def __repr__(self):
return F'<PathPattern:{"//".join(self.stops) or "RE"}>'
class PathExtractorUnit(Unit, abstract=True):
def __init__(self, *paths: arg(
metavar='path', nargs='*', default=(), type=pathspec, help=(
'Wildcard pattern for the name of the item to be extracted. Each item is returned'
' as a separate output of this unit. Paths may contain wildcards. The default is '
'a single wildcard, which means that every item will be extracted.')),
list : arg.switch('-l', help='Return all matching paths as UTF8-encoded output chunks.') = False,
join : arg.switch('-j', help='Join path names from container with previous path names.') = False,
regex: arg.switch('-r', help='Use regular expressions instead of wildcard patterns.') = False,
meta: arg('-m', metavar='NAME',
help='Name of the meta variable to receive the extracted path. The default value is "{default}".') = b'path',
**keywords
):
paths = paths or (['.*'] if regex else ['*'])
super().__init__(
patterns=[
PathPattern(p, regex)
for p in paths
],
list=list,
join=join,
meta=meta,
**keywords
)
def _check_reachable(self, path: str) -> bool:
return any(p.reach(path) for p in self.args.patterns)
def _check_data(self, item: UnpackResult) -> bool:
if not isbuffer(item.get_data()):
self.log_warn('discarding item with invalid contents.')
return False
return True
def _check_path(self, item: UnpackResult) -> bool:
if not isinstance(item.path, str):
if not self._check_data(item):
return False
else:
from ...lib.mime import file_extension_from_data
self.__unknown += 1
self.log_warn('received an attachment without file name!')
ext = file_extension_from_data(item.data)
item.path = F'UNKNOWN{self.__unknown:02d}.{ext}'
if not any(p.check(item.path) for p in self.args.patterns):
return False
elif self.args.list:
return True
return self._check_data(item)
def unpack(self, data: ByteString) -> Iterable[UnpackResult]:
raise NotImplementedError
def process(self, data: ByteString) -> ByteString:
results = []
metavar = self.args.meta.decode(self.codec)
paths = collections.defaultdict(set)
self.__unknown = 0
try:
root = data[metavar]
except KeyError:
root = ''
for result in self.unpack(data):
if self._check_path(result):
results.append(result)
for p in self.args.patterns:
for result in results:
path = result.path
if '\\' in path:
path = '/'.join(path.split('\\'))
if not p.check(path):
continue
if not self.args.list:
csum = adler32(result.get_data())
if path in paths:
if csum in paths[path]:
continue
self.log_warn('duplicate path with different contents:', path)
paths[path].add(csum)
if self.args.join and root:
if '\\' in root:
root = '/'.join(root.split('\\'))
path = F'{root}/{path}'
if self.args.list:
yield path.encode(self.codec)
continue
else:
self.log_info(path)
result.meta[metavar] = path
yield self.labelled(result.get_data(), **result.meta) | 0.759091 | 0.099645 |
from os import environ
from pathlib import Path
import envdir
import sentry_sdk
from configurations import Configuration
from sentry_sdk.integrations.django import DjangoIntegration

# Common settings
BASE_DIR = Path(__file__).absolute().parent.parent  # project root, two levels up
PROJECT_NAME = "{{cookiecutter.project_name}}"
# Required: selects which Configuration subclass below is activated.
CONFIGURATION = environ["DJANGO_CONFIGURATION"]
# Optional envdir directory whose files are loaded into the environment.
CONFIG_DIR = environ.get("DJANGO_CONFIG_DIR")
# Optional directory holding one file per secret (see get_secret below).
SECRET_DIR = environ.get("DJANGO_SECRET_DIR")
# Detect if we are running tests.
IN_TESTS = environ.get("RUNNING_TESTS")
def get_env(name, default=None, required=False, cast=str):
    """
    Build a lazy property that reads an environment variable.

    Arguments:
        name (str): Name of environment variable
        default (Any): default value, returned as-is (``cast`` is not applied)
        required (bool): If True, raises ValueError if not defined
                         (unless a non-None default is given, which wins)
        cast (Callable): function to call with the extracted string value

    Returns:
        property: resolves the variable on attribute access.
    """
    def _lookup(self):
        value = environ.get(name)
        if value is None:
            if default is not None:
                return default
            if required:
                raise ValueError(f"{name} not found in env")
            # Bug fix: never apply ``cast`` to a missing value — str(None)
            # would silently yield the string "None".
            return None
        return cast(value)
    return property(_lookup)
def get_secret(name, cast=str):
    """
    Build a lazy property that reads the secret ``<SECRET_DIR>/<name>`` from disk.

    Every secret is required: a missing directory or file raises ValueError.

    Arguments:
        name (str): file name of the secret
        cast (Callable): function applied to the stripped file contents
    """
    def _lookup(self):
        # Resolution is deferred so configurations that never use this
        # secret do not require DJANGO_SECRET_DIR to be set.
        if not SECRET_DIR:
            raise ValueError(
                f"Secret {name} not found: DJANGO_SECRET_DIR not set in env"
            )
        file = Path(SECRET_DIR) / name
        if not file.exists():
            raise ValueError(f"Secret {file} not found")
        return cast(file.read_text().strip())
    return property(_lookup)
def csv_to_list(value):
    """Split a comma-separated string into a list; ``None`` maps to ``[]``.

    Convenience ``cast`` for use with get_env() and get_secret().
    """
    return [] if value is None else value.split(",")
class Common(Configuration):
    """Base settings shared by every configuration in this module."""

    @classmethod
    def pre_setup(cls):
        """
        If specified, add config dir to environment
        """
        if CONFIG_DIR:
            envdir.Env(CONFIG_DIR)
        super().pre_setup()

    # Lower-case "<project>_<configuration>" slug, e.g. for cache key prefixes.
    PROJECT_ENVIRONMENT_SLUG = f"{PROJECT_NAME}_{CONFIGURATION}".lower()

    @property
    def ADMINS(self):
        """
        Look up DJANGO_ADMINS and split into list of (name, email) tuples
        Separate name and email with commas, name+email pairs with semicolons, eg::
        DJANGO_ADMINS="User One,<EMAIL>;User Two,<EMAIL>"
        """
        value = environ.get("DJANGO_ADMINS")
        if not value:
            return []
        pairs = value.split(";")
        # rsplit tolerates commas inside the name portion.
        return [pair.rsplit(",", 1) for pair in pairs]

    # NOTE(review): this binds the ADMINS property object itself, so MANAGERS
    # resolves to the same env-derived list — confirm that is intentional.
    MANAGERS = ADMINS

    # SECURITY WARNING: keep the secret key used in production secret!
    SECRET_KEY = get_env("DJANGO_SECRET_KEY", PROJECT_NAME)

    # SECURITY WARNING: don't run with debug turned on in production!
    DEBUG = True

    ALLOWED_HOSTS = get_env("DJANGO_ALLOWED_HOSTS", cast=csv_to_list, default=["*"])

    INSTALLED_APPS = [
        # Django
        "django.contrib.admin",
        "django.contrib.auth",
        "django.contrib.contenttypes",
        "django.contrib.sessions",
        "django.contrib.messages",
        # Third party
        "whitenoise.runserver_nostatic",
        "django.contrib.staticfiles",
        "django_extensions",
        "clear_cache",
        # Project
        "{{cookiecutter.project_name}}.{{cookiecutter.app_name}}",
    ]

    MIDDLEWARE = [
        "django.middleware.security.SecurityMiddleware",
        "whitenoise.middleware.WhiteNoiseMiddleware",
        "django.contrib.sessions.middleware.SessionMiddleware",
        "django.middleware.common.CommonMiddleware",
        "django.middleware.csrf.CsrfViewMiddleware",
        "django.contrib.auth.middleware.AuthenticationMiddleware",
        "django.contrib.messages.middleware.MessageMiddleware",
        "django.middleware.clickjacking.XFrameOptionsMiddleware",
    ]

    ROOT_URLCONF = "{{cookiecutter.project_name}}.urls"

    TEMPLATES = [
        {
            "BACKEND": "django.template.backends.django.DjangoTemplates",
            "DIRS": [BASE_DIR / "templates"],
            "APP_DIRS": True,
            "OPTIONS": {
                "context_processors": [
                    "django.template.context_processors.debug",
                    "django.template.context_processors.request",
                    "django.contrib.auth.context_processors.auth",
                    "django.contrib.messages.context_processors.messages",
                ],
            },
        },
    ]

    WSGI_APPLICATION = "{{cookiecutter.project_name}}.wsgi.application"

    # Database
    # https://docs.djangoproject.com/en/3.0/ref/settings/#databases
    DATABASE_HOST = get_env("DATABASE_HOST", default="localhost")
    DATABASE_PORT = get_env("DATABASE_PORT", default=5432, cast=int)
    DATABASE_NAME = get_env("DATABASE_NAME", default=PROJECT_NAME)
    DATABASE_USER = get_env("DATABASE_USER", default=PROJECT_NAME)
    DATABASE_PASSWORD = get_env("DATABASE_PASSWORD", default=PROJECT_NAME)

    @property
    def DATABASES(self):
        """
        Build the databases object here to allow subclasses to override specific values
        """
        return {
            "default": {
                "ENGINE": "django.db.backends.postgresql_psycopg2",
                "HOST": self.DATABASE_HOST,
                "PORT": self.DATABASE_PORT,
                "NAME": self.DATABASE_NAME,
                "USER": self.DATABASE_USER,
                "PASSWORD": self.DATABASE_PASSWORD,
            }
        }

    # Password validation
    # https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
    AUTH_PASSWORD_VALIDATORS = [
        {
            "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
        },
        {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
        {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
        {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
    ]

    # Internationalization
    # https://docs.djangoproject.com/en/3.0/topics/i18n/
    LANGUAGE_CODE = "en-GB"
    TIME_ZONE = "{{cookiecutter.time_zone}}"
    USE_I18N = True
    USE_L10N = True
    USE_TZ = True

    # Static files (CSS, JavaScript, Images)
    # https://docs.djangoproject.com/en/3.0/howto/static-files/
    STATIC_URL = "/static/"
    STATIC_ROOT = BASE_DIR / "static"
    MEDIA_URL = "/media/"
    MEDIA_ROOT = BASE_DIR / "media"

    # Additional locations of static files
    STATICFILES_DIRS = [BASE_DIR / "frontend" / "dist"]
    # STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
    WHITENOISE_ROOT = BASE_DIR / "public"

    FIXTURE_DIRS = [BASE_DIR / "fixtures"]

    LOGGING = {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "verbose": {
                "format": "%(levelname)s %(asctime)s %(module)s "
                "%(process)d %(thread)d %(message)s"
            },
        },
        "handlers": {"console": {"class": "logging.StreamHandler"}},
        "loggers": {
            "django": {"handlers": ["console"], "level": "INFO"},
            "sentry_sdk": {
                "level": "ERROR",
                "handlers": ["console"],
                "propagate": False,
            },
        },
    }
class RedisCache:
    """Mixin that provides a Redis-backed default cache (django-redis)."""

    REDIS_HOST = get_env("DJANGO_REDIS_HOST", required=True)
    REDIS_PORT = get_env("DJANGO_REDIS_PORT", default=6379, cast=int)

    # Cache
    # https://docs.djangoproject.com/en/3.0/ref/settings/#caches
    @property
    def CACHES(self):
        return {
            "default": {
                "BACKEND": "django_redis.cache.RedisCache",
                "LOCATION": f"redis://{self.REDIS_HOST}:{self.REDIS_PORT}/1",
                # Namespace keys per project+configuration to avoid clashes.
                "KEY_PREFIX": f"{self.PROJECT_ENVIRONMENT_SLUG}_",
                "OPTIONS": {
                    "CLIENT_CLASS": "django_redis.client.DefaultClient",
                    "PARSER_CLASS": "redis.connection.HiredisParser",
                },
            }
        }
class Dev(Common):
    """Local development settings: debug on, file-based email, debug toolbar."""

    DEBUG = True
    EMAIL_BACKEND = "django.core.mail.backends.filebased.EmailBackend"
    EMAIL_FILE_PATH = "/tmp/app-emails"
    INTERNAL_IPS = ["127.0.0.1"]

    @property
    def INSTALLED_APPS(self):
        # Return a NEW list: appending to the inherited list in place would
        # mutate Common.INSTALLED_APPS and add a duplicate "debug_toolbar"
        # entry on every attribute access.
        return super().INSTALLED_APPS + ["debug_toolbar"]

    @property
    def MIDDLEWARE(self):
        # Same as INSTALLED_APPS: never mutate the inherited list in place.
        return super().MIDDLEWARE + ["debug_toolbar.middleware.DebugToolbarMiddleware"]
class DevDocker(RedisCache, Dev):
    """
    Dev for docker, uses Redis.
    """
class Test(Common):
    """
    Default test settings
    Includes some testing speedups.
    """

    DEBUG = False
    # MD5 hashing is weak but fast — acceptable only for throwaway test users.
    PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
    # Keep outgoing mail in memory so tests can assert on it.
    EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
class CI(Test):
    """
    Default CI settings
    """
class Deployed(RedisCache, Common):
    """
    Settings which are for a non-local deployment
    """

    # Redefine values which are not optional in a deployed environment
    ALLOWED_HOSTS = get_env("DJANGO_ALLOWED_HOSTS", cast=csv_to_list, required=True)

    # Some deployed settings are no longer env vars - collect from the secret store
    SECRET_KEY = get_secret("DJANGO_SECRET_KEY")
    DATABASE_USER = get_secret("DATABASE_USER")
    DATABASE_PASSWORD = get_secret("DATABASE_PASSWORD")

    # Store sessions in the default (Redis) cache supplied by the mixin.
    SESSION_ENGINE = "django.contrib.sessions.backends.cache"
    SESSION_CACHE_ALIAS = "default"

    # django-debug-toolbar will throw an ImproperlyConfigured exception if DEBUG is
    # ever turned on when run with a WSGI server
    DEBUG_TOOLBAR_PATCH_SETTINGS = False

    # Real outbound mail via SendGrid SMTP.
    EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
    EMAIL_HOST = "smtp.sendgrid.net"
    EMAIL_PORT = 587
    EMAIL_USE_TLS = True
    EMAIL_HOST_USER = "{{cookiecutter.email_user}}"
    EMAIL_HOST_PASSWORD = "{{cookiecutter.email_password}}"
    # NOTE(review): sender addresses are left empty — confirm each project fills them in.
    DEFAULT_FROM_EMAIL = ""
    SERVER_EMAIL = ""

    @classmethod
    def post_setup(cls):
        """Initialise Sentry error reporting once settings are fully loaded."""
        super(Deployed, cls).post_setup()
        sentry_sdk.init(
            dsn="{{cookiecutter.sentry_dsn}}", integrations=[DjangoIntegration()], environment=CONFIGURATION,
        )
class Stage(Deployed):
    # Staging mirrors the deployed defaults exactly.
    pass
class Prod(Deployed):
    """Production settings: debug must stay off."""

    DEBUG = False
from pathlib import Path
import envdir
import sentry_sdk
from configurations import Configuration
from sentry_sdk.integrations.django import DjangoIntegration

# Common settings
BASE_DIR = Path(__file__).absolute().parent.parent  # project root, two levels up
PROJECT_NAME = "{{cookiecutter.project_name}}"
# Required: selects which Configuration subclass below is activated.
CONFIGURATION = environ["DJANGO_CONFIGURATION"]
CONFIG_DIR = environ.get("DJANGO_CONFIG_DIR")
SECRET_DIR = environ.get("DJANGO_SECRET_DIR")
# Detect if we are running tests.
IN_TESTS = environ.get("RUNNING_TESTS")
def get_env(name, default=None, required=False, cast=str):
    """
    Build a lazy property that reads an environment variable.

    Arguments:
        name (str): Name of environment variable
        default (Any): default value, returned as-is (``cast`` is not applied)
        required (bool): If True, raises ValueError if not defined
                         (unless a non-None default is given, which wins)
        cast (Callable): function to call with the extracted string value

    Returns:
        property: resolves the variable on attribute access.
    """
    def _lookup(self):
        value = environ.get(name)
        if value is None:
            if default is not None:
                return default
            if required:
                raise ValueError(f"{name} not found in env")
            # Bug fix: never apply ``cast`` to a missing value — str(None)
            # would silently yield the string "None".
            return None
        return cast(value)
    return property(_lookup)
def get_secret(name, cast=str):
    """
    Build a lazy property that reads the secret ``<SECRET_DIR>/<name>`` from disk.

    Every secret is required: a missing directory or file raises ValueError.

    Arguments:
        name (str): file name of the secret
        cast (Callable): function applied to the stripped file contents
    """
    def _lookup(self):
        # Resolution is deferred so configurations that never use this
        # secret do not require DJANGO_SECRET_DIR to be set.
        if not SECRET_DIR:
            raise ValueError(
                f"Secret {name} not found: DJANGO_SECRET_DIR not set in env"
            )
        file = Path(SECRET_DIR) / name
        if not file.exists():
            raise ValueError(f"Secret {file} not found")
        return cast(file.read_text().strip())
    return property(_lookup)
def csv_to_list(value):
    """Split a comma-separated string into a list; ``None`` maps to ``[]``.

    Convenience ``cast`` for use with get_env() and get_secret().
    """
    return [] if value is None else value.split(",")
class Common(Configuration):
    """Base settings shared by every configuration in this module."""

    @classmethod
    def pre_setup(cls):
        """
        If specified, add config dir to environment
        """
        if CONFIG_DIR:
            envdir.Env(CONFIG_DIR)
        super().pre_setup()

    # Lower-case "<project>_<configuration>" slug, e.g. for cache key prefixes.
    PROJECT_ENVIRONMENT_SLUG = f"{PROJECT_NAME}_{CONFIGURATION}".lower()

    @property
    def ADMINS(self):
        """
        Look up DJANGO_ADMINS and split into list of (name, email) tuples
        Separate name and email with commas, name+email pairs with semicolons, eg::
        DJANGO_ADMINS="User One,<EMAIL>;User Two,<EMAIL>"
        """
        value = environ.get("DJANGO_ADMINS")
        if not value:
            return []
        pairs = value.split(";")
        # rsplit tolerates commas inside the name portion.
        return [pair.rsplit(",", 1) for pair in pairs]

    # NOTE(review): this binds the ADMINS property object itself, so MANAGERS
    # resolves to the same env-derived list — confirm that is intentional.
    MANAGERS = ADMINS

    # SECURITY WARNING: keep the secret key used in production secret!
    SECRET_KEY = get_env("DJANGO_SECRET_KEY", PROJECT_NAME)

    # SECURITY WARNING: don't run with debug turned on in production!
    DEBUG = True

    ALLOWED_HOSTS = get_env("DJANGO_ALLOWED_HOSTS", cast=csv_to_list, default=["*"])

    INSTALLED_APPS = [
        # Django
        "django.contrib.admin",
        "django.contrib.auth",
        "django.contrib.contenttypes",
        "django.contrib.sessions",
        "django.contrib.messages",
        # Third party
        "whitenoise.runserver_nostatic",
        "django.contrib.staticfiles",
        "django_extensions",
        "clear_cache",
        # Project
        "{{cookiecutter.project_name}}.{{cookiecutter.app_name}}",
    ]

    MIDDLEWARE = [
        "django.middleware.security.SecurityMiddleware",
        "whitenoise.middleware.WhiteNoiseMiddleware",
        "django.contrib.sessions.middleware.SessionMiddleware",
        "django.middleware.common.CommonMiddleware",
        "django.middleware.csrf.CsrfViewMiddleware",
        "django.contrib.auth.middleware.AuthenticationMiddleware",
        "django.contrib.messages.middleware.MessageMiddleware",
        "django.middleware.clickjacking.XFrameOptionsMiddleware",
    ]

    ROOT_URLCONF = "{{cookiecutter.project_name}}.urls"

    TEMPLATES = [
        {
            "BACKEND": "django.template.backends.django.DjangoTemplates",
            "DIRS": [BASE_DIR / "templates"],
            "APP_DIRS": True,
            "OPTIONS": {
                "context_processors": [
                    "django.template.context_processors.debug",
                    "django.template.context_processors.request",
                    "django.contrib.auth.context_processors.auth",
                    "django.contrib.messages.context_processors.messages",
                ],
            },
        },
    ]

    WSGI_APPLICATION = "{{cookiecutter.project_name}}.wsgi.application"

    # Database
    # https://docs.djangoproject.com/en/3.0/ref/settings/#databases
    DATABASE_HOST = get_env("DATABASE_HOST", default="localhost")
    DATABASE_PORT = get_env("DATABASE_PORT", default=5432, cast=int)
    DATABASE_NAME = get_env("DATABASE_NAME", default=PROJECT_NAME)
    DATABASE_USER = get_env("DATABASE_USER", default=PROJECT_NAME)
    DATABASE_PASSWORD = get_env("DATABASE_PASSWORD", default=PROJECT_NAME)

    @property
    def DATABASES(self):
        """
        Build the databases object here to allow subclasses to override specific values
        """
        return {
            "default": {
                "ENGINE": "django.db.backends.postgresql_psycopg2",
                "HOST": self.DATABASE_HOST,
                "PORT": self.DATABASE_PORT,
                "NAME": self.DATABASE_NAME,
                "USER": self.DATABASE_USER,
                "PASSWORD": self.DATABASE_PASSWORD,
            }
        }

    # Password validation
    # https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
    AUTH_PASSWORD_VALIDATORS = [
        {
            "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
        },
        {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
        {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
        {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
    ]

    # Internationalization
    # https://docs.djangoproject.com/en/3.0/topics/i18n/
    LANGUAGE_CODE = "en-GB"
    TIME_ZONE = "{{cookiecutter.time_zone}}"
    USE_I18N = True
    USE_L10N = True
    USE_TZ = True

    # Static files (CSS, JavaScript, Images)
    # https://docs.djangoproject.com/en/3.0/howto/static-files/
    STATIC_URL = "/static/"
    STATIC_ROOT = BASE_DIR / "static"
    MEDIA_URL = "/media/"
    MEDIA_ROOT = BASE_DIR / "media"

    # Additional locations of static files
    STATICFILES_DIRS = [BASE_DIR / "frontend" / "dist"]
    # STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
    WHITENOISE_ROOT = BASE_DIR / "public"

    FIXTURE_DIRS = [BASE_DIR / "fixtures"]

    LOGGING = {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "verbose": {
                "format": "%(levelname)s %(asctime)s %(module)s "
                "%(process)d %(thread)d %(message)s"
            },
        },
        "handlers": {"console": {"class": "logging.StreamHandler"}},
        "loggers": {
            "django": {"handlers": ["console"], "level": "INFO"},
            "sentry_sdk": {
                "level": "ERROR",
                "handlers": ["console"],
                "propagate": False,
            },
        },
    }
class RedisCache:
    """Mixin that provides a Redis-backed default cache (django-redis)."""

    REDIS_HOST = get_env("DJANGO_REDIS_HOST", required=True)
    REDIS_PORT = get_env("DJANGO_REDIS_PORT", default=6379, cast=int)

    # Cache
    # https://docs.djangoproject.com/en/3.0/ref/settings/#caches
    @property
    def CACHES(self):
        return {
            "default": {
                "BACKEND": "django_redis.cache.RedisCache",
                "LOCATION": f"redis://{self.REDIS_HOST}:{self.REDIS_PORT}/1",
                # Namespace keys per project+configuration to avoid clashes.
                "KEY_PREFIX": f"{self.PROJECT_ENVIRONMENT_SLUG}_",
                "OPTIONS": {
                    "CLIENT_CLASS": "django_redis.client.DefaultClient",
                    "PARSER_CLASS": "redis.connection.HiredisParser",
                },
            }
        }
class Dev(Common):
    """Local development settings: debug on, file-based email, debug toolbar."""

    DEBUG = True
    EMAIL_BACKEND = "django.core.mail.backends.filebased.EmailBackend"
    EMAIL_FILE_PATH = "/tmp/app-emails"
    INTERNAL_IPS = ["127.0.0.1"]

    @property
    def INSTALLED_APPS(self):
        # Return a NEW list: appending to the inherited list in place would
        # mutate Common.INSTALLED_APPS and add a duplicate "debug_toolbar"
        # entry on every attribute access.
        return super().INSTALLED_APPS + ["debug_toolbar"]

    @property
    def MIDDLEWARE(self):
        # Same as INSTALLED_APPS: never mutate the inherited list in place.
        return super().MIDDLEWARE + ["debug_toolbar.middleware.DebugToolbarMiddleware"]
class DevDocker(RedisCache, Dev):
    """
    Dev for docker, uses Redis.
    """
class Test(Common):
    """
    Default test settings
    Includes some testing speedups.
    """

    DEBUG = False
    # MD5 hashing is weak but fast — acceptable only for throwaway test users.
    PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
    # Keep outgoing mail in memory so tests can assert on it.
    EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
class CI(Test):
    """
    Default CI settings
    """
class Deployed(RedisCache, Common):
    """
    Settings which are for a non-local deployment
    """

    # Redefine values which are not optional in a deployed environment
    ALLOWED_HOSTS = get_env("DJANGO_ALLOWED_HOSTS", cast=csv_to_list, required=True)

    # Some deployed settings are no longer env vars - collect from the secret store
    SECRET_KEY = get_secret("DJANGO_SECRET_KEY")
    DATABASE_USER = get_secret("DATABASE_USER")
    DATABASE_PASSWORD = get_secret("DATABASE_PASSWORD")

    # Store sessions in the default (Redis) cache supplied by the mixin.
    SESSION_ENGINE = "django.contrib.sessions.backends.cache"
    SESSION_CACHE_ALIAS = "default"

    # django-debug-toolbar will throw an ImproperlyConfigured exception if DEBUG is
    # ever turned on when run with a WSGI server
    DEBUG_TOOLBAR_PATCH_SETTINGS = False

    # Real outbound mail via SendGrid SMTP.
    EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
    EMAIL_HOST = "smtp.sendgrid.net"
    EMAIL_PORT = 587
    EMAIL_USE_TLS = True
    EMAIL_HOST_USER = "{{cookiecutter.email_user}}"
    EMAIL_HOST_PASSWORD = "{{cookiecutter.email_password}}"
    # NOTE(review): sender addresses are left empty — confirm each project fills them in.
    DEFAULT_FROM_EMAIL = ""
    SERVER_EMAIL = ""

    @classmethod
    def post_setup(cls):
        """Initialise Sentry error reporting once settings are fully loaded."""
        super(Deployed, cls).post_setup()
        sentry_sdk.init(
            dsn="{{cookiecutter.sentry_dsn}}", integrations=[DjangoIntegration()], environment=CONFIGURATION,
        )
class Stage(Deployed):
    # Staging mirrors the deployed defaults exactly.
    pass
class Prod(Deployed):
    """Production settings: debug must stay off."""

    DEBUG = False
import pandas as pd
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
import argparse
import sys

# TESTING URLS
# "https://www.imdb.com/title/tt5753856" DARK
# "https://www.imdb.com/title/tt0098904" SEINFLED
# "https://www.imdb.com/title/tt0306414" THEWIRE
# "https://www.imdb.com/title/tt0096697" Simpsons
# "https://www.imdb.com/title/tt0903747" BreakingBad

# Arguments
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--url", help="URL to show page", required=True)
parser.add_argument("-f", "--file", help="Filename to save data to", default='data')
args = parser.parse_args()
url = args.url
csvName = args.file

# Get Show Home Page
try:
    res = requests.get(url)
except requests.exceptions.RequestException:
    # Fix: only catch request failures. The previous bare ``except:`` also
    # swallowed KeyboardInterrupt/SystemExit and hid programming errors.
    print("Invalid URL\nMust follow IMDB Link format\nExample: https://www.imdb.com/title/tt1266020")
    sys.exit()
soup = BeautifulSoup(res.text, features='lxml')

# Extract Show Name (ASCII-fold to avoid console encoding issues)
subs = soup.find("div", {"class": "title_wrapper"})
showName = subs.find('h1').text.encode('ascii', 'ignore').decode('utf-8')
print(f"Getting data for {showName}")

# Get number of seasons (first link of the seasons navigation block)
subs = soup.findAll("div", {"class": "seasons-and-year-nav"})
numberOfSeason = subs[0].find('a')
numberOfSeason = int(numberOfSeason.text)
print(f"Fetching Data for {numberOfSeason} seasons")

# Update url to iterate over seasons
if url[-1] == '/':
    url = url[:-1]
url += "/episodes?season={}"

# Rows of episode data collected across all seasons.
data = []
# Manipulates incoming data and adds to data list
def addData(data, row_data, isVote=False):
    """Append the text of the first element of *data* to *row_data*.

    An empty result set appends "". Vote counts are stripped of parentheses
    and thousands separators. Returns *row_data* (mutated in place).
    """
    if not data:
        row_data.append("")
        return row_data
    text = data[0].text
    if isVote:
        # "(1,234)" -> "1234"
        for junk in ('(', ')', ','):
            text = text.replace(junk, '')
    row_data.append(text)
    return row_data
# Iterate of seasons webpages and scrape episode wise data
for season in tqdm(range(1,numberOfSeason+1)):
    # Fetch the episode-list page for this season.
    with requests.get(url.format(season)) as resp:
        html = resp.text
    soup = BeautifulSoup(html,features="lxml")
    episodes = soup.findAll("div", {"class": "list_item"})
    for episode in episodes:
        row_data = []
        title = episode.findAll("a", {"itemprop": "name"})
        airdate = episode.findAll("div", {"class": "airdate"})
        rating = episode.findAll("span", {"class": "ipl-rating-star__rating"})
        num_votes = episode.findAll("span", {"class": "ipl-rating-star__total-votes"})
        description = episode.findAll("div", {"class": "item_description"})
        row_data.append(season)
        # addData appends the first element's text (or "") per column, in order.
        row_data = addData(title,row_data)
        row_data = addData(airdate,row_data)
        row_data = addData(rating,row_data)
        row_data = addData(num_votes,row_data,isVote=True)
        row_data = addData(description,row_data)
        # row_data = [season,title[0].text,airdate[0].text,rating[0].text,num_votes[0].text.replace('(','').replace(')','').replace(',',''),description[0].text]
        # Strip embedded newlines and surrounding whitespace from text columns.
        row_data = [r.replace('\n','').strip() if isinstance(r,str) else r for r in row_data ]
        data.append(row_data)
# Persist the scraped rows; a DataFrame makes CSV output trivial.
df = pd.DataFrame(data, columns=["Season", "Title", "Airdate", "Rating", "Vote_count", "Description"])
df.to_csv(csvName + '.csv', index=False)
print(f"Data saved to {csvName}.csv Successfully")
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
import argparse
import sys

# TESTING URLS
# "https://www.imdb.com/title/tt5753856" DARK
# "https://www.imdb.com/title/tt0098904" SEINFLED
# "https://www.imdb.com/title/tt0306414" THEWIRE
# "https://www.imdb.com/title/tt0096697" Simpsons
# "https://www.imdb.com/title/tt0903747" BreakingBad

# Arguments
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--url", help="URL to show page", required=True)
parser.add_argument("-f", "--file", help="Filename to save data to", default='data')
args = parser.parse_args()
url = args.url
csvName = args.file

# Get Show Home Page
try:
    res = requests.get(url)
except requests.exceptions.RequestException:
    # Fix: only catch request failures. The previous bare ``except:`` also
    # swallowed KeyboardInterrupt/SystemExit and hid programming errors.
    print("Invalid URL\nMust follow IMDB Link format\nExample: https://www.imdb.com/title/tt1266020")
    sys.exit()
soup = BeautifulSoup(res.text, features='lxml')

# Extract Show Name (ASCII-fold to avoid console encoding issues)
subs = soup.find("div", {"class": "title_wrapper"})
showName = subs.find('h1').text.encode('ascii', 'ignore').decode('utf-8')
print(f"Getting data for {showName}")

# Get number of seasons (first link of the seasons navigation block)
subs = soup.findAll("div", {"class": "seasons-and-year-nav"})
numberOfSeason = subs[0].find('a')
numberOfSeason = int(numberOfSeason.text)
print(f"Fetching Data for {numberOfSeason} seasons")

# Update url to iterate over seasons
if url[-1] == '/':
    url = url[:-1]
url += "/episodes?season={}"

# Rows of episode data collected across all seasons.
data = []
# Manipulates incoming data and adds to data list
def addData(data, row_data, isVote=False):
    """Append the text of the first element of *data* to *row_data*.

    An empty result set appends "". Vote counts are stripped of parentheses
    and thousands separators. Returns *row_data* (mutated in place).
    """
    if not data:
        row_data.append("")
        return row_data
    text = data[0].text
    if isVote:
        # "(1,234)" -> "1234"
        for junk in ('(', ')', ','):
            text = text.replace(junk, '')
    row_data.append(text)
    return row_data
# Iterate of seasons webpages and scrape episode wise data
for season in tqdm(range(1,numberOfSeason+1)):
    # Fetch the episode-list page for this season.
    with requests.get(url.format(season)) as resp:
        html = resp.text
    soup = BeautifulSoup(html,features="lxml")
    episodes = soup.findAll("div", {"class": "list_item"})
    for episode in episodes:
        row_data = []
        title = episode.findAll("a", {"itemprop": "name"})
        airdate = episode.findAll("div", {"class": "airdate"})
        rating = episode.findAll("span", {"class": "ipl-rating-star__rating"})
        num_votes = episode.findAll("span", {"class": "ipl-rating-star__total-votes"})
        description = episode.findAll("div", {"class": "item_description"})
        row_data.append(season)
        # addData appends the first element's text (or "") per column, in order.
        row_data = addData(title,row_data)
        row_data = addData(airdate,row_data)
        row_data = addData(rating,row_data)
        row_data = addData(num_votes,row_data,isVote=True)
        row_data = addData(description,row_data)
        # row_data = [season,title[0].text,airdate[0].text,rating[0].text,num_votes[0].text.replace('(','').replace(')','').replace(',',''),description[0].text]
        # Strip embedded newlines and surrounding whitespace from text columns.
        row_data = [r.replace('\n','').strip() if isinstance(r,str) else r for r in row_data ]
        data.append(row_data)
# Persist the scraped rows; a DataFrame makes CSV output trivial.
df = pd.DataFrame(data, columns=["Season", "Title", "Airdate", "Rating", "Vote_count", "Description"])
df.to_csv(csvName + '.csv', index=False)
print(f"Data saved to {csvName}.csv Successfully")
from antlr4 import *
# This class defines a complete listener for a parse tree produced by QrogueDungeonParser.
class QrogueDungeonListener(ParseTreeListener):
    """Empty default listener for QrogueDungeonParser parse trees.

    ANTLR-style listener boilerplate (presumably generated — regenerate
    rather than hand-edit): subclass and override only the enter/exit
    callbacks for the grammar rules you care about.
    """

    # Enter a parse tree produced by QrogueDungeonParser#start.
    def enterStart(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#start.
    def exitStart(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#integer.
    def enterInteger(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#integer.
    def exitInteger(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#complex_number.
    def enterComplex_number(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#complex_number.
    def exitComplex_number(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#robot.
    def enterRobot(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#robot.
    def exitRobot(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#layout.
    def enterLayout(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#layout.
    def exitLayout(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#l_room_row.
    def enterL_room_row(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#l_room_row.
    def exitL_room_row(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#l_hallway_row.
    def enterL_hallway_row(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#l_hallway_row.
    def exitL_hallway_row(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#rooms.
    def enterRooms(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#rooms.
    def exitRooms(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#room.
    def enterRoom(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#room.
    def exitRoom(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#r_attributes.
    def enterR_attributes(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#r_attributes.
    def exitR_attributes(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#r_visibility.
    def enterR_visibility(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#r_visibility.
    def exitR_visibility(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#r_type.
    def enterR_type(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#r_type.
    def exitR_type(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#r_row.
    def enterR_row(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#r_row.
    def exitR_row(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#tile.
    def enterTile(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#tile.
    def exitTile(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#tile_descriptor.
    def enterTile_descriptor(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#tile_descriptor.
    def exitTile_descriptor(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#trigger_descriptor.
    def enterTrigger_descriptor(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#trigger_descriptor.
    def exitTrigger_descriptor(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#message_descriptor.
    def enterMessage_descriptor(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#message_descriptor.
    def exitMessage_descriptor(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#enemy_descriptor.
    def enterEnemy_descriptor(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#enemy_descriptor.
    def exitEnemy_descriptor(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#collectible_descriptor.
    def enterCollectible_descriptor(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#collectible_descriptor.
    def exitCollectible_descriptor(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#energy_descriptor.
    def enterEnergy_descriptor(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#energy_descriptor.
    def exitEnergy_descriptor(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#riddle_descriptor.
    def enterRiddle_descriptor(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#riddle_descriptor.
    def exitRiddle_descriptor(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#shop_descriptor.
    def enterShop_descriptor(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#shop_descriptor.
    def exitShop_descriptor(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#hallways.
    def enterHallways(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#hallways.
    def exitHallways(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#hallway.
    def enterHallway(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#hallway.
    def exitHallway(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#h_attributes.
    def enterH_attributes(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#h_attributes.
    def exitH_attributes(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#draw_strategy.
    def enterDraw_strategy(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#draw_strategy.
    def exitDraw_strategy(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#stv_pools.
    def enterStv_pools(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#stv_pools.
    def exitStv_pools(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#default_stv_pool.
    def enterDefault_stv_pool(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#default_stv_pool.
    def exitDefault_stv_pool(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#stv_pool.
    def enterStv_pool(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#stv_pool.
    def exitStv_pool(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#stvs.
    def enterStvs(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#stvs.
    def exitStvs(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#stv.
    def enterStv(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#stv.
    def exitStv(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#reward_pools.
    def enterReward_pools(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#reward_pools.
    def exitReward_pools(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#default_reward_pool.
    def enterDefault_reward_pool(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#default_reward_pool.
    def exitDefault_reward_pool(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#reward_pool.
    def enterReward_pool(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#reward_pool.
    def exitReward_pool(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#collectibles.
    def enterCollectibles(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#collectibles.
    def exitCollectibles(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#collectible.
    def enterCollectible(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#collectible.
    def exitCollectible(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#messages.
    def enterMessages(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#messages.
    def exitMessages(self, ctx):
        pass

    # Enter a parse tree produced by QrogueDungeonParser#message.
    def enterMessage(self, ctx):
        pass

    # Exit a parse tree produced by QrogueDungeonParser#message.
    def exitMessage(self, ctx):
        pass
# This class defines a complete listener for a parse tree produced by QrogueDungeonParser.
class QrogueDungeonListener(ParseTreeListener):
# Enter a parse tree produced by QrogueDungeonParser#start.
def enterStart(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#start.
def exitStart(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#integer.
def enterInteger(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#integer.
def exitInteger(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#complex_number.
def enterComplex_number(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#complex_number.
def exitComplex_number(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#robot.
def enterRobot(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#robot.
def exitRobot(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#layout.
def enterLayout(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#layout.
def exitLayout(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#l_room_row.
def enterL_room_row(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#l_room_row.
def exitL_room_row(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#l_hallway_row.
def enterL_hallway_row(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#l_hallway_row.
def exitL_hallway_row(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#rooms.
def enterRooms(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#rooms.
def exitRooms(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#room.
def enterRoom(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#room.
def exitRoom(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#r_attributes.
def enterR_attributes(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#r_attributes.
def exitR_attributes(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#r_visibility.
def enterR_visibility(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#r_visibility.
def exitR_visibility(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#r_type.
def enterR_type(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#r_type.
def exitR_type(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#r_row.
def enterR_row(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#r_row.
def exitR_row(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#tile.
def enterTile(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#tile.
def exitTile(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#tile_descriptor.
def enterTile_descriptor(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#tile_descriptor.
def exitTile_descriptor(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#trigger_descriptor.
def enterTrigger_descriptor(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#trigger_descriptor.
def exitTrigger_descriptor(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#message_descriptor.
def enterMessage_descriptor(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#message_descriptor.
def exitMessage_descriptor(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#enemy_descriptor.
def enterEnemy_descriptor(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#enemy_descriptor.
def exitEnemy_descriptor(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#collectible_descriptor.
def enterCollectible_descriptor(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#collectible_descriptor.
def exitCollectible_descriptor(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#energy_descriptor.
def enterEnergy_descriptor(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#energy_descriptor.
def exitEnergy_descriptor(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#riddle_descriptor.
def enterRiddle_descriptor(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#riddle_descriptor.
def exitRiddle_descriptor(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#shop_descriptor.
def enterShop_descriptor(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#shop_descriptor.
def exitShop_descriptor(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#hallways.
def enterHallways(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#hallways.
def exitHallways(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#hallway.
def enterHallway(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#hallway.
def exitHallway(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#h_attributes.
def enterH_attributes(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#h_attributes.
def exitH_attributes(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#draw_strategy.
def enterDraw_strategy(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#draw_strategy.
def exitDraw_strategy(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#stv_pools.
def enterStv_pools(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#stv_pools.
def exitStv_pools(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#default_stv_pool.
def enterDefault_stv_pool(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#default_stv_pool.
def exitDefault_stv_pool(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#stv_pool.
def enterStv_pool(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#stv_pool.
def exitStv_pool(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#stvs.
def enterStvs(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#stvs.
def exitStvs(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#stv.
def enterStv(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#stv.
def exitStv(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#reward_pools.
def enterReward_pools(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#reward_pools.
def exitReward_pools(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#default_reward_pool.
def enterDefault_reward_pool(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#default_reward_pool.
def exitDefault_reward_pool(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#reward_pool.
def enterReward_pool(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#reward_pool.
def exitReward_pool(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#collectibles.
def enterCollectibles(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#collectibles.
def exitCollectibles(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#collectible.
def enterCollectible(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#collectible.
def exitCollectible(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#messages.
def enterMessages(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#messages.
def exitMessages(self, ctx):
pass
# Enter a parse tree produced by QrogueDungeonParser#message.
def enterMessage(self, ctx):
pass
# Exit a parse tree produced by QrogueDungeonParser#message.
def exitMessage(self, ctx):
pass | 0.307462 | 0.14978 |
import torch
def l2norm(tensor, dim, keepdim):
"""
Computes the l2-norm of elements in input tensor.
:param tensor: PyTorch tensor.
:type tensor: `torch.nn.Module`
:param dim: Reduction dimension.
:type dim: `int`
:param keepdim: Whether the output has `dim` retained.
:type keepdim: `bool`
:return: l2-norm of input tensor.
"""
return torch.norm(tensor, 2, dim, keepdim)
def max(tensor, dim, keepdim):
"""
Computes the maximum value of elements in input tensor.
:param tensor: PyTorch tensor.
:type tensor: `torch.nn.Module`
:param dim: Reduction dimension.
:type dim: `int`
:param keepdim: Whether the output has `dim` retained.
:type keepdim: `bool`
:return: Max of input tensor.
"""
return torch.max(tensor, dim, keepdim)[0]
def min(tensor, dim, keepdim):
"""
Computes the minimum value of elements in input tensor.
:param tensor: PyTorch tensor.
:type tensor: `torch.nn.Module`
:param dim: Reduction dimension.
:type dim: `int`
:param keepdim: Whether the output has `dim` retained.
:type keepdim: `bool`
:return: Min of input tensor.
"""
return torch.min(tensor, dim, keepdim)[0]
def mean(tensor, dim, keepdim):
"""
Computes the mean value of elements in input tensor.
:param tensor: PyTorch tensor.
:type tensor: `torch.nn.Module`
:param dim: Reduction dimension.
:type dim: `int`
:param keepdim: Whether the output has `dim` retained.
:type keepdim: `bool`
:return: Mean value of input tensor.
"""
return torch.mean(tensor, dim, keepdim)
def sum(tensor, dim, keepdim):
"""
Computes the sum of elements in input tensor.
:param tensor: PyTorch tensor.
:type tensor: `torch.nn.Module`
:param dim: Reduction dimension.
:type dim: `int`
:param keepdim: Whether the output has `dim` retained.
:type keepdim: `bool`
:return: Sum of input tensor.
"""
return torch.sum(tensor, dim, keepdim) | condensa/functional.py |
import torch
def l2norm(tensor, dim, keepdim):
"""
Computes the l2-norm of elements in input tensor.
:param tensor: PyTorch tensor.
:type tensor: `torch.nn.Module`
:param dim: Reduction dimension.
:type dim: `int`
:param keepdim: Whether the output has `dim` retained.
:type keepdim: `bool`
:return: l2-norm of input tensor.
"""
return torch.norm(tensor, 2, dim, keepdim)
def max(tensor, dim, keepdim):
"""
Computes the maximum value of elements in input tensor.
:param tensor: PyTorch tensor.
:type tensor: `torch.nn.Module`
:param dim: Reduction dimension.
:type dim: `int`
:param keepdim: Whether the output has `dim` retained.
:type keepdim: `bool`
:return: Max of input tensor.
"""
return torch.max(tensor, dim, keepdim)[0]
def min(tensor, dim, keepdim):
"""
Computes the minimum value of elements in input tensor.
:param tensor: PyTorch tensor.
:type tensor: `torch.nn.Module`
:param dim: Reduction dimension.
:type dim: `int`
:param keepdim: Whether the output has `dim` retained.
:type keepdim: `bool`
:return: Min of input tensor.
"""
return torch.min(tensor, dim, keepdim)[0]
def mean(tensor, dim, keepdim):
"""
Computes the mean value of elements in input tensor.
:param tensor: PyTorch tensor.
:type tensor: `torch.nn.Module`
:param dim: Reduction dimension.
:type dim: `int`
:param keepdim: Whether the output has `dim` retained.
:type keepdim: `bool`
:return: Mean value of input tensor.
"""
return torch.mean(tensor, dim, keepdim)
def sum(tensor, dim, keepdim):
"""
Computes the sum of elements in input tensor.
:param tensor: PyTorch tensor.
:type tensor: `torch.nn.Module`
:param dim: Reduction dimension.
:type dim: `int`
:param keepdim: Whether the output has `dim` retained.
:type keepdim: `bool`
:return: Sum of input tensor.
"""
return torch.sum(tensor, dim, keepdim) | 0.968329 | 0.878366 |
import sqlite3
import argparse
import datetime
import chtc_usage_tools as cut
import matplotlib.pyplot as plt
import matplotlib.dates as mpld
import matplotlib as mpl
from numpy import array
mpl.rcParams['axes.color_cycle'] = ['r', 'k', 'c']
parser = argparse.ArgumentParser(description='A tool to extract usage data')
parser.add_argument('--project',help='The name of a project over which to summarize the results',nargs="*",type=lambda s: unicode(s,'utf8'))
parser.add_argument('--pool',help='Limit the data to a single pool',nargs="*")
parser.add_argument('-s','--sum',help="Sum across pools",action='store_true')
parser.add_argument('--span',choices=['day','month','year'],help="Time span across which to sum data",default='month')
parser.add_argument('database',help='The name of a database file')
args=parser.parse_args()
conn = cut.usage_db_connect(args.database)
curs = conn.cursor()
### projects
usage_projects=set(cut.get_db_projects(curs))
if args.project:
usage_projects=set(args.project).intersection(usage_projects)
### pools
usage_pools=cut.get_db_pools(curs)
if args.pool:
usage_pools=set(args.pool).intersection(usage_pools)
usage_pools = list(usage_pools)
date_fmt_list= {'day':"%Y-%m-%d", 'month':"%Y-%m", 'year':"%Y"}
sql_groupby_name = 'month'
if args.span:
sql_groupby_name = args.span
date_fmt = date_fmt_list[sql_groupby_name]
# sum over all users for each pool
sum_usage_pools = map(lambda x: "sum(" + x + ")", usage_pools)
col_query = ','.join(sum_usage_pools)
# sum over all pools
if args.sum:
col_query = '(' + '+'.join(sum_usage_pools) + ')'
usage_pools = ["total"]
project_data = {}
fig = plt.figure()
for project in usage_projects:
sql_cmd = 'select strftime("' + date_fmt + '",enddate) as ' + sql_groupby_name + ',' + col_query + ' from usage where ' + 'userid in (select rowid from users where project=?) group by ' + sql_groupby_name
curs.execute(sql_cmd, (project,))
project_data[project] = {'dates':[], 'usage':[]}
rows = curs.fetchall()
for row in rows:
project_data[project]['dates'].append(datetime.datetime.strptime(row[0],date_fmt))
project_data[project]['usage'].append(list(row[1:]))
pool_idx = 0
for temp in zip(*project_data[project]['usage']):
if (max(temp) > 0):
plt.plot_date(mpld.date2num(project_data[project]['dates']),array(temp),'-',xdate=True,label=project + " " + usage_pools[pool_idx])
pool_idx += 1
pool_idx = pool_idx % len(usage_pools)
#print project_data
plt.legend(loc='upper left')
plt.ylabel('cpu-hours per ' + sql_groupby_name)
fig.autofmt_xdate()
plt.show() | extractUsage.py |
import sqlite3
import argparse
import datetime
import chtc_usage_tools as cut
import matplotlib.pyplot as plt
import matplotlib.dates as mpld
import matplotlib as mpl
from numpy import array
mpl.rcParams['axes.color_cycle'] = ['r', 'k', 'c']
parser = argparse.ArgumentParser(description='A tool to extract usage data')
parser.add_argument('--project',help='The name of a project over which to summarize the results',nargs="*",type=lambda s: unicode(s,'utf8'))
parser.add_argument('--pool',help='Limit the data to a single pool',nargs="*")
parser.add_argument('-s','--sum',help="Sum across pools",action='store_true')
parser.add_argument('--span',choices=['day','month','year'],help="Time span across which to sum data",default='month')
parser.add_argument('database',help='The name of a database file')
args=parser.parse_args()
conn = cut.usage_db_connect(args.database)
curs = conn.cursor()
### projects
usage_projects=set(cut.get_db_projects(curs))
if args.project:
usage_projects=set(args.project).intersection(usage_projects)
### pools
usage_pools=cut.get_db_pools(curs)
if args.pool:
usage_pools=set(args.pool).intersection(usage_pools)
usage_pools = list(usage_pools)
date_fmt_list= {'day':"%Y-%m-%d", 'month':"%Y-%m", 'year':"%Y"}
sql_groupby_name = 'month'
if args.span:
sql_groupby_name = args.span
date_fmt = date_fmt_list[sql_groupby_name]
# sum over all users for each pool
sum_usage_pools = map(lambda x: "sum(" + x + ")", usage_pools)
col_query = ','.join(sum_usage_pools)
# sum over all pools
if args.sum:
col_query = '(' + '+'.join(sum_usage_pools) + ')'
usage_pools = ["total"]
project_data = {}
fig = plt.figure()
for project in usage_projects:
sql_cmd = 'select strftime("' + date_fmt + '",enddate) as ' + sql_groupby_name + ',' + col_query + ' from usage where ' + 'userid in (select rowid from users where project=?) group by ' + sql_groupby_name
curs.execute(sql_cmd, (project,))
project_data[project] = {'dates':[], 'usage':[]}
rows = curs.fetchall()
for row in rows:
project_data[project]['dates'].append(datetime.datetime.strptime(row[0],date_fmt))
project_data[project]['usage'].append(list(row[1:]))
pool_idx = 0
for temp in zip(*project_data[project]['usage']):
if (max(temp) > 0):
plt.plot_date(mpld.date2num(project_data[project]['dates']),array(temp),'-',xdate=True,label=project + " " + usage_pools[pool_idx])
pool_idx += 1
pool_idx = pool_idx % len(usage_pools)
#print project_data
plt.legend(loc='upper left')
plt.ylabel('cpu-hours per ' + sql_groupby_name)
fig.autofmt_xdate()
plt.show() | 0.281307 | 0.249304 |
import uuid
import pygame
from dataclasses import dataclass
from ..style import Color
from ..structures import Vec2
from . import physics
Model = physics.Model
vec2 = physics.vec2
@dataclass
class Component:
entity_id = None
def update(self, delta) -> None: pass
@property
def class_name(self):
return self.__class__.__name__
def update(self):
pass
@dataclass
class Stats(Component):
health: int
strength: int
defense: int
agility: int
def change_health(self, amount):
self.health += amount
def change_strength(self, amount):
self.strength += amount
def change_defense(self, amount):
self.defense += amount
def change_agility(self, amount):
self.agility += amount
@property
def is_alive(self):
return self.health >= 0
@dataclass
class Accelerator(Component):
acceleration: float
max_acceleration: float
direction: Vec2 = None
def __init__(self, acceleration = 0, max_acceleration = 0, direction = None):
self.acceleration = acceleration
self.max_acceleration = max_acceleration
self.direction = direction if direction else Vec2(0,0)
def update(self, delta):
self.decelerate(delta)
def decelerate(self, delta):
self.acceleration = 0
self.direction = Vec2(0,0)
def accelerate(self, direction: Vec2):
self.direction += direction
if self.acceleration > self.max_acceleration: return
self.acceleration = 0.1 * self.max_acceleration
@property
def velocity(self) -> Vec2:
return Vec2(self.acceleration * self.direction.x, self.acceleration * self.direction.y)
@dataclass
class Body(Component):
model: Model
def get_position(self) -> Vec2:
return vec2(self.model.body.position)
def get_size(self) -> Vec2:
return vec2(self.model.size)
def get_angle(self) -> Vec2:
return -self.model.body.angle
def set_angle(self, value) -> Vec2:
self.model.body.angle = value
def get_color(self) -> Color:
return self.model.color
def get_velocity(self) -> Vec2:
return self.model.body.velocity
@property
def velocity(self) -> Vec2:
return self.get_velocity()
@property
def color(self) -> Color:
return self.get_color()
@property
def angle(self) -> Vec2:
return self.get_angle()
@property
def position(self) -> Vec2:
return self.get_position()
@property
def size(self) -> Vec2:
return self.get_size()
@property
def bottom(self) -> float:
return self.position.y + self.model.size.y
@property
def top(self) -> float:
return self.position.y
@property
def left(self) -> float:
return self.position.x
@property
def right(self) -> float:
return self.position.x + self.model.size.x
@dataclass
class Decaying(Component):
entity = None
start: float
clock: pygame.time.Clock
is_dead: bool = False
is_decaying: bool = False
current: float = None
def __init__(self, entity, start, clock, is_decaying=False, current=None):
self.entity = entity
self.start = start
self.clock = clock
self.is_decaying = is_decaying
self.current = current if current else self.start
def update(self):
if self.is_dead: return
if self.current is None:
self.current = self.start
self.current -= self.clock.get_time()
if self.current <= 0:
self.is_dead = True
color = self.entity.get_body().color
a = (color[3] * (self.current / self.start)) % 255
self.entity.change_color((color[0], color[1],color[2], a))
@dataclass
class Weapon(Component):
damage: float
fire_rate: float
bullet_speed: float
damping: float
clock: pygame.time.Clock
can_fire: bool = False
cooldown: float = 0
def update(self):
self.cooldown -= self.clock.get_time()
if self.cooldown <= 0:
self.can_fire = True
def fire(self):
if self.can_fire:
self.cooldown = self.fire_rate
self.can_fire = False | gg/ecs/components.py | import uuid
import pygame
from dataclasses import dataclass
from ..style import Color
from ..structures import Vec2
from . import physics
Model = physics.Model
vec2 = physics.vec2
@dataclass
class Component:
entity_id = None
def update(self, delta) -> None: pass
@property
def class_name(self):
return self.__class__.__name__
def update(self):
pass
@dataclass
class Stats(Component):
health: int
strength: int
defense: int
agility: int
def change_health(self, amount):
self.health += amount
def change_strength(self, amount):
self.strength += amount
def change_defense(self, amount):
self.defense += amount
def change_agility(self, amount):
self.agility += amount
@property
def is_alive(self):
return self.health >= 0
@dataclass
class Accelerator(Component):
acceleration: float
max_acceleration: float
direction: Vec2 = None
def __init__(self, acceleration = 0, max_acceleration = 0, direction = None):
self.acceleration = acceleration
self.max_acceleration = max_acceleration
self.direction = direction if direction else Vec2(0,0)
def update(self, delta):
self.decelerate(delta)
def decelerate(self, delta):
self.acceleration = 0
self.direction = Vec2(0,0)
def accelerate(self, direction: Vec2):
self.direction += direction
if self.acceleration > self.max_acceleration: return
self.acceleration = 0.1 * self.max_acceleration
@property
def velocity(self) -> Vec2:
return Vec2(self.acceleration * self.direction.x, self.acceleration * self.direction.y)
@dataclass
class Body(Component):
model: Model
def get_position(self) -> Vec2:
return vec2(self.model.body.position)
def get_size(self) -> Vec2:
return vec2(self.model.size)
def get_angle(self) -> Vec2:
return -self.model.body.angle
def set_angle(self, value) -> Vec2:
self.model.body.angle = value
def get_color(self) -> Color:
return self.model.color
def get_velocity(self) -> Vec2:
return self.model.body.velocity
@property
def velocity(self) -> Vec2:
return self.get_velocity()
@property
def color(self) -> Color:
return self.get_color()
@property
def angle(self) -> Vec2:
return self.get_angle()
@property
def position(self) -> Vec2:
return self.get_position()
@property
def size(self) -> Vec2:
return self.get_size()
@property
def bottom(self) -> float:
return self.position.y + self.model.size.y
@property
def top(self) -> float:
return self.position.y
@property
def left(self) -> float:
return self.position.x
@property
def right(self) -> float:
return self.position.x + self.model.size.x
@dataclass
class Decaying(Component):
entity = None
start: float
clock: pygame.time.Clock
is_dead: bool = False
is_decaying: bool = False
current: float = None
def __init__(self, entity, start, clock, is_decaying=False, current=None):
self.entity = entity
self.start = start
self.clock = clock
self.is_decaying = is_decaying
self.current = current if current else self.start
def update(self):
if self.is_dead: return
if self.current is None:
self.current = self.start
self.current -= self.clock.get_time()
if self.current <= 0:
self.is_dead = True
color = self.entity.get_body().color
a = (color[3] * (self.current / self.start)) % 255
self.entity.change_color((color[0], color[1],color[2], a))
@dataclass
class Weapon(Component):
damage: float
fire_rate: float
bullet_speed: float
damping: float
clock: pygame.time.Clock
can_fire: bool = False
cooldown: float = 0
def update(self):
self.cooldown -= self.clock.get_time()
if self.cooldown <= 0:
self.can_fire = True
def fire(self):
if self.can_fire:
self.cooldown = self.fire_rate
self.can_fire = False | 0.892773 | 0.402627 |
import matplotlib.pyplot as plt
import cv2
import numpy as np
import pandas as pd
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Reshape,LeakyReLU, Dropout
import tensorflow as tf
from tensorflow.keras.layers import AveragePooling2D,UpSampling2D
from tensorflow import keras
ab = np.load('ab1.npy')
gray = np.load('gray_scale.npy')
def batch_prep (gray_img,batch_size=100):
img=np.zeros((batch_size,224,224,3))
for i in range (0,3):
img[:batch_size,:,:,i]=gray_img[:batch_size]
return img
img_in=batch_prep(gray,batch_size=300)
def get_rbg(gray_imgs,ab_imgs,n=10):
img1=np.zeros((n,224,224,3))
img1[:,:,:,0]=gray_imgs[0:n:]
img1[:,:,:,1:]=ab_imgs[0:n]
img1=img1.astype('uint8')
imgs=[]
for i in range(0,n):
imgs.append(cv2.cvtColor(img1[i],cv2.COLOR_LAB2RGB))
imgs=np.array(imgs)
return imgs
img_out = get_rbg(gray_imgs = gray, ab_imgs = ab, n = 300)
model = Sequential()
model.add(Conv2D(strides=1,kernel_size=3,filters=12,use_bias=True,bias_initializer=tf.keras.initializers.RandomUniform(minval=0.05,maxval=0.05),padding="valid",activation=tf.nn.relu))
model.add(Conv2DTranspose(strides=1,kernel_size=3,filters=12,use_bias=True,bias_initializer=tf.keras.initializers.RandomUniform(minval=0.05,maxval=0.05),padding="valid",activation=tf.nn.relu))
model.add(LeakyReLU(0.6))
model.add(Dropout(0.4))
model.add(Conv2D(strides=1,kernel_size=3,filters=12,use_bias=True,bias_initializer=tf.keras.initializers.RandomUniform(minval=0.05,maxval=0.05),padding="valid",activation=tf.nn.relu))
model.add(Conv2DTranspose(strides=1,kernel_size=3,filters=3,use_bias=True,bias_initializer=tf.keras.initializers.RandomUniform(minval=0.05,maxval=0.05),padding="valid",activation=tf.nn.relu))
model.add(LeakyReLU(0.6))
model.add(Dropout(0.4))
model.add(Conv2D(strides=1,kernel_size=3,filters=12,use_bias=True,bias_initializer=tf.keras.initializers.RandomUniform(minval=0.05,maxval=0.05),padding="valid",activation=tf.nn.relu))
model.add(Conv2DTranspose(strides=1,kernel_size=3,filters=3,use_bias=True,bias_initializer=tf.keras.initializers.RandomUniform(minval=0.05,maxval=0.05),padding="valid",activation=tf.nn.relu))
model.add(LeakyReLU(0.6))
model.add(Dropout(0.4))
model.add(Conv2D(strides=1,kernel_size=3,filters=12,use_bias=True,bias_initializer=tf.keras.initializers.RandomUniform(minval=0.05,maxval=0.05),padding="valid",activation=tf.nn.relu))
model.add(Conv2DTranspose(strides=1,kernel_size=3,filters=3,use_bias=True,bias_initializer=tf.keras.initializers.RandomUniform(minval=0.05,maxval=0.05),padding="valid",activation=tf.nn.relu))
model.add(AveragePooling2D(pool_size=(2,2)))
model.add(UpSampling2D((2,2)))
model.add(LeakyReLU(0.6))
model.add(Dropout(0.4))
model.add(Conv2D(strides=1,kernel_size=3,filters=12,use_bias=True,bias_initializer=tf.keras.initializers.RandomUniform(minval=0.05,maxval=0.05),padding="valid",activation=tf.nn.relu))
model.add(Conv2DTranspose(strides=1,kernel_size=3,filters=3,use_bias=True,bias_initializer=tf.keras.initializers.RandomUniform(minval=0.05,maxval=0.05),padding="valid",activation=tf.nn.relu))
model.add(LeakyReLU(0.6))
model.add(Dropout(0.4))
model.compile(optimizer=tf.keras.optimizers.Adam(),loss='mape',metrics=tf.keras.metrics.Accuracy())
model.fit(img_in,img_out,epochs=10,batch_size=16)
prediction=model.predict(img_in)
model.save('model_color.h5')
# plt.imshow(prediction[29])
# plt.show() | colorize_train.py | import matplotlib.pyplot as plt
import cv2
import numpy as np
import pandas as pd
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Reshape,LeakyReLU, Dropout
import tensorflow as tf
from tensorflow.keras.layers import AveragePooling2D,UpSampling2D
from tensorflow import keras
ab = np.load('ab1.npy')
gray = np.load('gray_scale.npy')
def batch_prep (gray_img,batch_size=100):
img=np.zeros((batch_size,224,224,3))
for i in range (0,3):
img[:batch_size,:,:,i]=gray_img[:batch_size]
return img
img_in=batch_prep(gray,batch_size=300)
def get_rbg(gray_imgs,ab_imgs,n=10):
img1=np.zeros((n,224,224,3))
img1[:,:,:,0]=gray_imgs[0:n:]
img1[:,:,:,1:]=ab_imgs[0:n]
img1=img1.astype('uint8')
imgs=[]
for i in range(0,n):
imgs.append(cv2.cvtColor(img1[i],cv2.COLOR_LAB2RGB))
imgs=np.array(imgs)
return imgs
img_out = get_rbg(gray_imgs = gray, ab_imgs = ab, n = 300)
model = Sequential()
model.add(Conv2D(strides=1,kernel_size=3,filters=12,use_bias=True,bias_initializer=tf.keras.initializers.RandomUniform(minval=0.05,maxval=0.05),padding="valid",activation=tf.nn.relu))
model.add(Conv2DTranspose(strides=1,kernel_size=3,filters=12,use_bias=True,bias_initializer=tf.keras.initializers.RandomUniform(minval=0.05,maxval=0.05),padding="valid",activation=tf.nn.relu))
model.add(LeakyReLU(0.6))
model.add(Dropout(0.4))
model.add(Conv2D(strides=1,kernel_size=3,filters=12,use_bias=True,bias_initializer=tf.keras.initializers.RandomUniform(minval=0.05,maxval=0.05),padding="valid",activation=tf.nn.relu))
model.add(Conv2DTranspose(strides=1,kernel_size=3,filters=3,use_bias=True,bias_initializer=tf.keras.initializers.RandomUniform(minval=0.05,maxval=0.05),padding="valid",activation=tf.nn.relu))
model.add(LeakyReLU(0.6))
model.add(Dropout(0.4))
model.add(Conv2D(strides=1,kernel_size=3,filters=12,use_bias=True,bias_initializer=tf.keras.initializers.RandomUniform(minval=0.05,maxval=0.05),padding="valid",activation=tf.nn.relu))
model.add(Conv2DTranspose(strides=1,kernel_size=3,filters=3,use_bias=True,bias_initializer=tf.keras.initializers.RandomUniform(minval=0.05,maxval=0.05),padding="valid",activation=tf.nn.relu))
model.add(LeakyReLU(0.6))
model.add(Dropout(0.4))
model.add(Conv2D(strides=1,kernel_size=3,filters=12,use_bias=True,bias_initializer=tf.keras.initializers.RandomUniform(minval=0.05,maxval=0.05),padding="valid",activation=tf.nn.relu))
model.add(Conv2DTranspose(strides=1,kernel_size=3,filters=3,use_bias=True,bias_initializer=tf.keras.initializers.RandomUniform(minval=0.05,maxval=0.05),padding="valid",activation=tf.nn.relu))
model.add(AveragePooling2D(pool_size=(2,2)))
model.add(UpSampling2D((2,2)))
model.add(LeakyReLU(0.6))
model.add(Dropout(0.4))
model.add(Conv2D(strides=1,kernel_size=3,filters=12,use_bias=True,bias_initializer=tf.keras.initializers.RandomUniform(minval=0.05,maxval=0.05),padding="valid",activation=tf.nn.relu))
model.add(Conv2DTranspose(strides=1,kernel_size=3,filters=3,use_bias=True,bias_initializer=tf.keras.initializers.RandomUniform(minval=0.05,maxval=0.05),padding="valid",activation=tf.nn.relu))
model.add(LeakyReLU(0.6))
model.add(Dropout(0.4))
model.compile(optimizer=tf.keras.optimizers.Adam(),loss='mape',metrics=tf.keras.metrics.Accuracy())
model.fit(img_in,img_out,epochs=10,batch_size=16)
prediction=model.predict(img_in)
model.save('model_color.h5')
# plt.imshow(prediction[29])
# plt.show() | 0.664867 | 0.400632 |
from flask import abort, escape, Flask, render_template, request, session
from functools import wraps
import json
import sys
import uuid
# Flask application object; static assets are served from ./static.
app = Flask(__name__, static_folder="static")
# Decorators
def requires_admin(f):
    """Route decorator: respond 404 unless the session belongs to the administrator."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        logged_in = "team_id" in session
        if logged_in and session["team_id"] == settings["admin_id"]:
            return f(*args, **kwargs)
        # 404 (not 403) so outsiders cannot even confirm the endpoint exists.
        abort(404)
    return wrapper
def requires_team_leader(f):
    """Route decorator: respond 404 unless the session id matches some team's leader."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        if "team_id" not in session:
            abort(404)
        if not any(t["leader"] == session["team_id"] for t in teams):
            abort(404)
        return f(*args, **kwargs)
    return wrapper
def requires_team_member(f):
    """Route decorator: respond 404 unless the session id matches some team's member."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        if "team_id" not in session:
            abort(404)
        if not any(t["member"] == session["team_id"] for t in teams):
            abort(404)
        return f(*args, **kwargs)
    return wrapper
def requires_team_leader_or_member(f):
    """Route decorator: respond 404 unless the session id is a team leader OR a
    team member.

    Bug fix: the original chained two independent `not in` aborts, so it
    rejected anyone who was not in BOTH the leader and the member lists —
    i.e. it only admitted an id that was simultaneously a leader and a member,
    the opposite of what the name promises. Either role must now suffice.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if "team_id" not in session:
            abort(404)
        team_id = session["team_id"]
        is_leader = team_id in [t["leader"] for t in teams]
        is_member = team_id in [t["member"] for t in teams]
        if not (is_leader or is_member):
            abort(404)
        return f(*args, **kwargs)
    return decorated_function
def requires_login(f):
    """Route decorator: respond 404 unless the session id is the administrator,
    a team leader, or a team member."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        if "team_id" not in session:
            abort(404)
        else:
            tid = session["team_id"]
            # Any recognised role is enough to pass.
            known = (tid == settings["admin_id"]
                     or tid in [t["leader"] for t in teams]
                     or tid in [t["member"] for t in teams])
            if not known:
                abort(404)
        return f(*args, **kwargs)
    return wrapper
def requires_post(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if request.method == "GET":
abort(405)
return f(*args, **kwargs)
return decorated_function
# Routes
@app.route("/")
def main():
if "team_id" in session:
if session["team_id"] == settings["admin_id"]:
# Administrator, so show admin page
return render_template("admin.html", **settings)
elif session["team_id"] in [t["leader"] for t in teams]:
# Team leader, so show entry page
team_data = [t for t in teams if t["leader"] == session["team_id"]][0]
return render_template("leader.html", **settings, team_data=team_data)
elif session["team_id"] in [t["member"] for t in teams]:
# Team member, so show submitted page
team_data = [t for t in teams if t["member"] == session["team_id"]][0]
return render_template("member.html", **settings, team_data=team_data)
# Not yet part of a team or an admin, so show basic index page
return render_template("index.html", **settings)
@app.route("/api/join", methods=["POST"])
def join():
if "team_id" not in request.form:
return "Team not specified", 400
requested_id = request.form["team_id"].lower()
if requested_id == settings["admin_id"] or requested_id in [t["leader"] for t in teams] or requested_id in [t["member"] for t in teams]:
session["team_id"] = requested_id
return "Team successfully joined", 200
else:
return "Team not found", 404
@app.route("/api/leave")
def leave():
if "team_id" in session:
session.pop("team_id")
return "Team successfully left", 200
else:
return "Already not in a team", 400
@app.route("/api/round/start", methods=["GET", "POST"])
@requires_admin
@requires_post
def round_start():
if quiz["state"] != "preround":
return "Quiz not expecting to start a round", 403
elif "question_count" not in request.form or request.form["question_count"] == "":
return "Question count not specified", 400
else:
try:
question_count = int(request.form["question_count"])
except:
return "Question count not integer", 400
quiz["question_count"] = question_count
quiz["state"] = "answering"
quiz["round_id"] = str(uuid.uuid4())[:8]
for i in range(len(teams)):
teams[i]["submitted"] = False
teams[i]["answers"] = [""] * question_count
return f"Round started with {question_count} question{'s' if question_count != 1 else ''}", 200
@app.route("/api/round/stop", methods=["GET", "POST"])
@requires_admin
@requires_post
def round_stop():
if quiz["state"] != "answering":
return "Quiz not expecting to stop a round", 403
else:
quiz["state"] = "postround"
return "Round stopped", 200
@app.route("/api/round/complete", methods=["GET", "POST"])
@requires_admin
@requires_post
def round_complete():
if quiz["state"] != "postround":
return "Quiz not expecting to complete a round", 403
else:
quiz["state"] = "preround"
return "Round complete, waiting to start a new round", 200
@app.route("/api/status")
@requires_login
def status():
if session["team_id"] == settings["admin_id"]:
if quiz["state"] == "preround":
return {"state": quiz["state"],
"teams": [{"name": t["name"], "leader": t["leader"], "member": t["member"]} for t in teams]}, 200
elif quiz["state"] == "answering":
return {"state": quiz["state"],
"question_count": quiz["question_count"],
"round_id": quiz["round_id"],
"teams": [{"name": t["name"], "leader": t["leader"], "member": t["member"]} for t in teams],
"submitted": [t["name"] for t in teams if t["submitted"]]}, 200
elif quiz["state"] == "postround":
return {"state": quiz["state"],
"question_count": quiz["question_count"],
"round_id": quiz["round_id"],
"teams": [{"name": t["name"], "leader": t["leader"], "member": t["member"]} for t in teams],
"submissions": [{"name": t["name"], "answers": t["answers"]} for t in teams if t["submitted"]]}, 200
else:
return {"state": "invalid",
"teams": [{"name": t["name"], "leader": t["leader"], "member": t["member"]} for t in teams]}, 500
else:
if quiz["state"] == "preround":
return {"state": "preround"}, 200
elif quiz["state"] == "answering":
team_data = [t for t in teams if t["leader"] == session["team_id"] or t["member"] == session["team_id"]][0]
if team_data["submitted"]:
return {"state": "answering",
"question_count": quiz["question_count"],
"round_id": quiz["round_id"],
"submitted": team_data["submitted"],
"answers": team_data["answers"]}, 200
else:
return {"state": "answering",
"question_count": quiz["question_count"],
"round_id": quiz["round_id"],
"submitted": team_data["submitted"]}, 200
elif quiz["state"] == "postround":
team_data = [t for t in teams if t["leader"] == session["team_id"] or t["member"] == session["team_id"]][0]
return {"state": "postround",
"question_count": quiz["question_count"],
"round_id": quiz["round_id"],
"answers": team_data["answers"]}, 200
else:
return {"state": "invalid"}, 500
@app.route("/api/answers.txt")
@requires_admin
def answers():
if quiz["state"] != "postround":
return "Quiz not expecting to return an answers file", 403
else:
return "\n\n".join(
t['name'] + "\n" +
"\n".join(f"{i+1}) {a}" for i, a in enumerate(t['answers']))
for t in teams if t["submitted"]
), 200
@app.route("/api/create", methods=["GET", "POST"])
@requires_admin
@requires_post
def create():
if "team_name" not in request.form or request.form["team_name"] == "":
return "Team name not specified", 400
else:
existing_ids = [t["leader"] for t in teams] + [t["member"] for t in teams]
leader_id = str(uuid.uuid4())[:8]
while leader_id in existing_ids:
leader_id = str(uuid.uuid4())[:8]
member_id = str(uuid.uuid4())[:8]
while member_id in existing_ids:
member_id = str(uuid.uuid4())[:8]
teams.append({"name": escape(request.form["team_name"]),
"leader": leader_id,
"member": member_id,
"submitted": False,
"answers": []})
return {"leader": leader_id, "member": member_id}, 200
@app.route("/api/exportstate")
@requires_admin
def exportstate():
try:
with open("state_data.json", "w") as f:
state = json.dumps([quiz, teams])
f.write(state)
return "State exported successfully", 200
except Exception as e:
return "State failed to export:\n" + str(e), 500
@app.route("/api/submit", methods=["POST"])
@requires_team_leader
def submit():
if quiz["state"] != "answering":
return "Quiz not expecting to accept an answer submission", 403
elif "answers" not in request.form:
return "Answers not specified", 400
t = [i for i, t in enumerate(teams) if t["leader"] == session["team_id"]][0]
teams[t]["submitted"] = True
teams[t]["answers"] = [escape(a) for a in json.loads(request.form["answers"])]
return "Answers submitted successfully", 200
if __name__ == "__main__":
with open("quiz_settings.json") as f:
settings = json.loads(f.read())
if "" in settings.values():
print("All settings require values: please check quiz_settings.json")
exit(1)
if len(sys.argv) > 1:
print("Using predefined state from " + sys.argv[1])
with open(sys.argv[1]) as f:
quiz, teams = json.loads(f.read())
else:
quiz = {"state": "preround", "question_count": 0}
teams = []
app.secret_key = settings["secret_key"]
if settings["https_enabled"]:
if settings["ssl_fullchain"] != None and settings["ssl_privkey"] != None:
print("Running with HTTPS enabled")
app.run(host="0.0.0.0", port=443, ssl_context=(settings["ssl_fullchain"], settings["ssl_privkey"]))
exit()
print("Requested to run with HTTPS enabled, but ssl_fullchain or ssl_privkey settings not provided")
print("Running with HTTPS disabled")
app.run(host="0.0.0.0", port=80) | app.py | from flask import abort, escape, Flask, render_template, request, session
from functools import wraps
import json
import sys
import uuid
app = Flask(__name__, static_folder="static")
# Decorators
def requires_admin(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if "team_id" not in session or session["team_id"] != settings["admin_id"]:
abort(404)
return f(*args, **kwargs)
return decorated_function
def requires_team_leader(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if "team_id" not in session:
abort(404)
elif session["team_id"] not in [t["leader"] for t in teams]:
abort(404)
return f(*args, **kwargs)
return decorated_function
def requires_team_member(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if "team_id" not in session:
abort(404)
elif session["team_id"] not in [t["member"] for t in teams]:
abort(404)
return f(*args, **kwargs)
return decorated_function
def requires_team_leader_or_member(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if "team_id" not in session:
abort(404)
elif session["team_id"] not in [t["leader"] for t in teams]:
abort(404)
elif session["team_id"] not in [t["member"] for t in teams]:
abort(404)
return f(*args, **kwargs)
return decorated_function
def requires_login(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if "team_id" not in session:
abort(404)
elif session["team_id"] != settings["admin_id"] and session["team_id"] not in [t["leader"] for t in teams] and session["team_id"] not in [t["member"] for t in teams]:
abort(404)
return f(*args, **kwargs)
return decorated_function
def requires_post(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if request.method == "GET":
abort(405)
return f(*args, **kwargs)
return decorated_function
# Routes
@app.route("/")
def main():
if "team_id" in session:
if session["team_id"] == settings["admin_id"]:
# Administrator, so show admin page
return render_template("admin.html", **settings)
elif session["team_id"] in [t["leader"] for t in teams]:
# Team leader, so show entry page
team_data = [t for t in teams if t["leader"] == session["team_id"]][0]
return render_template("leader.html", **settings, team_data=team_data)
elif session["team_id"] in [t["member"] for t in teams]:
# Team member, so show submitted page
team_data = [t for t in teams if t["member"] == session["team_id"]][0]
return render_template("member.html", **settings, team_data=team_data)
# Not yet part of a team or an admin, so show basic index page
return render_template("index.html", **settings)
@app.route("/api/join", methods=["POST"])
def join():
if "team_id" not in request.form:
return "Team not specified", 400
requested_id = request.form["team_id"].lower()
if requested_id == settings["admin_id"] or requested_id in [t["leader"] for t in teams] or requested_id in [t["member"] for t in teams]:
session["team_id"] = requested_id
return "Team successfully joined", 200
else:
return "Team not found", 404
@app.route("/api/leave")
def leave():
if "team_id" in session:
session.pop("team_id")
return "Team successfully left", 200
else:
return "Already not in a team", 400
@app.route("/api/round/start", methods=["GET", "POST"])
@requires_admin
@requires_post
def round_start():
if quiz["state"] != "preround":
return "Quiz not expecting to start a round", 403
elif "question_count" not in request.form or request.form["question_count"] == "":
return "Question count not specified", 400
else:
try:
question_count = int(request.form["question_count"])
except:
return "Question count not integer", 400
quiz["question_count"] = question_count
quiz["state"] = "answering"
quiz["round_id"] = str(uuid.uuid4())[:8]
for i in range(len(teams)):
teams[i]["submitted"] = False
teams[i]["answers"] = [""] * question_count
return f"Round started with {question_count} question{'s' if question_count != 1 else ''}", 200
@app.route("/api/round/stop", methods=["GET", "POST"])
@requires_admin
@requires_post
def round_stop():
if quiz["state"] != "answering":
return "Quiz not expecting to stop a round", 403
else:
quiz["state"] = "postround"
return "Round stopped", 200
@app.route("/api/round/complete", methods=["GET", "POST"])
@requires_admin
@requires_post
def round_complete():
if quiz["state"] != "postround":
return "Quiz not expecting to complete a round", 403
else:
quiz["state"] = "preround"
return "Round complete, waiting to start a new round", 200
@app.route("/api/status")
@requires_login
def status():
if session["team_id"] == settings["admin_id"]:
if quiz["state"] == "preround":
return {"state": quiz["state"],
"teams": [{"name": t["name"], "leader": t["leader"], "member": t["member"]} for t in teams]}, 200
elif quiz["state"] == "answering":
return {"state": quiz["state"],
"question_count": quiz["question_count"],
"round_id": quiz["round_id"],
"teams": [{"name": t["name"], "leader": t["leader"], "member": t["member"]} for t in teams],
"submitted": [t["name"] for t in teams if t["submitted"]]}, 200
elif quiz["state"] == "postround":
return {"state": quiz["state"],
"question_count": quiz["question_count"],
"round_id": quiz["round_id"],
"teams": [{"name": t["name"], "leader": t["leader"], "member": t["member"]} for t in teams],
"submissions": [{"name": t["name"], "answers": t["answers"]} for t in teams if t["submitted"]]}, 200
else:
return {"state": "invalid",
"teams": [{"name": t["name"], "leader": t["leader"], "member": t["member"]} for t in teams]}, 500
else:
if quiz["state"] == "preround":
return {"state": "preround"}, 200
elif quiz["state"] == "answering":
team_data = [t for t in teams if t["leader"] == session["team_id"] or t["member"] == session["team_id"]][0]
if team_data["submitted"]:
return {"state": "answering",
"question_count": quiz["question_count"],
"round_id": quiz["round_id"],
"submitted": team_data["submitted"],
"answers": team_data["answers"]}, 200
else:
return {"state": "answering",
"question_count": quiz["question_count"],
"round_id": quiz["round_id"],
"submitted": team_data["submitted"]}, 200
elif quiz["state"] == "postround":
team_data = [t for t in teams if t["leader"] == session["team_id"] or t["member"] == session["team_id"]][0]
return {"state": "postround",
"question_count": quiz["question_count"],
"round_id": quiz["round_id"],
"answers": team_data["answers"]}, 200
else:
return {"state": "invalid"}, 500
@app.route("/api/answers.txt")
@requires_admin
def answers():
if quiz["state"] != "postround":
return "Quiz not expecting to return an answers file", 403
else:
return "\n\n".join(
t['name'] + "\n" +
"\n".join(f"{i+1}) {a}" for i, a in enumerate(t['answers']))
for t in teams if t["submitted"]
), 200
@app.route("/api/create", methods=["GET", "POST"])
@requires_admin
@requires_post
def create():
if "team_name" not in request.form or request.form["team_name"] == "":
return "Team name not specified", 400
else:
existing_ids = [t["leader"] for t in teams] + [t["member"] for t in teams]
leader_id = str(uuid.uuid4())[:8]
while leader_id in existing_ids:
leader_id = str(uuid.uuid4())[:8]
member_id = str(uuid.uuid4())[:8]
while member_id in existing_ids:
member_id = str(uuid.uuid4())[:8]
teams.append({"name": escape(request.form["team_name"]),
"leader": leader_id,
"member": member_id,
"submitted": False,
"answers": []})
return {"leader": leader_id, "member": member_id}, 200
@app.route("/api/exportstate")
@requires_admin
def exportstate():
try:
with open("state_data.json", "w") as f:
state = json.dumps([quiz, teams])
f.write(state)
return "State exported successfully", 200
except Exception as e:
return "State failed to export:\n" + str(e), 500
@app.route("/api/submit", methods=["POST"])
@requires_team_leader
def submit():
if quiz["state"] != "answering":
return "Quiz not expecting to accept an answer submission", 403
elif "answers" not in request.form:
return "Answers not specified", 400
t = [i for i, t in enumerate(teams) if t["leader"] == session["team_id"]][0]
teams[t]["submitted"] = True
teams[t]["answers"] = [escape(a) for a in json.loads(request.form["answers"])]
return "Answers submitted successfully", 200
if __name__ == "__main__":
with open("quiz_settings.json") as f:
settings = json.loads(f.read())
if "" in settings.values():
print("All settings require values: please check quiz_settings.json")
exit(1)
if len(sys.argv) > 1:
print("Using predefined state from " + sys.argv[1])
with open(sys.argv[1]) as f:
quiz, teams = json.loads(f.read())
else:
quiz = {"state": "preround", "question_count": 0}
teams = []
app.secret_key = settings["secret_key"]
if settings["https_enabled"]:
if settings["ssl_fullchain"] != None and settings["ssl_privkey"] != None:
print("Running with HTTPS enabled")
app.run(host="0.0.0.0", port=443, ssl_context=(settings["ssl_fullchain"], settings["ssl_privkey"]))
exit()
print("Requested to run with HTTPS enabled, but ssl_fullchain or ssl_privkey settings not provided")
print("Running with HTTPS disabled")
app.run(host="0.0.0.0", port=80) | 0.340485 | 0.119691 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('customer_id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=100)),
('country', models.CharField(blank=True, max_length=50, null=True)),
('adress', models.CharField(blank=True, max_length=255, null=True)),
('contact_name', models.CharField(blank=True, max_length=100, null=True)),
],
),
migrations.CreateModel(
name='Engineering',
fields=[
('eng_id', models.AutoField(primary_key=True, serialize=False)),
('reference', models.CharField(blank=True, max_length=50, null=True)),
('instructions', models.CharField(blank=True, max_length=500, null=True)),
],
),
migrations.CreateModel(
name='Order',
fields=[
('order_id', models.AutoField(primary_key=True, serialize=False)),
('quantity', models.IntegerField(blank=True, null=True)),
('value', models.FloatField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('product_id', models.AutoField(primary_key=True, serialize=False)),
('p_type', models.CharField(choices=[('BBT', 'BBT'), ('BTY', 'BTY'), ('BLS', 'BLS'), ('BRS', 'BRS'), ('CKS', 'CKS'), ('FMI', 'FMI'), ('GRD', 'GRD'), ('LID', 'LID'), ('MFT', 'MFT'), ('SHR', 'SHR'), ('SPN', 'SPN'), ('TRL', 'TRL')], default='BBT', max_length=20)),
('drawing_no', models.IntegerField()),
('description', models.CharField(max_length=255)),
('technology', models.CharField(blank=True, max_length=500, null=True)),
],
),
migrations.CreateModel(
name='Request',
fields=[
('project_no', models.IntegerField(primary_key=True, serialize=False)),
('r_type', models.CharField(choices=[('ORDER', 'Order'), ('SAMPLE', 'Sample'), ('ECR', 'Ecr'), ('ERF', 'Erf')], default='ORDER', max_length=20)),
('post_date', models.DateTimeField(verbose_name='date posted')),
('request_date', models.DateField(blank=True, null=True, verbose_name='request date')),
('estimate', models.DateField(blank=True, null=True, verbose_name='estimate completion date')),
('status', models.CharField(choices=[('QUEUE', 'Queue'), ('WIP', 'Wip'), ('COMPLETE', 'Complete'), ('HOLD', 'Hold')], default='QUEUE', max_length=20)),
('comments', models.CharField(blank=True, max_length=255, null=True)),
('customer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='orders.Customer')),
('eng', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='orders.Engineering')),
('order', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='orders.Order')),
('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='orders.Product')),
],
),
migrations.CreateModel(
name='Staff',
fields=[
('staff_id', models.AutoField(primary_key=True, serialize=False)),
('role', models.CharField(max_length=50)),
('name', models.CharField(max_length=100)),
('email', models.CharField(blank=True, max_length=50, null=True)),
('phone', models.IntegerField(blank=True, null=True)),
('location', models.CharField(blank=True, max_length=50, null=True)),
('user', models.CharField(blank=True, max_length=100, null=True)),
],
),
migrations.AddField(
model_name='request',
name='responsable',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='orders.Staff'),
),
migrations.AddField(
model_name='product',
name='engineer',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='orders.Staff'),
),
migrations.AddField(
model_name='customer',
name='sales_responsable',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='orders.Staff'),
),
] | APMS/apps/orders/migrations/0001_initial.py |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('customer_id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=100)),
('country', models.CharField(blank=True, max_length=50, null=True)),
('adress', models.CharField(blank=True, max_length=255, null=True)),
('contact_name', models.CharField(blank=True, max_length=100, null=True)),
],
),
migrations.CreateModel(
name='Engineering',
fields=[
('eng_id', models.AutoField(primary_key=True, serialize=False)),
('reference', models.CharField(blank=True, max_length=50, null=True)),
('instructions', models.CharField(blank=True, max_length=500, null=True)),
],
),
migrations.CreateModel(
name='Order',
fields=[
('order_id', models.AutoField(primary_key=True, serialize=False)),
('quantity', models.IntegerField(blank=True, null=True)),
('value', models.FloatField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('product_id', models.AutoField(primary_key=True, serialize=False)),
('p_type', models.CharField(choices=[('BBT', 'BBT'), ('BTY', 'BTY'), ('BLS', 'BLS'), ('BRS', 'BRS'), ('CKS', 'CKS'), ('FMI', 'FMI'), ('GRD', 'GRD'), ('LID', 'LID'), ('MFT', 'MFT'), ('SHR', 'SHR'), ('SPN', 'SPN'), ('TRL', 'TRL')], default='BBT', max_length=20)),
('drawing_no', models.IntegerField()),
('description', models.CharField(max_length=255)),
('technology', models.CharField(blank=True, max_length=500, null=True)),
],
),
migrations.CreateModel(
name='Request',
fields=[
('project_no', models.IntegerField(primary_key=True, serialize=False)),
('r_type', models.CharField(choices=[('ORDER', 'Order'), ('SAMPLE', 'Sample'), ('ECR', 'Ecr'), ('ERF', 'Erf')], default='ORDER', max_length=20)),
('post_date', models.DateTimeField(verbose_name='date posted')),
('request_date', models.DateField(blank=True, null=True, verbose_name='request date')),
('estimate', models.DateField(blank=True, null=True, verbose_name='estimate completion date')),
('status', models.CharField(choices=[('QUEUE', 'Queue'), ('WIP', 'Wip'), ('COMPLETE', 'Complete'), ('HOLD', 'Hold')], default='QUEUE', max_length=20)),
('comments', models.CharField(blank=True, max_length=255, null=True)),
('customer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='orders.Customer')),
('eng', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='orders.Engineering')),
('order', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='orders.Order')),
('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='orders.Product')),
],
),
migrations.CreateModel(
name='Staff',
fields=[
('staff_id', models.AutoField(primary_key=True, serialize=False)),
('role', models.CharField(max_length=50)),
('name', models.CharField(max_length=100)),
('email', models.CharField(blank=True, max_length=50, null=True)),
('phone', models.IntegerField(blank=True, null=True)),
('location', models.CharField(blank=True, max_length=50, null=True)),
('user', models.CharField(blank=True, max_length=100, null=True)),
],
),
migrations.AddField(
model_name='request',
name='responsable',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='orders.Staff'),
),
migrations.AddField(
model_name='product',
name='engineer',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='orders.Staff'),
),
migrations.AddField(
model_name='customer',
name='sales_responsable',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='orders.Staff'),
),
] | 0.5564 | 0.205157 |
# standards
import re
# canif
from .parser import ParserError
RE_SKIPPED = re.compile(r'(?:\s+|//.*)+')
RE_END = re.compile(r'$')
class Lexer:
"""
Splits the input text into tokens, i.e. the smallest, indivisible strings in the text.
Instances of this class keep track of where they are in the text, and advance through it gradually.
In its current implementation this is not written for performance. Maybe someday we'll look into a LEX/YACC sort of solution.
"""
def __init__(self, text):
self.text = text
self.position = 0
self.skip()
def skip(self):
"""
Advance the position past skippable characters in the text (i.e. whitespace and comments)
"""
match = RE_SKIPPED.match(self.text, self.position)
if match:
self.position = match.end()
def error(self, expected, message=None):
"""
Raise a `ParserError`. `expected` describes the token that was expected and not found at the current position.
"""
if message is None:
if not isinstance(expected, str):
expected = '/%s/' % expected.pattern
elif not re.search(r'^\w+$', expected):
expected = '`%s`' % expected
message = 'expected %s, found %r' % (
expected,
self.text[self.position : self.position + 30],
)
raise ParserError('Position %d: %s' % (self.position, message))
def pop(self, token, checked=False, do_skip=True, message=None):
"""
Match the text at the current position in the text against the given token (a `str`). Returns a boolean indicating whether a
match was found.
If `checked` is True, raise a `ParserError` rather than returning `False` when no match is false.
If `do_skip` is True (the default), advance past whitespace (by calling `self.skip()`) after the matching data.
"""
if self.text.startswith(token, self.position):
self.position += len(token)
if do_skip:
self.skip()
return True
elif checked:
self.error(token, message)
else:
return False
def pop_regex(self, regex, checked=False, do_skip=True, message=None):
"""
Same as `pop`, but accepts a regex instead of a plain string token. Returns `None` if `checked` is False (the default) and
no match is found, else returns the `Match object.
"""
match = regex.match(self.text, self.position)
if match:
self.position = match.end()
if do_skip:
self.skip()
elif checked:
self.error(regex, message)
return match
def peek(self, token):
"""
Returns a boolean indicating whether the text at the current position starts with the given `token`.
"""
return self.text.startswith(token, self.position)
def peek_regex(self, regex):
"""
Same as `peek`, but accepts a regex instead of a plain string token.
"""
regex = re.compile(regex)
return regex.match(self.text, self.position)
def end(self, checked=False):
return self.pop_regex(RE_END, checked=checked)
def flush(self, file_out):
"""
Writes to the given file object whatever was left unconsumed in our input data.
"""
file_out.write(self.text[self.position:]) | canif/lexer.py |
# standards
import re
# canif
from .parser import ParserError
RE_SKIPPED = re.compile(r'(?:\s+|//.*)+')
RE_END = re.compile(r'$')
class Lexer:
"""
Splits the input text into tokens, i.e. the smallest, indivisible strings in the text.
Instances of this class keep track of where they are in the text, and advance through it gradually.
In its current implementation this is not written for performance. Maybe someday we'll look into a LEX/YACC sort of solution.
"""
def __init__(self, text):
self.text = text
self.position = 0
self.skip()
def skip(self):
"""
Advance the position past skippable characters in the text (i.e. whitespace and comments)
"""
match = RE_SKIPPED.match(self.text, self.position)
if match:
self.position = match.end()
def error(self, expected, message=None):
"""
Raise a `ParserError`. `expected` describes the token that was expected and not found at the current position.
"""
if message is None:
if not isinstance(expected, str):
expected = '/%s/' % expected.pattern
elif not re.search(r'^\w+$', expected):
expected = '`%s`' % expected
message = 'expected %s, found %r' % (
expected,
self.text[self.position : self.position + 30],
)
raise ParserError('Position %d: %s' % (self.position, message))
def pop(self, token, checked=False, do_skip=True, message=None):
"""
Match the text at the current position in the text against the given token (a `str`). Returns a boolean indicating whether a
match was found.
If `checked` is True, raise a `ParserError` rather than returning `False` when no match is false.
If `do_skip` is True (the default), advance past whitespace (by calling `self.skip()`) after the matching data.
"""
if self.text.startswith(token, self.position):
self.position += len(token)
if do_skip:
self.skip()
return True
elif checked:
self.error(token, message)
else:
return False
def pop_regex(self, regex, checked=False, do_skip=True, message=None):
"""
Same as `pop`, but accepts a regex instead of a plain string token. Returns `None` if `checked` is False (the default) and
no match is found, else returns the `Match object.
"""
match = regex.match(self.text, self.position)
if match:
self.position = match.end()
if do_skip:
self.skip()
elif checked:
self.error(regex, message)
return match
def peek(self, token):
"""
Returns a boolean indicating whether the text at the current position starts with the given `token`.
"""
return self.text.startswith(token, self.position)
def peek_regex(self, regex):
"""
Same as `peek`, but accepts a regex instead of a plain string token.
"""
regex = re.compile(regex)
return regex.match(self.text, self.position)
def end(self, checked=False):
return self.pop_regex(RE_END, checked=checked)
def flush(self, file_out):
"""
Writes to the given file object whatever was left unconsumed in our input data.
"""
file_out.write(self.text[self.position:]) | 0.683947 | 0.531574 |
import warnings
import numpy as np
import pandas as pd
import pytest
from sklearn.metrics import auc, confusion_matrix, matthews_corrcoef, roc_curve
from sklearn.preprocessing import binarize
from src.models.metrics_utils import (confusion_matrix_to_dataframe,
mcc_auc_score, mcc_curve)
@pytest.fixture
def expected_confusion_matrix_numpy():
    """Hand-computed confusion matrix with margin totals.

    Corresponds to y_true=[0, 1, 0, 1] vs y_pred=[1, 1, 1, 0]: the last
    row/column hold the observed/predicted totals.
    """
    return np.array(
        [[0, 2, 2],
         [1, 1, 2],
         [1, 3, 4]],
        dtype='int64')
@pytest.fixture
def expected_confusion_matrix_default(expected_confusion_matrix_numpy):
    """Expected dataframe when confusion_matrix_to_dataframe uses its
    default row/column labels."""
    row_labels = ['Observed negative', 'Observed positive', 'Predicted total']
    col_labels = ['Predicted negative', 'Predicted positive', 'Observed total']
    return pd.DataFrame(expected_confusion_matrix_numpy,
                        index=row_labels,
                        columns=col_labels)
@pytest.fixture
def expected_confusion_matrix(expected_confusion_matrix_numpy):
    """Expected dataframe when custom "Measured"/"Classified" labels are
    passed to confusion_matrix_to_dataframe."""
    row_labels = ['Measured negative', 'Measured positive', 'Classified total']
    col_labels = ['Classified negative', 'Classified positive', 'Measured total']
    return pd.DataFrame(expected_confusion_matrix_numpy,
                        index=row_labels,
                        columns=col_labels)
@pytest.fixture
def y_true_y_score():
    """Tiny binary-classification example: (labels, classifier scores)."""
    labels = np.array([0, 0, 1, 1])
    scores = np.array([0.1, 0.4, 0.35, 0.8])
    return labels, scores
@pytest.fixture
def expected_roc_curve(y_true_y_score):
    """Reference (fpr, tpr, thresholds) from scikit-learn's roc_curve."""
    labels, scores = y_true_y_score
    return roc_curve(labels, scores)
@pytest.fixture
def expected_roc_curve_probability(expected_roc_curve):
    """ROC curve with thresholds adjusted to the [0, 1] probability range.

    The first sklearn threshold (a sentinel above the max score) is clamped
    to 1.0, and a trailing (fpr=1, tpr=1, threshold=0) point is appended.
    """
    fpr, tpr, thresholds = expected_roc_curve
    thresholds[0] = 1.0  # replace sklearn's out-of-range sentinel
    return (np.append(fpr, 1.0),
            np.append(tpr, 1.0),
            np.append(thresholds, 0.0))
@pytest.fixture
def expected_mcc_curve(y_true_y_score, expected_roc_curve):
    """Reference MCC curve computed point-by-point via matthews_corrcoef.

    Returns (mcc, tnr, tpr, thresholds); predictions use a strict
    `score > threshold` rule, matching the implementation under test.
    """
    labels, scores = y_true_y_score
    fpr, tpr, thresholds = expected_roc_curve
    mcc = np.array([
        matthews_corrcoef(labels, (scores > threshold).astype('int64'))
        for threshold in thresholds
    ])
    return mcc, 1.0 - fpr, tpr, thresholds
@pytest.fixture
def expected_mcc_curve_probability(y_true_y_score, expected_roc_curve_probability):
    """Reference MCC curve over the probability-range thresholds.

    Same construction as expected_mcc_curve, but driven by the
    [0, 1]-clamped threshold grid.
    """
    labels, scores = y_true_y_score
    fpr, tpr, thresholds = expected_roc_curve_probability
    mcc = np.array([
        matthews_corrcoef(labels, (scores > threshold).astype('int64'))
        for threshold in thresholds
    ])
    return mcc, 1.0 - fpr, tpr, thresholds
def test_confusion_matrix_to_dataframe_default_values(
        expected_confusion_matrix_default):
    """Default labels: conversion matches the 'Observed'/'Predicted' frame."""
    y_true = [0, 1, 0, 1]
    y_pred = [1, 1, 1, 0]
    conf_matrix = confusion_matrix(y_true, y_pred)
    conf_matrix_df = confusion_matrix_to_dataframe(conf_matrix)
    assert(conf_matrix_df.equals(expected_confusion_matrix_default))
def test_confusion_matrix_to_dataframe(
        expected_confusion_matrix):
    """Custom index/column/total labels propagate into the DataFrame."""
    y_true = [0, 1, 0, 1]
    y_pred = [1, 1, 1, 0]
    conf_matrix = confusion_matrix(y_true, y_pred)
    conf_matrix_df = confusion_matrix_to_dataframe(
        conf_matrix, index=expected_confusion_matrix.index[0:2],
        columns=expected_confusion_matrix.columns[0:2],
        index_total_label='Measured total',
        column_total_label='Classified total')
    assert(conf_matrix_df.equals(expected_confusion_matrix))
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
def test_mcc_curve(y_true_y_score, expected_mcc_curve):
    """mcc_curve(probability=False) matches the hand-built reference curve."""
    y_true, y_score = y_true_y_score
    expected_mcc, expected_tnr, expected_tpr, expected_thresholds = expected_mcc_curve
    mcc, tnr, tpr, thresholds = mcc_curve(y_true, y_score, probability=False)
    np.testing.assert_allclose(thresholds, expected_thresholds)
    np.testing.assert_allclose(mcc, expected_mcc)
    np.testing.assert_allclose(tnr, expected_tnr)
    np.testing.assert_allclose(tpr, expected_tpr)
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
def test_mcc_curve_probability(
        y_true_y_score, expected_mcc_curve_probability):
    """mcc_curve(probability=True) matches the probability-extended reference."""
    y_true, y_score = y_true_y_score
    expected_mcc, expected_tnr, expected_tpr, expected_thresholds = expected_mcc_curve_probability
    mcc, tnr, tpr, thresholds = mcc_curve(y_true, y_score, probability=True)
    np.testing.assert_allclose(thresholds, expected_thresholds)
    np.testing.assert_allclose(mcc, expected_mcc)
    np.testing.assert_allclose(tnr, expected_tnr)
    np.testing.assert_allclose(tpr, expected_tpr)
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
def test_mcc_auc_score(y_true_y_score, expected_mcc_curve):
    """AUC of the MCC curve, with and without threshold normalisation."""
    y_true, y_score = y_true_y_score
    mcc, _, _, thresholds = expected_mcc_curve
    expected_mcc_auc = auc(thresholds, mcc)
    mcc_auc = mcc_auc_score(
        y_true, y_score, probability=False, normalize=False)
    np.testing.assert_allclose(mcc_auc, expected_mcc_auc)
    mcc_auc = mcc_auc_score(y_true, y_score, probability=False, normalize=True)
    # Min-max normalise thresholds to [0, 1] before integrating.
    normalized_thresholds = (
        (thresholds - np.min(thresholds)) / (np.max(thresholds) - np.min(thresholds)))
    expected_mcc_auc = auc(normalized_thresholds, mcc)
    np.testing.assert_allclose(mcc_auc, expected_mcc_auc)
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
def test_mcc_auc_score_probability(y_true_y_score, expected_mcc_curve_probability):
    """Probability thresholds already span [0, 1], so normalize is a no-op."""
    y_true, y_score = y_true_y_score
    mcc, _, _, thresholds = expected_mcc_curve_probability
    expected_mcc_auc = auc(thresholds, mcc)
    mcc_auc = mcc_auc_score(y_true, y_score, probability=True, normalize=False)
    np.testing.assert_allclose(mcc_auc, expected_mcc_auc)
    # Same expected value: min(thresholds)==0 and max==1, so min-max
    # normalisation is the identity here.
    mcc_auc = mcc_auc_score(y_true, y_score, probability=True, normalize=True)
    # NOTE(review): trailing "| ..." text below is fused dataset metadata, not code.
    np.testing.assert_allclose(mcc_auc, expected_mcc_auc) | tests/src/models/test_metrics_utils.py | import warnings
import numpy as np
import pandas as pd
import pytest
from sklearn.metrics import auc, confusion_matrix, matthews_corrcoef, roc_curve
from sklearn.preprocessing import binarize
from src.models.metrics_utils import (confusion_matrix_to_dataframe,
mcc_auc_score, mcc_curve)
# NOTE(review): this span is the dataset's 'parsed_code' duplicate of the
# fixtures/tests above (tests/src/models/test_metrics_utils.py); edit the
# primary copy and keep this one in sync. Trailing "| ..." fields on the
# last line are dataset metadata columns, not code.
@pytest.fixture
def expected_confusion_matrix_numpy():
    expected = np.array([[0, 2, 2],
                         [1, 1, 2],
                         [1, 3, 4]], dtype='int64')
    return expected
@pytest.fixture
def expected_confusion_matrix_default(expected_confusion_matrix_numpy):
    expected = pd.DataFrame(
        data=expected_confusion_matrix_numpy,
        index=['Observed negative', 'Observed positive', 'Predicted total'],
        columns=['Predicted negative', 'Predicted positive', 'Observed total'])
    return expected
@pytest.fixture
def expected_confusion_matrix(expected_confusion_matrix_numpy):
    expected = pd.DataFrame(
        data=expected_confusion_matrix_numpy,
        index=['Measured negative', 'Measured positive', 'Classified total'],
        columns=['Classified negative', 'Classified positive', 'Measured total'])
    return expected
@pytest.fixture
def y_true_y_score():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    return y_true, y_score
@pytest.fixture
def expected_roc_curve(y_true_y_score):
    y_true, y_score = y_true_y_score
    fpr, tpr, thresholds = roc_curve(y_true, y_score)
    return fpr, tpr, thresholds
@pytest.fixture
def expected_roc_curve_probability(expected_roc_curve):
    fpr, tpr, thresholds = expected_roc_curve
    thresholds[0] = 1.0
    thresholds = np.append(thresholds, 0.0)
    fpr = np.append(fpr, 1.0)
    tpr = np.append(tpr, 1.0)
    return fpr, tpr, thresholds
@pytest.fixture
def expected_mcc_curve(y_true_y_score, expected_roc_curve):
    y_true, y_score = y_true_y_score
    fpr, tpr, thresholds = expected_roc_curve
    mcc = []
    for threshold in thresholds:
        y_pred = (y_score > threshold).astype('int64')
        coef = matthews_corrcoef(y_true, y_pred)
        mcc.append(coef)
    tnr = 1.0 - fpr
    return np.array(mcc), tnr, tpr, thresholds
@pytest.fixture
def expected_mcc_curve_probability(y_true_y_score, expected_roc_curve_probability):
    y_true, y_score = y_true_y_score
    fpr, tpr, thresholds = expected_roc_curve_probability
    mcc = []
    for threshold in thresholds:
        y_pred = (y_score > threshold).astype('int64')
        coef = matthews_corrcoef(y_true, y_pred)
        mcc.append(coef)
    tnr = 1.0 - fpr
    return np.array(mcc), tnr, tpr, thresholds
def test_confusion_matrix_to_dataframe_default_values(
        expected_confusion_matrix_default):
    y_true = [0, 1, 0, 1]
    y_pred = [1, 1, 1, 0]
    conf_matrix = confusion_matrix(y_true, y_pred)
    conf_matrix_df = confusion_matrix_to_dataframe(conf_matrix)
    assert(conf_matrix_df.equals(expected_confusion_matrix_default))
def test_confusion_matrix_to_dataframe(
        expected_confusion_matrix):
    y_true = [0, 1, 0, 1]
    y_pred = [1, 1, 1, 0]
    conf_matrix = confusion_matrix(y_true, y_pred)
    conf_matrix_df = confusion_matrix_to_dataframe(
        conf_matrix, index=expected_confusion_matrix.index[0:2],
        columns=expected_confusion_matrix.columns[0:2],
        index_total_label='Measured total',
        column_total_label='Classified total')
    assert(conf_matrix_df.equals(expected_confusion_matrix))
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
def test_mcc_curve(y_true_y_score, expected_mcc_curve):
    y_true, y_score = y_true_y_score
    expected_mcc, expected_tnr, expected_tpr, expected_thresholds = expected_mcc_curve
    mcc, tnr, tpr, thresholds = mcc_curve(y_true, y_score, probability=False)
    np.testing.assert_allclose(thresholds, expected_thresholds)
    np.testing.assert_allclose(mcc, expected_mcc)
    np.testing.assert_allclose(tnr, expected_tnr)
    np.testing.assert_allclose(tpr, expected_tpr)
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
def test_mcc_curve_probability(
        y_true_y_score, expected_mcc_curve_probability):
    y_true, y_score = y_true_y_score
    expected_mcc, expected_tnr, expected_tpr, expected_thresholds = expected_mcc_curve_probability
    mcc, tnr, tpr, thresholds = mcc_curve(y_true, y_score, probability=True)
    np.testing.assert_allclose(thresholds, expected_thresholds)
    np.testing.assert_allclose(mcc, expected_mcc)
    np.testing.assert_allclose(tnr, expected_tnr)
    np.testing.assert_allclose(tpr, expected_tpr)
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
def test_mcc_auc_score(y_true_y_score, expected_mcc_curve):
    y_true, y_score = y_true_y_score
    mcc, _, _, thresholds = expected_mcc_curve
    expected_mcc_auc = auc(thresholds, mcc)
    mcc_auc = mcc_auc_score(
        y_true, y_score, probability=False, normalize=False)
    np.testing.assert_allclose(mcc_auc, expected_mcc_auc)
    mcc_auc = mcc_auc_score(y_true, y_score, probability=False, normalize=True)
    normalized_thresholds = (
        (thresholds - np.min(thresholds)) / (np.max(thresholds) - np.min(thresholds)))
    expected_mcc_auc = auc(normalized_thresholds, mcc)
    np.testing.assert_allclose(mcc_auc, expected_mcc_auc)
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
def test_mcc_auc_score_probability(y_true_y_score, expected_mcc_curve_probability):
    y_true, y_score = y_true_y_score
    mcc, _, _, thresholds = expected_mcc_curve_probability
    expected_mcc_auc = auc(thresholds, mcc)
    mcc_auc = mcc_auc_score(y_true, y_score, probability=True, normalize=False)
    np.testing.assert_allclose(mcc_auc, expected_mcc_auc)
    mcc_auc = mcc_auc_score(y_true, y_score, probability=True, normalize=True)
    np.testing.assert_allclose(mcc_auc, expected_mcc_auc) | 0.852537 | 0.616618
from genshibasic.genshi import Genshi
from genshibasic.lexer import Lexer
import unittest
class LexerTestSuite(unittest.TestCase):
    """Unit tests for the Genshi BASIC Lexer (token pos, kind and lexeme)."""
    def test_hello(self):
        """A bare word lexes to a single token with an upper-cased lexeme."""
        tokens = self.__lex('hello')
        self.assertEqual(len(tokens), 1)
        self.assertEqual(tokens[0].pos, (1, 5))
        # NOTE(review): magic number 3 — presumably a word/identifier kind in
        # Genshi; confirm and replace with the named constant.
        self.assertEqual(tokens[0].kind, 3)
        self.assertEqual(tokens[0].lexeme, 'HELLO')
    def test_general(self):
        """Numbered LET statement with arithmetic lexes token-by-token."""
        actual = self.__lex('10 LET X=3+4 * 6/2')
        expected = [
            {'pos': (1, 2), 'kind': Genshi.TT_UINT, 'lexeme': '10'},
            {'pos': (4, 6), 'kind': Genshi.KW_LET, 'lexeme': 'LET'},
            {'pos': (8, 8), 'kind': Genshi.TT_IDENTIFIER, 'lexeme': 'X'},
            {'pos': (9, 9), 'kind': Genshi.SYM_EQ, 'lexeme': '='},
            {'pos': (10, 10), 'kind': Genshi.TT_UINT, 'lexeme': '3'},
            {'pos': (11, 11), 'kind': Genshi.SYM_ADD, 'lexeme': '+'},
            {'pos': (12, 12), 'kind': Genshi.TT_UINT, 'lexeme': '4'},
            {'pos': (14, 14), 'kind': Genshi.SYM_MUL, 'lexeme': '*'},
            {'pos': (16, 16), 'kind': Genshi.TT_UINT, 'lexeme': '6'},
            {'pos': (17, 17), 'kind': Genshi.SYM_DIV, 'lexeme': '/'},
            {'pos': (18, 18), 'kind': Genshi.TT_UINT, 'lexeme': '2'},
        ]
        self.__match(actual, expected)
    def test_string(self):
        """String literals: lexeme is the contents, pos spans the quotes."""
        actual = self.__lex('5 PRINT "HELLO"; " WORLD"')
        expected = [
            {'pos': (1, 1), 'kind': Genshi.TT_UINT, 'lexeme': '5'},
            {'pos': (3, 7), 'kind': Genshi.KW_PRINT, 'lexeme': 'PRINT'},
            {'pos': (9, 15), 'kind': Genshi.TT_STRING, 'lexeme': 'HELLO'},
            {'pos': (16, 16), 'kind': Genshi.SYM_SEMICOLON, 'lexeme': ';'},
            {'pos': (18, 25), 'kind': Genshi.TT_STRING, 'lexeme': ' WORLD'},
        ]
        self.__match(actual, expected)
    def test_float(self):
        """An unsigned float literal is recognised as TT_UFLOAT."""
        actual = self.__lex('250 LET PI= 3.14')
        expected = [
            {'pos': (1, 3), 'kind': Genshi.TT_UINT, 'lexeme': '250'},
            {'pos': (5, 7), 'kind': Genshi.KW_LET, 'lexeme': 'LET'},
            {'pos': (9, 10), 'kind': Genshi.TT_IDENTIFIER, 'lexeme': 'PI'},
            {'pos': (11, 11), 'kind': Genshi.SYM_EQ, 'lexeme': '='},
            {'pos': (13, 16), 'kind': Genshi.TT_UFLOAT, 'lexeme': '3.14'},
        ]
        self.__match(actual, expected)
    def test_sym2(self):
        """Two-character symbol '<>' lexes as a single SYM_NE token."""
        actual = self.__lex('100 LET X = 4 <> 5')
        expected = [
            {'pos': (1, 3), 'kind': Genshi.TT_UINT, 'lexeme': '100'},
            {'pos': (5, 7), 'kind': Genshi.KW_LET, 'lexeme': 'LET'},
            {'pos': (9, 9), 'kind': Genshi.TT_IDENTIFIER, 'lexeme': 'X'},
            {'pos': (11, 11), 'kind': Genshi.SYM_EQ, 'lexeme': '='},
            {'pos': (13, 13), 'kind': Genshi.TT_UINT, 'lexeme': '4'},
            {'pos': (15, 16), 'kind': Genshi.SYM_NE, 'lexeme': '<>'},
            {'pos': (18, 18), 'kind': Genshi.TT_UINT, 'lexeme': '5'},
        ]
        self.__match(actual, expected)
    def __lex(self, src, debug_print=False):
        """Lex src with a fresh Lexer; optionally print tokens for debugging."""
        tokens = Lexer().lex(src)
        if debug_print:
            for t in tokens:
                print(t)
        return tokens
    def __match(self, actual, expected):
        """Assert the token streams agree on length, pos, kind and lexeme."""
        self.assertEqual(len(expected), len(actual))
        for i in range(len(expected)):
            self.assertEqual(actual[i].pos, expected[i]['pos'])
            self.assertEqual(actual[i].kind, expected[i]['kind'])
            self.assertEqual(actual[i].lexeme, expected[i]['lexeme'])
if __name__ == '__main__':
    # Run the suite directly: python test_lexer.py
    # NOTE(review): trailing "| ..." text below is fused dataset metadata, not code.
    unittest.main() | test/test_lexer.py | from genshibasic.genshi import Genshi
from genshibasic.lexer import Lexer
import unittest
# NOTE(review): dataset 'parsed_code' duplicate of the LexerTestSuite above
# (test/test_lexer.py); edit the primary copy and keep this one in sync.
# Trailing "| ..." fields on the last line are dataset metadata, not code.
class LexerTestSuite(unittest.TestCase):
    def test_hello(self):
        tokens = self.__lex('hello')
        self.assertEqual(len(tokens), 1)
        self.assertEqual(tokens[0].pos, (1, 5))
        self.assertEqual(tokens[0].kind, 3)
        self.assertEqual(tokens[0].lexeme, 'HELLO')
    def test_general(self):
        actual = self.__lex('10 LET X=3+4 * 6/2')
        expected = [
            {'pos': (1, 2), 'kind': Genshi.TT_UINT, 'lexeme': '10'},
            {'pos': (4, 6), 'kind': Genshi.KW_LET, 'lexeme': 'LET'},
            {'pos': (8, 8), 'kind': Genshi.TT_IDENTIFIER, 'lexeme': 'X'},
            {'pos': (9, 9), 'kind': Genshi.SYM_EQ, 'lexeme': '='},
            {'pos': (10, 10), 'kind': Genshi.TT_UINT, 'lexeme': '3'},
            {'pos': (11, 11), 'kind': Genshi.SYM_ADD, 'lexeme': '+'},
            {'pos': (12, 12), 'kind': Genshi.TT_UINT, 'lexeme': '4'},
            {'pos': (14, 14), 'kind': Genshi.SYM_MUL, 'lexeme': '*'},
            {'pos': (16, 16), 'kind': Genshi.TT_UINT, 'lexeme': '6'},
            {'pos': (17, 17), 'kind': Genshi.SYM_DIV, 'lexeme': '/'},
            {'pos': (18, 18), 'kind': Genshi.TT_UINT, 'lexeme': '2'},
        ]
        self.__match(actual, expected)
    def test_string(self):
        actual = self.__lex('5 PRINT "HELLO"; " WORLD"')
        expected = [
            {'pos': (1, 1), 'kind': Genshi.TT_UINT, 'lexeme': '5'},
            {'pos': (3, 7), 'kind': Genshi.KW_PRINT, 'lexeme': 'PRINT'},
            {'pos': (9, 15), 'kind': Genshi.TT_STRING, 'lexeme': 'HELLO'},
            {'pos': (16, 16), 'kind': Genshi.SYM_SEMICOLON, 'lexeme': ';'},
            {'pos': (18, 25), 'kind': Genshi.TT_STRING, 'lexeme': ' WORLD'},
        ]
        self.__match(actual, expected)
    def test_float(self):
        actual = self.__lex('250 LET PI= 3.14')
        expected = [
            {'pos': (1, 3), 'kind': Genshi.TT_UINT, 'lexeme': '250'},
            {'pos': (5, 7), 'kind': Genshi.KW_LET, 'lexeme': 'LET'},
            {'pos': (9, 10), 'kind': Genshi.TT_IDENTIFIER, 'lexeme': 'PI'},
            {'pos': (11, 11), 'kind': Genshi.SYM_EQ, 'lexeme': '='},
            {'pos': (13, 16), 'kind': Genshi.TT_UFLOAT, 'lexeme': '3.14'},
        ]
        self.__match(actual, expected)
    def test_sym2(self):
        actual = self.__lex('100 LET X = 4 <> 5')
        expected = [
            {'pos': (1, 3), 'kind': Genshi.TT_UINT, 'lexeme': '100'},
            {'pos': (5, 7), 'kind': Genshi.KW_LET, 'lexeme': 'LET'},
            {'pos': (9, 9), 'kind': Genshi.TT_IDENTIFIER, 'lexeme': 'X'},
            {'pos': (11, 11), 'kind': Genshi.SYM_EQ, 'lexeme': '='},
            {'pos': (13, 13), 'kind': Genshi.TT_UINT, 'lexeme': '4'},
            {'pos': (15, 16), 'kind': Genshi.SYM_NE, 'lexeme': '<>'},
            {'pos': (18, 18), 'kind': Genshi.TT_UINT, 'lexeme': '5'},
        ]
        self.__match(actual, expected)
    def __lex(self, src, debug_print=False):
        tokens = Lexer().lex(src)
        if debug_print:
            for t in tokens:
                print(t)
        return tokens
    def __match(self, actual, expected):
        self.assertEqual(len(expected), len(actual))
        for i in range(len(expected)):
            self.assertEqual(actual[i].pos, expected[i]['pos'])
            self.assertEqual(actual[i].kind, expected[i]['kind'])
            self.assertEqual(actual[i].lexeme, expected[i]['lexeme'])
if __name__ == '__main__':
    unittest.main() | 0.555918 | 0.547646
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from argparse import ArgumentParser
import threading
import json
import logging as log
import commands
class ERROR_CODE:
    """JSON-RPC 2.0 pre-defined error codes (spec section 5.1)."""
    PARSE_ERROR = -32700 # Invalid JSON was received by the server.
    INVALID_REQ = -32600 # The JSON sent is not a valid Request object.
    METHOD_NOT_FOUND = -32601 # The method does not exist / is not available.
    INVALID_PARAMS = -32602 # Invalid method parameter(s).
    INTERNAL_ERROR = -32603 # Internal JSON-RPC error.
class vnetLabRpcHandler(BaseHTTPRequestHandler):
    """Implementation of JSON-RPC API, defines all API handler methods."""
    def _buildResponse(self, json_id, result=None, error=None):
        """Return a JSON-RPC 2.0 response dict for the given request id.

        result and error are mutually exclusive; result wins if both given.
        """
        res = {}
        res['jsonrpc'] = '2.0'
        # result and error are mutually exclusive
        if result is not None:
            res['result'] = result
        elif error is not None:
            res['error'] = error
        res['id'] = json_id
        return res
    def _buildError(self, code, message, data=None):
        """Return a JSON-RPC 2.0 error object (code, message, optional data)."""
        res = {}
        res['code'] = code
        res['message'] = message
        if data:
            res['data'] = data
        return res
    def do_POST(self):
        """Handle an HTTP POST carrying a JSON-RPC 2.0 request."""
        def reply(response):
            # Serialise and send the response with proper headers.
            response = json.dumps(response) + '\n'
            self.send_response(200, "OK")
            self.send_header("Content-Type", "application/json")
            self.send_header("Content-Length", str(len(response)))
            self.end_headers()
            self.wfile.write(response)
        # Read the request body, honouring Content-Length when present.
        l = self.headers.get("Content-Length", "")
        data = ''
        if l == "":
            data = self.rfile.read()
        else:
            data = self.rfile.read(int(l))
        try:
            data = json.loads(data)
        except ValueError:
            # BUGFIX: previously fell through after a parse failure, so the
            # code below called .get() on the raw string instead of replying
            # with the PARSE_ERROR response that was built here.
            msg = "Error parsing JSON request"
            log.error(msg)
            err = self._buildError(ERROR_CODE.PARSE_ERROR, msg)
            reply(self._buildResponse(None, error=err))
            return
        # Check if JSONRPC 2.0 compliant (correct version and json_id given)
        json_id = data.get('id', None)
        # Resolve the handler method named "_exec_<method>".
        try:
            methodName = "_exec_" + data.get('method')
            method = getattr(self, methodName)
            log.info(methodName)
        except (TypeError, AttributeError):
            # BUGFIX: previously fell through with `method` unbound, raising
            # NameError instead of returning the METHOD_NOT_FOUND error.
            msg = "Method not found"
            log.info(msg)
            err = self._buildError(ERROR_CODE.METHOD_NOT_FOUND, msg)
            reply(self._buildResponse(json_id, error=err))
            return
        # Get method parameters and dispatch.
        params = data.get('params', {})
        reply(method(json_id, params))
    def _exec_cmd(self, json_id, params):
        """Handler for 'cmd' requests: run a shell command, return status/output.

        SECURITY: executes params['cmd'] through the shell unvalidated —
        only suitable for a trusted lab/testbed network.
        """
        log.info("Receive cmd request")
        cmd_str = params.get('cmd')
        # status_output: (exit status, combined stdout/stderr)
        status_output = commands.getstatusoutput(cmd_str)
        response = self._buildResponse(json_id, result={ 'status': status_output[0], 'output': status_output[1] })
        return response
class vnetLabRpcServer(HTTPServer):
    """HTTPServer bound to opts['host']:opts['port'], serving vnetLabRpcHandler."""
    def __init__(self, opts):
        HTTPServer.__init__(self, (opts['host'], opts['port']), vnetLabRpcHandler)
class RpcServer(threading.Thread):
    """JSON RPC 2.0 Server.

    Thread wrapper around vnetLabRpcServer. Note the __main__ block below
    calls run() directly, which serves in the calling thread; start() would
    be needed to actually serve in the background.
    """
    def __init__(self, opts):
        threading.Thread.__init__(self)
        self.httpd = vnetLabRpcServer(opts)
        # Daemon thread so the process can exit without joining the server.
        # (setDaemon() is the legacy spelling of `self.daemon = True`.)
        self.setDaemon(True)
    # Multi-threaded webserver
    def run(self):
        """Main function run by thread: serve until interrupted, then close."""
        log.info("JSON RPC server starting")
        try:
            self.httpd.serve_forever()
        finally:
            self.httpd.server_close()
if __name__ == '__main__':
    # Command-line entry point: parse options, configure logging, serve.
    parser = ArgumentParser(description="vnetLab rpc client.")
    parser.add_argument('--host', default='localhost', help='vnetLab rpc client host (default="localhost")')
    parser.add_argument('--port', default=12345, type=int, help='vnetLab rpc client port (default=12345)')
    parser.add_argument('--loglevel', default='INFO', help='log level (default="INFO")')
    parser.add_argument('--version', action='version', version='%(prog)s 0.1')
    args = parser.parse_args()
    opts = vars(args)
    log.basicConfig(format='%(asctime)s %(message)s', level=getattr(log, opts['loglevel'].upper()))
    rpcserver = RpcServer(opts)
    # NOTE(review): run() blocks this thread; use start() for background serving.
    # Trailing "| ..." text below is fused dataset metadata, not code.
    rpcserver.run() | sdntestbed_source/sdntestbed/python/novaconsole-master/rpcserver.py | from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from argparse import ArgumentParser
import threading
import json
import logging as log
import commands
# NOTE(review): dataset 'parsed_code' duplicate of the rpcserver classes above;
# edit the primary copy and keep this one in sync. Trailing "| ..." fields on
# the last line are dataset metadata, not code.
class ERROR_CODE:
    PARSE_ERROR = -32700 # Invalid JSON was received by the server.
    INVALID_REQ = -32600 # The JSON sent is not a valid Request object.
    METHOD_NOT_FOUND = -32601 # The method does not exist / is not available.
    INVALID_PARAMS = -32602 # Invalid method parameter(s).
    INTERNAL_ERROR = -32603 # Internal JSON-RPC error.
class vnetLabRpcHandler(BaseHTTPRequestHandler):
    """Implementation of JSON-RPC API, defines all API handler methods."""
    def _buildResponse(self, json_id, result=None, error=None):
        """Returns JSON 2.0 compliant response."""
        res = {}
        res['jsonrpc'] = '2.0'
        # result and error are mutually exclusive
        if result is not None:
            res['result'] = result
        elif error is not None:
            res['error'] = error
        res['id'] = json_id
        return res
    def _buildError(self, code, message, data=None):
        """Returns JSON RPC 2.0 error object."""
        res = {}
        res['code'] = code
        res['message'] = message
        if data:
            res['data'] = data
        return res
    def do_POST(self):
        """Handle HTTP POST calls."""
        def reply(response):
            response = json.dumps(response) + '\n'
            self.send_response(200, "OK")
            self.send_header("Content-Type", "application/json")
            self.send_header("Content-Length", len(response))
            self.end_headers()
            self.wfile.write(response)
        # Put JSON message in data dict
        l = self.headers.get("Content-Length", "")
        data = ''
        if l == "":
            data = self.rfile.read()
        else:
            data = self.rfile.read(int(l))
        try:
            data = json.loads(data)
        except:
            msg = "Error parsing JSON request"
            log.error(msg)
            err = self._buildError(ERROR_CODE.PARSE_ERROR, msg)
            result = self._buildResponse(None, error=err)
        # Check if JSONRPC 2.0 compliant (correct version and json_id given)
        json_id = data.get('id', None)
        # Setup method to call
        try:
            methodName = "_exec_" + data.get('method')
            method = getattr(self, methodName)
            log.info(methodName)
        except:
            msg = "Method not found"
            log.info(msg)
            err = self._buildError(ERROR_CODE.METHOD_NOT_FOUND, msg)
            result = self._buildResponse(json_id, error=err)
        # Get method parameters
        params = data.get('params', {})
        # Call method
        result = method(json_id, params)
        reply(result)
    def _exec_cmd(self, json_id, params):
        """Handler for client requests."""
        log.info("Receive cmd request")
        cmd_str = params.get('cmd')
        #status_output: (status, output)
        status_output = commands.getstatusoutput(cmd_str)
        response = self._buildResponse(json_id, result={ 'status': status_output[0], 'output': status_output[1] })
        return response
class vnetLabRpcServer(HTTPServer):
    def __init__(self, opts):
        HTTPServer.__init__(self, (opts['host'], opts['port']), vnetLabRpcHandler)
class RpcServer(threading.Thread):
    """JSON RPC 2.0 Server."""
    def __init__(self, opts):
        threading.Thread.__init__(self)
        self.httpd = vnetLabRpcServer(opts)
        self.setDaemon(True)
    # Multi-threaded webserver
    def run(self):
        """Main function run by thread."""
        log.info("JSON RPC server starting")
        try:
            self.httpd.serve_forever()
        finally:
            self.httpd.server_close()
if __name__ == '__main__':
    parser = ArgumentParser(description="vnetLab rpc client.")
    parser.add_argument('--host', default='localhost', help='vnetLab rpc client host (default="localhost")')
    parser.add_argument('--port', default=12345, type=int, help='vnetLab rpc client port (default=12345)')
    parser.add_argument('--loglevel', default='INFO', help='log level (default="INFO")')
    parser.add_argument('--version', action='version', version='%(prog)s 0.1')
    args = parser.parse_args()
    opts = vars(args)
    log.basicConfig(format='%(asctime)s %(message)s', level=getattr(log, opts['loglevel'].upper()))
    rpcserver = RpcServer(opts)
    rpcserver.run() | 0.605799 | 0.072276
import urllib2
import time
import random
from datetime import timedelta
from bs4 import BeautifulSoup
from google.appengine.api import urlfetch
from models.models import Match, Map, Server
def scrape_matches(pages=2):
    """Scrape match statistics from the oc.tc/matches listing pages.

    pages -- number of listing pages to scrape, starting at page 10
             (earlier pages are mostly matches still "in progress").

    For each finished match a Match entity is stored; Map and Server
    entities are created on first sight. Short game modes (GS, blitz,
    rage) are only kept for a random ~20% of pages so they don't clog
    the database.
    """
    base_url = "https://oc.tc/matches?page="
    first_page = 10 # Lots of matches before page 10 are "in progress"
    last_page = first_page + pages
    for page in range(first_page,last_page):
        url = base_url+str(page)
        # NOTE(review): `page` (the loop index) is shadowed here by the
        # fetched response — works, but rename one of them.
        page = urlfetch.fetch(url,validate_certificate=False,
                              headers = {'User-Agent': 'Mozilla/5.0'})
        html = page.content
        soup = BeautifulSoup(html, "html.parser")
        table = soup.findAll('table', {'class':'table table-bordered table-striped'})
        table = table[0].contents[3].findAll('tr')
        # Short GS and blitz / rage matches clog the database. Only add them sometimes.
        # (Decision is made once per page, not per match.)
        if random.randint(1,10) < 3:
            do_short = True
        else:
            do_short = False
        for row in table:
            match = Match()
            when = row.contents[1].a.contents[0].strip().lower() # when match took place
            # make sure match ended, and convert time ago to minutes
            if not 'in progress' in when:
                map_name = row.contents[5].contents[0].strip()
                match.map_name = map_name
                server_name = row.contents[7].a.contents[0].strip()
                sn_l = server_name.lower()
                # see if match server is a "short" one (gs, blitz, rage)
                short_server = (sn_l[:2] == "gs") or ("cronus" in sn_l) or ("chaos" in sn_l) or ("rage" in sn_l)
                if short_server and not do_short:
                    continue
                match.server = server_name
                match.kills = int(row.contents[11].contents[0].strip())
                match.deaths = int(row.contents[9].contents[0].strip())
                match.participants = int(row.contents[13].contents[0].strip())
                # convert the total match time to seconds
                t = row.contents[3].contents[0].strip()
                t = t.split(':')
                t = timedelta(minutes=int(t[0]),seconds=int(t[1]))
                match.length = t.seconds
                match.put()
                # create map object if there isn't already one
                mapp = Map.get_or_insert(map_name)
                # create server object if there isn't already one
                server = Server.get_or_insert(server_name)
        # Be polite to the remote site between pages.
        # NOTE(review): trailing "| ..." text below is fused dataset metadata, not code.
        time.sleep(0.1) | src/controllers/scraper.py | import urllib2
import time
import random
from datetime import timedelta
from bs4 import BeautifulSoup
from google.appengine.api import urlfetch
from models.models import Match, Map, Server
# NOTE(review): dataset 'parsed_code' duplicate of scrape_matches above;
# edit the primary copy and keep this one in sync. Trailing "| ..." fields
# on the last line are dataset metadata, not code.
def scrape_matches(pages=2):
    """ gets match statistics from oc.tc/matches pages
    last_page - the highest match page to scrape data from. don't go too high!
    out_file - the name of the output data file
    info - if True, it will print stuff every 10 pages to the console as it
    runs so you know what the script is up to.
    """
    base_url = "https://oc.tc/matches?page="
    first_page = 10 # Lots of matches before page 10 are "in progress"
    last_page = first_page + pages
    for page in range(first_page,last_page):
        url = base_url+str(page)
        page = urlfetch.fetch(url,validate_certificate=False,
                              headers = {'User-Agent': 'Mozilla/5.0'})
        html = page.content
        soup = BeautifulSoup(html, "html.parser")
        table = soup.findAll('table', {'class':'table table-bordered table-striped'})
        table = table[0].contents[3].findAll('tr')
        # Short GS and blitz / rage matches clog the database. Only add them sometimes.
        if random.randint(1,10) < 3:
            do_short = True
        else:
            do_short = False
        for row in table:
            match = Match()
            when = row.contents[1].a.contents[0].strip().lower() # when match took place
            # make sure match ended, and convert time ago to minutes
            if not 'in progress' in when:
                map_name = row.contents[5].contents[0].strip()
                match.map_name = map_name
                server_name = row.contents[7].a.contents[0].strip()
                sn_l = server_name.lower()
                # see if match server is a "short" one (gs, blitz, rage)
                short_server = (sn_l[:2] == "gs") or ("cronus" in sn_l) or ("chaos" in sn_l) or ("rage" in sn_l)
                if short_server and not do_short:
                    continue
                match.server = server_name
                match.kills = int(row.contents[11].contents[0].strip())
                match.deaths = int(row.contents[9].contents[0].strip())
                match.participants = int(row.contents[13].contents[0].strip())
                # convert the total match time to seconds
                t = row.contents[3].contents[0].strip()
                t = t.split(':')
                t = timedelta(minutes=int(t[0]),seconds=int(t[1]))
                match.length = t.seconds
                match.put()
                # create map object if there isn't already one
                mapp = Map.get_or_insert(map_name)
                # create server object if there isn't already one
                server = Server.get_or_insert(server_name)
        time.sleep(0.1) | 0.197677 | 0.162579
import sys
import aws_lambda_wsgi
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
sys.path.append('.')
from pangea import about, sampling_numbers, sampling_period # noqa: E402
from app import app # noqa: E402
# the style arguments for the sidebar. We use position:fixed and a fixed width
SIDEBAR_STYLE = {
    "position": "fixed",
    "top": 0,
    "left": 0,
    "bottom": 0,
    "width": "16rem",
    "padding": "2rem 1rem",
    "background-color": "#f8f9fa",
}
# the styles for the main content position it to the right of the sidebar and
# add some padding.
CONTENT_STYLE = {
    "margin-left": "18rem",
    "margin-right": "2rem",
    "padding": "2rem 1rem",
}
CHECKBOX_STYLE = {
    'fontSize': "14px"
}
# Fixed left-hand navigation sidebar with links to every page.
sidebar = html.Div(
    [
        html.H2("Dashboard Demo", className="display-5"),
        html.Hr(),
        html.P("Navigation", className="lead"),
        dbc.Nav(
            [
                dbc.NavLink("Home", href="/", id="home-link"),
                dbc.NavLink("Sampling Numbers", href="/sampling-numbers", id="sampling-numbers-link"),
                dbc.NavLink("Sampling Period", href="/sampling-period", id="sampling-period-link"),
                dbc.NavLink("About", href="/about", id="about-link"),
            ],
            vertical=True,
            pills=True,
        ),
    ],
    style=SIDEBAR_STYLE,
)
# Placeholder filled in by the display_page callback on navigation.
content = html.Div(id="page-content", style=CONTENT_STYLE)
app.layout = html.Div([
    dcc.Location(id='url', refresh=False),
    sidebar,
    content
])
# Underlying Flask/WSGI server, used by the Lambda handler below.
server = app.server
# Layout shown for the root ('/') path.
index_layout = html.Div([
    html.H5('Welcome to the PANGEA Dashboard Demo!'),
    html.Br(),
    html.P('I hope you are having a lovely day :)')
])
@app.callback(
    Output('page-content', 'children'),
    [Input(component_id='url', component_property='pathname')]
)
def display_page(pathname):
    """Route the browser pathname to the matching page layout.

    Unknown paths get a 404 jumbotron.
    """
    # Strip the '/Prod' stage prefix added when served behind API Gateway.
    pathname = pathname.replace('/Prod', '/') if pathname else pathname
    if pathname == '/':
        return index_layout
    elif pathname == "/sampling-numbers":
        return sampling_numbers.layout
    elif pathname == "/sampling-period":
        return sampling_period.layout
    elif pathname == "/about":
        return about.layout
    return dbc.Jumbotron(
        [
            html.H1("404: Not found", className="text-danger"),
            html.Hr(),
            html.P(f"The pathname {pathname} was not recognised..."),
        ]
    )
def lambda_handler(event, context):
    """AWS Lambda entry point: adapt the API Gateway event to the WSGI app."""
    # NOTE(review): trailing "| ..." text below is fused dataset metadata, not code.
    return aws_lambda_wsgi.response(server, event, context) | index.py | import sys
import aws_lambda_wsgi
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
sys.path.append('.')
from pangea import about, sampling_numbers, sampling_period # noqa: E402
from app import app # noqa: E402
# NOTE(review): dataset 'parsed_code' duplicate of the index.py content above;
# edit the primary copy and keep this one in sync. Trailing "| ..." fields on
# the last line are dataset metadata, not code.
# the style arguments for the sidebar. We use position:fixed and a fixed width
SIDEBAR_STYLE = {
    "position": "fixed",
    "top": 0,
    "left": 0,
    "bottom": 0,
    "width": "16rem",
    "padding": "2rem 1rem",
    "background-color": "#f8f9fa",
}
# the styles for the main content position it to the right of the sidebar and
# add some padding.
CONTENT_STYLE = {
    "margin-left": "18rem",
    "margin-right": "2rem",
    "padding": "2rem 1rem",
}
CHECKBOX_STYLE = {
    'fontSize': "14px"
}
sidebar = html.Div(
    [
        html.H2("Dashboard Demo", className="display-5"),
        html.Hr(),
        html.P("Navigation", className="lead"),
        dbc.Nav(
            [
                dbc.NavLink("Home", href="/", id="home-link"),
                dbc.NavLink("Sampling Numbers", href="/sampling-numbers", id="sampling-numbers-link"),
                dbc.NavLink("Sampling Period", href="/sampling-period", id="sampling-period-link"),
                dbc.NavLink("About", href="/about", id="about-link"),
            ],
            vertical=True,
            pills=True,
        ),
    ],
    style=SIDEBAR_STYLE,
)
content = html.Div(id="page-content", style=CONTENT_STYLE)
app.layout = html.Div([
    dcc.Location(id='url', refresh=False),
    sidebar,
    content
])
server = app.server
index_layout = html.Div([
    html.H5('Welcome to the PANGEA Dashboard Demo!'),
    html.Br(),
    html.P('I hope you are having a lovely day :)')
])
@app.callback(
    Output('page-content', 'children'),
    [Input(component_id='url', component_property='pathname')]
)
def display_page(pathname):
    pathname = pathname.replace('/Prod', '/') if pathname else pathname
    if pathname == '/':
        return index_layout
    elif pathname == "/sampling-numbers":
        return sampling_numbers.layout
    elif pathname == "/sampling-period":
        return sampling_period.layout
    elif pathname == "/about":
        return about.layout
    return dbc.Jumbotron(
        [
            html.H1("404: Not found", className="text-danger"),
            html.Hr(),
            html.P(f"The pathname {pathname} was not recognised..."),
        ]
    )
def lambda_handler(event, context):
    return aws_lambda_wsgi.response(server, event, context) | 0.381104 | 0.214609
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.f5networks.f5_modules.plugins.modules.bigip_ucs import (
ModuleParameters, ModuleManager, ArgumentSpec, V1Manager, V2Manager
)
from ansible_collections.f5networks.f5_modules.plugins.module_utils.common import F5ModuleError
from ansible_collections.f5networks.f5_modules.tests.unit.compat import unittest
from ansible_collections.f5networks.f5_modules.tests.unit.compat.mock import Mock, patch
from ansible_collections.f5networks.f5_modules.tests.unit.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Load fixture file *name* from the fixtures dir, memoised in fixture_data.

    The file body is parsed as JSON when possible; otherwise the raw
    text is returned unchanged.
    """
    path = os.path.join(fixture_path, name)
    if path not in fixture_data:
        with open(path) as handle:
            contents = handle.read()
        try:
            contents = json.loads(contents)
        except Exception:
            pass  # not JSON — keep the raw string
        fixture_data[path] = contents
    return fixture_data[path]
class TestParameters(unittest.TestCase):
    """Unit tests for the bigip_ucs ModuleParameters adapter."""
    def test_module_parameters(self):
        """All install flags set: options map into the tmsh load command."""
        args = dict(
            ucs="/root/bigip.localhost.localdomain.ucs",
            force=True,
            include_chassis_level_config=True,
            no_license=True,
            no_platform_check=True,
            # NOTE(review): "<PASSWORD>" looks like a dataset redaction
            # placeholder, not a real literal — confirm against upstream.
            passphrase="<PASSWORD>",
            reset_trust=True,
            state='installed'
        )
        p = ModuleParameters(params=args)
        assert p.ucs == '/root/bigip.localhost.localdomain.ucs'
        assert p.force is True
        assert p.include_chassis_level_config is True
        assert p.no_license is True
        assert p.no_platform_check is True
        assert p.passphrase == "<PASSWORD>"
        assert p.reset_trust is True
        # Only the UCS basename is used; the file is expected under /var/local/ucs.
        assert p.install_command == \
            "tmsh load sys ucs /var/local/ucs/bigip.localhost.localdomain.ucs " \
            "include-chassis-level-config no-license no-platform-check " \
            "passphrase <PASSWORD> reset-trust"
    def test_module_parameters_false_ucs_booleans(self):
        """All boolean options False: no extra flags appear in the command."""
        args = dict(
            ucs="/root/bigip.localhost.localdomain.ucs",
            include_chassis_level_config=False,
            no_license=False,
            no_platform_check=False,
            reset_trust=False
        )
        p = ModuleParameters(params=args)
        assert p.ucs == '/root/bigip.localhost.localdomain.ucs'
        assert p.include_chassis_level_config is False
        assert p.no_license is False
        assert p.no_platform_check is False
        assert p.reset_trust is False
        assert p.install_command == "tmsh load sys ucs /var/local/ucs/bigip.localhost.localdomain.ucs"
class TestV1Manager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.patcher1 = patch('time.sleep')
self.patcher1.start()
self.p2 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_ucs.tmos_version')
self.p3 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_ucs.send_teem')
self.m2 = self.p2.start()
self.m2.return_value = '12.1.0'
self.m3 = self.p3.start()
self.m3.return_value = True
def tearDown(self):
self.p2.stop()
self.p3.stop()
self.patcher1.stop()
def test_ucs_default_present(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=True)
vm = V1Manager(module=module)
vm.create_on_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[False, True])
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_explicit_present(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
state='present',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=True)
vm = V1Manager(module=module)
vm.create_on_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[False, True])
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_installed(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
state='installed',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=True)
vm = V1Manager(module=module)
vm.create_on_device = Mock(return_value=True)
vm.exists = Mock(return_value=True)
vm.install_on_device = Mock(return_value=True)
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_absent_exists(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
state='absent',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=True)
vm = V1Manager(module=module)
vm.remove_from_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[True, False])
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_absent_fails(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
state='absent',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=True)
vm = V1Manager(module=module)
vm.remove_from_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[True, True])
with pytest.raises(F5ModuleError) as ex:
vm.exec_module()
assert 'Failed to delete' in str(ex.value)
class TestV2Manager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.patcher1 = patch('time.sleep')
self.patcher1.start()
self.p2 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_ucs.tmos_version')
self.p3 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_ucs.send_teem')
self.m2 = self.p2.start()
self.m2.return_value = '14.1.0'
self.m3 = self.p3.start()
self.m3.return_value = True
def tearDown(self):
self.p2.stop()
self.p3.stop()
self.patcher1.stop()
def test_ucs_default_present(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=False)
vm = V2Manager(module=module)
vm.create_on_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[False, True])
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_explicit_present(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
state='present',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=False)
vm = V2Manager(module=module)
vm.create_on_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[False, True])
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_installed(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
state='installed',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=False)
vm = V2Manager(module=module)
vm.create_on_device = Mock(return_value=True)
vm.exists = Mock(return_value=True)
vm.install_on_device = Mock(return_value=True)
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_absent_exists(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
state='absent',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=False)
vm = V1Manager(module=module)
vm.remove_from_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[True, False])
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_absent_fails(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
state='absent',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=False)
vm = V1Manager(module=module)
vm.remove_from_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[True, True])
with pytest.raises(F5ModuleError) as ex:
vm.exec_module()
assert 'Failed to delete' in str(ex.value) | venv/lib/python3.6/site-packages/ansible_collections/f5networks/f5_modules/tests/unit/modules/network/f5/test_bigip_ucs.py |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.f5networks.f5_modules.plugins.modules.bigip_ucs import (
ModuleParameters, ModuleManager, ArgumentSpec, V1Manager, V2Manager
)
from ansible_collections.f5networks.f5_modules.plugins.module_utils.common import F5ModuleError
from ansible_collections.f5networks.f5_modules.tests.unit.compat import unittest
from ansible_collections.f5networks.f5_modules.tests.unit.compat.mock import Mock, patch
from ansible_collections.f5networks.f5_modules.tests.unit.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
ucs="/root/bigip.localhost.localdomain.ucs",
force=True,
include_chassis_level_config=True,
no_license=True,
no_platform_check=True,
passphrase="<PASSWORD>",
reset_trust=True,
state='installed'
)
p = ModuleParameters(params=args)
assert p.ucs == '/root/bigip.localhost.localdomain.ucs'
assert p.force is True
assert p.include_chassis_level_config is True
assert p.no_license is True
assert p.no_platform_check is True
assert p.passphrase == "<PASSWORD>"
assert p.reset_trust is True
assert p.install_command == \
"tmsh load sys ucs /var/local/ucs/bigip.localhost.localdomain.ucs " \
"include-chassis-level-config no-license no-platform-check " \
"passphrase <PASSWORD> reset-trust"
def test_module_parameters_false_ucs_booleans(self):
args = dict(
ucs="/root/bigip.localhost.localdomain.ucs",
include_chassis_level_config=False,
no_license=False,
no_platform_check=False,
reset_trust=False
)
p = ModuleParameters(params=args)
assert p.ucs == '/root/bigip.localhost.localdomain.ucs'
assert p.include_chassis_level_config is False
assert p.no_license is False
assert p.no_platform_check is False
assert p.reset_trust is False
assert p.install_command == "tmsh load sys ucs /var/local/ucs/bigip.localhost.localdomain.ucs"
class TestV1Manager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.patcher1 = patch('time.sleep')
self.patcher1.start()
self.p2 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_ucs.tmos_version')
self.p3 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_ucs.send_teem')
self.m2 = self.p2.start()
self.m2.return_value = '12.1.0'
self.m3 = self.p3.start()
self.m3.return_value = True
def tearDown(self):
self.p2.stop()
self.p3.stop()
self.patcher1.stop()
def test_ucs_default_present(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=True)
vm = V1Manager(module=module)
vm.create_on_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[False, True])
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_explicit_present(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
state='present',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=True)
vm = V1Manager(module=module)
vm.create_on_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[False, True])
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_installed(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
state='installed',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=True)
vm = V1Manager(module=module)
vm.create_on_device = Mock(return_value=True)
vm.exists = Mock(return_value=True)
vm.install_on_device = Mock(return_value=True)
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_absent_exists(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
state='absent',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=True)
vm = V1Manager(module=module)
vm.remove_from_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[True, False])
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_absent_fails(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
state='absent',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=True)
vm = V1Manager(module=module)
vm.remove_from_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[True, True])
with pytest.raises(F5ModuleError) as ex:
vm.exec_module()
assert 'Failed to delete' in str(ex.value)
class TestV2Manager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.patcher1 = patch('time.sleep')
self.patcher1.start()
self.p2 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_ucs.tmos_version')
self.p3 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_ucs.send_teem')
self.m2 = self.p2.start()
self.m2.return_value = '14.1.0'
self.m3 = self.p3.start()
self.m3.return_value = True
def tearDown(self):
self.p2.stop()
self.p3.stop()
self.patcher1.stop()
def test_ucs_default_present(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=False)
vm = V2Manager(module=module)
vm.create_on_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[False, True])
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_explicit_present(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
state='present',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=False)
vm = V2Manager(module=module)
vm.create_on_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[False, True])
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_installed(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
state='installed',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=False)
vm = V2Manager(module=module)
vm.create_on_device = Mock(return_value=True)
vm.exists = Mock(return_value=True)
vm.install_on_device = Mock(return_value=True)
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_absent_exists(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
state='absent',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=False)
vm = V1Manager(module=module)
vm.remove_from_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[True, False])
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_absent_fails(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
state='absent',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=False)
vm = V1Manager(module=module)
vm.remove_from_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[True, True])
with pytest.raises(F5ModuleError) as ex:
vm.exec_module()
assert 'Failed to delete' in str(ex.value) | 0.426919 | 0.279165 |
from enum import Enum
ROWS = 6
COLS = 7
class Color(Enum):
RED = 1
BLACK = 2
class Board:
def __init__(self):
self.grid = list()
for _ in range(COLS):
col = list()
for _ in range(ROWS):
col.append(None)
self.grid.append(col)
self.occupancy = [0] * COLS
class IllegalMove(Exception):
pass
def play_piece(board, played_column, played_color):
if board.occupancy[played_column] == 6:
raise IllegalMove("Illegal move in this column")
played_row = board.occupancy[played_column]
board.grid[played_column][played_row] = played_color
board.occupancy[played_column] += 1
# check vertical
consecutive = 0
if len(board.grid[played_column]) > 4:
for color in board.grid[played_column]:
if color == played_color:
consecutive += 1
else:
consecutive = 0
if consecutive == 4:
return True
# check horizontal
consecutive = 0
for i in range(COLS):
color = board.grid[i][played_row]
if color == played_color:
consecutive += 1
else:
consecutive = 0
if consecutive == 4:
return True
# check positive-slope diagonal
consecutive = 0
offset = min(played_column, played_row)
col = played_column - offset
row = played_row - offset
while col < COLS and row < ROWS:
color = board.grid[col][row]
if color == played_color:
consecutive += 1
else:
consecutive = 0
if consecutive == 4:
return True
# check negative-slope diagonal
consecutive = 0
col = played_column + offset
row = played_row - offset
while col > 0 and row < ROWS:
color = board.grid[col][row]
if color == played_color:
consecutive += 1
else:
consecutive = 0
if consecutive == 4:
return True
return False
def play_game():
board = Board()
print("New board initialized")
turn = 0
players = [Color.RED, Color.BLACK]
while True:
player = players[turn % (len(players))]
print("{}'s turn.".format(player))
col_num = int(input("Enter a column number: "))
try:
won = play_piece(board, col_num, player)
except IllegalMove as e:
print(e)
continue
if won:
print("{} wins".format(player))
break
turn += 1
play_game() | solutions/problem_219.py | from enum import Enum
ROWS = 6
COLS = 7
class Color(Enum):
RED = 1
BLACK = 2
class Board:
def __init__(self):
self.grid = list()
for _ in range(COLS):
col = list()
for _ in range(ROWS):
col.append(None)
self.grid.append(col)
self.occupancy = [0] * COLS
class IllegalMove(Exception):
pass
def play_piece(board, played_column, played_color):
if board.occupancy[played_column] == 6:
raise IllegalMove("Illegal move in this column")
played_row = board.occupancy[played_column]
board.grid[played_column][played_row] = played_color
board.occupancy[played_column] += 1
# check vertical
consecutive = 0
if len(board.grid[played_column]) > 4:
for color in board.grid[played_column]:
if color == played_color:
consecutive += 1
else:
consecutive = 0
if consecutive == 4:
return True
# check horizontal
consecutive = 0
for i in range(COLS):
color = board.grid[i][played_row]
if color == played_color:
consecutive += 1
else:
consecutive = 0
if consecutive == 4:
return True
# check positive-slope diagonal
consecutive = 0
offset = min(played_column, played_row)
col = played_column - offset
row = played_row - offset
while col < COLS and row < ROWS:
color = board.grid[col][row]
if color == played_color:
consecutive += 1
else:
consecutive = 0
if consecutive == 4:
return True
# check negative-slope diagonal
consecutive = 0
col = played_column + offset
row = played_row - offset
while col > 0 and row < ROWS:
color = board.grid[col][row]
if color == played_color:
consecutive += 1
else:
consecutive = 0
if consecutive == 4:
return True
return False
def play_game():
board = Board()
print("New board initialized")
turn = 0
players = [Color.RED, Color.BLACK]
while True:
player = players[turn % (len(players))]
print("{}'s turn.".format(player))
col_num = int(input("Enter a column number: "))
try:
won = play_piece(board, col_num, player)
except IllegalMove as e:
print(e)
continue
if won:
print("{} wins".format(player))
break
turn += 1
play_game() | 0.568176 | 0.235394 |
from docopt import docopt
import os
import yaml
import json
def main(args):
output_filename = args['--output']
input_path = args['--input']
input_paths = []
templates = []
templates_target_path = args['--target-templates-path']
templates = []
for root, directories, files in os.walk(input_path, topdown=False):
for name in directories:
input_paths.append(os.path.join(root, name))
for path in input_paths:
for root, directories, files in os.walk(path, topdown=False):
template = {}
keywords = []
for file in files:
ext = os.path.splitext(file)[-1].lower()
if ext == '.html' or ext == '.mustache':
template['template'] = os.path.relpath(os.path.abspath(
os.path.join(root, file)), templates_target_path).replace('\\', '/')
elif ext == '.css':
template['css'] = os.path.relpath(os.path.abspath(
os.path.join(root, file)), templates_target_path).replace('\\', '/')
elif ext == '.json':
template['config'] = os.path.relpath(os.path.abspath(
os.path.join(root, file)), templates_target_path).replace('\\', '/')
with open(os.path.abspath(os.path.join(root, file))) as json_file:
data = json.load(json_file)
keywords = [str(r) for r in data.keys()]
elif file == 'meta.yml':
meta_yml_filename = os.path.abspath(
os.path.join(root, file))
with open(meta_yml_filename) as yml_file:
meta = yaml.load(yml_file, Loader=yaml.FullLoader)
for key, value in meta.items():
template[key] = value
template['keywords'] = keywords
templates.append(template)
with open(output_filename, 'w') as output:
yaml.dump(templates, output)
print('Done ' + output_filename)
if __name__ == '__main__':
arguments = docopt(__doc__, version='0.1.0')
main(arguments) | scripts/gen_data_templates.py | from docopt import docopt
import os
import yaml
import json
def main(args):
output_filename = args['--output']
input_path = args['--input']
input_paths = []
templates = []
templates_target_path = args['--target-templates-path']
templates = []
for root, directories, files in os.walk(input_path, topdown=False):
for name in directories:
input_paths.append(os.path.join(root, name))
for path in input_paths:
for root, directories, files in os.walk(path, topdown=False):
template = {}
keywords = []
for file in files:
ext = os.path.splitext(file)[-1].lower()
if ext == '.html' or ext == '.mustache':
template['template'] = os.path.relpath(os.path.abspath(
os.path.join(root, file)), templates_target_path).replace('\\', '/')
elif ext == '.css':
template['css'] = os.path.relpath(os.path.abspath(
os.path.join(root, file)), templates_target_path).replace('\\', '/')
elif ext == '.json':
template['config'] = os.path.relpath(os.path.abspath(
os.path.join(root, file)), templates_target_path).replace('\\', '/')
with open(os.path.abspath(os.path.join(root, file))) as json_file:
data = json.load(json_file)
keywords = [str(r) for r in data.keys()]
elif file == 'meta.yml':
meta_yml_filename = os.path.abspath(
os.path.join(root, file))
with open(meta_yml_filename) as yml_file:
meta = yaml.load(yml_file, Loader=yaml.FullLoader)
for key, value in meta.items():
template[key] = value
template['keywords'] = keywords
templates.append(template)
with open(output_filename, 'w') as output:
yaml.dump(templates, output)
print('Done ' + output_filename)
if __name__ == '__main__':
arguments = docopt(__doc__, version='0.1.0')
main(arguments) | 0.189071 | 0.078148 |
from lewis.adapters.stream import StreamInterface
from lewis.core.logging import has_log
from lewis.utils.command_builder import CmdBuilder
from lewis.utils.replies import conditional_reply
from .dfkps_base import CommonStreamInterface
import logging
__all__ = ["Danfysik9X00StreamInterface"]
@has_log
class Danfysik9X00StreamInterface(CommonStreamInterface, StreamInterface):
"""
Stream interface for a Danfysik model 9100.
"""
in_terminator = "\r"
out_terminator = "\n\r"
protocol = 'model9X00'
# This is the address of the LOQ danfysik 8500
PSU_ADDRESS = 75
commands = CommonStreamInterface.commands + [
CmdBuilder("set_current").escape("DA 0 ").int().eos().build(),
CmdBuilder("get_current").escape("AD 8").eos().build(),
CmdBuilder("set_address").escape("ADR ").int().eos().build(),
CmdBuilder("get_address").escape("ADR").eos().build(),
CmdBuilder("init_comms").escape("REM").eos().build(),
CmdBuilder("init_comms").escape("UNLOCK").eos().build(),
CmdBuilder("get_slew_rate").escape("R").arg(r"[1-3]", argument_mapping=int).eos().build(),
CmdBuilder("set_slew_rate").escape("W").arg(r"[1-3]", argument_mapping=int).spaces().int().eos().build()
]
@conditional_reply("device_available")
@conditional_reply("comms_initialized")
def get_status(self):
"""
Respond to the get_status command (S1)
"""
response = "{power_off}{pol_normal}{pol_reversed}{spare}{crowbar}{imode}{is_percent}{external_interlock_0}"\
"{spare}{sum_interlock}{over_voltage}{dc_overcurrent}{dc_undervoltage}{spare}" \
"{phase_fail}{spare}{earth_leak_fail}{fan}{mps_overtemperature}" \
"{external_interlock_1}{external_interlock_2}{external_interlock_3}{mps_not_ready}{spare}".format(
spare=self.bit(False),
power_off=self.bit(not self.device.power),
pol_normal=self.bit(not self.device.negative_polarity),
pol_reversed=self.bit(self.device.negative_polarity),
crowbar=self.bit(False),
imode=self.bit(False),
is_percent=self.bit(False),
external_interlock_0=self.interlock("external_interlock_0"),
sum_interlock=self.bit(len(self.device.active_interlocks) > 0),
dc_overcurrent=self.interlock("dc_overcurrent"),
over_voltage=self.interlock("over_voltage"),
dc_undervoltage=self.interlock("dc_undervoltage"),
phase_fail=self.interlock("phase_fail"),
earth_leak_fail=self.interlock("earth_leak_fail"),
fan=self.interlock("fan"),
mps_overtemperature=self.interlock("mps_overtemperature"),
external_interlock_1=self.interlock("external_interlock_1"),
external_interlock_2=self.interlock("external_interlock_2"),
external_interlock_3=self.interlock("external_interlock_3"),
mps_not_ready=self.bit(not self.device.power),
)
assert len(response) == 24, "length should have been 24 but was {}".format(len(response))
return response
def set_address(self, value):
self.device.set_address(value)
@conditional_reply("comms_initialized")
def get_address(self):
return "{:03d}".format(self.address)
@conditional_reply("comms_initialized")
def get_slew_rate(self, dac_num):
return self.device.get_slew_rate(dac_num)
@conditional_reply("comms_initialized")
def set_slew_rate(self, dac_num, slew_rate_value):
self.device.set_slew_rate(dac_num, slew_rate_value) | lewis_emulators/danfysik/interfaces/dfkps_9X00.py | from lewis.adapters.stream import StreamInterface
from lewis.core.logging import has_log
from lewis.utils.command_builder import CmdBuilder
from lewis.utils.replies import conditional_reply
from .dfkps_base import CommonStreamInterface
import logging
__all__ = ["Danfysik9X00StreamInterface"]
@has_log
class Danfysik9X00StreamInterface(CommonStreamInterface, StreamInterface):
"""
Stream interface for a Danfysik model 9100.
"""
in_terminator = "\r"
out_terminator = "\n\r"
protocol = 'model9X00'
# This is the address of the LOQ danfysik 8500
PSU_ADDRESS = 75
commands = CommonStreamInterface.commands + [
CmdBuilder("set_current").escape("DA 0 ").int().eos().build(),
CmdBuilder("get_current").escape("AD 8").eos().build(),
CmdBuilder("set_address").escape("ADR ").int().eos().build(),
CmdBuilder("get_address").escape("ADR").eos().build(),
CmdBuilder("init_comms").escape("REM").eos().build(),
CmdBuilder("init_comms").escape("UNLOCK").eos().build(),
CmdBuilder("get_slew_rate").escape("R").arg(r"[1-3]", argument_mapping=int).eos().build(),
CmdBuilder("set_slew_rate").escape("W").arg(r"[1-3]", argument_mapping=int).spaces().int().eos().build()
]
@conditional_reply("device_available")
@conditional_reply("comms_initialized")
def get_status(self):
"""
Respond to the get_status command (S1)
"""
response = "{power_off}{pol_normal}{pol_reversed}{spare}{crowbar}{imode}{is_percent}{external_interlock_0}"\
"{spare}{sum_interlock}{over_voltage}{dc_overcurrent}{dc_undervoltage}{spare}" \
"{phase_fail}{spare}{earth_leak_fail}{fan}{mps_overtemperature}" \
"{external_interlock_1}{external_interlock_2}{external_interlock_3}{mps_not_ready}{spare}".format(
spare=self.bit(False),
power_off=self.bit(not self.device.power),
pol_normal=self.bit(not self.device.negative_polarity),
pol_reversed=self.bit(self.device.negative_polarity),
crowbar=self.bit(False),
imode=self.bit(False),
is_percent=self.bit(False),
external_interlock_0=self.interlock("external_interlock_0"),
sum_interlock=self.bit(len(self.device.active_interlocks) > 0),
dc_overcurrent=self.interlock("dc_overcurrent"),
over_voltage=self.interlock("over_voltage"),
dc_undervoltage=self.interlock("dc_undervoltage"),
phase_fail=self.interlock("phase_fail"),
earth_leak_fail=self.interlock("earth_leak_fail"),
fan=self.interlock("fan"),
mps_overtemperature=self.interlock("mps_overtemperature"),
external_interlock_1=self.interlock("external_interlock_1"),
external_interlock_2=self.interlock("external_interlock_2"),
external_interlock_3=self.interlock("external_interlock_3"),
mps_not_ready=self.bit(not self.device.power),
)
assert len(response) == 24, "length should have been 24 but was {}".format(len(response))
return response
def set_address(self, value):
self.device.set_address(value)
@conditional_reply("comms_initialized")
def get_address(self):
return "{:03d}".format(self.address)
@conditional_reply("comms_initialized")
def get_slew_rate(self, dac_num):
return self.device.get_slew_rate(dac_num)
@conditional_reply("comms_initialized")
def set_slew_rate(self, dac_num, slew_rate_value):
self.device.set_slew_rate(dac_num, slew_rate_value) | 0.685002 | 0.144511 |
from ..translate import (
win_agg, win_over, win_cumul, sql_scalar, sql_agg,
RankOver,
wrap_annotate, annotate,
extend_base,
SqlTranslator,
)
from .base import (
SqlColumn, SqlColumnAgg,
base_scalar, base_win, base_agg
)
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
# Custom dispatching in call trees ============================================
class PostgresqlColumn(SqlColumn): pass
class PostgresqlColumnAgg(SqlColumnAgg, PostgresqlColumn): pass
# Custom translations =========================================================
def returns_float(ns, func_names):
return {k: wrap_annotate(ns[k], result_type = "float") for k in func_names}
def sql_log(col, base = None):
if base is None:
return sql.func.ln(col)
return sql.func.log(col)
@annotate(result_type = "float")
def sql_round(col, n):
return sql.func.round(col, n)
def sql_func_contains(col, pat, case = True, flags = 0, na = None, regex = True):
# TODO: warn there differences in regex for python and sql?
# TODO: validate pat is string?
if not isinstance(pat, str):
raise TypeError("pat argument must be a string")
if flags != 0 or na is not None:
raise NotImplementedError("flags and na options not supported")
if not regex:
case_col = col if case else col.lower()
return case_col.contains(pat, autoescape = True)
full_op = "~" if case else "~*"
return col.op(full_op)(pat)
def sql_func_truediv(x, y):
return sql.cast(x, sa_types.Float()) / y
scalar = extend_base(
base_scalar,
# TODO: remove log, not a pandas method
log = sql_log,
# TODO: bring up to date (not pandas methods)
concat = lambda col: sql.func.concat(col),
cat = lambda col: sql.func.concat(col),
str_c = lambda col: sql.func.concat(col),
# infix and infix methods ----
div = sql_func_truediv,
divide = sql_func_truediv,
rdiv = lambda x,y: sql_func_truediv(y, x),
__truediv__ = sql_func_truediv,
truediv = sql_func_truediv,
__rtruediv__ = lambda x, y: sql_func_truediv(y, x),
round = sql_round,
__round__ = sql_round,
**{
"str.contains": sql_func_contains,
},
**returns_float(base_scalar, [
"dt.day", "dt.dayofweek", "dt.dayofyear", "dt.days_in_month",
"dt.daysinmonth", "dt.hour", "dt.minute", "dt.month",
"dt.quarter", "dt.second", "dt.week", "dt.weekday",
"dt.weekofyear", "dt.year"
]),
)
window = extend_base(
base_win,
any = annotate(win_agg("bool_or"), input_type = "bool"),
all = annotate(win_agg("bool_and"), input_type = "bool"),
lag = win_agg("lag"),
std = win_agg("stddev_samp"),
var = win_agg("var_samp"),
# overrides ----
# note that postgres does sum(bigint) -> numeric
size = win_agg("count"), #TODO double check
)
aggregate = extend_base(
base_agg,
all = sql_agg("bool_and"),
any = sql_agg("bool_or"),
std = sql_agg("stddev_samp"),
var = sql_agg("var_samp"),
)
funcs = dict(scalar = scalar, aggregate = aggregate, window = window)
# translate(config, CallTreeLocal, PostgresqlColumn, _.a + _.b)
translator = SqlTranslator.from_mappings(
scalar, window, aggregate,
PostgresqlColumn, PostgresqlColumnAgg
) | siuba/sql/dialects/postgresql.py | from ..translate import (
win_agg, win_over, win_cumul, sql_scalar, sql_agg,
RankOver,
wrap_annotate, annotate,
extend_base,
SqlTranslator,
)
from .base import (
SqlColumn, SqlColumnAgg,
base_scalar, base_win, base_agg
)
import sqlalchemy.sql.sqltypes as sa_types
from sqlalchemy import sql
# Custom dispatching in call trees ============================================
class PostgresqlColumn(SqlColumn): pass
class PostgresqlColumnAgg(SqlColumnAgg, PostgresqlColumn): pass
# Custom translations =========================================================
def returns_float(ns, func_names):
return {k: wrap_annotate(ns[k], result_type = "float") for k in func_names}
def sql_log(col, base = None):
if base is None:
return sql.func.ln(col)
return sql.func.log(col)
@annotate(result_type = "float")
def sql_round(col, n):
return sql.func.round(col, n)
def sql_func_contains(col, pat, case = True, flags = 0, na = None, regex = True):
# TODO: warn there differences in regex for python and sql?
# TODO: validate pat is string?
if not isinstance(pat, str):
raise TypeError("pat argument must be a string")
if flags != 0 or na is not None:
raise NotImplementedError("flags and na options not supported")
if not regex:
case_col = col if case else col.lower()
return case_col.contains(pat, autoescape = True)
full_op = "~" if case else "~*"
return col.op(full_op)(pat)
def sql_func_truediv(x, y):
return sql.cast(x, sa_types.Float()) / y
scalar = extend_base(
base_scalar,
# TODO: remove log, not a pandas method
log = sql_log,
# TODO: bring up to date (not pandas methods)
concat = lambda col: sql.func.concat(col),
cat = lambda col: sql.func.concat(col),
str_c = lambda col: sql.func.concat(col),
# infix and infix methods ----
div = sql_func_truediv,
divide = sql_func_truediv,
rdiv = lambda x,y: sql_func_truediv(y, x),
__truediv__ = sql_func_truediv,
truediv = sql_func_truediv,
__rtruediv__ = lambda x, y: sql_func_truediv(y, x),
round = sql_round,
__round__ = sql_round,
**{
"str.contains": sql_func_contains,
},
**returns_float(base_scalar, [
"dt.day", "dt.dayofweek", "dt.dayofyear", "dt.days_in_month",
"dt.daysinmonth", "dt.hour", "dt.minute", "dt.month",
"dt.quarter", "dt.second", "dt.week", "dt.weekday",
"dt.weekofyear", "dt.year"
]),
)
window = extend_base(
base_win,
any = annotate(win_agg("bool_or"), input_type = "bool"),
all = annotate(win_agg("bool_and"), input_type = "bool"),
lag = win_agg("lag"),
std = win_agg("stddev_samp"),
var = win_agg("var_samp"),
# overrides ----
# note that postgres does sum(bigint) -> numeric
size = win_agg("count"), #TODO double check
)
aggregate = extend_base(
base_agg,
all = sql_agg("bool_and"),
any = sql_agg("bool_or"),
std = sql_agg("stddev_samp"),
var = sql_agg("var_samp"),
)
funcs = dict(scalar = scalar, aggregate = aggregate, window = window)
# translate(config, CallTreeLocal, PostgresqlColumn, _.a + _.b)
translator = SqlTranslator.from_mappings(
scalar, window, aggregate,
PostgresqlColumn, PostgresqlColumnAgg
) | 0.279238 | 0.202187 |
import asyncio
from typing import Optional
from main import os
from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from aiogram.utils import exceptions, executor
from models import User
import database
from loguru import logger as log
from celery import Celery
celery_app = Celery('tasks', broker=os.environ.get('AMQP_URL'), backend=os.environ.get('DATABASE_URL'))
@celery_app.task()
def ping():
log.info('Celery task triggered')
return 'pong'
async def send_message(user_id: int,
text: str,
buttons: Optional[list[dict[str, str]]] = None,
disable_notification: bool = False) -> bool:
"""
Safe messages sender
:param user_id:
:param text:
:param buttons: List of inline buttons in format [{'text': 'text', 'callback_data': 'callback_data', **kwargs}].
A button can have all the same keys that InlineKeyboardButton() take
:param disable_notification:
:return:
"""
from main import bot
try:
await bot.send_message(user_id, text, reply_markup=InlineKeyboardMarkup(
row_width=2,
resize_keyboard=True,
one_time_keyboard=True, ).add(
*[InlineKeyboardButton(**button) for button in buttons])
if buttons else None,
disable_notification=disable_notification)
log.info(f"Sent message to target [ID:{user_id}]")
except exceptions.BotBlocked:
log.error(f"Target [ID:{user_id}]: blocked by user")
except exceptions.ChatNotFound:
log.error(f"Target [ID:{user_id}]: invalid user ID")
except exceptions.RetryAfter as e:
log.error(f"Target [ID:{user_id}]: Flood limit is exceeded. Sleep {e.timeout} seconds.")
await asyncio.sleep(e.timeout)
return await send_message(user_id, text, buttons) # Recursive call
except exceptions.UserDeactivated:
log.error(f"Target [ID:{user_id}]: user is deactivated")
except exceptions.TelegramAPIError:
log.exception(f"Target [ID:{user_id}]: failed")
else:
log.info(f"Target [ID:{user_id}]: success")
return True
return False
async def broadcaster(text: str,
buttons: Optional[list[dict[str, str]]] = None) -> int:
"""
Simple broadcaster
:return: Count of messages
"""
# Init Tortoise database first
await database.init()
count = 0
try:
async for user in User.all():
if await send_message(user.pk, text, buttons):
log.info(f'Sent a message to user [ID:{user.pk}] [USERNAME:{user.name}]')
count += 1
await asyncio.sleep(.05) # 20 messages per second (Limit: 30 messages per second)
finally:
log.info(f"{count} messages successful sent.")
return count
@celery_app.task()
def broadcast_message(text: str,
buttons: Optional[list[dict[str, str]]] = None, *args):
"""
Celery task used to broadcast new messages to users
:param text: Text to be sent #TODO: [11/13/2020 by Mykola] Add formatting, such as HTML or Markdown
:param buttons: List of inline buttons in format [{'text': 'text', 'callback_data': 'callback_data', **kwargs}]
:return:
"""
from main import dp
executor.start(dp, broadcaster(text, buttons)) | tasks.py | import asyncio
from typing import Optional
from main import os
from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from aiogram.utils import exceptions, executor
from models import User
import database
from loguru import logger as log
from celery import Celery
celery_app = Celery('tasks', broker=os.environ.get('AMQP_URL'), backend=os.environ.get('DATABASE_URL'))
@celery_app.task()
def ping():
log.info('Celery task triggered')
return 'pong'
async def send_message(user_id: int,
text: str,
buttons: Optional[list[dict[str, str]]] = None,
disable_notification: bool = False) -> bool:
"""
Safe messages sender
:param user_id:
:param text:
:param buttons: List of inline buttons in format [{'text': 'text', 'callback_data': 'callback_data', **kwargs}].
A button can have all the same keys that InlineKeyboardButton() take
:param disable_notification:
:return:
"""
from main import bot
try:
await bot.send_message(user_id, text, reply_markup=InlineKeyboardMarkup(
row_width=2,
resize_keyboard=True,
one_time_keyboard=True, ).add(
*[InlineKeyboardButton(**button) for button in buttons])
if buttons else None,
disable_notification=disable_notification)
log.info(f"Sent message to target [ID:{user_id}]")
except exceptions.BotBlocked:
log.error(f"Target [ID:{user_id}]: blocked by user")
except exceptions.ChatNotFound:
log.error(f"Target [ID:{user_id}]: invalid user ID")
except exceptions.RetryAfter as e:
log.error(f"Target [ID:{user_id}]: Flood limit is exceeded. Sleep {e.timeout} seconds.")
await asyncio.sleep(e.timeout)
return await send_message(user_id, text, buttons) # Recursive call
except exceptions.UserDeactivated:
log.error(f"Target [ID:{user_id}]: user is deactivated")
except exceptions.TelegramAPIError:
log.exception(f"Target [ID:{user_id}]: failed")
else:
log.info(f"Target [ID:{user_id}]: success")
return True
return False
async def broadcaster(text: str,
buttons: Optional[list[dict[str, str]]] = None) -> int:
"""
Simple broadcaster
:return: Count of messages
"""
# Init Tortoise database first
await database.init()
count = 0
try:
async for user in User.all():
if await send_message(user.pk, text, buttons):
log.info(f'Sent a message to user [ID:{user.pk}] [USERNAME:{user.name}]')
count += 1
await asyncio.sleep(.05) # 20 messages per second (Limit: 30 messages per second)
finally:
log.info(f"{count} messages successful sent.")
return count
@celery_app.task()
def broadcast_message(text: str,
buttons: Optional[list[dict[str, str]]] = None, *args):
"""
Celery task used to broadcast new messages to users
:param text: Text to be sent #TODO: [11/13/2020 by Mykola] Add formatting, such as HTML or Markdown
:param buttons: List of inline buttons in format [{'text': 'text', 'callback_data': 'callback_data', **kwargs}]
:return:
"""
from main import dp
executor.start(dp, broadcaster(text, buttons)) | 0.668015 | 0.112065 |
import copy
import collections
from werkzeug.exceptions import Forbidden
from sqlalchemy import and_
from ggrc import db
from ggrc import models
from ggrc.utils import benchmark
from ggrc.rbac import permissions
from ggrc.query.default_handler import DefaultHandler
def _set_data(object_query, data):
"""Helper function for setting basic data in object_query"""
object_query["count"] = len(data)
object_query["total"] = len(data)
object_query["last_modified"] = None
object_query["values"] = data
return object_query
# pylint: disable=too-few-public-methods
class AssessmentRelatedObjects(DefaultHandler):
"""Handler for assessment filter on my assessments page.
Query filters with single relevant person and assessment statuses.
"""
@classmethod
def match(cls, query):
"""Check if the given query matches current handler."""
if len(query) != 6:
return False
query = copy.deepcopy(query)
assessment_ids = query[0]["filters"]["expression"]["ids"]
if not isinstance(assessment_ids, list) or len(assessment_ids) != 1:
return False
expected = [{
"object_name": "Snapshot",
"filters": {
"expression": {
"object_name": "Assessment",
"op": {"name": "relevant"},
"ids": assessment_ids
},
"keys": [],
"order_by":{"keys": [], "order":"", "compare":None}
},
"fields":[]
}, {
"object_name": "Comment",
"filters": {
"expression": {
"object_name": "Assessment",
"op": {"name": "relevant"},
"ids": assessment_ids
},
"keys": [],
"order_by":{"keys": [], "order":"", "compare":None
}
},
"order_by":[{"name": "created_at", "desc": True}],
"fields": []
}, {
"object_name": "Document",
"filters": {
"expression": {
"left": {
"object_name": "Assessment",
"op": {"name": "relevant"},
"ids": assessment_ids
},
"op": {"name": "AND"},
"right": {
"left": "document_type",
"op": {"name": "="},
"right": "EVIDENCE"
}
},
"keys": [None]
},
"order_by":[{"name": "created_at", "desc": True}],
"fields": []
}, {
"object_name": "Document",
"filters": {
"expression": {
"left": {
"object_name": "Assessment",
"op": {"name": "relevant"},
"ids": assessment_ids
},
"op": {"name": "AND"},
"right": {
"left": "document_type",
"op": {"name": "="},
"right": "URL"
}
},
"keys": [None]
},
"order_by":[{"name": "created_at", "desc": True}],
"fields": []
}, {
"object_name": "Document",
"filters": {
"expression": {
"left": {
"object_name": "Assessment",
"op": {"name": "relevant"},
"ids": assessment_ids
},
"op": {"name": "AND"},
"right": {
"left": "document_type",
"op": {"name": "="},
"right": "REFERENCE_URL"
}
},
"keys": [None]
},
"fields":[],
"order_by":[{"name": "created_at", "desc": True}]
}, {
"object_name": "Audit",
"filters": {
"expression": {
"object_name": "Assessment",
"op": {"name": "relevant"},
"ids": assessment_ids
},
"keys": [],
"order_by":{"keys": [], "order":"", "compare":None}
},
"limit":[0, 1],
"fields":["id", "type", "title", "context"]
}]
return query == expected
def _assessment(self):
"""Get the assessment used in the query and verify its permissions."""
assessment_id = self.query[0]["filters"]["expression"]["ids"][0]
assessment = models.Assessment.query.get(assessment_id)
if permissions.is_allowed_read_for(assessment):
return assessment
raise Forbidden()
def set_audit_result(self, assessment):
"""Set audit result"""
object_query = self.query[5]
data = db.session.query(
models.Audit.id,
models.Audit.title,
models.Audit.context_id,
).filter(
models.Audit.id == assessment.audit_id
).first()
with benchmark("Get audit data"):
object_query["count"] = 1
object_query["total"] = 1
object_query["last_modified"] = None
object_query["values"] = [{
"id": data.id,
"title": data.title,
"type": models.Audit.__name__,
"context": {
"context_id": None,
"href": "/api/contexts/{}".format(data.context_id),
"id": data.context_id,
"type": "Context",
},
}]
def set_snapshot_result(self, assessment):
"""Set snapshot result"""
query = self.query[0]
with benchmark("Get assessment snapshot relationships"):
snapshots = db.session.query(
models.Snapshot
).join(
models.Relationship,
and_(
models.Snapshot.id == models.Relationship.source_id,
models.Relationship.source_type == "Snapshot",
models.Relationship.destination_id == assessment.id,
models.Relationship.destination_type == "Assessment"
)
).union(
db.session.query(
models.Snapshot
).join(
models.Relationship,
and_(
models.Snapshot.id == models.Relationship.destination_id,
models.Relationship.destination_type == "Snapshot",
models.Relationship.source_id == assessment.id,
models.Relationship.source_type == "Assessment"
)
)
).all()
with benchmark("Set assessment snapshot relationships"):
data = []
for snapshot in snapshots:
data.append({
"archived": snapshot.archived,
"revision": snapshot.revision.log_json(),
"related_sources": [],
"parent": {
"context_id": assessment.context_id,
"href": "/api/audits/{}".format(assessment.audit_id),
"type": "Audit",
"id": assessment.audit_id,
},
"child_type": snapshot.child_type,
"child_id": snapshot.child_id,
"related_destinations": [],
"id": snapshot.id,
"revisions": [],
"revision_id": snapshot.revision_id,
"type": snapshot.type,
})
_set_data(query, data)
def set_comment_result(self, assessment):
"""Set comment result"""
query = self.query[1]
self.query[1]["last_modified"] = None
with benchmark("Get assessment snapshot relationships"):
comments = db.session.query(
models.Comment
).join(
models.Relationship,
and_(
models.Comment.id == models.Relationship.source_id,
models.Relationship.source_type == "Comment",
models.Relationship.destination_id == assessment.id,
models.Relationship.destination_type == "Assessment"
)
).union(
db.session.query(
models.Comment
).join(
models.Relationship,
and_(
models.Comment.id == models.Relationship.destination_id,
models.Relationship.destination_type == "Comment",
models.Relationship.source_id == assessment.id,
models.Relationship.source_type == "Assessment"
)
)
).all()
with benchmark("Set assessment snapshot relationships"):
data = []
sorted_data = []
for comment in comments:
data.append(comment.log_json())
sorted_data = sorted(data,
key=lambda x: (x["created_at"], x["id"]),
reverse=True)
_set_data(query, sorted_data)
def set_document_result(self, assessment):
"""Set document result"""
data_map = collections.defaultdict(list)
query_map = {
models.Document.ATTACHMENT: self.query[2],
models.Document.URL: self.query[3],
models.Document.REFERENCE_URL: self.query[4],
}
self.query[1]["last_modified"] = None
with benchmark("Get assessment snapshot relationships"):
documents = db.session.query(
models.Document
).join(
models.Relationship,
and_(
models.Document.id == models.Relationship.source_id,
models.Relationship.source_type == "Document",
models.Relationship.destination_id == assessment.id,
models.Relationship.destination_type == "Assessment"
)
).union(
db.session.query(
models.Document
).join(
models.Relationship,
and_(
models.Document.id == models.Relationship.destination_id,
models.Relationship.destination_type == "Document",
models.Relationship.source_id == assessment.id,
models.Relationship.source_type == "Assessment"
)
)
).all()
with benchmark("Set assessment snapshot relationships"):
for document in documents:
data_map[document.document_type].append(document.log_json())
for document_type, query in query_map.items():
_set_data(query, data_map[document_type])
def get_results(self):
"""Filter the objects and get their information.
Updates self.query items with their results. The type of results required
is read from "type" parameter of every object_query in self.query.
Returns:
list of dicts: same query as the input with requested results that match
the filter.
"""
assessment = self._assessment()
self.set_snapshot_result(assessment)
self.set_comment_result(assessment)
self.set_document_result(assessment)
self.set_audit_result(assessment)
return self.query | src/ggrc/query/assessment_related_objects.py | import copy
import collections
from werkzeug.exceptions import Forbidden
from sqlalchemy import and_
from ggrc import db
from ggrc import models
from ggrc.utils import benchmark
from ggrc.rbac import permissions
from ggrc.query.default_handler import DefaultHandler
def _set_data(object_query, data):
"""Helper function for setting basic data in object_query"""
object_query["count"] = len(data)
object_query["total"] = len(data)
object_query["last_modified"] = None
object_query["values"] = data
return object_query
# pylint: disable=too-few-public-methods
class AssessmentRelatedObjects(DefaultHandler):
"""Handler for assessment filter on my assessments page.
Query filters with single relevant person and assessment statuses.
"""
@classmethod
def match(cls, query):
"""Check if the given query matches current handler."""
if len(query) != 6:
return False
query = copy.deepcopy(query)
assessment_ids = query[0]["filters"]["expression"]["ids"]
if not isinstance(assessment_ids, list) or len(assessment_ids) != 1:
return False
expected = [{
"object_name": "Snapshot",
"filters": {
"expression": {
"object_name": "Assessment",
"op": {"name": "relevant"},
"ids": assessment_ids
},
"keys": [],
"order_by":{"keys": [], "order":"", "compare":None}
},
"fields":[]
}, {
"object_name": "Comment",
"filters": {
"expression": {
"object_name": "Assessment",
"op": {"name": "relevant"},
"ids": assessment_ids
},
"keys": [],
"order_by":{"keys": [], "order":"", "compare":None
}
},
"order_by":[{"name": "created_at", "desc": True}],
"fields": []
}, {
"object_name": "Document",
"filters": {
"expression": {
"left": {
"object_name": "Assessment",
"op": {"name": "relevant"},
"ids": assessment_ids
},
"op": {"name": "AND"},
"right": {
"left": "document_type",
"op": {"name": "="},
"right": "EVIDENCE"
}
},
"keys": [None]
},
"order_by":[{"name": "created_at", "desc": True}],
"fields": []
}, {
"object_name": "Document",
"filters": {
"expression": {
"left": {
"object_name": "Assessment",
"op": {"name": "relevant"},
"ids": assessment_ids
},
"op": {"name": "AND"},
"right": {
"left": "document_type",
"op": {"name": "="},
"right": "URL"
}
},
"keys": [None]
},
"order_by":[{"name": "created_at", "desc": True}],
"fields": []
}, {
"object_name": "Document",
"filters": {
"expression": {
"left": {
"object_name": "Assessment",
"op": {"name": "relevant"},
"ids": assessment_ids
},
"op": {"name": "AND"},
"right": {
"left": "document_type",
"op": {"name": "="},
"right": "REFERENCE_URL"
}
},
"keys": [None]
},
"fields":[],
"order_by":[{"name": "created_at", "desc": True}]
}, {
"object_name": "Audit",
"filters": {
"expression": {
"object_name": "Assessment",
"op": {"name": "relevant"},
"ids": assessment_ids
},
"keys": [],
"order_by":{"keys": [], "order":"", "compare":None}
},
"limit":[0, 1],
"fields":["id", "type", "title", "context"]
}]
return query == expected
def _assessment(self):
"""Get the assessment used in the query and verify its permissions."""
assessment_id = self.query[0]["filters"]["expression"]["ids"][0]
assessment = models.Assessment.query.get(assessment_id)
if permissions.is_allowed_read_for(assessment):
return assessment
raise Forbidden()
def set_audit_result(self, assessment):
"""Set audit result"""
object_query = self.query[5]
data = db.session.query(
models.Audit.id,
models.Audit.title,
models.Audit.context_id,
).filter(
models.Audit.id == assessment.audit_id
).first()
with benchmark("Get audit data"):
object_query["count"] = 1
object_query["total"] = 1
object_query["last_modified"] = None
object_query["values"] = [{
"id": data.id,
"title": data.title,
"type": models.Audit.__name__,
"context": {
"context_id": None,
"href": "/api/contexts/{}".format(data.context_id),
"id": data.context_id,
"type": "Context",
},
}]
def set_snapshot_result(self, assessment):
"""Set snapshot result"""
query = self.query[0]
with benchmark("Get assessment snapshot relationships"):
snapshots = db.session.query(
models.Snapshot
).join(
models.Relationship,
and_(
models.Snapshot.id == models.Relationship.source_id,
models.Relationship.source_type == "Snapshot",
models.Relationship.destination_id == assessment.id,
models.Relationship.destination_type == "Assessment"
)
).union(
db.session.query(
models.Snapshot
).join(
models.Relationship,
and_(
models.Snapshot.id == models.Relationship.destination_id,
models.Relationship.destination_type == "Snapshot",
models.Relationship.source_id == assessment.id,
models.Relationship.source_type == "Assessment"
)
)
).all()
with benchmark("Set assessment snapshot relationships"):
data = []
for snapshot in snapshots:
data.append({
"archived": snapshot.archived,
"revision": snapshot.revision.log_json(),
"related_sources": [],
"parent": {
"context_id": assessment.context_id,
"href": "/api/audits/{}".format(assessment.audit_id),
"type": "Audit",
"id": assessment.audit_id,
},
"child_type": snapshot.child_type,
"child_id": snapshot.child_id,
"related_destinations": [],
"id": snapshot.id,
"revisions": [],
"revision_id": snapshot.revision_id,
"type": snapshot.type,
})
_set_data(query, data)
def set_comment_result(self, assessment):
"""Set comment result"""
query = self.query[1]
self.query[1]["last_modified"] = None
with benchmark("Get assessment snapshot relationships"):
comments = db.session.query(
models.Comment
).join(
models.Relationship,
and_(
models.Comment.id == models.Relationship.source_id,
models.Relationship.source_type == "Comment",
models.Relationship.destination_id == assessment.id,
models.Relationship.destination_type == "Assessment"
)
).union(
db.session.query(
models.Comment
).join(
models.Relationship,
and_(
models.Comment.id == models.Relationship.destination_id,
models.Relationship.destination_type == "Comment",
models.Relationship.source_id == assessment.id,
models.Relationship.source_type == "Assessment"
)
)
).all()
with benchmark("Set assessment snapshot relationships"):
data = []
sorted_data = []
for comment in comments:
data.append(comment.log_json())
sorted_data = sorted(data,
key=lambda x: (x["created_at"], x["id"]),
reverse=True)
_set_data(query, sorted_data)
def set_document_result(self, assessment):
"""Set document result"""
data_map = collections.defaultdict(list)
query_map = {
models.Document.ATTACHMENT: self.query[2],
models.Document.URL: self.query[3],
models.Document.REFERENCE_URL: self.query[4],
}
self.query[1]["last_modified"] = None
with benchmark("Get assessment snapshot relationships"):
documents = db.session.query(
models.Document
).join(
models.Relationship,
and_(
models.Document.id == models.Relationship.source_id,
models.Relationship.source_type == "Document",
models.Relationship.destination_id == assessment.id,
models.Relationship.destination_type == "Assessment"
)
).union(
db.session.query(
models.Document
).join(
models.Relationship,
and_(
models.Document.id == models.Relationship.destination_id,
models.Relationship.destination_type == "Document",
models.Relationship.source_id == assessment.id,
models.Relationship.source_type == "Assessment"
)
)
).all()
with benchmark("Set assessment snapshot relationships"):
for document in documents:
data_map[document.document_type].append(document.log_json())
for document_type, query in query_map.items():
_set_data(query, data_map[document_type])
def get_results(self):
"""Filter the objects and get their information.
Updates self.query items with their results. The type of results required
is read from "type" parameter of every object_query in self.query.
Returns:
list of dicts: same query as the input with requested results that match
the filter.
"""
assessment = self._assessment()
self.set_snapshot_result(assessment)
self.set_comment_result(assessment)
self.set_document_result(assessment)
self.set_audit_result(assessment)
return self.query | 0.621656 | 0.335623 |
import networkx as nx
import matplotlib.pyplot as plt
from autoparse.automaton import preprocess, Automaton
class Transition:
    """A weighted, labeled edge between two automaton states.

    Attributes:
        word: token observed on this edge (may become a "<$var>" placeholder
            after ``make_generic``).
        state_in / state_out: source and destination states.
        weight: number of times this edge was traversed while fitting.
        transitions_ids: ids of the raw transitions merged into this edge.
        tid: one representative id (arbitrary member of ``transitions_ids``),
            or ``None`` when no ids were supplied.
        variables: per-variable occurrence counts used by ``make_generic``.
        p: probability table, filled in later by the fitter.
    """

    def __init__(
        self,
        word: str,
        state_in,
        state_out,
        transition_ids=None,
        weight: int = 1,
        variables=None,
    ):
        # Mutable defaults ([] / {}) were replaced by None sentinels: the old
        # shared default dict was mutated in place by TransitionSet.add, which
        # leaked variable counts across unrelated Transition instances.
        self.word = word
        self.state_in = state_in
        self.state_out = state_out
        self.weight = weight
        self.variables = {} if variables is None else variables
        self.transitions_ids = (
            set() if transition_ids is None else set(transition_ids)
        )
        # Two-argument next() so an empty id set yields None instead of
        # raising StopIteration (the old code crashed on the default).
        self.tid = next(iter(self.transitions_ids), None)
        self.p = {}

    def make_generic(self):
        """Relabel this edge with its most frequent variable placeholder.

        Returns:
            "<$var>" for the variable with the highest count, or "*" when
            no variable was ever observed on this edge.
        """
        generic = "*"
        best_count = 0
        for var, count in self.variables.items():
            if count > best_count:
                generic = "<$" + var + ">"
                best_count = count
        self.word = generic
        return generic

    def __eq__(self, other):
        """Transitions are equal when word and both endpoints match."""
        if not isinstance(other, self.__class__):
            return False
        return (
            self.word == other.word
            and self.state_in == other.state_in
            and self.state_out == other.state_out
        )

    def __hash__(self):
        # Hash deliberately ignores the word: equal transitions must hash
        # equal, and unequal ones may collide (they can differ only by word).
        return hash(str(self.state_in) + str(self.state_out))

    def __repr__(self):
        return " {:6d} --{:^20}--> {:6d} ".format(
            self.state_in.id, self.word, self.state_out.id
        )
class TransitionSet:
    """Set-like container of transitions that accumulates weights.

    Adding a transition that compares equal to one already stored merges
    the two: weights are summed, transition-id sets are unioned, and the
    per-variable counts are added together.
    """

    def __init__(self):
        self._dict = {}

    def __contains__(self, item):
        return item in self._dict

    def __iter__(self):
        return iter(self._dict)

    def __len__(self):
        return len(self._dict)

    def __repr__(self):
        return repr(self._dict)

    def _add(self, item):
        """Insert without merging weights (raw overwrite)."""
        self._dict[item] = item

    def add(self, item):
        """Insert *item*, merging it into an equal existing transition."""
        existing = self._dict.get(item)
        if existing is None:
            self._dict[item] = item
            return
        existing.weight += item.weight
        existing.transitions_ids |= item.transitions_ids
        for var, count in item.variables.items():
            existing.variables[var] = existing.variables.get(var, 0) + count

    def remove(self, item):
        """Remove *item* if present; silently ignore it otherwise."""
        self._dict.pop(item, None)
class State:
def __init__(self, node_id: int, word: str):
self.id = node_id
self.transitions_in = TransitionSet()
self.transitions_out = TransitionSet()
self.word = word
@property
def weight(self):
total_weight = 0
for t in self.transitions_in:
total_weight += t.weight
return total_weight
@property
def child(self):
for t in self.transitions_out:
yield t.state_out
@property
def parents(self):
for t in self.transitions_in:
yield t.state_in
def merge_on(self, state):
transitions_to_delete = []
for t in self.transitions_in:
new_transition = Transition(
t.word,
t.state_in,
state,
transition_ids=t.transitions_ids,
weight=t.weight,
variables=t.variables,
)
state.add_transition_in(new_transition)
transitions_to_delete.append(t)
for t in self.transitions_out:
new_transition = Transition(
t.word,
state,
t.state_out,
transition_ids=t.transitions_ids,
weight=t.weight,
variables=t.variables,
)
state.add_transition_out(new_transition)
transitions_to_delete.append(t)
for t in transitions_to_delete:
t.state_out.remove_transition_in(t)
def generify(self, limit_weight):
if self.weight <= limit_weight:
self.word = "*"
for t in self.transitions_in:
generic = t.make_generic()
if generic != "*":
self.word = generic
def get_generic_ancestors(self):
"""return the last ancestors connected by generics transion and drop those transitions"""
if self.id == 0 or not self.word == "*":
return [self], []
else:
ancestors = []
intermediary_states = [self]
for transition in self.transitions_in:
new_ancestors, new_intermediary_states = (
transition.state_in.get_generic_ancestors()
)
ancestors += new_ancestors
intermediary_states += new_intermediary_states
return ancestors, intermediary_states
def merge_generic_parents(self):
if self.id == 0 or not self.word == "*":
return
ancestors, intermediary_states = self.get_generic_ancestors()
transitions_ids = set()
for state in intermediary_states:
transitions_to_remove = list(state.transitions_in)
for transition in transitions_to_remove:
transitions_ids |= transition.transitions_ids
state.remove_transition_in(transition)
for ancestor in ancestors:
self.add_transition_in(
Transition(self.word, ancestor, self, transitions_ids)
)
def get_trivial_group(self):
if len(self.transitions_in) <= 1:
return set()
merge_group = set()
for parent in self.parents:
if len(parent.transitions_out) == 1:
merge_group.add(parent.id)
return merge_group
def add_transition_in(self, transition):
self.transitions_in.add(transition)
transition.state_in.__add_transition_out(transition)
    def add_transition_out(self, transition):
        # Record an outgoing edge and mirror it on the destination state's
        # incoming set (name-mangled private call on the other State).
        self.transitions_out.add(transition)
        transition.state_out.__add_transition_in(transition)
    def remove_transition_in(self, transition):
        # Drop an incoming edge and detach its mirror from the source state so
        # both endpoints stay consistent.
        self.transitions_in.remove(transition)
        transition.state_in.__remove_transition_out(transition)
    def remove_transition_out(self, transition):
        # Drop an outgoing edge and detach its mirror from the destination
        # state so both endpoints stay consistent.
        self.transitions_out.remove(transition)
        transition.state_out.__remove_transition_in(transition)
    def __add_transition_in(self, transition):
        # Mirror-side insert; `_add` presumably stores without merging weights
        # (unlike the public `add`) — confirm against TransitionSet._add.
        self.transitions_in._add(transition)
    def __add_transition_out(self, transition):
        # Mirror-side insert; `_add` presumably stores without merging weights
        # (unlike the public `add`) — confirm against TransitionSet._add.
        self.transitions_out._add(transition)
    def __remove_transition_in(self, transition):
        # Mirror-side removal of an incoming edge.
        self.transitions_in.remove(transition)
    def __remove_transition_out(self, transition):
        # Mirror-side removal of an outgoing edge.
        self.transitions_out.remove(transition)
class AutomatonFitter:
    """A class that fit an automaton on a list of documents
    The documents are assumed to be produced by a few numbers of templates that includes both
    fixed and variable words, produced with str.format() for instance. The fitted automaton
    will guess which transitions hold variables and can extract them from new documents.
    Methods
    -------
    fit:
        Fit the automaton
    build:
        Return an executable automaton, should be called after fit
    pprint:
        Pretty printer using Networkx and matplotlib
    print:
        Regular printer in string format
    """
    def __init__(self, docs, variables={}, order: int = 3):
        """Initialize the automaton
        Parameters
        ----------
        docs : str[]
            Documents to fit the automaton on
        variables: {str: str[]}
            keys are the name of variables (e.g. city) an values list of examples (e.g. ["Paris", "London", ...])
        order: int
            The memory size of the internal markov model used to predict path probability.
        """
        # NOTE(review): `variables` is a mutable default and is mutated below
        # (each example list is replaced by a lowercased set), so the caller's
        # dict is modified in place — consider copying. Harmless for the {}
        # default itself, since the loop then has nothing to rewrite.
        self.nb_docs = len(docs)
        self.start_state = State(0, "<start>")
        self.stop_state = State(1, "<stop>")
        self.states = {0: self.start_state, 1: self.stop_state}
        self.stateCounter = 2       # next fresh state id
        self.transitionCounter = 1  # next fresh transition id (0 is the padding id)
        self.transitions_sequences = []
        self.order = order
        for var in variables.keys():
            variables[var] = set([v.lower() for v in variables[var]])
        # The automaton is built backwards: each document is consumed from its
        # last word to its first, chaining fresh states from <stop> to <start>.
        for doc in docs:
            transition_sequence = []
            previous = self.stop_state
            doc = preprocess(doc)
            doc = " ".join(doc.split("/"))  # treat "/" as a word separator
            for word in doc.split(" ")[::-1]:
                state = self.create_state(word)
                var_count = self.get_variables(previous.word, variables)
                transition_out, tid = self.create_transition(state, previous, var_count)
                transition_sequence.append(tid)
                state.add_transition_out(transition_out)
                self.states[state.id] = state
                previous = state
            transition_out, tid = self.create_transition(self.start_state, state, {})
            transition_sequence.append(tid)
            self.start_state.add_transition_out(transition_out)
            # Pad with `order` zero-ids so every real transition has a full
            # history window, then flip back to left-to-right document order.
            transition_sequence = (transition_sequence + [0] * order)[::-1]
            self.transitions_sequences.append(transition_sequence)
    @staticmethod
    def get_variables(word, variables):
        """
        Return the list of variables this word is matching based on examples
        word: string
        variables: {string: set()}
        return: {string: int}
        """
        var_count = {}
        for var, examples in variables.items():
            if word in examples:
                var_count[var] = 1
        return var_count
    def create_transition(self, state_in, state_out, variables_count):
        # Allocate a fresh transition id and build the edge; a transition is
        # labelled with its destination state's word.
        tid = self.transitionCounter
        new_transition = Transition(
            state_out.word, state_in, state_out, [tid], variables=variables_count
        )
        self.transitionCounter += 1
        return new_transition, tid
    def create_state(self, word):
        # Allocate a fresh state carrying `word`.
        new_state = State(self.stateCounter, word)
        self.stateCounter += 1
        return new_state
    def iterate_states(self, f, acc=None):
        """Apply `acc = f(state, acc)` on each state, return acc"""
        # Depth-first traversal backwards from <stop> through parent links;
        # `done` guards against revisiting states reachable via several paths.
        done = set()
        stack = [self.stop_state]
        while len(stack) > 0:
            state = stack.pop()
            if state.id in done:
                continue
            done.add(state.id)
            acc = f(state, acc)
            stack.extend(state.parents)
        return acc
    def count_word(self):
        """Return {word: number of states carrying that word}."""
        def add_word(state, word_count):
            if not state.word in word_count:
                word_count[state.word] = 0
            word_count[state.word] += 1
            return word_count
        return self.iterate_states(add_word, {})
    def count_variables(self):
        """Return {"<$var>": total matched-example count over all transitions}."""
        def add_vars(state, vars_count):
            for t in state.transitions_in:
                for var, count in t.variables.items():
                    var = "<$" + var + ">"
                    if not var in vars_count:
                        vars_count[var] = 0
                    vars_count[var] += count
            return vars_count
        return self.iterate_states(add_vars, {})
    def make_state_generic(self, threshold: float = 0):
        """Turn every state with document frequency at or below `threshold`
        into a generic/variable state (see State.generify)."""
        limit_weight = threshold * self.nb_docs
        def generify(state, limit_weight):
            state.generify(limit_weight)
            return limit_weight
        self.iterate_states(generify, limit_weight)
    def simplify_generic_chains(self):
        """Collapse chains of consecutive generic states into single edges."""
        def merge_generics(state, acc):
            state.merge_generic_parents()
            return acc
        self.iterate_states(merge_generics)
    def merge_trivial_groups(self):
        """Merge, for each state, the parents whose only child is that state."""
        def trivial_group(state, group_list):
            group_list.append(state.get_trivial_group())
            return group_list
        merge_group_list = self.iterate_states(trivial_group, [])
        for group in merge_group_list:
            self.merge_group(group, 0)
    def remove_rare_transitions(self, freq: float):
        """Drop every transition whose weight is at or below `freq` * nb_docs."""
        limit_weight = freq * self.nb_docs
        def remove_rare_out_transitions(state, limit_weight):
            # Collect first: removal mutates the set being iterated.
            transitions_to_remove = []
            for t in state.transitions_out:
                if t.weight <= limit_weight:
                    transitions_to_remove.append(t)
            for t in transitions_to_remove:
                state.remove_transition_out(t)
            return limit_weight
        self.iterate_states(remove_rare_out_transitions, limit_weight)
    def merge_group(self, merge_group, threshold):
        """Merge all states whose ids are in `merge_group` into a single state.
        Returns False (doing nothing) when the group has fewer than two members
        or is below the frequency threshold."""
        if (
            not len(merge_group) >= 2
            or not len(merge_group) >= threshold * self.nb_docs
        ):
            return False
        # An arbitrary member of the group becomes the surviving state.
        merge_state = self.states[next(iter(merge_group))]
        merge_group.remove(merge_state.id)
        def merge(state, acc):
            if state.id in merge_group:
                state.merge_on(merge_state)
            return acc
        self.iterate_states(merge)
        return True
    def find_merge_group(self, word: str):
        """Collect the ids of states labelled `word` that can safely be merged.
        Two candidates are incompatible when one is a descendant of the other
        (merging them would create a cycle); such pairs are recorded and then
        resolved by remove_incompatibles."""
        incompatibles = set()
        merge_group = set()
        stack = [(self.stop_state, set())]  # (state, set of descendants)
        visited = {}  # state -> [nb_visit, set of descendants]
        while len(stack) > 0:
            state, descendants = stack.pop()
            new_descendant = set()
            if state.word == word:
                new_descendant.add(state.id)
                merge_group.add(state.id)
                # Record conflicts with every same-word descendant, in both
                # orders, so remove_incompatibles can count per-state conflicts.
                for descendant_id in descendants:
                    incompatibles.add((descendant_id, state.id))
                    incompatibles.add((state.id, descendant_id))
            if not state in visited:
                visited[state] = [0, set()]
            visited[state][0] += 1
            visited[state][1] |= descendants
            visited[state][1] |= new_descendant
            # Only climb to the parents once every child branch has reported,
            # so the accumulated descendant set is complete.
            if visited[state][0] >= len(state.transitions_out):
                descendants = visited[state][1]
                for parent in state.parents:
                    stack.append((parent, descendants))
        return self.remove_incompatibles(merge_group, incompatibles)
    def remove_incompatibles(self, merge_group, incompatibles):
        """For each incompatible pair still in the group, drop the member
        involved in more conflicts, and return the pruned group."""
        incompatible_count = {}
        for state1, state2 in incompatibles:
            if not state1 in incompatible_count:
                incompatible_count[state1] = 0
            if not state2 in incompatible_count:
                incompatible_count[state2] = 0
            incompatible_count[state1] += 1
            incompatible_count[state2] += 1
        for state1, state2 in incompatibles:
            if state1 in merge_group and state2 in merge_group:
                if incompatible_count[state1] > incompatible_count[state2]:
                    merge_group.remove(state1)
                else:
                    merge_group.remove(state2)
        return merge_group
    def merge_word(self, word: str, threshold: float = 0):
        """Find and merge all safely-mergeable states labelled `word`."""
        return self.merge_group(self.find_merge_group(word), threshold)
    def reduce(self, threshold: float = 0, variables: bool = False, word_black_list=[]):
        """
        Merge either on words or on variables. Should merge on variable only after
        `self.make_state_generic` has been called.
        """
        count_function = self.count_word
        if variables == True:
            count_function = self.count_variables
        done = False
        black_list = set([w.lower() for w in word_black_list])
        # Words rarer than the threshold can never form a valid pattern.
        for word, nb_occurrences in self.count_word().items():
            if nb_occurrences < threshold * self.nb_docs:
                black_list.add(word)
        # Greedily merge the most frequent non-blacklisted word until nothing
        # mergeable remains; a failed merge blacklists the word.
        while not done:
            transition_count = [
                (word, nb_occurrences)
                for word, nb_occurrences in count_function().items()
                if word not in black_list
            ]
            if len(transition_count) == 0:
                done = True
                break
            transition_count.sort(key=lambda x: x[1])
            word, count = transition_count.pop()
            if count > 1:
                success = self.merge_word(word, threshold)
                if not success:
                    black_list.add(word)
            else:
                done = True
    def compute_transition_probability(self):
        """Estimate order-`order` Markov transition probabilities by replaying
        every recorded transition-id sequence through the (reduced) graph."""
        for transitions_sequence in self.transitions_sequences:
            previous_transition = None
            state = self.start_state
            for i in range(self.order, len(transitions_sequence)):
                tid = transitions_sequence[i]
                history = tuple(transitions_sequence[i - self.order : i])
                found = False
                for transition in state.transitions_out:
                    if tid in transition.transitions_ids:
                        found = True
                        # Canonicalise the sequence on the merged transition id.
                        transitions_sequence[i] = transition.tid
                        previous_transition = transition
                        state = transition.state_out
                        if not history in transition.p:
                            transition.p[history] = 0
                        transition.p[history] += 1
                        break
                if not found and previous_transition != None:
                    # Stationary case: the id belongs to the transition just
                    # taken (e.g. a generic edge absorbing repeated words).
                    if tid in previous_transition.transitions_ids:
                        found = True
                        transitions_sequence[i] = previous_transition.tid
                        if not history in previous_transition.p:
                            previous_transition.p[history] = 0
                        previous_transition.p[history] += 1
                if not found:
                    break
        def normalize_probabilities(state, acc):
            # Turn per-history counts into per-transition probabilities.
            # NOTE(review): normalisation is per transition, not across
            # competing transitions leaving the same state — confirm intended.
            for transition in state.transitions_in:
                total = 0
                for history, count in transition.p.items():
                    total += count
                for history in transition.p.keys():
                    transition.p[history] /= total
            return acc
        self.iterate_states(normalize_probabilities)
    def build(self):
        """Build and return an executable and lightweight automaton
        """
        self.compute_transition_probability()
        # One shared id for all the "*" self-loops added below.
        stationary_transition_id = self.transitionCounter
        self.transitionCounter += 1
        def build_state(state, automaton):
            automaton.add_state(state.id)
            for transition in state.transitions_in:
                automaton.add_state(transition.state_in.id)
                automaton.add_transition(
                    transition.word,
                    transition.state_in.id,
                    state.id,
                    transition.tid,
                    transition.p,
                )
                if transition.word == "*":
                    # Generic edges also get a self-loop so a run of unknown
                    # words can be absorbed by a single state.
                    automaton.add_transition(
                        transition.word,
                        state.id,
                        state.id,
                        stationary_transition_id,
                        transition.p,
                    )
            return automaton
        return self.iterate_states(build_state, Automaton(self.order))
    def fit(self, threshold: float = 0.2, min_freq: float = 0, word_black_list=[]):
        """Fit the automaton
        Parameters
        ----------
        threshold : float
            The frequency threshold, each pattern should have a frequency higher than this threshold
        min_freq: float
            The minimum frequency, every transition with lower frequency will be discarded. Set 0
            to keep all transitions.
        word_black_list: str[]
            Initialize the blacklist of words. Words with frequency higher than the threshold but that
            are not part of the hidden template should be added to the blacklist if known.
        """
        # Pipeline: merge exact words, generalise rare states to variables,
        # merge again on variables, then clean up the graph structure.
        self.reduce(threshold, word_black_list=word_black_list)
        self.make_state_generic(threshold)
        self.reduce(threshold, variables=True)
        self.simplify_generic_chains()
        self.merge_trivial_groups()
        if min_freq > 0:
            self.remove_rare_transitions(min_freq)
    def fit_build(self, threshold: float = 0.2, min_freq: float = 0, word_black_list=[]):
        """Fit and return an executable automaton
        Parameters
        ----------
        threshold : float
            The frequency threshold, each pattern should have a frequency higher than this threshold
        min_freq: float
            The minimum frequency, every transition with lower frequency will be discarded. Set 0
            to keep all transitions.
        word_black_list: str[]
            Initialize the blacklist of words. Words with frequency higher than the threshold but that
            are not part of the hidden template should be added to the blacklist if known.
        """
        self.fit(threshold, min_freq, word_black_list)
        return self.build()
    def graph(self):
        """Return a networkx graph object that correspond to the automaton
        """
        G = nx.DiGraph()
        done = set()
        stack = [self.stop_state]
        while len(stack) > 0:
            state = stack.pop()
            done.add(state.id)
            # A state can be pushed twice before it is marked done; re-adding
            # an existing edge only rewrites its label, which is harmless.
            for t in state.transitions_in:
                G.add_edge(
                    t.state_in.id, t.state_out.id, label=t.word + " - " + str(t.weight)
                )
                if not t.state_in.id in done:
                    stack.append(t.state_in)
        return G
    def pprint(self):
        """Plot a graphic representation of the automaton
        """
        G = self.graph()
        fig = plt.figure(figsize=(14, 12))
        pos = nx.kamada_kawai_layout(G)
        nx.draw(G, pos, with_labels=True, alpha=0.6)
        labels = nx.get_edge_attributes(G, "label")
        nx.draw_networkx_edge_labels(G, pos, edge_labels=labels)
    def print(self):
        """Print the transitions in string format
        """
        def print_transitions(state, acc):
            for t in state.transitions_in:
                print(t)
            return acc
        self.iterate_states(print_transitions)
import matplotlib.pyplot as plt
from autoparse.automaton import preprocess, Automaton
class Transition:
def __init__(
self,
word: str,
state_in,
state_out,
transition_ids=[],
weight: int = 1,
variables={},
):
self.word = word
self.state_in = state_in
self.state_out = state_out
self.weight = weight
self.variables = variables
self.transitions_ids = set(transition_ids)
self.tid = next(iter(self.transitions_ids))
self.p = {}
def make_generic(self):
generic = "*"
best_count = 0
for var, count in self.variables.items():
if count > best_count:
generic = "<$" + var + ">"
best_count = count
self.word = generic
return generic
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return (
self.word == other.word
and self.state_in == other.state_in
and self.state_out == other.state_out
)
def __hash__(self):
return hash(str(self.state_in) + str(self.state_out))
def __repr__(self):
return " {:6d} --{:^20}--> {:6d} ".format(
self.state_in.id, self.word, self.state_out.id
)
class TransitionSet:
"""A set implementation that add weights when adding a transition multiple times"""
def __init__(self):
self._dict = {}
def __contains__(self, item):
return item in self._dict
def __iter__(self):
return self._dict.keys().__iter__()
def __len__(self):
return len(self._dict)
def __repr__(self):
return self._dict.__repr__()
def _add(self, item):
"""Do not cumulate weight"""
self._dict[item] = item
def add(self, item):
if not item in self._dict:
self._dict[item] = item
else:
transition = self._dict[item]
transition.weight += item.weight
transition.transitions_ids |= item.transitions_ids
for var in item.variables:
if not var in transition.variables:
transition.variables[var] = 0
transition.variables[var] += item.variables[var]
def remove(self, item):
if item in self._dict:
del self._dict[item]
class State:
def __init__(self, node_id: int, word: str):
self.id = node_id
self.transitions_in = TransitionSet()
self.transitions_out = TransitionSet()
self.word = word
@property
def weight(self):
total_weight = 0
for t in self.transitions_in:
total_weight += t.weight
return total_weight
@property
def child(self):
for t in self.transitions_out:
yield t.state_out
@property
def parents(self):
for t in self.transitions_in:
yield t.state_in
def merge_on(self, state):
transitions_to_delete = []
for t in self.transitions_in:
new_transition = Transition(
t.word,
t.state_in,
state,
transition_ids=t.transitions_ids,
weight=t.weight,
variables=t.variables,
)
state.add_transition_in(new_transition)
transitions_to_delete.append(t)
for t in self.transitions_out:
new_transition = Transition(
t.word,
state,
t.state_out,
transition_ids=t.transitions_ids,
weight=t.weight,
variables=t.variables,
)
state.add_transition_out(new_transition)
transitions_to_delete.append(t)
for t in transitions_to_delete:
t.state_out.remove_transition_in(t)
def generify(self, limit_weight):
if self.weight <= limit_weight:
self.word = "*"
for t in self.transitions_in:
generic = t.make_generic()
if generic != "*":
self.word = generic
def get_generic_ancestors(self):
"""return the last ancestors connected by generics transion and drop those transitions"""
if self.id == 0 or not self.word == "*":
return [self], []
else:
ancestors = []
intermediary_states = [self]
for transition in self.transitions_in:
new_ancestors, new_intermediary_states = (
transition.state_in.get_generic_ancestors()
)
ancestors += new_ancestors
intermediary_states += new_intermediary_states
return ancestors, intermediary_states
def merge_generic_parents(self):
if self.id == 0 or not self.word == "*":
return
ancestors, intermediary_states = self.get_generic_ancestors()
transitions_ids = set()
for state in intermediary_states:
transitions_to_remove = list(state.transitions_in)
for transition in transitions_to_remove:
transitions_ids |= transition.transitions_ids
state.remove_transition_in(transition)
for ancestor in ancestors:
self.add_transition_in(
Transition(self.word, ancestor, self, transitions_ids)
)
def get_trivial_group(self):
if len(self.transitions_in) <= 1:
return set()
merge_group = set()
for parent in self.parents:
if len(parent.transitions_out) == 1:
merge_group.add(parent.id)
return merge_group
def add_transition_in(self, transition):
self.transitions_in.add(transition)
transition.state_in.__add_transition_out(transition)
def add_transition_out(self, transition):
self.transitions_out.add(transition)
transition.state_out.__add_transition_in(transition)
def remove_transition_in(self, transition):
self.transitions_in.remove(transition)
transition.state_in.__remove_transition_out(transition)
def remove_transition_out(self, transition):
self.transitions_out.remove(transition)
transition.state_out.__remove_transition_in(transition)
def __add_transition_in(self, transition):
self.transitions_in._add(transition)
def __add_transition_out(self, transition):
self.transitions_out._add(transition)
def __remove_transition_in(self, transition):
self.transitions_in.remove(transition)
def __remove_transition_out(self, transition):
self.transitions_out.remove(transition)
class AutomatonFitter:
"""A class that fit an automaton on a list of documents
The documents are assumed to be produced by a few numbers of templates that includes both
fixed and variable words, produced with str.format() for instance. The fitted automaton
will guess which transitions hold variables and can extract them from new documents.
Methods
-------
fit:
Fit the automaton
build:
Return an executable automaton, should be called after fit
pprint:
Pretty printer using Networkx and matplotlib
print:
Regular printer in string format
"""
def __init__(self, docs, variables={}, order: int = 3):
"""Initialize the automaton
Parameters
----------
docs : str[]
Documents to fit the automaton on
variables: {str: str[]}
keys are the name of variables (e.g. city) an values list of examples (e.g. ["Paris", "London", ...])
order: int
The memory size of the internal markov model used to predict path probability.
"""
self.nb_docs = len(docs)
self.start_state = State(0, "<start>")
self.stop_state = State(1, "<stop>")
self.states = {0: self.start_state, 1: self.stop_state}
self.stateCounter = 2
self.transitionCounter = 1
self.transitions_sequences = []
self.order = order
for var in variables.keys():
variables[var] = set([v.lower() for v in variables[var]])
for doc in docs:
transition_sequence = []
previous = self.stop_state
doc = preprocess(doc)
doc = " ".join(doc.split("/"))
for word in doc.split(" ")[::-1]:
state = self.create_state(word)
var_count = self.get_variables(previous.word, variables)
transition_out, tid = self.create_transition(state, previous, var_count)
transition_sequence.append(tid)
state.add_transition_out(transition_out)
self.states[state.id] = state
previous = state
transition_out, tid = self.create_transition(self.start_state, state, {})
transition_sequence.append(tid)
self.start_state.add_transition_out(transition_out)
transition_sequence = (transition_sequence + [0] * order)[::-1]
self.transitions_sequences.append(transition_sequence)
@staticmethod
def get_variables(word, variables):
"""
Return the list of variables this word is matching based on examples
word: string
variables: {string: set()}
return: {string: int}
"""
var_count = {}
for var, examples in variables.items():
if word in examples:
var_count[var] = 1
return var_count
def create_transition(self, state_in, state_out, variables_count):
tid = self.transitionCounter
new_transition = Transition(
state_out.word, state_in, state_out, [tid], variables=variables_count
)
self.transitionCounter += 1
return new_transition, tid
def create_state(self, word):
new_state = State(self.stateCounter, word)
self.stateCounter += 1
return new_state
def iterate_states(self, f, acc=None):
"""Apply `acc = f(state, acc)` on each state, return acc"""
done = set()
stack = [self.stop_state]
while len(stack) > 0:
state = stack.pop()
if state.id in done:
continue
done.add(state.id)
acc = f(state, acc)
stack.extend(state.parents)
return acc
def count_word(self):
def add_word(state, word_count):
if not state.word in word_count:
word_count[state.word] = 0
word_count[state.word] += 1
return word_count
return self.iterate_states(add_word, {})
def count_variables(self):
def add_vars(state, vars_count):
for t in state.transitions_in:
for var, count in t.variables.items():
var = "<$" + var + ">"
if not var in vars_count:
vars_count[var] = 0
vars_count[var] += count
return vars_count
return self.iterate_states(add_vars, {})
def make_state_generic(self, threshold: float = 0):
limit_weight = threshold * self.nb_docs
def generify(state, limit_weight):
state.generify(limit_weight)
return limit_weight
self.iterate_states(generify, limit_weight)
def simplify_generic_chains(self):
def merge_generics(state, acc):
state.merge_generic_parents()
return acc
self.iterate_states(merge_generics)
def merge_trivial_groups(self):
def trivial_group(state, group_list):
group_list.append(state.get_trivial_group())
return group_list
merge_group_list = self.iterate_states(trivial_group, [])
for group in merge_group_list:
self.merge_group(group, 0)
def remove_rare_transitions(self, freq: float):
limit_weight = freq * self.nb_docs
def remove_rare_out_transitions(state, limit_weight):
transitions_to_remove = []
for t in state.transitions_out:
if t.weight <= limit_weight:
transitions_to_remove.append(t)
for t in transitions_to_remove:
state.remove_transition_out(t)
return limit_weight
self.iterate_states(remove_rare_out_transitions, limit_weight)
def merge_group(self, merge_group, threshold):
if (
not len(merge_group) >= 2
or not len(merge_group) >= threshold * self.nb_docs
):
return False
merge_state = self.states[next(iter(merge_group))]
merge_group.remove(merge_state.id)
def merge(state, acc):
if state.id in merge_group:
state.merge_on(merge_state)
return acc
self.iterate_states(merge)
return True
def find_merge_group(self, word: str):
incompatibles = set()
merge_group = set()
stack = [(self.stop_state, set())] # (state, set of descendants)
visited = {} # state -> [nb_visit, set of descendants]
while len(stack) > 0:
state, descendants = stack.pop()
new_descendant = set()
if state.word == word:
new_descendant.add(state.id)
merge_group.add(state.id)
for descendant_id in descendants:
incompatibles.add((descendant_id, state.id))
incompatibles.add((state.id, descendant_id))
if not state in visited:
visited[state] = [0, set()]
visited[state][0] += 1
visited[state][1] |= descendants
visited[state][1] |= new_descendant
if visited[state][0] >= len(state.transitions_out):
descendants = visited[state][1]
for parent in state.parents:
stack.append((parent, descendants))
return self.remove_incompatibles(merge_group, incompatibles)
def remove_incompatibles(self, merge_group, incompatibles):
incompatible_count = {}
for state1, state2 in incompatibles:
if not state1 in incompatible_count:
incompatible_count[state1] = 0
if not state2 in incompatible_count:
incompatible_count[state2] = 0
incompatible_count[state1] += 1
incompatible_count[state2] += 1
for state1, state2 in incompatibles:
if state1 in merge_group and state2 in merge_group:
if incompatible_count[state1] > incompatible_count[state2]:
merge_group.remove(state1)
else:
merge_group.remove(state2)
return merge_group
def merge_word(self, word: str, threshold: float = 0):
return self.merge_group(self.find_merge_group(word), threshold)
def reduce(self, threshold: float = 0, variables: bool = False, word_black_list=[]):
"""
Merge either on words or on variables. Should merge on variable only after
`self.make_state_generic` has been called.
"""
count_function = self.count_word
if variables == True:
count_function = self.count_variables
done = False
black_list = set([w.lower() for w in word_black_list])
for word, nb_occurrences in self.count_word().items():
if nb_occurrences < threshold * self.nb_docs:
black_list.add(word)
while not done:
transition_count = [
(word, nb_occurrences)
for word, nb_occurrences in count_function().items()
if word not in black_list
]
if len(transition_count) == 0:
done = True
break
transition_count.sort(key=lambda x: x[1])
word, count = transition_count.pop()
if count > 1:
success = self.merge_word(word, threshold)
if not success:
black_list.add(word)
else:
done = True
def compute_transition_probability(self):
for transitions_sequence in self.transitions_sequences:
previous_transition = None
state = self.start_state
for i in range(self.order, len(transitions_sequence)):
tid = transitions_sequence[i]
history = tuple(transitions_sequence[i - self.order : i])
found = False
for transition in state.transitions_out:
if tid in transition.transitions_ids:
found = True
transitions_sequence[i] = transition.tid
previous_transition = transition
state = transition.state_out
if not history in transition.p:
transition.p[history] = 0
transition.p[history] += 1
break
if not found and previous_transition != None:
if tid in previous_transition.transitions_ids:
found = True
transitions_sequence[i] = previous_transition.tid
if not history in previous_transition.p:
previous_transition.p[history] = 0
previous_transition.p[history] += 1
if not found:
break
def normalize_probabilities(state, acc):
for transition in state.transitions_in:
total = 0
for history, count in transition.p.items():
total += count
for history in transition.p.keys():
transition.p[history] /= total
return acc
self.iterate_states(normalize_probabilities)
def build(self):
"""Build and return an executable and lightweight automaton
"""
self.compute_transition_probability()
stationary_transition_id = self.transitionCounter
self.transitionCounter += 1
def build_state(state, automaton):
automaton.add_state(state.id)
for transition in state.transitions_in:
automaton.add_state(transition.state_in.id)
automaton.add_transition(
transition.word,
transition.state_in.id,
state.id,
transition.tid,
transition.p,
)
if transition.word == "*":
automaton.add_transition(
transition.word,
state.id,
state.id,
stationary_transition_id,
transition.p,
)
return automaton
return self.iterate_states(build_state, Automaton(self.order))
def fit(self, threshold: float = 0.2, min_freq: float = 0, word_black_list=[]):
"""Fit the automaton
Parameters
----------
threshold : float
The frequency threshold, each pattern should have a frequency higher than this threshold
min_freq: float
The minimum frequency, every transition with lower frequency will be discarded. Set 0
to keep all transitions.
word_black_list: str[]
Initialize the blacklist of words. Words with frequency higher than the threshold but that
are not part of the hidden template should be added to the blacklist if known.
"""
self.reduce(threshold, word_black_list=word_black_list)
self.make_state_generic(threshold)
self.reduce(threshold, variables=True)
self.simplify_generic_chains()
self.merge_trivial_groups()
if min_freq > 0:
self.remove_rare_transitions(min_freq)
def fit_build(self, threshold: float = 0.2, min_freq: float = 0, word_black_list=[]):
"""Fit and return an executable automaton
Parameters
----------
threshold : float
The frequency threshold, each pattern should have a frequency higher than this threshold
min_freq: float
The minimum frequency, every transition with lower frequency will be discarded. Set 0
to keep all transitions.
word_black_list: str[]
Initialize the blacklist of words. Words with frequency higher than the threshold but that
are not part of the hidden template should be added to the blacklist if known.
"""
self.fit(threshold, min_freq, word_black_list)
return self.build()
def graph(self):
"""Return a networkx graph object that correspond to the automaton
"""
G = nx.DiGraph()
done = set()
stack = [self.stop_state]
while len(stack) > 0:
state = stack.pop()
done.add(state.id)
for t in state.transitions_in:
G.add_edge(
t.state_in.id, t.state_out.id, label=t.word + " - " + str(t.weight)
)
if not t.state_in.id in done:
stack.append(t.state_in)
return G
def pprint(self):
"""Plot a graphic representation of the automaton
"""
G = self.graph()
fig = plt.figure(figsize=(14, 12))
pos = nx.kamada_kawai_layout(G)
nx.draw(G, pos, with_labels=True, alpha=0.6)
labels = nx.get_edge_attributes(G, "label")
nx.draw_networkx_edge_labels(G, pos, edge_labels=labels)
def print(self):
"""Print the transitions in string format
"""
def print_transitions(state, acc):
for t in state.transitions_in:
print(t)
return acc
self.iterate_states(print_transitions) | 0.792223 | 0.303719 |
import os
import PIL.Image
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from keras.applications import resnet50
from keras.applications.resnet50 import ResNet50
from keras.backend import set_session
from scipy.stats import truncnorm
# Initialize the module
# BigGAN-256 generator from TF-Hub. This is TF1 graph-mode code: the default
# graph is reset before the hub module's ops are added to it.
module_path = 'https://tfhub.dev/deepmind/biggan-256/2'
tf.reset_default_graph()
module = hub.Module(module_path)
# One placeholder per module input: z (latent vector), y (class one-hot),
# truncation (sampling truncation scalar).
inputs = {k: tf.placeholder(v.dtype, v.get_shape().as_list(), k)
          for k, v in module.get_input_info_dict().items()}
output = module(inputs)
input_z = inputs['z']
input_y = inputs['y']
input_trunc = inputs['truncation']
dim_z = input_z.shape.as_list()[1]  # latent dimensionality (asserted 140 below)
vocab_size = input_y.shape.as_list()[1]  # number of ImageNet classes
# Initialize TensorFlow session
initializer = tf.global_variables_initializer()
graph = tf.get_default_graph()
with graph.as_default():
    sess = tf.Session()
    sess.run(initializer)
    set_session(sess)  # share this session with Keras (ResNet50 is built below)
# Categories found here: https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a
seed = np.random.randn(1, 140) # (1, 140)
resolution = 256
assert seed.shape == (1, 140)
def featurize(image_node):
    """Map a generator output tensor in [-1, 1] to ResNet50 class scores.

    Builds an ImageNet-pretrained ResNet50 on top of the (rescaled, resized)
    image node and returns the tensor feeding the network's final op —
    presumably the pre-softmax logits; confirm against the Keras graph.
    """
    image_node = (image_node + 1.0) / 2.0 * 255.0  # [-1, 1] -> [0, 255]
    input_tensor = resnet50.preprocess_input(tf.image.resize_images(image_node, [224, 224]))
    model = ResNet50(include_top=True, weights='imagenet', input_shape=(224, 224, 3), input_tensor=input_tensor)
    return model.output.op.inputs[0]
# Reconstruction graph: pixel-space MSE ("loss") and ResNet feature-space MSE
# ("deep_loss") between the generator output and a target image placeholder.
# Nodes are named so a later inversion step can fetch them after restoring.
target = tf.placeholder(tf.float32, (None, resolution, resolution, 3), name="target")
output_node = tf.identity(output, name="output")
deep_output = featurize(output_node)
deep_target = tf.identity(featurize(target), name="deep_target_logits")
loss = tf.reduce_mean(tf.square((output_node - target)), name="loss")
print("output_node")
print(output_node)
print("target")
print(target)
deep_loss = tf.reduce_mean(tf.square((deep_output - deep_target)), name="deep_loss")
print(input_y.name, input_z.name, loss.name, target.name, output_node.name)
# Persist the graph definition and weights for the inversion step.
saver = tf.train.Saver()
saver.export_meta_graph("checkpoints/generator_test_biggan_1.meta")
saver.save(sess, "checkpoints/generator_test_biggan_1.ckpt")
# Quick sanity check: Network classifies tiger correctly
def toNetworkSpace(img):
    """Resize a PIL image to the generator resolution and rescale its pixels
    from [0, 255] into the network's [-0.5, 0.5] range."""
    resized = img.resize((resolution, resolution), PIL.Image.ANTIALIAS)
    pixels = np.array(resized)
    return (pixels - 255.0 / 2.0) / 255.0
# Load the test image in network space, run it through the ResNet head, and
# verify the predicted class: index 292 is "tiger" in the ImageNet labels.
target_img = np.array([toNetworkSpace(PIL.Image.open(os.path.join("test_images", "tiger.jpg")))])
[out] = sess.run([deep_target], {target: target_img})
assert np.argmax(out) == 292 # ImageNet class 292 == "tiger"
import PIL.Image
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from keras.applications import resnet50
from keras.applications.resnet50 import ResNet50
from keras.backend import set_session
from scipy.stats import truncnorm
# Initialize the module
module_path = 'https://tfhub.dev/deepmind/biggan-256/2'
tf.reset_default_graph()
module = hub.Module(module_path)
inputs = {k: tf.placeholder(v.dtype, v.get_shape().as_list(), k)
for k, v in module.get_input_info_dict().items()}
output = module(inputs)
input_z = inputs['z']
input_y = inputs['y']
input_trunc = inputs['truncation']
dim_z = input_z.shape.as_list()[1]
vocab_size = input_y.shape.as_list()[1]
# Initialize TensorFlow session
initializer = tf.global_variables_initializer()
graph = tf.get_default_graph()
with graph.as_default():
sess = tf.Session()
sess.run(initializer)
set_session(sess)
# Categories found here: https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a
seed = np.random.randn(1, 140) # (1, 140)
resolution = 256
assert seed.shape == (1, 140)
def featurize(image_node):
    """Map a generator-space image tensor ([-1, 1] pixels) to ResNet50 logits."""
    pixels = (image_node + 1.0) / 2.0 * 255.0
    resized = tf.image.resize_images(pixels, [224, 224])
    preprocessed = resnet50.preprocess_input(resized)
    classifier = ResNet50(include_top=True, weights='imagenet',
                          input_shape=(224, 224, 3), input_tensor=preprocessed)
    # Take the pre-softmax logits feeding the final activation op.
    return classifier.output.op.inputs[0]
# Placeholder for the image being inverted (batch of RGB images).
target = tf.placeholder(tf.float32, (None, resolution, resolution, 3), name="target")
output_node = tf.identity(output, name="output")
deep_output = featurize(output_node)
# Classifier logits of the target image, used as the perceptual-matching target.
deep_target = tf.identity(featurize(target), name="deep_target_logits")
# Pixel-space MSE between generator output and the target image.
loss = tf.reduce_mean(tf.square((output_node - target)), name="loss")
print("output_node")
print(output_node)
print("target")
print(target)
# Feature-space (logit) MSE between generator output and the target.
deep_loss = tf.reduce_mean(tf.square((deep_output - deep_target)), name="deep_loss")
# Tensor names are printed so later inversion scripts can look them up by name.
print(input_y.name, input_z.name, loss.name, target.name, output_node.name)
saver = tf.train.Saver()
# Persist both the graph definition and the variable values for later runs.
saver.export_meta_graph("checkpoints/generator_test_biggan_1.meta")
saver.save(sess, "checkpoints/generator_test_biggan_1.ckpt")
# Quick sanity check: Network classifies tiger correctly
def toNetworkSpace(img):
    """Resize *img* to the generator resolution and map pixels into [-0.5, 0.5).

    Matches the target placeholder's expected value range.
    """
    # BUG FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS has been
    # the identical filter since Pillow 2.7, so this is backward-compatible.
    img = np.array(img.resize((resolution, resolution), PIL.Image.LANCZOS))
    return (img - (255.0 / 2.0)) / 255.0
# Load the test image and run it through the classifier head.
target_img = np.array([toNetworkSpace(PIL.Image.open(os.path.join("test_images", "tiger.jpg")))])
[out] = sess.run([deep_target], {target: target_img})
# class index 292 is 'tiger' in the standard ImageNet label map — confirm with the label gist above
assert np.argmax(out) == 292 #Check that the class is tigery
from lib import script
import random
rand = random.randint
def warp_uptown_east(pc):
    """Warp menu for the Uptown east drawbridge."""
    choice = script.select(pc, ("enter", "north", "south", "west", "cancel"), "warp")
    # Destination table: map id plus inclusive x/y spawn ranges.
    destinations = {
        1: (10023000, (217, 218), (126, 129)),  # アップタウン
        2: (10023400, (126, 129), (29, 32)),    # アップタウン北可動橋
        3: (10023300, (126, 129), (224, 227)),  # アップタウン南可動橋
        4: (10023200, (29, 32), (126, 129)),    # アップタウン西可動橋
    }
    if choice in destinations:
        map_id, x_range, y_range = destinations[choice]
        script.warp(pc, map_id, rand(*x_range), rand(*y_range))
def warp_uptown_west(pc):
    """Warp menu for the Uptown west drawbridge."""
    choice = script.select(pc, ("enter", "north", "east", "south", "cancel"), "warp")
    destinations = {
        1: (10023000, (37, 38), (126, 129)),    # アップタウン
        2: (10023400, (126, 129), (29, 32)),    # アップタウン北可動橋
        3: (10023100, (224, 227), (126, 129)),  # アップタウン東可動橋
        4: (10023300, (126, 129), (224, 227)),  # アップタウン南可動橋
    }
    if choice in destinations:
        map_id, x_range, y_range = destinations[choice]
        script.warp(pc, map_id, rand(*x_range), rand(*y_range))
def warp_uptown_south(pc):
    """Warp menu for the Uptown south drawbridge."""
    choice = script.select(pc, ("enter", "north", "east", "west", "cancel"), "warp")
    destinations = {
        1: (10023000, (126, 129), (37, 38)),    # アップタウン
        2: (10023400, (126, 129), (29, 32)),    # アップタウン北可動橋
        3: (10023100, (224, 227), (126, 129)),  # アップタウン東可動橋
        4: (10023200, (29, 32), (126, 129)),    # アップタウン西可動橋
    }
    if choice in destinations:
        map_id, x_range, y_range = destinations[choice]
        script.warp(pc, map_id, rand(*x_range), rand(*y_range))
def warp_uptown_north(pc):
    """Warp menu for the Uptown north drawbridge."""
    choice = script.select(pc, ("enter", "east", "south", "west", "cancel"), "warp")
    # NOTE(review): the 'enter' spawn (y 37-38) is identical to the *south*
    # bridge's — possibly a copy-paste; behavior preserved here, confirm.
    destinations = {
        1: (10023000, (126, 129), (37, 38)),    # アップタウン
        2: (10023100, (224, 227), (126, 129)),  # アップタウン東可動橋
        3: (10023300, (126, 129), (224, 227)),  # アップタウン南可動橋
        4: (10023200, (29, 32), (126, 129)),    # アップタウン西可動橋
    }
    if choice in destinations:
        map_id, x_range, y_range = destinations[choice]
        script.warp(pc, map_id, rand(*x_range), rand(*y_range))
def warp_guild_lobby(pc):
    """Warp menu for the guild lobby floors (1F-5F)."""
    choice = script.select(pc, ("1f", "2f", "3f", "4f", "5f", "cancel"), "warp")
    # Map ids for floors 1F through 5F, in menu order.
    floor_maps = (30110000, 30111000, 30112000, 30113000, 30114000)
    if 1 <= choice <= 5:
        script.warp(pc, floor_maps[choice - 1], rand(12, 14), rand(14, 16))
def warp_10000700(pc):
    """Play the warp effect, pause, then send pc to the Icy Island tunnel."""
    script.effect(pc, 4023)
    script.wait(pc, 1000)  # 1000 units (presumably ms) — confirm against script.wait
    script.warp(pc, 20015000, 9, 36) #アイシー島への地下通路
def warp_10000817(pc):
    """Destination menu for the Fushigi-dan headquarters warp."""
    choice = script.select(pc, ("中立の島", "海賊の島", "聖女の島", "やっぱやめた"), "どこにする?")
    # Fixed (map_id, x, y) destinations; option 4 ("やっぱやめた") cancels.
    targets = {
        1: (10054100, 224, 86),  # フシギ団の砦(北部)
        2: (10054100, 123, 77),  # フシギ団の砦(北部)
        3: (10054000, 72, 140),  # フシギ団の砦
    }
    if choice in targets:
        script.warp(pc, *targets[choice])
def warp_10001723(pc):
    """Whale-mouth rope event: describe the rope, then optionally climb it."""
    # BUG FIX: str.join takes a single iterable argument; the original passed
    # four separate strings, which raises TypeError at runtime.
    script.say(pc, "".join((
        "上にあるクジラの口まで$R;",
        "ロープが伸びている…$R;",
        "$R伝って登れば、$R;",
        "クジラの口の中に入れそうだ。$R;",
    )), "warp")
    result = script.select(pc, ("登らない", "登ってみる"), "登る?")
    if result == 2:
        script.warp(pc, 21190000, 32, 184) #口内淵
# Event-id dispatch table.  Each value is either a handler callable, a fixed
# (map_id, x, y) destination, or a ranged (map_id, x1, y1, x2, y2) destination;
# see main() below for how the tuples are interpreted.
ID = {
	10000003:	warp_uptown_east, #アップタウン東可動橋
	10000013:	warp_uptown_west, #アップタウン西可動橋
	10000023:	warp_uptown_south, #アップタウン南可動橋
	10000033:	warp_uptown_north, #アップタウン北可動橋
	10000164:	warp_guild_lobby, #ギルド元宮ロビー1F
	10000165:	warp_guild_lobby, #ギルド元宮ロビー2F
	10000166:	warp_guild_lobby, #ギルド元宮ロビー3F
	10000167:	warp_guild_lobby, #ギルド元宮ロビー4F
	10000168:	warp_guild_lobby, #ギルド元宮ロビー5F
	10000228:	(30113000, 25, 13), #アルケミストギルド→ギルド元宮ロビー4F
	10000229:	(30113000, 1, 13), #マリオネストギルド→ギルド元宮ロビー4F
	10000230:	(30113000, 13, 25), #レンジャーギルド→ギルド元宮ロビー4F
	10000231:	(30113000, 13, 1), #マーチャントギルド→ギルド元宮ロビー4F
	10000432:	(30020001, 3, 5), #イストー岬→民家
	10000600:	(30010001, 3, 5), #ノーザンプロムナード→ノーザン酒屋
	#10000624:	None,
	#10000632:	None,
	#10000634:	None,
	10000638:	(30170000, 3, 6), #永遠への北限→イグルー
	10000483:	(10051000, 96, 123), #アイシー島→永遠への北限
	10000700:	warp_10000700, #アイシー島への地下通路
	10000769:	(30077000, 8, 12), #アイアンシティ下層階→動力制御室
	10000817:	warp_10000817, #フシギ団本部
	10001317:	(30091001, 6, 15), #東アクロニア平原→東平原初心者学校
	10001318:	(10025000, 108, 123), #東平原初心者学校→東アクロニア平原
	10001319:	(30091002, 6, 15), #西アクロニア平原→西平原初心者学校
	10001320:	(10022000, 143, 133), #西平原初心者学校→西アクロニア平原
	10001321:	(30091003, 6, 15), #南アクロニア平原→南平原初心者学校
	10001322:	(10031000, 132, 121), #南平原初心者学校→南アクロニア平原
	10001323:	(30091004, 6, 15), #北アクロニア平原→北平原初心者学校
	10001324:	(30091004, 6, 15), #北平原初心者学校→北アクロニア平原
	10001723:	warp_10001723,
	12001118:	(30131001, 6, 1), #フシギ団の砦→フシギ団本部
}
def main(pc):
    """Entry point: dispatch pc.event_id through the ID table.

    A callable entry handles the warp itself; a 3-tuple is a fixed
    (map_id, x, y) destination; a 5-tuple is (map_id, x1, y1, x2, y2)
    with the spawn point randomized inside the ranges.
    """
    entry = ID[pc.event_id]  # unknown event ids raise KeyError, as before
    if callable(entry):
        entry(pc)
        return
    if len(entry) == 3:
        map_id, x, y = entry
    else:
        map_id = entry[0]
        x = random.randint(entry[1], entry[3])
        y = random.randint(entry[2], entry[4])
    script.warp(pc, map_id, x, y)
#Copyright (C) ゆとり鯖 All Rights Reserved. | script/site_packages/warp_event.py | from lib import script
import random
rand = random.randint
def warp_uptown_east(pc):
result = script.select(pc, ("enter", "north", "south", "west", "cancel"), "warp")
if result == 1:
script.warp(pc, 10023000, rand(217, 218), rand(126, 129)) #アップタウン
elif result == 2:
script.warp(pc, 10023400, rand(126, 129), rand(29, 32)) #アップタウン北可動橋
elif result == 3:
script.warp(pc, 10023300, rand(126, 129), rand(224, 227)) #アップタウン南可動橋
elif result == 4:
script.warp(pc, 10023200, rand(29, 32), rand(126, 129)) #アップタウン西可動橋
def warp_uptown_west(pc):
result = script.select(pc, ("enter", "north", "east", "south", "cancel"), "warp")
if result == 1:
script.warp(pc, 10023000, rand(37, 38), rand(126, 129)) #アップタウン
elif result == 2:
script.warp(pc, 10023400, rand(126, 129), rand(29, 32)) #アップタウン北可動橋
elif result == 3:
script.warp(pc, 10023100, rand(224, 227), rand(126, 129)) #アップタウン東可動橋
elif result == 4:
script.warp(pc, 10023300, rand(126, 129), rand(224, 227)) #アップタウン南可動橋
def warp_uptown_south(pc):
result = script.select(pc, ("enter", "north", "east", "west", "cancel"), "warp")
if result == 1:
script.warp(pc, 10023000, rand(126, 129), rand(37, 38)) #アップタウン
elif result == 2:
script.warp(pc, 10023400, rand(126, 129), rand(29, 32)) #アップタウン北可動橋
elif result == 3:
script.warp(pc, 10023100, rand(224, 227), rand(126, 129)) #アップタウン東可動橋
elif result == 4:
script.warp(pc, 10023200, rand(29, 32), rand(126, 129)) #アップタウン西可動橋
def warp_uptown_north(pc):
result = script.select(pc, ("enter", "east", "south", "west", "cancel"), "warp")
if result == 1:
script.warp(pc, 10023000, rand(126, 129), rand(37, 38)) #アップタウン
elif result == 2:
script.warp(pc, 10023100, rand(224, 227), rand(126, 129)) #アップタウン東可動橋
elif result == 3:
script.warp(pc, 10023300, rand(126, 129), rand(224, 227)) #アップタウン南可動橋
elif result == 4:
script.warp(pc, 10023200, rand(29, 32), rand(126, 129)) #アップタウン西可動橋
def warp_guild_lobby(pc):
result = script.select(pc, ("1f", "2f", "3f", "4f", "5f", "cancel"), "warp")
if result == 1:
script.warp(pc, 30110000, rand(12, 14), rand(14, 16)) #ギルド元宮ロビー1F
elif result == 2:
script.warp(pc, 30111000, rand(12, 14), rand(14, 16)) #ギルド元宮ロビー2F
elif result == 3:
script.warp(pc, 30112000, rand(12, 14), rand(14, 16)) #ギルド元宮ロビー3F
elif result == 4:
script.warp(pc, 30113000, rand(12, 14), rand(14, 16)) #ギルド元宮ロビー4F
elif result == 5:
script.warp(pc, 30114000, rand(12, 14), rand(14, 16)) #ギルド元宮ロビー5F
def warp_10000700(pc):
script.effect(pc, 4023)
script.wait(pc, 1000)
script.warp(pc, 20015000, 9, 36) #アイシー島への地下通路
def warp_10000817(pc):
result = script.select(pc, ("中立の島", "海賊の島", "聖女の島", "やっぱやめた"), "どこにする?")
if result == 1:
script.warp(pc, 10054100, 224, 86) #フシギ団の砦(北部)
elif result == 2:
script.warp(pc, 10054100, 123, 77) #フシギ団の砦(北部)
elif result == 3:
script.warp(pc, 10054000, 72, 140) #フシギ団の砦
def warp_10001723(pc):
    """Whale-mouth rope event: describe the rope, then optionally climb it."""
    # BUG FIX: str.join takes a single iterable argument; the original passed
    # four separate strings, which raises TypeError at runtime.
    script.say(pc, "".join((
        "上にあるクジラの口まで$R;",
        "ロープが伸びている…$R;",
        "$R伝って登れば、$R;",
        "クジラの口の中に入れそうだ。$R;",
    )), "warp")
    result = script.select(pc, ("登らない", "登ってみる"), "登る?")
    if result == 2:
        script.warp(pc, 21190000, 32, 184) #口内淵
ID = {
10000003: warp_uptown_east, #アップタウン東可動橋
10000013: warp_uptown_west, #アップタウン西可動橋
10000023: warp_uptown_south, #アップタウン南可動橋
10000033: warp_uptown_north, #アップタウン北可動橋
10000164: warp_guild_lobby, #ギルド元宮ロビー1F
10000165: warp_guild_lobby, #ギルド元宮ロビー2F
10000166: warp_guild_lobby, #ギルド元宮ロビー3F
10000167: warp_guild_lobby, #ギルド元宮ロビー4F
10000168: warp_guild_lobby, #ギルド元宮ロビー5F
10000228: (30113000, 25, 13), #アルケミストギルド→ギルド元宮ロビー4F
10000229: (30113000, 1, 13), #マリオネストギルド→ギルド元宮ロビー4F
10000230: (30113000, 13, 25), #レンジャーギルド→ギルド元宮ロビー4F
10000231: (30113000, 13, 1), #マーチャントギルド→ギルド元宮ロビー4F
10000432: (30020001, 3, 5), #イストー岬→民家
10000600: (30010001, 3, 5), #ノーザンプロムナード→ノーザン酒屋
#10000624: None,
#10000632: None,
#10000634: None,
10000638: (30170000, 3, 6), #永遠への北限→イグルー
10000483: (10051000, 96, 123), #アイシー島→永遠への北限
10000700: warp_10000700, #アイシー島への地下通路
10000769: (30077000, 8, 12), #アイアンシティ下層階→動力制御室
10000817: warp_10000817, #フシギ団本部
10001317: (30091001, 6, 15), #東アクロニア平原→東平原初心者学校
10001318: (10025000, 108, 123), #東平原初心者学校→東アクロニア平原
10001319: (30091002, 6, 15), #西アクロニア平原→西平原初心者学校
10001320: (10022000, 143, 133), #西平原初心者学校→西アクロニア平原
10001321: (30091003, 6, 15), #南アクロニア平原→南平原初心者学校
10001322: (10031000, 132, 121), #南平原初心者学校→南アクロニア平原
10001323: (30091004, 6, 15), #北アクロニア平原→北平原初心者学校
10001324: (30091004, 6, 15), #北平原初心者学校→北アクロニア平原
10001723: warp_10001723,
12001118: (30131001, 6, 1), #フシギ団の砦→フシギ団本部
}
def main(pc):
warp_info = ID[pc.event_id]
if callable(warp_info):
warp_info(pc)
return
map_id = warp_info[0]
if len(warp_info) == 3:
x = warp_info[1]
y = warp_info[2]
else:
x = random.randint(warp_info[1], warp_info[3])
y = random.randint(warp_info[2], warp_info[4])
script.warp(pc, map_id, x, y)
#Copyright (C) ゆとり鯖 All Rights Reserved. | 0.157785 | 0.250913 |
from asyncio import DatagramTransport
import json, yaml
from paramiko import SSHException
import requests
from server.utils.response_util import RET
from flask import jsonify, current_app, g
from typing import List
from flask import current_app, jsonify
from sqlalchemy.exc import IntegrityError, SQLAlchemyError
from sqlalchemy import or_, and_
from server import db, redis_client
from server.model.permission import ReScopeRole, Role, Scope
from server.utils.db import Insert, Precise, Like
from server.utils.redis_util import RedisKey
from server.model import ReUserGroup
class PermissionManager:
    """Create, persist, bind, and clean up per-item API permission scopes.

    Scopes are URI/action rules expanded from a YAML template and bound to
    roles (admin / default / personal) according to an item's permission
    type (public / group / org / person).
    """
    def get_api_list(self, table_name, path, item_id):
        """Expand the YAML scope template for *table_name* with *item_id*.

        Each template URI contains a format placeholder filled with the item
        id.  Returns ``(allow_list, deny_list)`` of scope dicts that differ
        only in their ``eft`` value and alias suffix.
        """
        with open(path, 'r', encoding='utf-8') as f:
            result = yaml.load(f.read(), Loader=yaml.FullLoader)
        allow_list = []
        deny_list = []
        result = result.get(table_name)
        for scope in result:
            allow_list.append({
                "uri": scope["uri"] % int(item_id),
                "alias": scope["alias"] + "_" + str(item_id) + "_allow",
                "act": scope["act"],
                "eft": "allow"
            })
            deny_list.append({
                "uri": scope["uri"] % int(item_id),
                "alias": scope["alias"] + "_" + str(item_id) + "_deny",
                "act": scope["act"],
                "eft": "deny"
            })
        return allow_list, deny_list
    def insert_scope(self, scope_datas):
        """Insert scope rows that do not already exist (matched by alias).

        Returns ``(scope_ids, get_scope_ids)``: ids of every inserted scope
        and the subset whose action is ``get``.  Database errors are logged
        and skipped so a single failure does not abort the batch.
        """
        scope_ids = []
        get_scope_ids = []
        for sdata in scope_datas:
            try:
                _scope = Scope.query.filter_by(alias=sdata['alias']).first()
                if not _scope:
                    scope_id = Insert(Scope, sdata).insert_id(Scope, '/scope')
                    scope_ids.append(scope_id)
                    if sdata["act"] == "get":
                        get_scope_ids.append(scope_id)
            except (IntegrityError, SQLAlchemyError) as e:
                current_app.logger.error(str(e))
                continue
        return scope_ids, get_scope_ids
    def generate(self, scope_datas_allow, scope_datas_deny, _data: dict, admin_only=False):
        """Persist scopes and bind them to the appropriate roles.

        For public/group/org items, every allow-scope is granted to the
        matching admin role and, unless *admin_only*, the read-only
        (``get``) allow-scopes are also granted to the matching default
        role.  The creator's personal role always receives every
        allow-scope.  Deny-scopes are inserted but not bound to any role
        here.
        """
        default_role_filter = []
        role_filter = []
        if _data["permission_type"] == "public":
            role_filter = [and_(
                Role.name == "admin",
                Role.type == "public",
            )]
            default_role_filter = [and_(
                Role.name == "default",
                Role.type == "public",
            )]
        elif _data["permission_type"] == "group":
            role_filter = [and_(
                Role.name == "admin",
                Role.type == "group",
                Role.group_id == int(_data["group_id"])
            )]
            default_role_filter = [and_(
                Role.name == "default",
                Role.type == "group",
                Role.group_id == int(_data["group_id"])
            )]
        elif _data["permission_type"] == "org":
            # Prefer the org cached in redis for the current user; fall back
            # to the explicit org_id carried in _data.
            org_id = int(redis_client.hget(RedisKey.user(g.gitee_id), 'current_org_id')) if redis_client.hget(
                RedisKey.user(g.gitee_id), 'current_org_id') else int(_data["org_id"])
            role_filter = [and_(
                Role.name == "admin",
                Role.type == "org",
                Role.org_id == org_id
            )]
            default_role_filter = [and_(
                Role.name == "default",
                Role.type == "org",
                Role.org_id == org_id
            )]
        scope_allow_ids, get_scope_allow_ids = self.insert_scope(scope_datas_allow)
        _, _ = self.insert_scope(scope_datas_deny)
        # NOTE(review): for permission_type == "person" both role filters stay
        # empty and only the personal-role binding at the bottom applies.
        if _data["permission_type"] != "person":
            default_role = Role.query.filter(*default_role_filter).first()
            if not default_role:
                return jsonify(error_code=RET.NO_DATA_ERR, error_msg="Role has not been exist")
            role = Role.query.filter(*role_filter).first()
            if not role:
                return jsonify(error_code=RET.NO_DATA_ERR, error_msg="Role has not been exist")
            if not admin_only:
                try:
                    for _id in get_scope_allow_ids:
                        scope_role_data = {
                            "scope_id": _id,
                            "role_id": default_role.id
                        }
                        Insert(ReScopeRole, scope_role_data).insert_id()
                except (SQLAlchemyError, IntegrityError) as e:
                    raise RuntimeError(str(e)) from e
            try:
                for _id in scope_allow_ids:
                    scope_role_data = {
                        "scope_id": _id,
                        "role_id": role.id
                    }
                    Insert(ReScopeRole, scope_role_data).insert_id()
            except (SQLAlchemyError, IntegrityError) as e:
                raise RuntimeError(str(e)) from e
        _role = Role.query.filter_by(name=str(g.gitee_id), type="person").first()
        if not _role:
            return jsonify(error_code=RET.NO_DATA_ERR, error_msg="Role has not been exist")
        try:
            for _id in scope_allow_ids:
                scope_role_data_creator = {
                    "scope_id": _id,
                    "role_id": _role.id
                }
                Insert(ReScopeRole, scope_role_data_creator).insert_id()
        except (SQLAlchemyError, IntegrityError) as e:
            raise RuntimeError(str(e)) from e
    def clean(self, uri_part, item_ids: List[int]):
        """Delete every scope whose URI contains ``uri_part + item_id``.

        NOTE(review): the substring LIKE match may also hit ids that share a
        prefix (e.g. '/item/1' matches '/item/10') — confirm the URI format
        guards against this.
        """
        try:
            for item_id in item_ids:
                filter_str = uri_part + str(item_id)
                filter_params = []
                filter_params.append(Scope.uri.like(f'%{filter_str}%'))
                scopes = Scope.query.filter(*filter_params).all()
                for scope in scopes:
                    db.session.delete(scope)
                    db.session.commit()
        except (SQLAlchemyError, IntegrityError) as e:
            raise RuntimeError(str(e)) from e
class GetAllByPermission:
    """Query helper that restricts *_table* rows to what the current user may see.

    Visibility: public rows, rows of the user's current org, rows of groups
    the user belongs to (if any), and the user's own person-scoped rows.
    The ``fuzz`` / ``precise`` / ``MultiCondition`` methods add extra column
    filters from a request dict before returning a jsonified row list.
    """

    def __init__(self, _table) -> None:
        self._table = _table
        current_org_id = redis_client.hget(RedisKey.user(g.gitee_id), 'current_org_id')
        # Base visibility clauses; a "group" clause is inserted below when the
        # user belongs to at least one group.  (The original built the whole
        # filter twice — once without and once with the group clause.)
        clauses = [
            self._table.permission_type == "public",
            and_(
                self._table.permission_type == "org",
                self._table.org_id == int(current_org_id)
            ),
            and_(
                self._table.permission_type == "person",
                self._table.org_id == int(current_org_id),
                self._table.creator_id == int(g.gitee_id)
            ),
        ]
        _re_user_groups = ReUserGroup.query.filter_by(
            user_gitee_id=int(g.gitee_id), org_id=int(current_org_id)
        ).all()
        if _re_user_groups:
            group_ids = [re_user_group.group_id for re_user_group in _re_user_groups]
            clauses.insert(2, and_(
                self._table.permission_type == "group",
                self._table.org_id == int(current_org_id),
                self._table.group_id.in_(group_ids)))
        self.filter_params = [or_(*clauses)]

    def _extend_filters(self, _data, build):
        """Append build(column, value) for every usable key in *_data*.

        Keys without a matching column, None values, and the permission meta
        keys ("permission_type", "group_id") are skipped — same rules the
        original repeated in each query method.
        """
        for key, value in _data.items():
            if hasattr(self._table, key) and value is not None and key not in ("permission_type", "group_id"):
                self.filter_params.append(build(getattr(self._table, key), value))

    def _query_all_response(self):
        """Run the accumulated filters and wrap the rows in the standard OK response."""
        tdata = self._table.query.filter(*self.filter_params).all()
        data = [dt.to_json() for dt in tdata] if tdata else []
        return jsonify(
            error_code=RET.OK,
            error_msg="OK!",
            data=data
        )

    def get_filter(self):
        """Return the raw filter list for callers composing their own query."""
        return self.filter_params

    def get(self):
        """Return all visible rows."""
        return self._query_all_response()

    def fuzz(self, _data):
        """Return visible rows matching *_data* with LIKE (substring) filters."""
        self._extend_filters(_data, lambda col, value: col.like("%{}%".format(value)))
        return self._query_all_response()

    def precise(self, _data):
        """Return visible rows matching *_data* with exact-equality filters."""
        self._extend_filters(_data, lambda col, value: col == value)
        return self._query_all_response()

    def MultiCondition(self, _data):
        """Return visible rows where each column is IN the given value list.

        Scalar values are wrapped into a one-element list, as before.
        (Name kept non-snake-case for backward compatibility with callers.)
        """
        self._extend_filters(
            _data,
            lambda col, value: col.in_(value if isinstance(value, list) else [value])
        )
        return self._query_all_response()

    def single(self, _data):
        """Return the first visible row matching *_data* exactly, or None."""
        self._extend_filters(_data, lambda col, value: col == value)
        return self._table.query.filter(*self.filter_params).first()
class PermissionItemsPool:
    """Partition *origin_pool* items by probing the permission-guarded API.

    For each item the pool issues an authenticated request to
    ``/<root_url>/<item.id>`` and classifies the item as allowed or denied
    based on whether the response carries an UNAUTHORIZE error code.
    """

    def __init__(self, origin_pool, namespace, act, auth):
        self.origin_pool = origin_pool
        self._root_url = "api/{}/{}".format(
            current_app.config.get("OFFICIAL_API_VERSION"),
            namespace
        )
        self.act = act    # HTTP method used for the probe request
        self.auth = auth  # Authorization header value forwarded as-is

    def _get_items(self, eft):
        """Return json dicts of items whose probe matches *eft* ('allow'/'deny').

        Per-item failures are logged and skipped so one bad item does not
        abort the whole scan.
        """
        return_data = []
        for _item in self.origin_pool:
            try:
                _url = "{}/{}".format(self._root_url, _item.id)
                _resp = requests.request(
                    method=self.act,
                    url="https://{}/{}".format(
                        current_app.config.get("SERVER_ADDR"),
                        _url
                    ),
                    headers={
                        'Content-Type': 'application/json;charset=utf8',
                        'Authorization': self.auth,
                    },
                    verify=True if current_app.config.get("CA_VERIFY") == "True" \
                        else current_app.config.get("SERVER_CERT_PATH")
                )
                if _resp.status_code != 200:
                    raise RuntimeError(_resp.text)
                # BUG FIX: json.loads raises ValueError/JSONDecodeError, not
                # AttributeError, so the original except clause never fired; and
                # the fallback assigned the bound method ``_resp.json`` instead
                # of calling it, which then crashed on ``.get``.
                try:
                    _output = json.loads(_resp.text)
                except ValueError:
                    try:
                        _output = _resp.json()
                    except ValueError as e:
                        raise RuntimeError(str(e)) from e
                # Item is "allowed" iff the probe did NOT come back unauthorized.
                if (_output.get("error_code") != RET.UNAUTHORIZE_ERR) == (eft == "allow"):
                    return_data.append(_item.to_json())
            except (SSHException, RuntimeError) as e:
                # logger.warn is a deprecated alias of logger.warning
                current_app.logger.warning(str(e))
                continue
        return return_data

    @property
    def allow_list(self):
        return self._get_items("allow")

    @property
    def deny_list(self):
        return self._get_items("deny")
import json, yaml
from paramiko import SSHException
import requests
from server.utils.response_util import RET
from flask import jsonify, current_app, g
from typing import List
from flask import current_app, jsonify
from sqlalchemy.exc import IntegrityError, SQLAlchemyError
from sqlalchemy import or_, and_
from server import db, redis_client
from server.model.permission import ReScopeRole, Role, Scope
from server.utils.db import Insert, Precise, Like
from server.utils.redis_util import RedisKey
from server.model import ReUserGroup
class PermissionManager:
def get_api_list(self, table_name, path, item_id):
with open(path, 'r', encoding='utf-8') as f:
result = yaml.load(f.read(), Loader=yaml.FullLoader)
allow_list = []
deny_list = []
result = result.get(table_name)
for scope in result:
allow_list.append({
"uri": scope["uri"] % int(item_id),
"alias": scope["alias"] + "_" + str(item_id) + "_allow",
"act": scope["act"],
"eft": "allow"
})
deny_list.append({
"uri": scope["uri"] % int(item_id),
"alias": scope["alias"] + "_" + str(item_id) + "_deny",
"act": scope["act"],
"eft": "deny"
})
return allow_list, deny_list
def insert_scope(self, scope_datas):
scope_ids = []
get_scope_ids = []
for sdata in scope_datas:
try:
_scope = Scope.query.filter_by(alias=sdata['alias']).first()
if not _scope:
scope_id = Insert(Scope, sdata).insert_id(Scope, '/scope')
scope_ids.append(scope_id)
if sdata["act"] == "get":
get_scope_ids.append(scope_id)
except (IntegrityError, SQLAlchemyError) as e:
current_app.logger.error(str(e))
continue
return scope_ids, get_scope_ids
def generate(self, scope_datas_allow, scope_datas_deny, _data: dict, admin_only=False):
default_role_filter = []
role_filter = []
if _data["permission_type"] == "public":
role_filter = [and_(
Role.name == "admin",
Role.type == "public",
)]
default_role_filter = [and_(
Role.name == "default",
Role.type == "public",
)]
elif _data["permission_type"] == "group":
role_filter = [and_(
Role.name == "admin",
Role.type == "group",
Role.group_id == int(_data["group_id"])
)]
default_role_filter = [and_(
Role.name == "default",
Role.type == "group",
Role.group_id == int(_data["group_id"])
)]
elif _data["permission_type"] == "org":
org_id = int(redis_client.hget(RedisKey.user(g.gitee_id), 'current_org_id')) if redis_client.hget(
RedisKey.user(g.gitee_id), 'current_org_id') else int(_data["org_id"])
role_filter = [and_(
Role.name == "admin",
Role.type == "org",
Role.org_id == org_id
)]
default_role_filter = [and_(
Role.name == "default",
Role.type == "org",
Role.org_id == org_id
)]
scope_allow_ids, get_scope_allow_ids = self.insert_scope(scope_datas_allow)
_, _ = self.insert_scope(scope_datas_deny)
if _data["permission_type"] != "person":
default_role = Role.query.filter(*default_role_filter).first()
if not default_role:
return jsonify(error_code=RET.NO_DATA_ERR, error_msg="Role has not been exist")
role = Role.query.filter(*role_filter).first()
if not role:
return jsonify(error_code=RET.NO_DATA_ERR, error_msg="Role has not been exist")
if not admin_only:
try:
for _id in get_scope_allow_ids:
scope_role_data = {
"scope_id": _id,
"role_id": default_role.id
}
Insert(ReScopeRole, scope_role_data).insert_id()
except (SQLAlchemyError, IntegrityError) as e:
raise RuntimeError(str(e)) from e
try:
for _id in scope_allow_ids:
scope_role_data = {
"scope_id": _id,
"role_id": role.id
}
Insert(ReScopeRole, scope_role_data).insert_id()
except (SQLAlchemyError, IntegrityError) as e:
raise RuntimeError(str(e)) from e
_role = Role.query.filter_by(name=str(g.gitee_id), type="person").first()
if not _role:
return jsonify(error_code=RET.NO_DATA_ERR, error_msg="Role has not been exist")
try:
for _id in scope_allow_ids:
scope_role_data_creator = {
"scope_id": _id,
"role_id": _role.id
}
Insert(ReScopeRole, scope_role_data_creator).insert_id()
except (SQLAlchemyError, IntegrityError) as e:
raise RuntimeError(str(e)) from e
def clean(self, uri_part, item_ids: List[int]):
try:
for item_id in item_ids:
filter_str = uri_part + str(item_id)
filter_params = []
filter_params.append(Scope.uri.like(f'%{filter_str}%'))
scopes = Scope.query.filter(*filter_params).all()
for scope in scopes:
db.session.delete(scope)
db.session.commit()
except (SQLAlchemyError, IntegrityError) as e:
raise RuntimeError(str(e)) from e
class GetAllByPermission:
def __init__(self, _table) -> None:
self._table = _table
current_org_id = redis_client.hget(RedisKey.user(g.gitee_id), 'current_org_id')
self.filter_params = [
or_(
self._table.permission_type == "public",
and_(
self._table.permission_type == "org",
self._table.org_id == int(current_org_id)
),
and_(
self._table.permission_type == "person",
self._table.org_id == int(current_org_id),
self._table.creator_id == int(g.gitee_id)
)
)
]
_re_user_groups = ReUserGroup.query.filter_by(
user_gitee_id=int(g.gitee_id), org_id=int(current_org_id)
).all()
if _re_user_groups:
group_ids = [re_user_group.group_id for re_user_group in _re_user_groups]
self.filter_params = [
or_(
self._table.permission_type == "public",
and_(
self._table.permission_type == "org",
self._table.org_id == int(current_org_id)
),
and_(
self._table.permission_type == "group",
self._table.org_id == int(current_org_id),
self._table.group_id.in_(group_ids)),
and_(
self._table.permission_type == "person",
self._table.org_id == int(current_org_id),
self._table.creator_id == int(g.gitee_id)
)
)
]
def get_filter(self):
return self.filter_params
def get(self):
tdata = self._table.query.filter(*self.filter_params).all()
data = []
if tdata:
data = [dt.to_json() for dt in tdata]
return jsonify(
error_code=RET.OK,
error_msg="OK!",
data=data
)
def fuzz(self, _data):
for key, value in _data.items():
if hasattr(self._table, key) and value is not None and key not in ("permission_type", "group_id"):
self.filter_params.append(getattr(self._table, key).like("%{}%".format(value)))
tdata = self._table.query.filter(*self.filter_params).all()
data = []
if tdata:
data = [dt.to_json() for dt in tdata]
return jsonify(
error_code=RET.OK,
error_msg="OK!",
data=data
)
def precise(self, _data):
for key, value in _data.items():
if hasattr(self._table, key) and value is not None and key not in ("permission_type", "group_id"):
self.filter_params.append(getattr(self._table, key) == value)
tdata = self._table.query.filter(*self.filter_params).all()
data = []
if tdata:
data = [dt.to_json() for dt in tdata]
return jsonify(
error_code=RET.OK,
error_msg="OK!",
data=data
)
def MultiCondition(self, _data):
for key, value in _data.items():
if hasattr(self._table, key) and value is not None and key not in ("permission_type", "group_id"):
if not isinstance(value, list):
value = [value]
self.filter_params.append(getattr(self._table, key).in_(value))
tdata = self._table.query.filter(*self.filter_params).all()
data = []
if tdata:
data = [dt.to_json() for dt in tdata]
return jsonify(
error_code=RET.OK,
error_msg="OK!",
data=data
)
def single(self, _data):
for key, value in _data.items():
if hasattr(self._table, key) and value is not None and key not in ("permission_type", "group_id"):
self.filter_params.append(getattr(self._table, key) == value)
tdata = self._table.query.filter(*self.filter_params).first()
return tdata
class PermissionItemsPool:
    """Partition *origin_pool* items by probing the permission-guarded API.

    For each item the pool issues an authenticated request to
    ``/<root_url>/<item.id>`` and classifies the item as allowed or denied
    based on whether the response carries an UNAUTHORIZE error code.
    """

    def __init__(self, origin_pool, namespace, act, auth):
        self.origin_pool = origin_pool
        self._root_url = "api/{}/{}".format(
            current_app.config.get("OFFICIAL_API_VERSION"),
            namespace
        )
        self.act = act    # HTTP method used for the probe request
        self.auth = auth  # Authorization header value forwarded as-is

    def _get_items(self, eft):
        """Return json dicts of items whose probe matches *eft* ('allow'/'deny').

        Per-item failures are logged and skipped so one bad item does not
        abort the whole scan.
        """
        return_data = []
        for _item in self.origin_pool:
            try:
                _url = "{}/{}".format(self._root_url, _item.id)
                _resp = requests.request(
                    method=self.act,
                    url="https://{}/{}".format(
                        current_app.config.get("SERVER_ADDR"),
                        _url
                    ),
                    headers={
                        'Content-Type': 'application/json;charset=utf8',
                        'Authorization': self.auth,
                    },
                    verify=True if current_app.config.get("CA_VERIFY") == "True" \
                        else current_app.config.get("SERVER_CERT_PATH")
                )
                if _resp.status_code != 200:
                    raise RuntimeError(_resp.text)
                # BUG FIX: json.loads raises ValueError/JSONDecodeError, not
                # AttributeError, so the original except clause never fired; and
                # the fallback assigned the bound method ``_resp.json`` instead
                # of calling it, which then crashed on ``.get``.
                try:
                    _output = json.loads(_resp.text)
                except ValueError:
                    try:
                        _output = _resp.json()
                    except ValueError as e:
                        raise RuntimeError(str(e)) from e
                # Item is "allowed" iff the probe did NOT come back unauthorized.
                if (_output.get("error_code") != RET.UNAUTHORIZE_ERR) == (eft == "allow"):
                    return_data.append(_item.to_json())
            except (SSHException, RuntimeError) as e:
                # logger.warn is a deprecated alias of logger.warning
                current_app.logger.warning(str(e))
                continue
        return return_data

    @property
    def allow_list(self):
        return self._get_items("allow")

    @property
    def deny_list(self):
        return self._get_items("deny")
# Golden Search, mimics the secant method, but for finding the Global Max and min (optimization of a function)
# Strategy in selecting the bounds of the interval:
# l0 = distance between estimate,
# l0 = l1+l2 ; l1/l0 = l2/l1
# R = (l2/l1)**-1 (reciprocal)
# From substitution : 1 +R = 1/R -> R**2 + R - 1 = 0
# R = [sqrt(5)-1]/2 <- GOLDEN RATIO
# d = R(x_u - x_l)
#x1 = x_l + d ; x2 = x_u - d
import numpy as np
import math
import matplotlib.pyplot as plt
"""
Interval Selection
"""
# Parameters
xu = 20 #int(input("Please choose a upper bound: "))
xl = -20 #int(input("Please choose a lower bound: "))
N = 100 #int(input("Please choose Maxt number of iterations: "))
# Golden Ratio
R = (math.sqrt(5) - 1)/2
"""
Evaluation of the Function
"""
# Evaluated function
f = lambda x: 2*np.sin(x) - x**2/10
def GoldenSearchMax(xu, xl, f, N):
    """Locate a local maximum of *f* on [xl, xu] by golden-section search.

    Performs at most N-1 interval reductions (stopping early if the two
    interior evaluations coincide) and returns ``(x, f(x))`` for the last
    interior golden-section point.

    Raises ValueError if N < 2 (the original crashed with NameError because
    the loop body never ran and x1 stayed unbound).
    """
    if N < 2:
        raise ValueError("N must be at least 2")
    # Golden-ratio conjugate, computed locally instead of relying on the
    # module-level constant R, so the function is self-contained.
    ratio = (math.sqrt(5) - 1) / 2
    for _ in range(N - 1):
        # Interior points
        d = ratio * (xu - xl)
        x1 = xl + d
        x2 = xu - d
        fx1, fx2 = f(x1), f(x2)
        if fx1 > fx2:
            xl = x2  # maximum lies in [x2, xu]
        elif fx1 < fx2:
            xu = x1  # maximum lies in [xl, x1]
        else:
            break
    return x1, fx1
def GoldenSearchMin(xu, xl, f, N):
for i in range(0, N-1):
# Intermediate points
d = R*(xu - xl)
x1 = xl + d
x2 = xu - d
fx1, fx2 = f(x1), f(x2)
if fx1 < fx2 :
xl = x2
elif fx1 > fx2:
xu = x1
else:
#print("The local minima is located at:", x1, fx1)
break
return x1, fx1
# Locate the extrema once, then visualize them against the function.
Max = GoldenSearchMax(xu, xl, f, N)
Min = GoldenSearchMin(xu, xl, f, N)
print('The local max and min of the interval is:', Max, Min)
# Sample the function across [xl, xu] for plotting; f uses np.sin, so it is
# already vectorized and the explicit fill loop is unnecessary.
x_value = np.linspace(xl, xu, N-1)
y_value = f(x_value)
# Plotting the function f
plt.plot(x_value, y_value)
plt.scatter(Max[0], Max[1], label='Maxima', color='r')
# BUG FIX: the minimum marker was labelled 'Maxima' in the original.
plt.scatter(Min[0], Min[1], label='Minima', color='g')
plt.legend(['Function', 'Maxima', 'Minima'])
plt.xlabel('x')
plt.ylabel('y')
plt.show()
# Strategy in selecting the bounds of the interval:
# l0 = distance between estimate,
# l0 = l1+l2 ; l1/l0 = l2/l1
# R = (l2/l1)**-1 (reciprocal)
# From substitution : 1 +R = 1/R -> R**2 + R - 1 = 0
# R = [sqrt(5)-1]/2 <- GOLDEN RATIO
# d = R(x_u - x_l)
#x1 = x_l + d ; x2 = x_u - d
import numpy as np
import math
import matplotlib.pyplot as plt
"""
Interval Selection
"""
# Parameters
xu = 20 #int(input("Please choose a upper bound: "))
xl = -20 #int(input("Please choose a lower bound: "))
N = 100 #int(input("Please choose Maxt number of iterations: "))
# Golden Ratio
R = (math.sqrt(5) - 1)/2
"""
Evaluation of the Function
"""
# Evaluated function
f = lambda x: 2*np.sin(x) - x**2/10
def GoldenSearchMax(xu, xl, f, N):
    """Locate a local maximum of *f* on [xl, xu] by golden-section search.

    Performs at most N-1 interval reductions (stopping early if the two
    interior evaluations coincide) and returns ``(x, f(x))`` for the last
    interior golden-section point.

    Raises ValueError if N < 2 (the original crashed with NameError because
    the loop body never ran and x1 stayed unbound).
    """
    if N < 2:
        raise ValueError("N must be at least 2")
    # Golden-ratio conjugate, computed locally instead of relying on the
    # module-level constant R, so the function is self-contained.
    ratio = (math.sqrt(5) - 1) / 2
    for _ in range(N - 1):
        # Interior points
        d = ratio * (xu - xl)
        x1 = xl + d
        x2 = xu - d
        fx1, fx2 = f(x1), f(x2)
        if fx1 > fx2:
            xl = x2  # maximum lies in [x2, xu]
        elif fx1 < fx2:
            xu = x1  # maximum lies in [xl, x1]
        else:
            break
    return x1, fx1
def GoldenSearchMin(xu, xl, f, N):
for i in range(0, N-1):
# Intermediate points
d = R*(xu - xl)
x1 = xl + d
x2 = xu - d
fx1, fx2 = f(x1), f(x2)
if fx1 < fx2 :
xl = x2
elif fx1 > fx2:
xu = x1
else:
#print("The local minima is located at:", x1, fx1)
break
return x1, fx1
# Arrays to store the numbers
Max = GoldenSearchMax(xu, xl, f, N)
Min = GoldenSearchMin(xu, xl, f, N)
print('The local max and min of the interval is:', Max, Min)
# Initializing Arrays
x_value = np.linspace(xl, xu, N-1)
y_value = np.zeros(N-1)
# Populating y_array
for k in range(N-1):
y_value[k] = f(x_value[k])
# Plotting the function f
plt.plot(x_value ,y_value)
plt.scatter(Max[0], Max[1], label = 'Maxima', color = 'r')
plt.scatter(Min[0], Min[1], label = 'Maxima', color = 'g')
plt.legend(['Function', 'Maxima', 'Minima'])
plt.xlabel('x')
plt.ylabel('y')
plt.show() | 0.384912 | 0.644505 |
import scrapy
import sys
from scrapy.selector import Selector
import amazon_crawler.spider_logger as db_logger
from amazon_crawler.items import AmazonItem as ReviewerItem
import amazon_crawler.mysql_helper as db
import amazon_crawler.html_extractor as html_extractor
from amazon_crawler.spider_base import SpiderBase
from cssselect import GenericTranslator, SelectorError
from scrapy import log
from scrapy import exceptions
import re
class ProductSpider(SpiderBase):
    """Scrapy spider that scrapes Amazon reviewer profile pages.

    One start URL per reviewer UID is built by substituting ``<<UID>>``
    into the ``UrlTemplate`` crawler setting.
    """
    name = "reviewer"
    html_page = 'AmazonReviewer'  # key used to look up extractor rules in the HtmlExtractor table
    allowed_domains = ["amazon.com"]
    def __init__(self, *args, **kwargs):
        super(ProductSpider, self).__init__(*args, **kwargs)
        # Required spider argument: comma-separated list of reviewer UIDs.
        self.uid_list = self.require_arg('uid', *args, **kwargs)
        self.url_template = self.require_crawler_setting('UrlTemplate')
        self.start_urls = [re.sub('<<UID>>', a, self.url_template) for a in self.uid_list.split(',')]
    def parse(self, response):
        """Extract reviewer fields from one profile page into a ReviewerItem.

        NOTE(review): ``db_log`` used below is not defined in this module
        (the logger module is imported as ``db_logger``); those branches
        would raise NameError if reached -- confirm the intended call.
        """
        #super(ProductSpider, self).parse(response)
        item = ReviewerItem()
        if response.status != 200:
            db_log('url(%s) response code is %d, 200 is expected'%(response.url, response.status),
                lv='error', )
            item['success'] = False
            return item
        # UID: 10-24 uppercase alphanumerics following a '/' in the URL.
        m = re.search('\/([0-9A-Z]{10,24})(?![0-9A-Z])', response.url)
        if not m:
            db_log('cannot parse uid from response url: %s'%response.url, lv='error', spider=self.name)
            log.msg('cannot parse uid from response url: %s'%response.url, level=log.ERROR)
            raise exceptions.CloseSpider('cannot parse uid from response url:%s'%response.url)
        uid = m.group(1)
        sel = Selector(response)
        # Selector rules are configured per-page in the database.
        extractor_list = db.get_page_extractor_list(self.html_page)
        if not extractor_list:
            db_log(message = 'no extractor for Page=%s, refer to table HtmlExtractor'%self.html_page,
                lv = 'fatal',spider = self.name)
        extract_result = html_extractor.extract(sel, extractor_list, self.name, uid)
        if extract_result['mismatch']:
            item['success'] = False
            item['message'] = 'some required fields are not extracted correctely due to missing selector, detail is in database'
        else:
            item['success'] = True
            reviewer = extract_result['data']
            reviewer[u'UID'] = uid
            item['data'] = reviewer
        item['debug'] = False
        if self.debug:
            item['debug'] = True
return item | amazon_crawler/amazon_crawler/spiders/reviewer.py | import scrapy
import sys
from scrapy.selector import Selector
import amazon_crawler.spider_logger as db_logger
from amazon_crawler.items import AmazonItem as ReviewerItem
import amazon_crawler.mysql_helper as db
import amazon_crawler.html_extractor as html_extractor
from amazon_crawler.spider_base import SpiderBase
from cssselect import GenericTranslator, SelectorError
from scrapy import log
from scrapy import exceptions
import re
class ProductSpider(SpiderBase):
name = "reviewer"
html_page = 'AmazonReviewer'
allowed_domains = ["amazon.com"]
def __init__(self, *args, **kwargs):
super(ProductSpider, self).__init__(*args, **kwargs)
self.uid_list = self.require_arg('uid', *args, **kwargs)
self.url_template = self.require_crawler_setting('UrlTemplate')
self.start_urls = [re.sub('<<UID>>', a, self.url_template) for a in self.uid_list.split(',')]
def parse(self, response):
#super(ProductSpider, self).parse(response)
item = ReviewerItem()
if response.status != 200:
db_log('url(%s) response code is %d, 200 is expected'%(response.url, response.status),
lv='error', )
item['success'] = False
return item
m = re.search('\/([0-9A-Z]{10,24})(?![0-9A-Z])', response.url)
if not m:
db_log('cannot parse uid from response url: %s'%response.url, lv='error', spider=self.name)
log.msg('cannot parse uid from response url: %s'%response.url, level=log.ERROR)
raise exceptions.CloseSpider('cannot parse uid from response url:%s'%response.url)
uid = m.group(1)
sel = Selector(response)
extractor_list = db.get_page_extractor_list(self.html_page)
if not extractor_list:
db_log(message = 'no extractor for Page=%s, refer to table HtmlExtractor'%self.html_page,
lv = 'fatal',spider = self.name)
extract_result = html_extractor.extract(sel, extractor_list, self.name, uid)
if extract_result['mismatch']:
item['success'] = False
item['message'] = 'some required fields are not extracted correctely due to missing selector, detail is in database'
else:
item['success'] = True
reviewer = extract_result['data']
reviewer[u'UID'] = uid
item['data'] = reviewer
item['debug'] = False
if self.debug:
item['debug'] = True
return item | 0.145267 | 0.050075 |
from string import *
import re
from zapps.rt import *
class CommandParserScanner(Scanner):
    """Tokenizer for the command grammar (Yapps-generated, Python 2).

    NOTE(review): 'END' appears twice in ``patterns`` (the second entry is
    redundant), and 'NUMBER' is listed before 'FLOAT' -- if the scanner
    takes the first match rather than the longest, floats would tokenize
    as NUMBER '.' NUMBER; confirm against zapps.rt.Scanner.
    """
    patterns = [
        ('"/"', re.compile('/')),
        ('[ \t]+', re.compile('[ \t]+')),
        ('NUMBER', re.compile('[0-9]+')),
        ('STRING', re.compile('".*"')),
        ('FLOAT', re.compile('[0-9]+\\.[0-9]+')),
        ('ID', re.compile('[a-zA-Z]+')),
        ('END', re.compile('\n')),
        ('START', re.compile('/')),
        ('MESSAGE', re.compile('[^/].*')),
        ('END', re.compile('\n')),
    ]
    def __init__(self, str):
        # Runs of spaces/tabs between tokens are ignored.
        Scanner.__init__(self,None,['[ \t]+'],str)
class CommandParser(Parser):
    """Recursive-descent parser (Yapps-generated, Python 2) for lines that
    are either "/command arg..." or a plain message."""
    def arg(self):
        # One command argument: identifier, integer, float or quoted string.
        _token_ = self._peek('ID', 'NUMBER', 'FLOAT', 'STRING')
        if _token_ == 'ID':
            ID = self._scan('ID')
            return ID
        elif _token_ == 'NUMBER':
            NUMBER = self._scan('NUMBER')
            return atoi(NUMBER)   # string.atoi -- Python 2 only
        elif _token_ == 'FLOAT':
            FLOAT = self._scan('FLOAT')
            return atof(FLOAT)    # string.atof -- Python 2 only
        else:# == 'STRING'
            STRING = self._scan('STRING')
            return STRING
    def parameters(self, PARAMS):
        # Accumulate arguments into PARAMS (mutated in place) until end-of-line.
        while self._peek('ID', 'NUMBER', 'FLOAT', 'STRING', 'END') != 'END':
            arg = self.arg()
            PARAMS.append(arg)
    def command(self):
        # "/name arg ..." -> [name, [args]]
        self._scan('"/"')
        ID = self._scan('ID')
        cmd = [ID] ; params = []
        parameters = self.parameters(params)
        END = self._scan('END')
        cmd.append(params); return cmd
    def message(self):
        # Any line not starting with '/' is returned verbatim (without newline).
        MESSAGE = self._scan('MESSAGE')
        END = self._scan('END')
        return MESSAGE
    def input(self):
        # Entry rule: dispatch on the first token. Both branches return on
        # the first iteration; `if 0: break` is generated dead code.
        while 1:
            _token_ = self._peek('"/"', 'MESSAGE')
            if _token_ == '"/"':
                command = self.command()
                return command
            else:# == 'MESSAGE'
                message = self.message()
                return message
            if 0: break
def parse(rule, text):
    """Parse *text* starting from grammar rule *rule*, with syntax errors
    reported through zapps' wrap_error_reporter."""
    P = CommandParser(CommandParserScanner(text))
    return wrap_error_reporter(P, rule)
if __name__=='__main__':
    # Simple Python 2 REPL: read a line, parse it as a /command or a plain
    # message, and print the result. EOF or an empty line exits.
    while 1:
        try: s = raw_input('>>> ')
        except EOFError: break
        if not s: break
        print parse('input', s + "\n")
print 'Bye.' | examples/command.py |
from string import *
import re
from zapps.rt import *
class CommandParserScanner(Scanner):
patterns = [
('"/"', re.compile('/')),
('[ \t]+', re.compile('[ \t]+')),
('NUMBER', re.compile('[0-9]+')),
('STRING', re.compile('".*"')),
('FLOAT', re.compile('[0-9]+\\.[0-9]+')),
('ID', re.compile('[a-zA-Z]+')),
('END', re.compile('\n')),
('START', re.compile('/')),
('MESSAGE', re.compile('[^/].*')),
('END', re.compile('\n')),
]
def __init__(self, str):
Scanner.__init__(self,None,['[ \t]+'],str)
class CommandParser(Parser):
def arg(self):
_token_ = self._peek('ID', 'NUMBER', 'FLOAT', 'STRING')
if _token_ == 'ID':
ID = self._scan('ID')
return ID
elif _token_ == 'NUMBER':
NUMBER = self._scan('NUMBER')
return atoi(NUMBER)
elif _token_ == 'FLOAT':
FLOAT = self._scan('FLOAT')
return atof(FLOAT)
else:# == 'STRING'
STRING = self._scan('STRING')
return STRING
def parameters(self, PARAMS):
while self._peek('ID', 'NUMBER', 'FLOAT', 'STRING', 'END') != 'END':
arg = self.arg()
PARAMS.append(arg)
def command(self):
self._scan('"/"')
ID = self._scan('ID')
cmd = [ID] ; params = []
parameters = self.parameters(params)
END = self._scan('END')
cmd.append(params); return cmd
def message(self):
MESSAGE = self._scan('MESSAGE')
END = self._scan('END')
return MESSAGE
def input(self):
while 1:
_token_ = self._peek('"/"', 'MESSAGE')
if _token_ == '"/"':
command = self.command()
return command
else:# == 'MESSAGE'
message = self.message()
return message
if 0: break
def parse(rule, text):
P = CommandParser(CommandParserScanner(text))
return wrap_error_reporter(P, rule)
if __name__=='__main__':
while 1:
try: s = raw_input('>>> ')
except EOFError: break
if not s: break
print parse('input', s + "\n")
print 'Bye.' | 0.257672 | 0.09556 |
import WelcomeNote
import math
import OLSDims
import mdl
import EnvSettings
from osgeo import osr
import Circ
import os
import ObsData
class dataInput:
ip = mdl.Data()
f=ip.f
AppOLS = OLSDims.AppDim.AppOLS
ToOLS = OLSDims.TODim.ToOLS
AppOLSNAME=OLSDims.AppDim.AppOLSNAME
AppOLSDIMS=OLSDims.AppDim.AppOLSDIMS
TOOLSNAME=OLSDims.TODim.TOOLSNAME
NRunwayInfo=ip.NRunwayInfo
SRunwayInfo=ip.SRunwayInfo
NIns = ip.NIns
if NIns == 'Y':
NPrc=ip.NPrc
if NPrc != 'N':
NBLDist=ip.NBLDist
CN = ip.CN
DayOnly = ip.CN
CL=ip.CL
RED=ip.RED
MTOW5700kg = ip.MTOW5700kg
RPT = ip.RPT
SIns = ip.SIns
if SIns == 'Y':
SPrc=ip.SPrc
if SPrc != 'N':
SBLDist=ip.SBLDist
RPT = ip.RPT
RWY_WID=ip.RWY_WID
RSW=ip.RSW
CodeNo = range(len(AppOLS))
Surfaces = range(len(AppOLS[0]))
ToSurfs = range(len(ToOLS[0]))
NE=ip.NE
SE=ip.SE
NTE=ip.NTE
NTN=ip.NTN
STE=ip.STE
STN=ip.STN
ARP=ip.ARP
SE=ip.SE
NE=ip.NE
zone=ip.zone
KML_NAME=ip.KML_NAME
completeName=ip.completeName
NCLWY=ip.NCLWY
SCLWY=ip.SCLWY
NTOIns=ip.NTOIns
STOIns=ip.STOIns
RwyLen = math.sqrt((NTE-STE)*(NTE-STE) + (NTN-STN)*(NTN-STN))
if CN == 'ALA':
NApOls = []
SApOls = []
else:
if NTOIns == 'N':
if CN == 2:
NToOls = []
for i in ToSurfs:
NToOls.append(ToOLS[1][i])
if CN == 1:
if DayOnly == 'N':
if MTOW5700kg == 'Y':
if RPT == 'Y':
NToOls = []
for i in ToSurfs:
NToOls.append(ToOLS[1][i])
else:
NToOls = []
for i in ToSurfs:
NToOls.append(ToOLS[0][i])
if CN == 3 or CN == 4:
NToOls = []
for i in ToSurfs:
NToOls.append(ToOLS[2][i])
if NMTOW22700kg == 'N' and DayOnly == 'Y':
NToOls[0] = 90
if NTOTurn15d == 'N' and DayOnly == 'Y':
NToOls[3] = 1200
if NTOIns == 'Y':
if CN == 2:
NToOls = []
for i in ToSurfs:
NToOls.append(ToOLS[1][i])
if CN == 1:
if DayOnly == 'N':
if MTOW5700kg == 'Y':
if RPT == 'Y':
NToOls = []
for i in ToSurfs:
NToOls.append(ToOLS[1][i])
else:
NToOls = []
for i in ToSurfs:
NToOls.append(ToOLS[0][i])
if CN == 3 or CN == 4:
NToOls = []
for i in ToSurfs:
NToOls.append(ToOLS[2][i])
if NIns == 'N':
if CN == 2:
NApOls = []
for i in Surfaces:
NApOls.append(AppOLS[1][i])
if CN == 1:
if DayOnly == 'N':
if MTOW5700kg == 'Y':
if RPT == 'Y':
NApOls = []
for i in Surfaces:
NApOls.append(AppOLS[1][i])
else:
NApOls = []
NToOls = []
for i in Surfaces:
NApOls.append(AppOLS[0][i])
for i in ToSurfs:
NToOls.append(ToOLS[0][i])
if CN == 3:
NApOls = []
NToOls = []
for i in Surfaces:
NApOls.append(AppOLS[2][i])
for i in ToSurfs:
NToOls.append(ToOLS[2][i])
if RWY_WID <= 30:
NApOls[3][0] = 90
if NMTOW22700kg == 'N' and DayOnly == 'Y':
NToOls[0] = 90
if NTOTurn15d == 'N' and DayOnly == 'Y':
NToOls[3] = 1200
if CN == 4:
NApOls = []
NToOls = []
for i in Surfaces:
NApOls.append(AppOLS[3][i])
for i in ToSurfs:
NToOls.append(ToOLS[2][i])
if NMTOW22700kg == 'N' and DayOnly == 'Y':
NToOls[0] = 90
if NTOTurn15d == 'N' and DayOnly == 'Y':
NToOls[3] = 1200
if NIns == 'Y' and NPrc == 'N':
if CN == 1 or CN == 2:
NApOls = []
for i in Surfaces:
NApOls.append(AppOLS[4][i])
if CN == 3:
NApOls = []
for i in Surfaces:
NApOls.append(AppOLS[5][i])
if CN == 4:
NApOls = []
for i in Surfaces:
NApOls.append(AppOLS[6][i])
if NIns == 'Y' and NPrc == 'Y1':
if CN == 1 or CN == 2:
NApOls = []
for i in Surfaces:
NApOls.append(AppOLS[7][i])
NApOls[7][1] = NBLDist
elif CN == 3 or CN == 4:
NApOls = []
for i in Surfaces:
NApOls.append(AppOLS[8][i])
if NBLDist <= NApOls[7][1]:
NApOls[7][1] = NBLDist
if NIns == 'Y':
if NPrc == 'Y2' or NPrc == 'Y3':
if CN == 3 or CN == 4:
NApOls = []
for i in Surfaces:
NApOls.append(AppOLS[9][i])
if STOIns == 'N':
if CN == 2:
SToOls = []
for i in ToSurfs:
SToOls.append(ToOLS[1][i])
if CN == 1:
if DayOnly == 'N':
if MTOW5700kg == 'Y':
if RPT == 'Y':
SToOls = []
for i in ToSurfs:
SToOls.append(ToOLS[1][i])
else:
SToOls = []
for i in ToSurfs:
SToOls.append(ToOLS[0][i])
if CN == 3 or CN == 4:
SToOls = []
for i in ToSurfs:
SToOls.append(ToOLS[2][i])
if SMTOW22700kg == 'N' and DayOnly == 'Y':
SToOls[0] = 90
if STOTurn15d == 'N' and DayOnly == 'Y':
SToOls[3] = 1200
if STOIns == 'Y':
if CN == 2:
SToOls = []
for i in ToSurfs:
SToOls.append(ToOLS[1][i])
if CN == 1:
if DayOnly == 'N':
if MTOW5700kg == 'Y':
if RPT == 'Y':
SToOls = []
for i in ToSurfs:
SToOls.append(ToOLS[1][i])
else:
SToOls = []
for i in ToSurfs:
SToOls.append(ToOLS[0][i])
if CN == 3 or CN == 4:
SToOls = []
for i in ToSurfs:
SToOls.append(ToOLS[2][i])
if SIns == 'N':
if CN == 2:
SApOls = []
for i in Surfaces:
SApOls.append(AppOLS[1][i])
if CN == 1:
if DayOnly == 'N':
if MTOW5700kg == 'Y':
if RPT == 'Y':
SApOls = []
for i in Surfaces:
SApOls.append(AppOLS[1][i])
else:
SApOls = []
for i in Surfaces:
SApOls.append(AppOLS[0][i])
if CN == 3:
SApOls = []
for i in Surfaces:
SApOls.append(AppOLS[2][i])
if RWY_WID <= 30:
SApOls[3][0] = 90
if CN == 4:
SApOls = []
for i in Surfaces:
SApOls.append(AppOLS[3][i])
if SIns == 'Y' and SPrc == 'N':
if CN == 1 or CN == 2:
SApOls = []
for i in Surfaces:
SApOls.append(AppOLS[4][i])
if CN == 3:
SApOls = []
for i in Surfaces:
SApOls.append(AppOLS[5][i])
if CN == 4:
SApOls = []
for i in Surfaces:
SApOls.append(AppOLS[6][i])
if SIns == 'Y' and SPrc == 'Y1':
if CN == 1 or CN == 2:
SApOls = []
for i in Surfaces:
SApOls.append(AppOLS[7][i])
SApOls[7][1] = SBLDist
if CN == 3 or CN == 4:
SApOls = []
for i in Surfaces:
SApOls.append(AppOLS[8][i])
if SBLDist <= SApOls[7][1]:
SApOls[7][1] = SBLDist
if SIns == 'Y':
if SPrc == 'Y2' or SPrc == 'Y3':
if CN == 3 or CN == 4:
SApOls = []
for i in Surfaces:
SApOls.append(AppOLS[9][i])
accur = raw_input("Insert size of surface cells in metres (i.e. enter a, such that cell = a*a): ")
colour = "19ff0011"
string = """<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2" xmlns:gx="http://www.google.com/kml/ext/2.2" xmlns:kml="http://www.opengis.net/kml/2.2" xmlns:atom="http://www.w3.org/2005/Atom">
<Document>
<name>Points</name>
<Style id="s_ylw-pushpin_hl">
<IconStyle>
<color>ff1e8ff7</color>
<scale>1.2</scale>
<Icon>
<href>http://maps.google.com/mapfiles/kml/shapes/placemark_circle_highlight.png</href>
</Icon>
</IconStyle>
<ListStyle>
</ListStyle>
</Style>
<StyleMap id="m_ylw-pushpin">
<Pair>
<key>normal</key>
<styleUrl>#s_ylw-pushpin</styleUrl>
</Pair>
<Pair>
<key>highlight</key>
<styleUrl>#s_ylw-pushpin_hl</styleUrl>
</Pair>
</StyleMap>
<Style id="s_ylw-pushpin">
<IconStyle>
<color>ff1e8ff7</color>
<scale>1.2</scale>
<Icon>
<href>http://maps.google.com/mapfiles/kml/shapes/placemark_circle.png</href>
</Icon>
</IconStyle>
<ListStyle>
</ListStyle>
</Style>
"""
f.write(string)
#ObsData.NthObs(NCLWY)
#ObsData.SthObs(SCLWY)
## ObsData.NthObs2(NCLWY,NToOls)
## ObsData.SthObs2(SCLWY,SToOls)
## ObsData.RwyEnds()
ObsData.DEM()
f.write( '</Document>\n')
f.write( '</kml>\n')
#f.close()
os.startfile(completeName)
print 'OK, done now' | Point_Engine.py | import WelcomeNote
import math
import OLSDims
import mdl
import EnvSettings
from osgeo import osr
import Circ
import os
import ObsData
class dataInput:
ip = mdl.Data()
f=ip.f
AppOLS = OLSDims.AppDim.AppOLS
ToOLS = OLSDims.TODim.ToOLS
AppOLSNAME=OLSDims.AppDim.AppOLSNAME
AppOLSDIMS=OLSDims.AppDim.AppOLSDIMS
TOOLSNAME=OLSDims.TODim.TOOLSNAME
NRunwayInfo=ip.NRunwayInfo
SRunwayInfo=ip.SRunwayInfo
NIns = ip.NIns
if NIns == 'Y':
NPrc=ip.NPrc
if NPrc != 'N':
NBLDist=ip.NBLDist
CN = ip.CN
DayOnly = ip.CN
CL=ip.CL
RED=ip.RED
MTOW5700kg = ip.MTOW5700kg
RPT = ip.RPT
SIns = ip.SIns
if SIns == 'Y':
SPrc=ip.SPrc
if SPrc != 'N':
SBLDist=ip.SBLDist
RPT = ip.RPT
RWY_WID=ip.RWY_WID
RSW=ip.RSW
CodeNo = range(len(AppOLS))
Surfaces = range(len(AppOLS[0]))
ToSurfs = range(len(ToOLS[0]))
NE=ip.NE
SE=ip.SE
NTE=ip.NTE
NTN=ip.NTN
STE=ip.STE
STN=ip.STN
ARP=ip.ARP
SE=ip.SE
NE=ip.NE
zone=ip.zone
KML_NAME=ip.KML_NAME
completeName=ip.completeName
NCLWY=ip.NCLWY
SCLWY=ip.SCLWY
NTOIns=ip.NTOIns
STOIns=ip.STOIns
RwyLen = math.sqrt((NTE-STE)*(NTE-STE) + (NTN-STN)*(NTN-STN))
if CN == 'ALA':
NApOls = []
SApOls = []
else:
if NTOIns == 'N':
if CN == 2:
NToOls = []
for i in ToSurfs:
NToOls.append(ToOLS[1][i])
if CN == 1:
if DayOnly == 'N':
if MTOW5700kg == 'Y':
if RPT == 'Y':
NToOls = []
for i in ToSurfs:
NToOls.append(ToOLS[1][i])
else:
NToOls = []
for i in ToSurfs:
NToOls.append(ToOLS[0][i])
if CN == 3 or CN == 4:
NToOls = []
for i in ToSurfs:
NToOls.append(ToOLS[2][i])
if NMTOW22700kg == 'N' and DayOnly == 'Y':
NToOls[0] = 90
if NTOTurn15d == 'N' and DayOnly == 'Y':
NToOls[3] = 1200
if NTOIns == 'Y':
if CN == 2:
NToOls = []
for i in ToSurfs:
NToOls.append(ToOLS[1][i])
if CN == 1:
if DayOnly == 'N':
if MTOW5700kg == 'Y':
if RPT == 'Y':
NToOls = []
for i in ToSurfs:
NToOls.append(ToOLS[1][i])
else:
NToOls = []
for i in ToSurfs:
NToOls.append(ToOLS[0][i])
if CN == 3 or CN == 4:
NToOls = []
for i in ToSurfs:
NToOls.append(ToOLS[2][i])
if NIns == 'N':
if CN == 2:
NApOls = []
for i in Surfaces:
NApOls.append(AppOLS[1][i])
if CN == 1:
if DayOnly == 'N':
if MTOW5700kg == 'Y':
if RPT == 'Y':
NApOls = []
for i in Surfaces:
NApOls.append(AppOLS[1][i])
else:
NApOls = []
NToOls = []
for i in Surfaces:
NApOls.append(AppOLS[0][i])
for i in ToSurfs:
NToOls.append(ToOLS[0][i])
if CN == 3:
NApOls = []
NToOls = []
for i in Surfaces:
NApOls.append(AppOLS[2][i])
for i in ToSurfs:
NToOls.append(ToOLS[2][i])
if RWY_WID <= 30:
NApOls[3][0] = 90
if NMTOW22700kg == 'N' and DayOnly == 'Y':
NToOls[0] = 90
if NTOTurn15d == 'N' and DayOnly == 'Y':
NToOls[3] = 1200
if CN == 4:
NApOls = []
NToOls = []
for i in Surfaces:
NApOls.append(AppOLS[3][i])
for i in ToSurfs:
NToOls.append(ToOLS[2][i])
if NMTOW22700kg == 'N' and DayOnly == 'Y':
NToOls[0] = 90
if NTOTurn15d == 'N' and DayOnly == 'Y':
NToOls[3] = 1200
if NIns == 'Y' and NPrc == 'N':
if CN == 1 or CN == 2:
NApOls = []
for i in Surfaces:
NApOls.append(AppOLS[4][i])
if CN == 3:
NApOls = []
for i in Surfaces:
NApOls.append(AppOLS[5][i])
if CN == 4:
NApOls = []
for i in Surfaces:
NApOls.append(AppOLS[6][i])
if NIns == 'Y' and NPrc == 'Y1':
if CN == 1 or CN == 2:
NApOls = []
for i in Surfaces:
NApOls.append(AppOLS[7][i])
NApOls[7][1] = NBLDist
elif CN == 3 or CN == 4:
NApOls = []
for i in Surfaces:
NApOls.append(AppOLS[8][i])
if NBLDist <= NApOls[7][1]:
NApOls[7][1] = NBLDist
if NIns == 'Y':
if NPrc == 'Y2' or NPrc == 'Y3':
if CN == 3 or CN == 4:
NApOls = []
for i in Surfaces:
NApOls.append(AppOLS[9][i])
if STOIns == 'N':
if CN == 2:
SToOls = []
for i in ToSurfs:
SToOls.append(ToOLS[1][i])
if CN == 1:
if DayOnly == 'N':
if MTOW5700kg == 'Y':
if RPT == 'Y':
SToOls = []
for i in ToSurfs:
SToOls.append(ToOLS[1][i])
else:
SToOls = []
for i in ToSurfs:
SToOls.append(ToOLS[0][i])
if CN == 3 or CN == 4:
SToOls = []
for i in ToSurfs:
SToOls.append(ToOLS[2][i])
if SMTOW22700kg == 'N' and DayOnly == 'Y':
SToOls[0] = 90
if STOTurn15d == 'N' and DayOnly == 'Y':
SToOls[3] = 1200
if STOIns == 'Y':
if CN == 2:
SToOls = []
for i in ToSurfs:
SToOls.append(ToOLS[1][i])
if CN == 1:
if DayOnly == 'N':
if MTOW5700kg == 'Y':
if RPT == 'Y':
SToOls = []
for i in ToSurfs:
SToOls.append(ToOLS[1][i])
else:
SToOls = []
for i in ToSurfs:
SToOls.append(ToOLS[0][i])
if CN == 3 or CN == 4:
SToOls = []
for i in ToSurfs:
SToOls.append(ToOLS[2][i])
if SIns == 'N':
if CN == 2:
SApOls = []
for i in Surfaces:
SApOls.append(AppOLS[1][i])
if CN == 1:
if DayOnly == 'N':
if MTOW5700kg == 'Y':
if RPT == 'Y':
SApOls = []
for i in Surfaces:
SApOls.append(AppOLS[1][i])
else:
SApOls = []
for i in Surfaces:
SApOls.append(AppOLS[0][i])
if CN == 3:
SApOls = []
for i in Surfaces:
SApOls.append(AppOLS[2][i])
if RWY_WID <= 30:
SApOls[3][0] = 90
if CN == 4:
SApOls = []
for i in Surfaces:
SApOls.append(AppOLS[3][i])
if SIns == 'Y' and SPrc == 'N':
if CN == 1 or CN == 2:
SApOls = []
for i in Surfaces:
SApOls.append(AppOLS[4][i])
if CN == 3:
SApOls = []
for i in Surfaces:
SApOls.append(AppOLS[5][i])
if CN == 4:
SApOls = []
for i in Surfaces:
SApOls.append(AppOLS[6][i])
if SIns == 'Y' and SPrc == 'Y1':
if CN == 1 or CN == 2:
SApOls = []
for i in Surfaces:
SApOls.append(AppOLS[7][i])
SApOls[7][1] = SBLDist
if CN == 3 or CN == 4:
SApOls = []
for i in Surfaces:
SApOls.append(AppOLS[8][i])
if SBLDist <= SApOls[7][1]:
SApOls[7][1] = SBLDist
if SIns == 'Y':
if SPrc == 'Y2' or SPrc == 'Y3':
if CN == 3 or CN == 4:
SApOls = []
for i in Surfaces:
SApOls.append(AppOLS[9][i])
accur = raw_input("Insert size of surface cells in metres (i.e. enter a, such that cell = a*a): ")
colour = "19ff0011"
string = """<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2" xmlns:gx="http://www.google.com/kml/ext/2.2" xmlns:kml="http://www.opengis.net/kml/2.2" xmlns:atom="http://www.w3.org/2005/Atom">
<Document>
<name>Points</name>
<Style id="s_ylw-pushpin_hl">
<IconStyle>
<color>ff1e8ff7</color>
<scale>1.2</scale>
<Icon>
<href>http://maps.google.com/mapfiles/kml/shapes/placemark_circle_highlight.png</href>
</Icon>
</IconStyle>
<ListStyle>
</ListStyle>
</Style>
<StyleMap id="m_ylw-pushpin">
<Pair>
<key>normal</key>
<styleUrl>#s_ylw-pushpin</styleUrl>
</Pair>
<Pair>
<key>highlight</key>
<styleUrl>#s_ylw-pushpin_hl</styleUrl>
</Pair>
</StyleMap>
<Style id="s_ylw-pushpin">
<IconStyle>
<color>ff1e8ff7</color>
<scale>1.2</scale>
<Icon>
<href>http://maps.google.com/mapfiles/kml/shapes/placemark_circle.png</href>
</Icon>
</IconStyle>
<ListStyle>
</ListStyle>
</Style>
"""
f.write(string)
#ObsData.NthObs(NCLWY)
#ObsData.SthObs(SCLWY)
## ObsData.NthObs2(NCLWY,NToOls)
## ObsData.SthObs2(SCLWY,SToOls)
## ObsData.RwyEnds()
ObsData.DEM()
f.write( '</Document>\n')
f.write( '</kml>\n')
#f.close()
os.startfile(completeName)
print 'OK, done now' | 0.035763 | 0.166134 |
import os
from dotenv import load_dotenv
import praw
import json
load_dotenv(verbose=True)
CLIENT_ID = os.environ.get("CLIENT_ID")
CLIENT_SECRET = os.environ.get("CLIENT_SECRET")
USER_AGENT = os.environ.get("USER_AGENT")
USERNAME = os.environ.get("USERNAME")
PASSWORD = os.environ.get("PASSWORD")
def get_json():
    """Load previously saved entries from disk.

    Returns:
        list: parsed content of ``reddit-saved.json``, or an empty list
        when the file does not exist yet.
    """
    try:
        with open("reddit-saved.json", "r", encoding='utf-8') as handle:
            content = json.load(handle)
    except FileNotFoundError:
        print("Création du fichier JSON.")
        return []
    print("Chargement du fichier JSON.")
    return content
def get_entries(reddit, last_saved_id):
    """Return a list of chosen attributes from saved comments/posts.
    Args:
        reddit (praw.Reddit): PRAW reddit instance,
        last_saved_id (str): fullname of the newest comment/post already in
            the JSON file (passed as the ``before`` listing parameter so
            only newer saves are fetched), or None to fetch everything,
    Returns:
        new_data (list[dict]): Comments and posts useful attributes,
    """
    new_entry_count = 0
    new_data = []
    for item in reddit.user.me().saved(limit=None, params={"before": last_saved_id}): # ISSUE: limited to 100 entries with limit=None
        data = handle_saved(item)
        new_data.append(data)
        new_entry_count += 1
    print(f"Nombre d'entrées ajoutées: {new_entry_count}")
    return new_data
def handle_saved(item):
    """Map one saved reddit item (post or comment) to a plain dict.

    Posts (fullname ``t3_*``) keep their title plus either the selftext or
    the link URL; comments (``t1_*``) keep their body. Both keep the item's
    fullname as ``id`` and its permalink.

    Args:
        item: praw Submission or Comment object,
    Returns:
        dict: selected attributes of the item,
    """
    record = {"id": item.name, "permalink": item.permalink}
    if isinstance(item, praw.models.Submission):
        record["title"] = item.title
        # Self-posts carry text; link posts carry the target URL.
        record["content"] = item.selftext if item.is_self else item.url
    else:
        # Permalink is https://www.reddit.com<permalink> and embeds the
        # comment id and author.
        record["content"] = item.body
    return record
def save_json(all_data):
    """Overwrite ``reddit-saved.json`` with *all_data*.

    The dump is pretty-printed UTF-8 with non-ASCII characters preserved.
    """
    with open("reddit-saved.json", "w", encoding='utf-8') as handle:
        json.dump(all_data, handle, ensure_ascii=False, indent=4)
def main():
    """Fetch newly-saved reddit items and prepend them to the JSON dump."""
    reddit = praw.Reddit(client_id=CLIENT_ID,
                         client_secret=CLIENT_SECRET,
                         user_agent=USER_AGENT,
                         username=USERNAME,
                         password=PASSWORD)
    print(f"Utilisateur: {reddit.user.me()}")
    print(f"Read-only: {reddit.read_only}")
    json_content = get_json()
    # Entries are stored newest-first (new_data is prepended below), so
    # index 0 holds the most recent saved item.
    last_saved_id = json_content[0]["id"] if json_content else None
    entries_count = len(json_content) if json_content else 0
    print(f"Nombre d'entrées: {entries_count}")
    new_data = get_entries(reddit, last_saved_id)
    if new_data: save_json(new_data + json_content)
if __name__ == "__main__":
main() | reddit_comments.py | import os
from dotenv import load_dotenv
import praw
import json
load_dotenv(verbose=True)
CLIENT_ID = os.environ.get("CLIENT_ID")
CLIENT_SECRET = os.environ.get("CLIENT_SECRET")
USER_AGENT = os.environ.get("USER_AGENT")
USERNAME = os.environ.get("USERNAME")
PASSWORD = os.environ.get("PASSWORD")
def get_json():
"""Load JSON file content if the file exist. Else it returns an empty list.
Returns:
json_content (list): Content of JSON file or [],
"""
try:
with open("reddit-saved.json", "r", encoding='utf-8') as file:
json_content = json.load(file)
print(f"Chargement du fichier JSON.")
except FileNotFoundError:
print("Création du fichier JSON.")
json_content = []
return json_content
def get_entries(reddit, last_saved_id):
"""Return a list of choosen attributes from saved comments/posts.
Args:
reddit (praw.Reddit): PRAW reddit instance,
last_id (str): ID of the last comment/post already in the JSON file,
Returns:
new_data (list[dict]): Comments and posts useful attributes,
"""
new_entry_count = 0
new_data = []
for item in reddit.user.me().saved(limit=None, params={"before": last_saved_id}): # ISSUE: limited to 100 entries with limit=None
data = handle_saved(item)
new_data.append(data)
new_entry_count += 1
print(f"Nombre d'entrées ajoutées: {new_entry_count}")
return new_data
def handle_saved(item):
"""Retrieves interesting attributes depending on whether the entry is a comment or a post.
Args:
item (praw.models.reddit.submission.Submission or
praw.models.reddit.comment.Comment ): Comment or Post Object,
Returns:
data (dict): Dict of attributes,
"""
data = {}
# pprint(vars(item))
data["id"] = item.name
if isinstance(item, praw.models.Submission):
# Posts item.name is t3_<id>
data["permalink"] = item.permalink
data["title"] = item.title
if item.is_self:
data["content"] = item.selftext
else:
data["content"] = item.url
else:
# Comments item.name is t1_<id>
# print("post_author :", item.author)
# item.id and comment's author is in the permalink
data["permalink"] = item.permalink # https://www.reddit.com<permalink>
data["content"] = item.body
return data
def save_json(all_data):
"""Overwrite the JSON file.
Args:
all_data (list[dict]): Content to write,
"""
with open("reddit-saved.json", "w", encoding='utf-8') as file:
json.dump(all_data, file, ensure_ascii=False, indent=4)
def main():
""" main function """
reddit = praw.Reddit(client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
user_agent=USER_AGENT,
username=USERNAME,
password=PASSWORD)
print(f"Utilisateur: {reddit.user.me()}")
print(f"Read-only: {reddit.read_only}")
json_content = get_json()
last_saved_id = json_content[0]["id"] if json_content else None
entries_count = len(json_content) if json_content else 0
print(f"Nombre d'entrées: {entries_count}")
new_data = get_entries(reddit, last_saved_id)
if new_data: save_json(new_data + json_content)
if __name__ == "__main__":
main() | 0.405096 | 0.079424 |
import json
import logging
import time
from datetime import datetime
from tempfile import SpooledTemporaryFile
from typing import List, Union, Dict, Any, Optional
import pandas
import requests
from fastapi.encoders import jsonable_encoder
from pytz import timezone
from sentry_sdk import capture_exception
from sqlalchemy import and_, not_, func
from sqlalchemy.orm import Session, Query
from app import crud, models, schemas
from app.core.config import settings
from app.crud.base import CRUDBase
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class CRUDFact(CRUDBase[models.Fact, schemas.FactCreate, schemas.FactUpdate]):
def get(self, db: Session, id: Any) -> Optional[models.Fact]:
db_obj = db.query(self.model).filter(models.Fact.fact_id == id).first()
return db_obj
    def get_schema_with_perm(self, db_obj: models.Fact, user: models.User):
        """Convert a Fact ORM object to its schema, annotated with the given
        user's permission, marked/suspended flags and report list."""
        schema = schemas.Fact.from_orm(db_obj)
        schema.permission = db_obj.permissions(user)
        schema.marked = db_obj.is_marked(user)
        schema.suspended = db_obj.is_suspended(user)
        schema.reports = db_obj.find_reports(user)
        return schema
    def create_with_owner(
        self, db: Session, *, obj_in: schemas.FactCreate, user: models.User
    ) -> models.Fact:
        """Create a Fact owned by *user*; create/update dates are stamped
        with the same current UTC instant."""
        obj_in_data = jsonable_encoder(obj_in)
        now = datetime.now(timezone('UTC')).isoformat()
        db_obj = self.model(**obj_in_data,
                            user_id=user.id,
                            create_date=now,
                            update_date=now)
        db.add(db_obj)
        db.commit()
        return db_obj
def get_multi_by_owner(
self,
db: Session,
*,
user: Optional[models.User] = None,
skip: Optional[int] = None,
limit: Optional[int] = None,
) -> List[models.Fact]:
query = db.query(self.model)
if user:
query = query.filter(models.Fact.user_id == user.id)
if skip:
query = query.offset(skip)
if limit:
query = query.offset(limit)
return query.all()
    def update(
        self, db: Session, *, db_obj: models.Fact, obj_in: Union[schemas.FactUpdate, Dict[str, Any]]
    ) -> models.Fact:
        """Apply a partial update and stamp ``update_date`` with the current UTC time."""
        # NOTE(review): .dict() assumes obj_in is a pydantic schema; the
        # declared Dict[str, Any] alternative would raise AttributeError
        # here -- confirm what callers actually pass.
        update_data = obj_in.dict(exclude_unset=True)
        update_data["update_date"] = datetime.now(timezone('UTC')).isoformat()
        return super().update(db, db_obj=db_obj, obj_in=update_data)
    def remove(
        self, db: Session, *, db_obj: models.Fact, user: models.User
    ) -> models.Fact:
        """Soft-delete: record a Deleted row for (user, fact) and log the
        action to history; the Fact row itself is not removed."""
        now = datetime.now(timezone('UTC'))
        delete = models.Deleted(deleter=user, deleted_fact=db_obj, date_deleted=now)
        db.add(delete)
        db.commit()
        history_in = schemas.HistoryCreate(
            time=now,
            user_id=user.id,
            fact_id=db_obj.fact_id,
            log_type=schemas.Log.delete,
            details={"study_system": user.repetition_model}
        )
        crud.history.create(db=db, obj_in=history_in)
        return db_obj
    def suspend(
        self, db: Session, *, db_obj: models.Fact, user: models.User
    ) -> models.Fact:
        """Record that *user* suspended *db_obj* and log the action."""
        now = datetime.now(timezone('UTC'))
        suspend = models.Suspended(suspender=user,
                                   suspended_fact=db_obj,
                                   date_suspended=now)
        db.add(suspend)
        db.commit()
        history_in = schemas.HistoryCreate(
            time=now,
            user_id=user.id,
            fact_id=db_obj.fact_id,
            log_type=schemas.Log.suspend,
            details={"study_system": user.repetition_model}
        )
        crud.history.create(db=db, obj_in=history_in)
        return db_obj
def report(
self, db: Session, *, db_obj: models.Fact, user: models.User, suggestion: schemas.FactToReport
) -> models.Fact:
now = datetime.now(timezone('UTC'))
report = models.Reported(reporter=user,
reported_fact=db_obj,
date_reported=datetime.now(timezone('UTC')),
suggestion=suggestion)
db.add(report)
db.commit()
history_in = schemas.HistoryCreate(
time=now,
user_id=user.id,
fact_id=db_obj.fact_id,
log_type=schemas.Log.report,
details={"study_system": user.repetition_model}
)
crud.history.create(db=db, obj_in=history_in)
return db_obj
    def mark(
        self, db: Session, *, db_obj: models.Fact, user: models.User
    ) -> models.Fact:
        """Record that *user* marked (bookmarked) *db_obj* and log the action."""
        now = datetime.now(timezone('UTC'))
        mark = models.Marked(marker=user, marked_fact=db_obj, date_marked=now)
        db.add(mark)
        db.commit()
        history_in = schemas.HistoryCreate(
            time=now,
            user_id=user.id,
            fact_id=db_obj.fact_id,
            log_type=schemas.Log.mark,
            details={"study_system": user.repetition_model}
        )
        crud.history.create(db=db, obj_in=history_in)
        return db_obj
    def undo_remove(
        self, db: Session, *, db_obj: models.Fact, user: models.User
    ) -> models.Fact:
        """Drop this user's Deleted row for the fact and log the undo."""
        db.query(models.Deleted).filter(
            and_(models.Deleted.fact_id == db_obj.fact_id, models.Deleted.user_id == user.id)).delete(
            synchronize_session=False)
        db.commit()
        history_in = schemas.HistoryCreate(
            time=datetime.now(timezone('UTC')).isoformat(),
            user_id=user.id,
            fact_id=db_obj.fact_id,
            log_type=schemas.Log.undo_delete,
            details={"study_system": user.repetition_model}
        )
        crud.history.create(db=db, obj_in=history_in)
        return db_obj
    def undo_suspend(
        self, db: Session, *, db_obj: models.Fact, user: models.User
    ) -> models.Fact:
        """Drop this user's Suspended row for the fact and log the undo."""
        db.query(models.Suspended) \
            .filter(and_(models.Suspended.suspended_fact == db_obj,
                         models.Suspended.suspender == user)).delete(synchronize_session=False)
        db.commit()
        history_in = schemas.HistoryCreate(
            time=datetime.now(timezone('UTC')).isoformat(),
            user_id=user.id,
            fact_id=db_obj.fact_id,
            log_type=schemas.Log.undo_suspend,
            details={"study_system": user.repetition_model}
        )
        crud.history.create(db=db, obj_in=history_in)
        return db_obj
def undo_report(
self, db: Session, *, db_obj: models.Fact, user: models.User
) -> models.Fact:
db.query(models.Reported) \
.filter(and_(models.Reported.fact_id == db_obj.fact_id,
models.Reported.user_id == user.id)).delete(synchronize_session=False)
db.commit()
history_in = schemas.HistoryCreate(
time=datetime.now(timezone('UTC')).isoformat(),
user_id=user.id,
fact_id=db_obj.fact_id,
log_type=schemas.Log.undo_report,
details={"study_system": user.repetition_model}
)
crud.history.create(db=db, obj_in=history_in)
return db_obj
def resolve_report(
self, db: Session, *, user: models.User, db_obj: models.Fact
) -> models.Fact:
db.query(models.Reported) \
.filter(models.Reported.fact_id == db_obj.fact_id).delete(
synchronize_session=False)
db.commit()
history_in = schemas.HistoryCreate(
time=datetime.now(timezone('UTC')).isoformat(),
user_id=user.id,
fact_id=db_obj.fact_id,
log_type=schemas.Log.resolve_report,
details={"study_system": user.repetition_model}
)
crud.history.create(db=db, obj_in=history_in)
return db_obj
def undo_mark(
self, db: Session, *, db_obj: models.Fact, user: models.User
) -> models.Fact:
db.query(models.Marked) \
.filter(and_(models.Marked.marked_fact == db_obj,
models.Marked.marker == user)).delete(synchronize_session=False)
db.commit()
history_in = schemas.HistoryCreate(
time=datetime.now(timezone('UTC')).isoformat(),
user_id=user.id,
fact_id=db_obj.fact_id,
log_type=schemas.Log.undo_mark,
details={"study_system": user.repetition_model}
)
crud.history.create(db=db, obj_in=history_in)
return db_obj
def build_facts_query(self, db: Session, *, user: models.User, filters: schemas.FactSearch = schemas.FactSearch()):
visible_decks = (
db.query(models.Deck.id).join(models.User_Deck).filter(models.User_Deck.owner_id == user.id).subquery())
user_facts = (db.query(models.Fact).join(visible_decks, models.Fact.deck_id == visible_decks.c.id).filter(
models.Fact.user_id == user.id))
deck_owners = (db.query(models.User_Deck.deck_id, models.User_Deck.owner_id)
.outerjoin(visible_decks)
.filter(models.User_Deck.permissions == schemas.Permission.owner).subquery())
filtered_facts = (db.query(models.Fact)
.join(visible_decks, models.Fact.deck_id == visible_decks.c.id)
.join(deck_owners,
and_(models.Fact.deck_id == deck_owners.c.deck_id,
models.Fact.user_id == deck_owners.c.owner_id)))
facts_query = (user_facts.union(filtered_facts))
# Don't allow Jeopardy facts
facts_query = facts_query.filter(models.Fact.deck_id != 2)
if filters.studyable:
facts_query = (facts_query
.outerjoin(models.Deleted,
and_(models.Fact.fact_id == models.Deleted.fact_id,
models.Deleted.user_id == user.id))
.filter(models.Deleted.user_id == None)
.outerjoin(models.Reported,
and_(models.Fact.fact_id == models.Reported.fact_id,
models.Reported.user_id == user.id)
)
.filter(models.Reported.user_id == None)
.outerjoin(models.Suspended,
and_(models.Fact.fact_id == models.Suspended.fact_id,
models.Suspended.user_id == user.id)
)
.filter(models.Suspended.user_id == None))
else:
facts_query = (facts_query
.outerjoin(models.Deleted,
and_(models.Fact.fact_id == models.Deleted.fact_id,
models.Deleted.user_id == user.id))
.filter(models.Deleted.user_id == None))
if filters.suspended is not None:
if filters.suspended:
facts_query = facts_query.join(models.Suspended).filter(models.Suspended.user_id == user.id)
else:
facts_query = (facts_query
.outerjoin(models.Suspended,
and_(models.Fact.fact_id == models.Suspended.fact_id,
models.Suspended.user_id == user.id)
)
.filter(models.Suspended.user_id == None))
if filters.reported is not None:
if filters.reported:
facts_query = facts_query.join(models.Reported)
if not user.is_superuser:
facts_query = facts_query.filter(models.Reported.user_id == user.id)
else:
facts_query = (facts_query
.outerjoin(models.Reported,
and_(models.Fact.fact_id == models.Reported.fact_id,
models.Reported.user_id == user.id)
)
.filter(models.Reported.user_id == None))
if filters.all:
facts_query = facts_query.filter(
models.Fact.__ts_vector__.op('@@')(func.plainto_tsquery('english', filters.all)))
if filters.text:
facts_query = facts_query.filter(models.Fact.text.ilike(filters.text))
if filters.answer:
facts_query = facts_query.filter(models.Fact.answer.ilike(filters.answer))
if filters.category:
facts_query = facts_query.filter(models.Fact.category.ilike(filters.category))
if filters.identifier:
facts_query = facts_query.filter(models.Fact.identifier.ilike(filters.identifier))
if filters.deck_ids:
facts_query = facts_query.filter(models.Fact.deck_id.in_(filters.deck_ids))
if filters.deck_id:
facts_query = facts_query.filter(models.Fact.deck_id == filters.deck_id)
if filters.marked is not None:
if filters.marked:
facts_query = facts_query.filter(models.Fact.markers.any(id=user.id))
else:
facts_query = facts_query.filter(not_(models.Fact.markers.any(id=user.id)))
if filters.randomize:
facts_query = facts_query.order_by(func.random())
return facts_query
def count_eligible_facts(
self, query: Query
) -> int:
begin_overall_start = time.time()
facts = query.distinct().count()
overall_end_time = time.time()
overall_total_time = overall_end_time - begin_overall_start
logger.info("overall time count: " + str(overall_total_time))
return facts
def get_eligible_facts(
self, query: Query, skip: int = None, limit: int = None
) -> List[models.Fact]:
begin_overall_start = time.time()
if skip:
query = query.offset(skip)
if limit:
query = query.limit(limit)
facts = query.all()
overall_end_time = time.time()
overall_total_time = overall_end_time - begin_overall_start
logger.info("overall time facts: " + str(overall_total_time))
return facts
def get_study_set(
self,
db: Session,
*,
user: models.User,
deck_ids: List[int] = None,
return_limit: Optional[int] = None,
send_limit: Optional[int] = 300,
) -> Union[List[schemas.Fact], requests.exceptions.RequestException, json.decoder.JSONDecodeError]:
filters = schemas.FactSearch(deck_ids=deck_ids, limit=send_limit, randomize=True, studyable=True)
query = crud.fact.build_facts_query(db=db, user=user, filters=filters)
eligible_facts = self.get_eligible_facts(query=query, limit=send_limit)
if not eligible_facts:
return []
karl_list = []
karl_list_start = time.time()
for each_card in eligible_facts:
karl_list.append(schemas.KarlFact(
text=each_card.text,
answer=each_card.answer,
category=each_card.category,
deck_name=each_card.deck.title,
deck_id=each_card.deck_id,
user_id=user.id,
fact_id=each_card.fact_id,
repetition_model=user.repetition_model,
env=settings.ENVIRONMENT
).dict())
eligible_fact_time = time.time() - karl_list_start
logger.info("eligible fact time: " + str(eligible_fact_time))
karl_query_start = time.time()
try:
scheduler_response = requests.post(settings.INTERFACE + "api/karl/schedule", json=karl_list)
response_json = scheduler_response.json()
card_order = response_json["order"]
rationale = response_json["rationale"]
debug_id = response_json["debug_id"]
query_time = time.time() - karl_query_start
logger.info(scheduler_response.request)
logger.info("query time: " + str(query_time))
facts = []
if rationale != "<p>no fact received</p>":
reordered_karl_list = [karl_list[x] for x in card_order]
if return_limit:
for _, each_karl_fact in zip(range(return_limit), reordered_karl_list):
retrieved_fact = self.get(db=db, id=int(each_karl_fact["fact_id"]))
fact_schema = self.get_schema_with_perm(db_obj=retrieved_fact, user=user)
fact_schema.rationale = rationale
fact_schema.debug_id = debug_id
if retrieved_fact:
fact_schema.marked = True if user in retrieved_fact.markers else False
facts.append(fact_schema)
else:
for each_karl_fact in reordered_karl_list:
retrieved_fact = self.get(db=db, id=int(each_karl_fact["fact_id"]))
fact_schema = self.get_schema_with_perm(db_obj=retrieved_fact, user=user)
fact_schema.rationale = rationale
# MARK: maybe not the most efficient solution for determining if user has marked a fact
if retrieved_fact:
fact_schema.marked = retrieved_fact.is_marked(user)
facts.append(fact_schema)
details = {
"study_system": user.repetition_model,
"first_fact": facts[0] if len(facts) != 0 else "empty",
"eligible_fact_time": query_time,
"scheduler_query_time": eligible_fact_time,
"debug_id": debug_id,
}
history_in = schemas.HistoryCreate(
time=datetime.now(timezone('UTC')).isoformat(),
user_id=user.id,
log_type=schemas.Log.get_facts,
details=details
)
crud.history.create(db=db, obj_in=history_in)
return facts
except requests.exceptions.RequestException as e:
capture_exception(e)
return e
except json.decoder.JSONDecodeError as e:
capture_exception(e)
return e
def update_schedule(
self, db: Session, *, user: models.User, db_obj: models.Fact, schedule: schemas.Schedule
) -> Union[bool, requests.exceptions.RequestException, json.decoder.JSONDecodeError]:
try:
response = schedule.response
date_studied = datetime.now(timezone('UTC')).isoformat()
details = {
"study_system": user.repetition_model,
"typed": schedule.typed,
"response": schedule.response,
"debug_id": schedule.debug_id,
}
if schedule.elapsed_seconds_text:
details["elapsed_seconds_text"] = schedule.elapsed_seconds_text
details["elapsed_seconds_answer"] = schedule.elapsed_seconds_answer
else:
details["elapsed_milliseconds_text"] = schedule.elapsed_milliseconds_text
details["elapsed_milliseconds_answer"] = schedule.elapsed_milliseconds_answer
history_in = schemas.HistoryCreate(
time=date_studied,
user_id=user.id,
fact_id=db_obj.fact_id,
log_type=schemas.Log.study,
details=details
)
history = crud.history.create(db=db, obj_in=history_in)
payload_update = [schemas.KarlFactUpdate(
text=db_obj.text,
user_id=user.id,
repetition_model=user.repetition_model,
fact_id=db_obj.fact_id,
history_id=history.id,
category=db_obj.category,
deck_name=db_obj.deck.title,
deck_id=db_obj.deck_id,
answer=db_obj.answer,
env=settings.ENVIRONMENT,
elapsed_seconds_text=schedule.elapsed_seconds_text,
elapsed_seconds_answer=schedule.elapsed_seconds_answer,
elapsed_milliseconds_text=schedule.elapsed_milliseconds_text,
elapsed_milliseconds_answer=schedule.elapsed_milliseconds_answer,
label=response,
debug_id=schedule.debug_id).dict(exclude_unset=True)]
logger.info(payload_update[0])
request = requests.post(settings.INTERFACE + "api/karl/update", json=payload_update)
logger.info(request.request)
if 200 <= request.status_code < 300:
return True
else:
return False
except requests.exceptions.RequestException as e:
capture_exception(e)
return e
except json.decoder.JSONDecodeError as e:
capture_exception(e)
return e
def load_json_facts(self, db: Session, file: SpooledTemporaryFile, user: models.User) -> str:
count = 0
json_data = json.load(file)
for fact_obj in json_data:
self.create_fact(db, fact_obj, user, False)
count += 1
logger.info(f"{count} facts loaded from txt file")
def load_txt_facts(self, db: Session, file: SpooledTemporaryFile, user: models.User,
props: schemas.FileProps) -> str:
count = 0
with file as f:
df = pandas.read_csv(f, sep=props.delimeter, names=props.headers, index_col=False)
for index, fact_obj in df.iterrows():
if schemas.Field.deck in props.headers and not pandas.isna(fact_obj[schemas.Field.deck]):
deck_id = crud.deck.find_or_create(db, proposed_deck=fact_obj["deck"], user=user).id
else:
deck_id = props.default_deck.id
fact_in = schemas.FactCreate(
text=fact_obj[schemas.Field.text],
answer=fact_obj[schemas.Field.answer],
deck_id=deck_id,
answer_lines=[fact_obj[schemas.Field.answer]],
extra={"type": "uploaded"}
)
if schemas.Field.identifier in props.headers and not pandas.isna(fact_obj[schemas.Field.identifier]):
fact_in.identifier = fact_obj[schemas.Field.identifier]
if schemas.Field.category in props.headers and not pandas.isna(fact_obj[schemas.Field.category]):
fact_in.identifier = fact_obj[schemas.Field.category]
crud.fact.create_with_owner(db, obj_in=fact_in, user=user)
count += 1
logger.info(f"{count} facts loaded from txt file")
def create_fact(self, db: Session, fact_obj: Any, user: models.User, public: bool):
deck = crud.deck.find_or_create(db, proposed_deck=fact_obj["deck"], user=user, public=public)
fact_in = schemas.FactCreate(
text=fact_obj["text"],
answer=fact_obj["answer"],
deck_id=deck.id,
answer_lines=fact_obj["answer_lines"],
identifier=fact_obj["identifier"],
category=fact_obj["category"],
extra=fact_obj["extra"]
)
crud.fact.create_with_owner(db, obj_in=fact_in, user=user)
fact = CRUDFact(models.Fact) | backend/app/app/crud/crud_fact.py | import json
import logging
import time
from datetime import datetime
from tempfile import SpooledTemporaryFile
from typing import List, Union, Dict, Any, Optional
import pandas
import requests
from fastapi.encoders import jsonable_encoder
from pytz import timezone
from sentry_sdk import capture_exception
from sqlalchemy import and_, not_, func
from sqlalchemy.orm import Session, Query
from app import crud, models, schemas
from app.core.config import settings
from app.crud.base import CRUDBase
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class CRUDFact(CRUDBase[models.Fact, schemas.FactCreate, schemas.FactUpdate]):
def get(self, db: Session, id: Any) -> Optional[models.Fact]:
db_obj = db.query(self.model).filter(models.Fact.fact_id == id).first()
return db_obj
def get_schema_with_perm(self, db_obj: models.Fact, user: models.User):
schema = schemas.Fact.from_orm(db_obj)
schema.permission = db_obj.permissions(user)
schema.marked = db_obj.is_marked(user)
schema.suspended = db_obj.is_suspended(user)
schema.reports = db_obj.find_reports(user)
return schema
def create_with_owner(
self, db: Session, *, obj_in: schemas.FactCreate, user: models.User
) -> models.Fact:
obj_in_data = jsonable_encoder(obj_in)
now = datetime.now(timezone('UTC')).isoformat()
db_obj = self.model(**obj_in_data,
user_id=user.id,
create_date=now,
update_date=now)
db.add(db_obj)
db.commit()
return db_obj
def get_multi_by_owner(
self,
db: Session,
*,
user: Optional[models.User] = None,
skip: Optional[int] = None,
limit: Optional[int] = None,
) -> List[models.Fact]:
query = db.query(self.model)
if user:
query = query.filter(models.Fact.user_id == user.id)
if skip:
query = query.offset(skip)
if limit:
query = query.offset(limit)
return query.all()
def update(
self, db: Session, *, db_obj: models.Fact, obj_in: Union[schemas.FactUpdate, Dict[str, Any]]
) -> models.Fact:
update_data = obj_in.dict(exclude_unset=True)
update_data["update_date"] = datetime.now(timezone('UTC')).isoformat()
return super().update(db, db_obj=db_obj, obj_in=update_data)
def remove(
self, db: Session, *, db_obj: models.Fact, user: models.User
) -> models.Fact:
now = datetime.now(timezone('UTC'))
delete = models.Deleted(deleter=user, deleted_fact=db_obj, date_deleted=now)
db.add(delete)
db.commit()
history_in = schemas.HistoryCreate(
time=now,
user_id=user.id,
fact_id=db_obj.fact_id,
log_type=schemas.Log.delete,
details={"study_system": user.repetition_model}
)
crud.history.create(db=db, obj_in=history_in)
return db_obj
def suspend(
self, db: Session, *, db_obj: models.Fact, user: models.User
) -> models.Fact:
now = datetime.now(timezone('UTC'))
suspend = models.Suspended(suspender=user,
suspended_fact=db_obj,
date_suspended=now)
db.add(suspend)
db.commit()
history_in = schemas.HistoryCreate(
time=now,
user_id=user.id,
fact_id=db_obj.fact_id,
log_type=schemas.Log.suspend,
details={"study_system": user.repetition_model}
)
crud.history.create(db=db, obj_in=history_in)
return db_obj
def report(
self, db: Session, *, db_obj: models.Fact, user: models.User, suggestion: schemas.FactToReport
) -> models.Fact:
now = datetime.now(timezone('UTC'))
report = models.Reported(reporter=user,
reported_fact=db_obj,
date_reported=datetime.now(timezone('UTC')),
suggestion=suggestion)
db.add(report)
db.commit()
history_in = schemas.HistoryCreate(
time=now,
user_id=user.id,
fact_id=db_obj.fact_id,
log_type=schemas.Log.report,
details={"study_system": user.repetition_model}
)
crud.history.create(db=db, obj_in=history_in)
return db_obj
def mark(
self, db: Session, *, db_obj: models.Fact, user: models.User
) -> models.Fact:
now = datetime.now(timezone('UTC'))
mark = models.Marked(marker=user, marked_fact=db_obj, date_marked=now)
db.add(mark)
db.commit()
history_in = schemas.HistoryCreate(
time=now,
user_id=user.id,
fact_id=db_obj.fact_id,
log_type=schemas.Log.mark,
details={"study_system": user.repetition_model}
)
crud.history.create(db=db, obj_in=history_in)
return db_obj
def undo_remove(
self, db: Session, *, db_obj: models.Fact, user: models.User
) -> models.Fact:
db.query(models.Deleted).filter(
and_(models.Deleted.fact_id == db_obj.fact_id, models.Deleted.user_id == user.id)).delete(
synchronize_session=False)
db.commit()
history_in = schemas.HistoryCreate(
time=datetime.now(timezone('UTC')).isoformat(),
user_id=user.id,
fact_id=db_obj.fact_id,
log_type=schemas.Log.undo_delete,
details={"study_system": user.repetition_model}
)
crud.history.create(db=db, obj_in=history_in)
return db_obj
def undo_suspend(
self, db: Session, *, db_obj: models.Fact, user: models.User
) -> models.Fact:
db.query(models.Suspended) \
.filter(and_(models.Suspended.suspended_fact == db_obj,
models.Suspended.suspender == user)).delete(synchronize_session=False)
db.commit()
history_in = schemas.HistoryCreate(
time=datetime.now(timezone('UTC')).isoformat(),
user_id=user.id,
fact_id=db_obj.fact_id,
log_type=schemas.Log.undo_suspend,
details={"study_system": user.repetition_model}
)
crud.history.create(db=db, obj_in=history_in)
return db_obj
def undo_report(
self, db: Session, *, db_obj: models.Fact, user: models.User
) -> models.Fact:
db.query(models.Reported) \
.filter(and_(models.Reported.fact_id == db_obj.fact_id,
models.Reported.user_id == user.id)).delete(synchronize_session=False)
db.commit()
history_in = schemas.HistoryCreate(
time=datetime.now(timezone('UTC')).isoformat(),
user_id=user.id,
fact_id=db_obj.fact_id,
log_type=schemas.Log.undo_report,
details={"study_system": user.repetition_model}
)
crud.history.create(db=db, obj_in=history_in)
return db_obj
def resolve_report(
self, db: Session, *, user: models.User, db_obj: models.Fact
) -> models.Fact:
db.query(models.Reported) \
.filter(models.Reported.fact_id == db_obj.fact_id).delete(
synchronize_session=False)
db.commit()
history_in = schemas.HistoryCreate(
time=datetime.now(timezone('UTC')).isoformat(),
user_id=user.id,
fact_id=db_obj.fact_id,
log_type=schemas.Log.resolve_report,
details={"study_system": user.repetition_model}
)
crud.history.create(db=db, obj_in=history_in)
return db_obj
def undo_mark(
self, db: Session, *, db_obj: models.Fact, user: models.User
) -> models.Fact:
db.query(models.Marked) \
.filter(and_(models.Marked.marked_fact == db_obj,
models.Marked.marker == user)).delete(synchronize_session=False)
db.commit()
history_in = schemas.HistoryCreate(
time=datetime.now(timezone('UTC')).isoformat(),
user_id=user.id,
fact_id=db_obj.fact_id,
log_type=schemas.Log.undo_mark,
details={"study_system": user.repetition_model}
)
crud.history.create(db=db, obj_in=history_in)
return db_obj
def build_facts_query(self, db: Session, *, user: models.User, filters: schemas.FactSearch = schemas.FactSearch()):
visible_decks = (
db.query(models.Deck.id).join(models.User_Deck).filter(models.User_Deck.owner_id == user.id).subquery())
user_facts = (db.query(models.Fact).join(visible_decks, models.Fact.deck_id == visible_decks.c.id).filter(
models.Fact.user_id == user.id))
deck_owners = (db.query(models.User_Deck.deck_id, models.User_Deck.owner_id)
.outerjoin(visible_decks)
.filter(models.User_Deck.permissions == schemas.Permission.owner).subquery())
filtered_facts = (db.query(models.Fact)
.join(visible_decks, models.Fact.deck_id == visible_decks.c.id)
.join(deck_owners,
and_(models.Fact.deck_id == deck_owners.c.deck_id,
models.Fact.user_id == deck_owners.c.owner_id)))
facts_query = (user_facts.union(filtered_facts))
# Don't allow Jeopardy facts
facts_query = facts_query.filter(models.Fact.deck_id != 2)
if filters.studyable:
facts_query = (facts_query
.outerjoin(models.Deleted,
and_(models.Fact.fact_id == models.Deleted.fact_id,
models.Deleted.user_id == user.id))
.filter(models.Deleted.user_id == None)
.outerjoin(models.Reported,
and_(models.Fact.fact_id == models.Reported.fact_id,
models.Reported.user_id == user.id)
)
.filter(models.Reported.user_id == None)
.outerjoin(models.Suspended,
and_(models.Fact.fact_id == models.Suspended.fact_id,
models.Suspended.user_id == user.id)
)
.filter(models.Suspended.user_id == None))
else:
facts_query = (facts_query
.outerjoin(models.Deleted,
and_(models.Fact.fact_id == models.Deleted.fact_id,
models.Deleted.user_id == user.id))
.filter(models.Deleted.user_id == None))
if filters.suspended is not None:
if filters.suspended:
facts_query = facts_query.join(models.Suspended).filter(models.Suspended.user_id == user.id)
else:
facts_query = (facts_query
.outerjoin(models.Suspended,
and_(models.Fact.fact_id == models.Suspended.fact_id,
models.Suspended.user_id == user.id)
)
.filter(models.Suspended.user_id == None))
if filters.reported is not None:
if filters.reported:
facts_query = facts_query.join(models.Reported)
if not user.is_superuser:
facts_query = facts_query.filter(models.Reported.user_id == user.id)
else:
facts_query = (facts_query
.outerjoin(models.Reported,
and_(models.Fact.fact_id == models.Reported.fact_id,
models.Reported.user_id == user.id)
)
.filter(models.Reported.user_id == None))
if filters.all:
facts_query = facts_query.filter(
models.Fact.__ts_vector__.op('@@')(func.plainto_tsquery('english', filters.all)))
if filters.text:
facts_query = facts_query.filter(models.Fact.text.ilike(filters.text))
if filters.answer:
facts_query = facts_query.filter(models.Fact.answer.ilike(filters.answer))
if filters.category:
facts_query = facts_query.filter(models.Fact.category.ilike(filters.category))
if filters.identifier:
facts_query = facts_query.filter(models.Fact.identifier.ilike(filters.identifier))
if filters.deck_ids:
facts_query = facts_query.filter(models.Fact.deck_id.in_(filters.deck_ids))
if filters.deck_id:
facts_query = facts_query.filter(models.Fact.deck_id == filters.deck_id)
if filters.marked is not None:
if filters.marked:
facts_query = facts_query.filter(models.Fact.markers.any(id=user.id))
else:
facts_query = facts_query.filter(not_(models.Fact.markers.any(id=user.id)))
if filters.randomize:
facts_query = facts_query.order_by(func.random())
return facts_query
def count_eligible_facts(
self, query: Query
) -> int:
begin_overall_start = time.time()
facts = query.distinct().count()
overall_end_time = time.time()
overall_total_time = overall_end_time - begin_overall_start
logger.info("overall time count: " + str(overall_total_time))
return facts
def get_eligible_facts(
self, query: Query, skip: int = None, limit: int = None
) -> List[models.Fact]:
begin_overall_start = time.time()
if skip:
query = query.offset(skip)
if limit:
query = query.limit(limit)
facts = query.all()
overall_end_time = time.time()
overall_total_time = overall_end_time - begin_overall_start
logger.info("overall time facts: " + str(overall_total_time))
return facts
def get_study_set(
self,
db: Session,
*,
user: models.User,
deck_ids: List[int] = None,
return_limit: Optional[int] = None,
send_limit: Optional[int] = 300,
) -> Union[List[schemas.Fact], requests.exceptions.RequestException, json.decoder.JSONDecodeError]:
filters = schemas.FactSearch(deck_ids=deck_ids, limit=send_limit, randomize=True, studyable=True)
query = crud.fact.build_facts_query(db=db, user=user, filters=filters)
eligible_facts = self.get_eligible_facts(query=query, limit=send_limit)
if not eligible_facts:
return []
karl_list = []
karl_list_start = time.time()
for each_card in eligible_facts:
karl_list.append(schemas.KarlFact(
text=each_card.text,
answer=each_card.answer,
category=each_card.category,
deck_name=each_card.deck.title,
deck_id=each_card.deck_id,
user_id=user.id,
fact_id=each_card.fact_id,
repetition_model=user.repetition_model,
env=settings.ENVIRONMENT
).dict())
eligible_fact_time = time.time() - karl_list_start
logger.info("eligible fact time: " + str(eligible_fact_time))
karl_query_start = time.time()
try:
scheduler_response = requests.post(settings.INTERFACE + "api/karl/schedule", json=karl_list)
response_json = scheduler_response.json()
card_order = response_json["order"]
rationale = response_json["rationale"]
debug_id = response_json["debug_id"]
query_time = time.time() - karl_query_start
logger.info(scheduler_response.request)
logger.info("query time: " + str(query_time))
facts = []
if rationale != "<p>no fact received</p>":
reordered_karl_list = [karl_list[x] for x in card_order]
if return_limit:
for _, each_karl_fact in zip(range(return_limit), reordered_karl_list):
retrieved_fact = self.get(db=db, id=int(each_karl_fact["fact_id"]))
fact_schema = self.get_schema_with_perm(db_obj=retrieved_fact, user=user)
fact_schema.rationale = rationale
fact_schema.debug_id = debug_id
if retrieved_fact:
fact_schema.marked = True if user in retrieved_fact.markers else False
facts.append(fact_schema)
else:
for each_karl_fact in reordered_karl_list:
retrieved_fact = self.get(db=db, id=int(each_karl_fact["fact_id"]))
fact_schema = self.get_schema_with_perm(db_obj=retrieved_fact, user=user)
fact_schema.rationale = rationale
# MARK: maybe not the most efficient solution for determining if user has marked a fact
if retrieved_fact:
fact_schema.marked = retrieved_fact.is_marked(user)
facts.append(fact_schema)
details = {
"study_system": user.repetition_model,
"first_fact": facts[0] if len(facts) != 0 else "empty",
"eligible_fact_time": query_time,
"scheduler_query_time": eligible_fact_time,
"debug_id": debug_id,
}
history_in = schemas.HistoryCreate(
time=datetime.now(timezone('UTC')).isoformat(),
user_id=user.id,
log_type=schemas.Log.get_facts,
details=details
)
crud.history.create(db=db, obj_in=history_in)
return facts
except requests.exceptions.RequestException as e:
capture_exception(e)
return e
except json.decoder.JSONDecodeError as e:
capture_exception(e)
return e
def update_schedule(
self, db: Session, *, user: models.User, db_obj: models.Fact, schedule: schemas.Schedule
) -> Union[bool, requests.exceptions.RequestException, json.decoder.JSONDecodeError]:
try:
response = schedule.response
date_studied = datetime.now(timezone('UTC')).isoformat()
details = {
"study_system": user.repetition_model,
"typed": schedule.typed,
"response": schedule.response,
"debug_id": schedule.debug_id,
}
if schedule.elapsed_seconds_text:
details["elapsed_seconds_text"] = schedule.elapsed_seconds_text
details["elapsed_seconds_answer"] = schedule.elapsed_seconds_answer
else:
details["elapsed_milliseconds_text"] = schedule.elapsed_milliseconds_text
details["elapsed_milliseconds_answer"] = schedule.elapsed_milliseconds_answer
history_in = schemas.HistoryCreate(
time=date_studied,
user_id=user.id,
fact_id=db_obj.fact_id,
log_type=schemas.Log.study,
details=details
)
history = crud.history.create(db=db, obj_in=history_in)
payload_update = [schemas.KarlFactUpdate(
text=db_obj.text,
user_id=user.id,
repetition_model=user.repetition_model,
fact_id=db_obj.fact_id,
history_id=history.id,
category=db_obj.category,
deck_name=db_obj.deck.title,
deck_id=db_obj.deck_id,
answer=db_obj.answer,
env=settings.ENVIRONMENT,
elapsed_seconds_text=schedule.elapsed_seconds_text,
elapsed_seconds_answer=schedule.elapsed_seconds_answer,
elapsed_milliseconds_text=schedule.elapsed_milliseconds_text,
elapsed_milliseconds_answer=schedule.elapsed_milliseconds_answer,
label=response,
debug_id=schedule.debug_id).dict(exclude_unset=True)]
logger.info(payload_update[0])
request = requests.post(settings.INTERFACE + "api/karl/update", json=payload_update)
logger.info(request.request)
if 200 <= request.status_code < 300:
return True
else:
return False
except requests.exceptions.RequestException as e:
capture_exception(e)
return e
except json.decoder.JSONDecodeError as e:
capture_exception(e)
return e
def load_json_facts(self, db: Session, file: SpooledTemporaryFile, user: models.User) -> str:
count = 0
json_data = json.load(file)
for fact_obj in json_data:
self.create_fact(db, fact_obj, user, False)
count += 1
logger.info(f"{count} facts loaded from txt file")
def load_txt_facts(self, db: Session, file: SpooledTemporaryFile, user: models.User,
props: schemas.FileProps) -> str:
count = 0
with file as f:
df = pandas.read_csv(f, sep=props.delimeter, names=props.headers, index_col=False)
for index, fact_obj in df.iterrows():
if schemas.Field.deck in props.headers and not pandas.isna(fact_obj[schemas.Field.deck]):
deck_id = crud.deck.find_or_create(db, proposed_deck=fact_obj["deck"], user=user).id
else:
deck_id = props.default_deck.id
fact_in = schemas.FactCreate(
text=fact_obj[schemas.Field.text],
answer=fact_obj[schemas.Field.answer],
deck_id=deck_id,
answer_lines=[fact_obj[schemas.Field.answer]],
extra={"type": "uploaded"}
)
if schemas.Field.identifier in props.headers and not pandas.isna(fact_obj[schemas.Field.identifier]):
fact_in.identifier = fact_obj[schemas.Field.identifier]
if schemas.Field.category in props.headers and not pandas.isna(fact_obj[schemas.Field.category]):
fact_in.identifier = fact_obj[schemas.Field.category]
crud.fact.create_with_owner(db, obj_in=fact_in, user=user)
count += 1
logger.info(f"{count} facts loaded from txt file")
def create_fact(self, db: Session, fact_obj: Any, user: models.User, public: bool):
deck = crud.deck.find_or_create(db, proposed_deck=fact_obj["deck"], user=user, public=public)
fact_in = schemas.FactCreate(
text=fact_obj["text"],
answer=fact_obj["answer"],
deck_id=deck.id,
answer_lines=fact_obj["answer_lines"],
identifier=fact_obj["identifier"],
category=fact_obj["category"],
extra=fact_obj["extra"]
)
crud.fact.create_with_owner(db, obj_in=fact_in, user=user)
fact = CRUDFact(models.Fact) | 0.720368 | 0.075244 |
# In[ ]:
import numpy as np
import matplotlib.pyplot as plt
from skimage.color import rgb2gray
from skimage.filters import gaussian
import scipy
import cv2
from scipy import ndimage
import Image_preperation as prep
import FitFunction as fit
import FileManager as fm
import Image_preperation as prep
def calc_mean(points):
    """Return the mean length of the segments of the closed contour `points`.

    The closing segment (last point back to the first) is included.
    """
    n = len(points)
    # Closing segment first, then every consecutive pair.
    total = scipy.spatial.distance.euclidean(points[-1], points[0])
    for a, b in zip(points[:-1], points[1:]):
        total += scipy.spatial.distance.euclidean(a, b)
    return total / n
def calc_internal2(p1, p2, mean_points):
    """Squared-distance internal energy between p1 and p2, normalized by
    the contour's mean segment length."""
    diff = p2 - p1
    return np.dot(diff, diff) / mean_points
def calc_internal(p1, p2, mean_points):
    """Euclidean-distance internal energy between p1 and p2, normalized by
    the contour's mean segment length."""
    dist = scipy.spatial.distance.euclidean(p1, p2)
    return dist / mean_points
def calc_external_img2(img):
    """External energy image built from low-threshold edge detection.

    The median-filtered image's edges are negated so that edge pixels get
    low (attractive) energy.
    """
    smoothed = prep.median_filter(img)
    return -prep.edge_detection_low(smoothed)
def calc_external_img(img):
    """Sobel gradient-magnitude image used as external energy.

    img: 2-D grayscale image (any numeric dtype).
    Returns a float64 array of the same shape holding sqrt(Gx**2 + Gy**2)
    at every pixel.
    """
    # BUG FIX: the previous version filtered in int16, so squaring the
    # gradients overflowed (|G| can reach 4 * 255 for uint8 input, and
    # 1020**2 > 32767). Compute in float64 instead.
    img = np.asarray(img, dtype=np.float64)
    kx = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=np.float64)
    ky = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], dtype=np.float64)
    # ndimage.correlate with mode='mirror' reproduces cv2.filter2D's default
    # BORDER_REFLECT_101 border handling (the file already imports ndimage).
    gx = ndimage.correlate(img, kx, mode='mirror')
    gy = ndimage.correlate(img, ky, mode='mirror')
    return np.sqrt(gx ** 2 + gy ** 2)
def calc_external(p, external_img):
    """Sample the external energy image at pixel p = (x, y).

    The value is divided by the magnitude of the image's most negative
    entry, so the strongest edge response maps to -1.
    """
    xi, yi = p.astype(int)
    scale = np.abs(np.min(external_img))
    return external_img[yi, xi] / scale
def calc_energy(p1, p2, external_img, mean_points, alpha):
    """Transition energy from p1 to p2: internal (spacing) energy plus the
    external (image) energy at p1, weighted by alpha."""
    e_int = calc_internal(p1, p2, mean_points)
    e_ext = calc_external(p1, external_img)
    return e_int + alpha * e_ext
def get_point_state(point, rad, number, pixel_width):
    """Map a Viterbi state index to a point displaced along the normal.

    State indices enumerate signed offsets 0, +1, -1, +2, -2, ...: odd
    indices are positive distances, even indices negative ones.  The point
    is moved by that distance along the normal direction `rad`.
    `pixel_width` is unused here but kept for interface compatibility.
    """
    # BUG FIX: the parity test used floor division (number // 2 == 1),
    # which only treated indices 2 and 3 as "positive" and produced
    # fractional offsets like -0.5; parity needs the modulo operator.
    if number % 2 == 1:
        state = (number + 1) / 2
    else:
        state = -(number / 2)
    return fit.get_point_at_distance(point, state, rad)
def unpack(number, back_pointers, angles, points, pixel_width):
    """Backtrack through the Viterbi back-pointers and return the (N, 2)
    array of chosen point positions.

    `number` is the best state of the last point; earlier states are
    recovered by following back_pointers from the end to the start.
    """
    n = len(points)
    result = np.empty((n, 2))
    result[-1] = get_point_state(points[-1], angles[-1], number, pixel_width)
    state = back_pointers[-1, number]
    for idx in reversed(range(n - 1)):
        result[idx] = get_point_state(points[idx], angles[idx], state, pixel_width)
        state = back_pointers[idx, state]
    return result
# Viterbi dynamic programming for the snake update, after:
# https://courses.engr.illinois.edu/cs447/fa2017/Slides/Lecture07.pdf
def active_contour(points, edge_img, pixel_width, alpha):
    """One active-contour pass: move every point to its best position along
    its normal, chosen jointly by dynamic programming (Viterbi).

    points: (N, 2) contour points.
    edge_img: edge/energy image; boolean edge maps are converted to int8.
    pixel_width: each point may shift up to pixel_width pixels along its
        normal, giving 2 * pixel_width + 1 candidate states per point.
    alpha: weight of the external (image) energy against the internal
        (spacing) energy.

    Returns the (N, 2) array of updated point positions.

    NOTE(review): the trellis accumulates in float16, which limits the
    precision of summed energies — confirm this is a deliberate memory
    trade-off.
    NOTE(review): only transitions points[0] -> ... -> points[-1] are
    scored; the closing transition back to points[0] is not.
    """
    size = len(points)
    num_states = (2*pixel_width +1)
    # trellis[i, t] = lowest accumulated energy of any path ending with
    # point i in state t; back_pointers[i, t] = the predecessor state.
    trellis = np.zeros((size, num_states), dtype=np.float16)
    back_pointers = np.zeros((size, num_states), dtype=int)
    #external_img = calc_external_img(img)
    # Negate the edge image so edge pixels have low (attractive) energy.
    if(np.dtype('bool') == edge_img.dtype):
        external_img = -np.array(edge_img,dtype=np.int8)
    else:
        external_img = -edge_img
    mean_points = calc_mean(points)
    # Init: the first point contributes no transition energy.
    trellis[0,:] = np.zeros((num_states))
    back_pointers[0,:] = np.zeros((num_states))
    angles = get_angles_of(points)
    # Recursion: for every point/state pair, keep the predecessor state
    # that minimizes the accumulated energy.
    for i in range(1, size):
        for t in range(num_states):
            trellis[i,t] = np.inf
            for d in range(num_states):
                p1 = get_point_state(points[i-1], angles[i-1], d, pixel_width)
                p2 = get_point_state(points[i],angles[i], t, pixel_width)
                energy_trans = calc_energy(p1, p2, external_img,mean_points, alpha)
                tmp = trellis[i-1,d] + energy_trans
                if(tmp < trellis[i,t]):
                    trellis[i,t] = tmp
                    back_pointers[i,t] = d
    # Pick the best final state, then backtrack through the pointers.
    t_best, vit_min = 0, np.inf
    for t in range(num_states):
        if(trellis[size-1, t] < vit_min):
            t_best = t
            vit_min = trellis[size-1, t]
    new_points = unpack(t_best, back_pointers,angles, points, pixel_width)
    return new_points
def active_contour_loop(points, img, max_loop, pixel_width, alpha):
    """Iterate active_contour up to *max_loop* times, stopping early on convergence.

    Between iterations the contour is rotated by 6 points so segment
    boundaries fall elsewhere on the next pass.

    NOTE(review): the convergence test compares the new contour against the
    *rotated* previous one, so the early break can only fire for a
    rotation-invariant contour — confirm this is intended.
    """
    old_points = points
    for i in range(max_loop):
        new_points = active_contour(old_points, img, pixel_width, alpha)
        if np.array_equal(new_points, old_points):
            print(i)  # iteration index at which the fit stopped changing
            break
        #old_points = new_points
        # Rotate the contour by 6 points before the next pass.
        head, tail = np.split(new_points, [6])
        old_points = np.append(tail, head).reshape(new_points.shape)
    return new_points
def resolution_scale(img, points, scale):
    """Scale an image and its landmark points by the same factor.

    Returns (scaled_img, scaled_points).
    """
    scaled_points = resolution_scale_points(points, scale)
    scaled_img = resolution_downscale_img(img, scale)
    return scaled_img, scaled_points
def resolution_scale_points(points, scale):
    """Scale landmark coordinates by *scale* and round to the nearest integer grid."""
    scaled = scale * points
    return np.around(scaled)
def resolution_downscale_img(img, scale):
    """Resize a 2-D image by *scale*; cv2.resize expects (width, height) order."""
    rows, cols = img.shape
    new_size = (int(cols * scale), int(rows * scale))
    return cv2.resize(img, new_size)
def get_angles_of(points):
    """Normal angle at every contour point, wrapping around the closed contour."""
    count = len(points)
    angles = np.zeros(count)
    for idx in range(count):
        prev_pt = points[idx - 1]           # index -1 wraps to the last point
        next_pt = points[(idx + 1) % count]  # modulo wraps to the first point
        angles[idx] = fit.get_normal_angle(prev_pt, points[idx], next_pt)
    return angles
def show_results():
    """Demo: run one active-contour pass on tooth 2 of the sample piece and show before/after."""
    piece = fm.load_img_piece()
    edge_img = prep.canny(piece)
    tooth = fm.load_tooth_of_piece(2)
    fm.show_with_points(edge_img, tooth)
    # pixel_width=25 search band, alpha=1 external-energy weight
    new_tooth = active_contour(tooth, edge_img, 25, 1)
    fm.show_with_points(edge_img, new_tooth)
def show_influence_ext_int():
    """Debug helper: print internal/external/combined energies for the first two tooth points.

    NOTE(review): `piece` and `tooth` are read from module globals that are
    only assigned inside the `__main__` guard — calling this on import raises
    NameError. Presumably meant for interactive/notebook use; confirm.
    """
    new_piece, new_tooth = piece, tooth
    mean = calc_mean(new_tooth)
    ext = calc_external_img(new_piece)
    fm.show_with_points(ext, new_tooth[0:2])
    print(calc_external(new_tooth[0],ext))
    print(calc_internal(new_tooth[0], new_tooth[1], mean))
    print(calc_energy(new_tooth[0],new_tooth[1],ext,mean,10))
# In[ ]:
if __name__ == "__main__":
    # Smoke test: load the sample piece and tooth landmarks, display the
    # external-energy image with the points, then repeat at 1/6 resolution.
    piece = fm.load_img_piece()
    tooth = fm.load_tooth_of_piece()
    ext = prep.calc_external_img_active_contour(piece)
    fm.show_with_points(ext, tooth)
    ext2, stooth = fm.resolution_scale(ext, tooth, 1/6)
fm.show_with_points(ext2, stooth) | ActiveFitContour.py |
# In[ ]:
import numpy as np
import matplotlib.pyplot as plt
from skimage.color import rgb2gray
from skimage.filters import gaussian
import scipy
import cv2
from scipy import ndimage
import Image_preperation as prep
import FitFunction as fit
import FileManager as fm
import Image_preperation as prep
def calc_mean(points):
size = len(points)
p1 = points[-1]
p2 = points[0]
mean_sum = scipy.spatial.distance.euclidean(p1,p2)
for i in range(size-1):
p1 = points[i]
p2 = points[i+1]
mean_sum += scipy.spatial.distance.euclidean(p1,p2)
return mean_sum / size
def calc_internal2(p1,p2,mean_points):
return np.sum( (p2 - p1)**2 ) / mean_points
def calc_internal(p1,p2,mean_points):
return scipy.spatial.distance.euclidean(p1,p2) / mean_points
def calc_external_img2(img):
median = prep.median_filter(img)
edges = prep.edge_detection_low(median)
return -edges
def calc_external_img(img):
img = np.array(img, dtype=np.int16)
kx = np.array([[-1,0,1],[-2,0,2],[-1,0,1]])
Gx = cv2.filter2D(img,-1,kx)
ky = np.array([[-1,-2,-1],[0,0,0],[1,2,1]])
Gy = cv2.filter2D(img,-1,ky)
G = np.sqrt(Gx**2 + Gy**2)
return G
def calc_external(p, external_img):
p = p.astype(int)
max_value = np.abs(np.min(external_img))
return external_img[p[1],p[0]] / max_value
def calc_energy(p1, p2, external_img, mean_points,alpha):
internal = calc_internal(p1,p2, mean_points)
external = calc_external(p1, external_img)
return internal + alpha * external
def get_point_state(point, rad, number, pixel_width):
positive = number // 2
if(positive == 1):
state = (number + 1) / 2
else:
state = -(number / 2)
return fit.get_point_at_distance(point, state, rad)
def unpack(number, back_pointers, angles, points, pixel_width):
size = len(points)
new_points = np.empty((size,2))
new_points[-1] = get_point_state(points[-1],angles[-1], number, pixel_width)
pointer = back_pointers[-1,number]
for i in range(size-2, -1, -1):
new_points[i] = get_point_state(points[i],angles[i], pointer, pixel_width)
pointer = back_pointers[i,pointer]
return new_points
#https://courses.engr.illinois.edu/cs447/fa2017/Slides/Lecture07.pdf
#viterbi algo
def active_contour(points, edge_img, pixel_width, alpha):
size = len(points)
num_states = (2*pixel_width +1)
trellis = np.zeros((size, num_states), dtype=np.float16)
back_pointers = np.zeros((size, num_states), dtype=int)
#external_img = calc_external_img(img)
if(np.dtype('bool') == edge_img.dtype):
external_img = -np.array(edge_img,dtype=np.int8)
else:
external_img = -edge_img
mean_points = calc_mean(points)
#init
trellis[0,:] = np.zeros((num_states))
back_pointers[0,:] = np.zeros((num_states))
angles = get_angles_of(points)
#recursion
for i in range(1, size):
for t in range(num_states):
trellis[i,t] = np.inf
for d in range(num_states):
p1 = get_point_state(points[i-1], angles[i-1], d, pixel_width)
p2 = get_point_state(points[i],angles[i], t, pixel_width)
energy_trans = calc_energy(p1, p2, external_img,mean_points, alpha)
tmp = trellis[i-1,d] + energy_trans
if(tmp < trellis[i,t]):
trellis[i,t] = tmp
back_pointers[i,t] = d
#find best
t_best, vit_min = 0, np.inf
for t in range(num_states):
if(trellis[size-1, t] < vit_min):
t_best = t
vit_min = trellis[size-1, t]
new_points = unpack(t_best, back_pointers,angles, points, pixel_width)
return new_points
def active_contour_loop(points, img, max_loop, pixel_width, alpha):
old_points = points
for i in range(max_loop):
new_points = active_contour(old_points, img, pixel_width, alpha)
if np.array_equal(new_points, old_points):
print(i)
break
#old_points = new_points
head, tail = np.split(new_points, [6])
old_points = np.append(tail, head).reshape(new_points.shape)
return new_points
def resolution_scale(img, points, scale):
new_points = resolution_scale_points(points, scale)
new_img = resolution_downscale_img(img, scale)
return new_img, new_points
def resolution_scale_points(points, scale):
return np.around(points*scale)
def resolution_downscale_img(img, scale):
x, y = img.shape
xn = int(x*scale)
yn = int(y*scale)
return cv2.resize(img, (yn ,xn))
def get_angles_of(points):
size = len(points)
angles = np.zeros(size)
for i in range(size):
if(i==size-1):
p1, p2, p3 = points[i-1], points[i], points[0]
else:
p1, p2, p3 = points[i-1], points[i], points[i+1]
angles[i] = fit.get_normal_angle(p1, p2, p3)
return angles
def show_results():
piece = fm.load_img_piece()
edge_img = prep.canny(piece)
tooth = fm.load_tooth_of_piece(2)
fm.show_with_points(edge_img, tooth)
new_tooth = active_contour(tooth, edge_img, 25, 1)
fm.show_with_points(edge_img, new_tooth)
def show_influence_ext_int():
new_piece, new_tooth = piece, tooth
mean = calc_mean(new_tooth)
ext = calc_external_img(new_piece)
fm.show_with_points(ext, new_tooth[0:2])
print(calc_external(new_tooth[0],ext))
print(calc_internal(new_tooth[0], new_tooth[1], mean))
print(calc_energy(new_tooth[0],new_tooth[1],ext,mean,10))
# In[ ]:
if __name__ == "__main__":
piece = fm.load_img_piece()
tooth = fm.load_tooth_of_piece()
ext = prep.calc_external_img_active_contour(piece)
fm.show_with_points(ext, tooth)
ext2, stooth = fm.resolution_scale(ext, tooth, 1/6)
fm.show_with_points(ext2, stooth) | 0.462716 | 0.567277 |
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import os
import matplotlib.cm as cm
from collections import defaultdict
import matplotlib
font = {'family' : 'sans-serif',
'variant' : 'normal',
'weight' : 'light',
'size' : 14}
matplotlib.rc('font', **font)
def read_in_edges_SR(f2, threshold=0):
    """Parse 'uid1 uid2 SR weight' lines from the iterable *f2*.

    Self-loops (uid1 == uid2) and lines with weight <= threshold are skipped.
    Returns (edges_SR, edges_SR_weighted): the SR values, and the same values
    repeated `weight` times each.
    """
    edges_SR = []
    edges_SR_weighted = []
    for line in f2:
        uid1, uid2, SR, w = line.split()
        w = int(w)
        SR = float(SR)
        if w > threshold and int(uid1) != int(uid2):
            edges_SR.append(SR)
            edges_SR_weighted.extend([SR] * w)
    return edges_SR, edges_SR_weighted
def read_in_full_SR(f2, threshold=0):
    """Parse 'uid1 uid2 SR' lines from *f2*; return SR values of non-self-loop pairs.

    *threshold* is accepted for API symmetry with read_in_edges_SR but unused.
    """
    values = []
    for line in f2:
        uid1, uid2, SR = line.split()
        if int(uid1) != int(uid2):
            values.append(float(SR))
    return values
def plot_pdf(ydata, logscale=False):
    """Plot a 100-bin histogram of SR values and save 'histogram_mention_SR[_log].eps'.

    ydata: sequence of SR floats.
    logscale: if True, use a log y-axis and append '_log' to the filename.

    NOTE(review): `normed=` and `nonposy=` were removed in matplotlib 3.x;
    this module targets an older matplotlib (the file also uses Python 2
    `print` statements elsewhere).
    """
    plt.clf()
    x = np.array(ydata)
    #x = np.log(x + 1)
    mu = np.mean(x)
    sigma = np.std(x)
    num_bins = 100
    # the histogram of the data
    n, bins, patches = plt.hist(x, num_bins, normed=0, histtype='step', color='darkorchid', alpha=0.97)
    if logscale:
        plt.yscale('log', nonposy='clip')
    # add a 'best fit' line
    #y = mlab.normpdf(bins, mu, sigma)
    #plt.plot(bins, y, 'r--', label='Normal distribution')
    plt.xlabel('SR value')
    plt.ylabel('# edges')
    plt.title(r'Histogram for mention network pairwise SR: $\mu=' + "{:.3f}".format(mu) + '$, $\sigma= ' + "{:.3f}".format(sigma) + '$')
    plt.grid(True)
    # Tweak spacing to prevent clipping of ylabel
    plt.subplots_adjust(left=0.15)
    if logscale:
        logs = "_log"
    else:
        logs = ""
    plt.savefig("histogram_mention_SR" + logs + ".eps", dpi = 550)
    #plt.show()
def plot_both_pdf(ydata, ydata2, logscale=False):
    """Overlay normalised SR histograms of the mention network and the full network.

    ydata: mention-network SR values; ydata2: full-network SR values.
    Saves '27_FIN_normed_histograms_mention_and_FULL_SR[_log].eps' and shows it.

    Python 2 code: uses the `print` statement and relies on `zip` returning a
    list (np.ravel over a py3 zip iterator would fail).
    """
    #plt.clf()
    print 'Plotting both'
    x = np.array(ydata)
    #x = np.log(x + 1)
    mu = np.mean(x)
    sigma = np.std(x)
    x2 = np.array(ydata2)
    #x = np.log(x + 1)
    mu2 = np.mean(x2)
    sigma2 = np.std(x2)
    num_bins = 100
    # the histogram of the data
    # Use plt.hist only to compute bin counts/edges; the figure is cleared and
    # the histogram is re-drawn as a step-like polyline via duplicated points.
    n, bins, patches = plt.hist(x, normed=1, bins=num_bins)
    plt.clf() # Get rid of this histogram since not the one we want.
    nx_frac = n/float(len(n)) # Each bin divided by total number of objects.
    width = bins[1] - bins[0] # Width of each bin.
    x = np.ravel(zip(bins[:-1], bins[:-1]+width))
    y = np.ravel(zip(nx_frac,nx_frac))
    n, bins, patches = plt.hist(x2, normed=1, bins=num_bins)
    plt.clf() # Get rid of this histogram since not the one we want.
    nx_frac = n/float(len(n)) # Each bin divided by total number of objects.
    width = bins[1] - bins[0] # Width of each bin.
    x2 = np.ravel(zip(bins[:-1], bins[:-1]+width))
    y2 = np.ravel(zip(nx_frac,nx_frac))
    lab1 = 'mention network SR: $\mu=' + "{:.3f}".format(mu) + '$, $\sigma= ' + "{:.3f}".format(sigma) + '$'
    plt.plot(x,y,linestyle="-",color='darkorchid',label=lab1)
    lab2 = 'full network SR: $\mu=' + "{:.3f}".format(mu2) + '$, $\sigma= ' + "{:.3f}".format(sigma2) + '$'
    plt.plot(x2,y2,linestyle="-",color='blue',label=lab2)
    if logscale:
        plt.yscale('log', nonposy='clip')
    # add a 'best fit' line
    #y = mlab.normpdf(bins, mu, sigma)
    #plt.plot(bins, y, 'r--', label='Normal distribution')
    plt.xlabel('SR')
    plt.ylabel('p(SR)')
    plt.legend()
    #plt.title(r'Histogram for mention network pairwise SR: $\mu=' + "{:.3f}".format(mu) + '$, $\sigma= ' + "{:.3f}".format(sigma) + '$')
    plt.grid(True)
    # Tweak spacing to prevent clipping of ylabel
    #plt.subplots_adjust(left=0.15)
    if logscale:
        logs = "_log"
    else:
        logs = ""
    plt.savefig("27_FIN_normed_histograms_mention_and_FULL_SR" + logs + ".eps", dpi = 550)
    plt.show()
def plot_pdf_line(ydata):
    """Plot the SR histogram as a line through bin centers; saves 'ALL_SR_line.eps'."""
    plt.clf()
    x = np.array(ydata)
    mu = np.mean(x)  # computed but unused here; kept for parity with the other plots
    sigma = np.std(x)
    num_bins = 100
    y,binEdges=np.histogram(ydata,bins=num_bins)
    # Midpoint of every bin.
    bincenters = 0.5*(binEdges[1:]+binEdges[:-1])
    plt.plot(bincenters,y,'-')
    plt.savefig("ALL_SR_line.eps", dpi = 440)
def plot_cum_distr(ydata):
    """Cumulative 100-bin histogram of SR values; saves 'ALL_SR_cum.eps'.

    Python 2 code (`print` statement).
    """
    plt.clf()
    print len(ydata)
    x = np.array(ydata)
    mu = np.mean(x)
    sigma = np.std(x)
    # cumulative=1 accumulates counts left-to-right.
    plt.hist(x, 100, normed=0, histtype='step', color='lightsalmon', alpha=0.88, cumulative=1)
    plt.xlabel('SR value')
    plt.ylabel('SR >= x')
    plt.title(r'SR cumulative distribution: $\mu=' + "{:.3f}".format(mu) + '$, $\sigma= ' + "{:.3f}".format(sigma) + '$')
    plt.grid(True)
    # Tweak spacing to prevent clipping of ylabel
    plt.subplots_adjust(left=0.15)
    plt.savefig("ALL_SR_cum.eps", dpi = 440)
    #plt.show()
def main_pdf():
    """Load the weighted mention-graph edge list and plot its SR histogram."""
    IN_DIR = "../../../DATA/CV/"
    f_in = "mention_graph_IDs_with_SR_weight.dat"
    os.chdir(IN_DIR)  # NOTE: changes the process-wide working directory
    f = open(f_in, 'r')
    edges_SR, edges_SR_weighted = read_in_edges_SR(f)
    plot_pdf(edges_SR, False)
    #plot_cum_distr(edges_SR)
    #plot_pdf_line(ydata)
#main_pdf()
def main_full_SR():
    """Load the full-network SR edge list and produce all four plot variants."""
    IN_DIR = "../../../ALL_SR/"
    os.chdir(IN_DIR)  # NOTE: changes the process-wide working directory
    f_in = "SMALL.weighted_edge_list"
    #f_in = 'alltest'
    f = open(f_in, 'r')
    edges_SR = read_in_full_SR(f)
    plot_pdf(edges_SR, False)
    plot_pdf(edges_SR, True)
    plot_cum_distr(edges_SR)
    plot_pdf_line(edges_SR)
#main_full_SR()
def main_both_pdf():
    """Plot mention-network and full-network SR distributions on one figure.

    NOTE(review): the second chdir("../../ALL_SR/") is relative to the first
    chdir target ("../../../DATA/CV/"); main_full_SR uses "../../../ALL_SR/"
    from the start directory instead — confirm the resulting path is intended.
    """
    IN_DIR = "../../../DATA/CV/"
    f_in = "mention_graph_IDs_with_SR_weight.dat"
    os.chdir(IN_DIR)
    f = open(f_in, 'r')
    edges_SR, edges_SR_weighted = read_in_edges_SR(f)
    IN_DIR = "../../ALL_SR/"
    os.chdir(IN_DIR)
    f_in = "SMALL.weighted_edge_list"
    #f_in = 'alltest'
    f = open(f_in, 'r')
    edges_SR_full = read_in_full_SR(f)
    plot_both_pdf(edges_SR, edges_SR_full, True)
main_both_pdf() | src_general/SR_pdf.py | import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import os
import matplotlib.cm as cm
from collections import defaultdict
import matplotlib
font = {'family' : 'sans-serif',
'variant' : 'normal',
'weight' : 'light',
'size' : 14}
matplotlib.rc('font', **font)
def read_in_edges_SR(f2, threshold = 0):
edges_SR = []
edges_SR_weighted = []
for line in f2:
(uid1, uid2, SR, w) = line.split()
uid1 = int(uid1)
uid2 = int(uid2)
w = int(w)
SR = float(SR)
if w > threshold:
if uid1 != uid2:
edges_SR.append(SR)
for i in range(w):
edges_SR_weighted.append(SR)
return edges_SR, edges_SR_weighted
def read_in_full_SR(f2, threshold = 0):
edges_SR = []
for line in f2:
(uid1, uid2, SR) = line.split()
uid1 = int(uid1)
uid2 = int(uid2)
SR = float(SR)
if uid1 != uid2:
edges_SR.append(SR)
return edges_SR
def plot_pdf(ydata, logscale=False):
plt.clf()
x = np.array(ydata)
#x = np.log(x + 1)
mu = np.mean(x)
sigma = np.std(x)
num_bins = 100
# the histogram of the data
n, bins, patches = plt.hist(x, num_bins, normed=0, histtype='step', color='darkorchid', alpha=0.97)
if logscale:
plt.yscale('log', nonposy='clip')
# add a 'best fit' line
#y = mlab.normpdf(bins, mu, sigma)
#plt.plot(bins, y, 'r--', label='Normal distribution')
plt.xlabel('SR value')
plt.ylabel('# edges')
plt.title(r'Histogram for mention network pairwise SR: $\mu=' + "{:.3f}".format(mu) + '$, $\sigma= ' + "{:.3f}".format(sigma) + '$')
plt.grid(True)
# Tweak spacing to prevent clipping of ylabel
plt.subplots_adjust(left=0.15)
if logscale:
logs = "_log"
else:
logs = ""
plt.savefig("histogram_mention_SR" + logs + ".eps", dpi = 550)
#plt.show()
def plot_both_pdf(ydata, ydata2, logscale=False):
#plt.clf()
print 'Plotting both'
x = np.array(ydata)
#x = np.log(x + 1)
mu = np.mean(x)
sigma = np.std(x)
x2 = np.array(ydata2)
#x = np.log(x + 1)
mu2 = np.mean(x2)
sigma2 = np.std(x2)
num_bins = 100
# the histogram of the data
n, bins, patches = plt.hist(x, normed=1, bins=num_bins)
plt.clf() # Get rid of this histogram since not the one we want.
nx_frac = n/float(len(n)) # Each bin divided by total number of objects.
width = bins[1] - bins[0] # Width of each bin.
x = np.ravel(zip(bins[:-1], bins[:-1]+width))
y = np.ravel(zip(nx_frac,nx_frac))
n, bins, patches = plt.hist(x2, normed=1, bins=num_bins)
plt.clf() # Get rid of this histogram since not the one we want.
nx_frac = n/float(len(n)) # Each bin divided by total number of objects.
width = bins[1] - bins[0] # Width of each bin.
x2 = np.ravel(zip(bins[:-1], bins[:-1]+width))
y2 = np.ravel(zip(nx_frac,nx_frac))
lab1 = 'mention network SR: $\mu=' + "{:.3f}".format(mu) + '$, $\sigma= ' + "{:.3f}".format(sigma) + '$'
plt.plot(x,y,linestyle="-",color='darkorchid',label=lab1)
lab2 = 'full network SR: $\mu=' + "{:.3f}".format(mu2) + '$, $\sigma= ' + "{:.3f}".format(sigma2) + '$'
plt.plot(x2,y2,linestyle="-",color='blue',label=lab2)
if logscale:
plt.yscale('log', nonposy='clip')
# add a 'best fit' line
#y = mlab.normpdf(bins, mu, sigma)
#plt.plot(bins, y, 'r--', label='Normal distribution')
plt.xlabel('SR')
plt.ylabel('p(SR)')
plt.legend()
#plt.title(r'Histogram for mention network pairwise SR: $\mu=' + "{:.3f}".format(mu) + '$, $\sigma= ' + "{:.3f}".format(sigma) + '$')
plt.grid(True)
# Tweak spacing to prevent clipping of ylabel
#plt.subplots_adjust(left=0.15)
if logscale:
logs = "_log"
else:
logs = ""
plt.savefig("27_FIN_normed_histograms_mention_and_FULL_SR" + logs + ".eps", dpi = 550)
plt.show()
def plot_pdf_line(ydata):
plt.clf()
x = np.array(ydata)
mu = np.mean(x)
sigma = np.std(x)
num_bins = 100
y,binEdges=np.histogram(ydata,bins=num_bins)
bincenters = 0.5*(binEdges[1:]+binEdges[:-1])
plt.plot(bincenters,y,'-')
plt.savefig("ALL_SR_line.eps", dpi = 440)
def plot_cum_distr(ydata):
plt.clf()
print len(ydata)
x = np.array(ydata)
mu = np.mean(x)
sigma = np.std(x)
plt.hist(x, 100, normed=0, histtype='step', color='lightsalmon', alpha=0.88, cumulative=1)
plt.xlabel('SR value')
plt.ylabel('SR >= x')
plt.title(r'SR cumulative distribution: $\mu=' + "{:.3f}".format(mu) + '$, $\sigma= ' + "{:.3f}".format(sigma) + '$')
plt.grid(True)
# Tweak spacing to prevent clipping of ylabel
plt.subplots_adjust(left=0.15)
plt.savefig("ALL_SR_cum.eps", dpi = 440)
#plt.show()
def main_pdf():
IN_DIR = "../../../DATA/CV/"
f_in = "mention_graph_IDs_with_SR_weight.dat"
os.chdir(IN_DIR)
f = open(f_in, 'r')
edges_SR, edges_SR_weighted = read_in_edges_SR(f)
plot_pdf(edges_SR, False)
#plot_cum_distr(edges_SR)
#plot_pdf_line(ydata)
#main_pdf()
def main_full_SR():
IN_DIR = "../../../ALL_SR/"
os.chdir(IN_DIR)
f_in = "SMALL.weighted_edge_list"
#f_in = 'alltest'
f = open(f_in, 'r')
edges_SR = read_in_full_SR(f)
plot_pdf(edges_SR, False)
plot_pdf(edges_SR, True)
plot_cum_distr(edges_SR)
plot_pdf_line(edges_SR)
#main_full_SR()
def main_both_pdf():
IN_DIR = "../../../DATA/CV/"
f_in = "mention_graph_IDs_with_SR_weight.dat"
os.chdir(IN_DIR)
f = open(f_in, 'r')
edges_SR, edges_SR_weighted = read_in_edges_SR(f)
IN_DIR = "../../ALL_SR/"
os.chdir(IN_DIR)
f_in = "SMALL.weighted_edge_list"
#f_in = 'alltest'
f = open(f_in, 'r')
edges_SR_full = read_in_full_SR(f)
plot_both_pdf(edges_SR, edges_SR_full, True)
main_both_pdf() | 0.477067 | 0.716057 |
import struct
import sys
import os
disk_data = [
0x4A, 0x57, 0x5E, 0x75, 0x38, 0x66, 0x3B, 0x79, 0x3A, 0x60,
0x75, 0x61, 0x26, 0x38, 0x68, 0x5E, 0x28, 0x68, 0x6C, 0x6C,
0x72, 0x76, 0x71, 0x7E, 0x55, 0x47, 0x38, 0x42, 0x7A, 0x4A,
0x6B, 0x4D, 0x4D, 0x65, 0x37, 0x79, 0x45, 0x62, 0x2E, 0x70,
0x4C, 0x63, 0x38, 0x74, 0x79, 0x3D, 0x3D, 0x36, 0x50, 0x62,
0x5F, 0x77, 0x66, 0x55, 0x6E, 0x33, 0x79, 0x6C, 0x56, 0x29,
0x41, 0x36, 0x75, 0x65, 0x6A, 0x2E, 0x4F, 0x68, 0x54, 0x5B,
0x5F, 0x47, 0x6C, 0x76, 0x64, 0x6E, 0x47, 0x60, 0x47, 0x69,
0x71, 0x74, 0x4A, 0x66, 0x63, 0x78, 0x3C, 0x66, 0x5F, 0x5C,
0x3B, 0x7A, 0x55, 0x4B, 0x75, 0x2D, 0x60, 0x3E, 0x25, 0x3A,
0x2A, 0x27, 0x2B, 0x7A, 0x5D, 0x39, 0x48, 0x28, 0x65, 0x62,
0x5A, 0x44, 0x4B, 0x6B, 0x60, 0x37, 0x3B, 0x6F, 0x69, 0x3B,
0x6B, 0x74, 0x77, 0x7C, 0x44, 0x4B, 0x49, 0x77, 0x31, 0x50,
0x52, 0x39, 0x63, 0x3E, 0x50, 0x3F, 0x4C, 0x61, 0x46, 0x5E,
0x71, 0x7E, 0x55, 0x41, 0x2C, 0x63, 0x5E, 0x43, 0x31, 0x37,
0x24, 0x4B, 0x5F, 0x42, 0x39, 0x2A, 0x48, 0x32, 0x47, 0x28,
0x5B, 0x6A, 0x3D, 0x72, 0x79, 0x64, 0x29, 0x25, 0x4B, 0x4C,
0x6A, 0x49, 0x65, 0x4A, 0x23, 0x4C, 0x74, 0x63, 0x54, 0x32,
0x60, 0x41, 0x6B, 0x39, 0x47, 0x4F, 0x69, 0x48, 0x6B, 0x31,
0x72, 0x5E, 0x22, 0x29, 0x4E, 0x3A, 0x5F, 0x23, 0x5E, 0x36,
0x72, 0x37, 0x25, 0x46, 0x25, 0x62, 0x4D, 0x73, 0x32, 0x62,
0x3C, 0x78, 0x70, 0x28, 0x6A, 0x44, 0x7B, 0x40, 0x7D, 0x5C,
0x68, 0x50, 0x64, 0x64, 0x39, 0x58, 0x79, 0x73, 0x57, 0x32,
0x39, 0x58, 0x6B, 0x5E, 0x3B, 0x79, 0x22, 0x66, 0x67, 0x47,
0x33, 0x4B, 0x3B, 0x23, 0x23, 0x2A, 0x46, 0x49, 0x61, 0x64,
0x26, 0x35, 0x2C, 0x27, 0x39, 0x50, 0x62, 0x3A, 0x7A, 0x54,
0x7E, 0x63, 0x4D, 0x69, 0x3F, 0x51, 0x43, 0x65, 0x51, 0x2B,
0x4B, 0x7B, 0x76, 0x5B, 0x30, 0x42, 0x4A, 0x38, 0x54, 0x43,
0x73, 0x6F, 0x76, 0x38, 0x5E, 0x3F, 0x21, 0x2F, 0x7B, 0x24,
0x4B, 0x36, 0x52, 0x46, 0x78, 0x70, 0x32, 0x38, 0x35, 0x7C,
0x4E, 0x7E, 0x5D, 0x22, 0x79, 0x40, 0x26, 0x71, 0x6D, 0x78,
0x23, 0x57, 0x7B, 0x48, 0x5D, 0x23, 0x79, 0x72, 0x38, 0x56,
0x24, 0x27, 0x74, 0x54, 0x31, 0x6F, 0x73, 0x51, 0x3D, 0x24,
0x5E, 0x4A, 0x4C, 0x7B, 0x26, 0x57, 0x36, 0x6B, 0x6A, 0x2B,
0x4B, 0x67, 0x33, 0x72, 0x5D, 0x72, 0x6A, 0x78, 0x23, 0x29,
0x5F, 0x51, 0x39, 0x6A, 0x5D, 0x54, 0x4A, 0x54, 0x4A, 0x5D,
0x27, 0x38, 0x32, 0x5D, 0x79, 0x2D, 0x5B, 0x5D, 0x48, 0x48,
0x23, 0x21, 0x53, 0x4B, 0x60, 0x6C, 0x21, 0x72, 0x42, 0x77,
0x54, 0x60, 0x3C, 0x4B, 0x5A, 0x7E, 0x4B, 0x5D, 0x22, 0x34,
0x48, 0x5C, 0x4A, 0x65, 0x52, 0x43, 0x60, 0x26, 0x7A, 0x45,
0x51, 0x61, 0x35, 0x3A, 0x7E, 0x7B, 0x6A, 0x63, 0x68, 0x53,
0x72, 0x3C, 0x38, 0x4B, 0x6C, 0x3C, 0x30, 0x71, 0x73, 0x60,
0x4F, 0x7D, 0x2B, 0x6B, 0x54, 0x2E, 0x76, 0x53, 0x25, 0x5C,
0x6E, 0x31, 0x2B, 0x37, 0x25, 0x65, 0x52, 0x21, 0x24, 0x67,
0x59, 0x3E, 0x53, 0x5B, 0x4A, 0x6B, 0x4D, 0x36, 0x30, 0x7B,
0x21, 0x21, 0x5D, 0x3C, 0x30, 0x66, 0x68, 0x53, 0x7E, 0x77,
0x33, 0x7B, 0x65, 0x3C, 0x6A, 0x62, 0x50, 0x53, 0x38, 0x55,
0x49, 0x48, 0x70, 0x6A, 0x52, 0x79, 0x29, 0x71, 0x34, 0x3B,
0x6C, 0x72, 0x38, 0x5B, 0x45, 0x7D, 0x70, 0x7B, 0x51, 0x3C,
0x7B, 0x65, 0x2C, 0x41, 0x71, 0x3C, 0x6D, 0x2C, 0x7E, 0x72,
0x61, 0x56, 0x65, 0x48, 0x55, 0x64, 0x64, 0x53, 0x37, 0x31,
0x67, 0x51, 0x57, 0x48, 0x5E, 0x37, 0x26, 0x4A, 0x52, 0x4D,
0x65, 0x25, 0x71, 0x35, 0x2C, 0x71, 0x69, 0x7E, 0x45, 0x70,
0x7A, 0x6D, 0x79, 0x76, 0x71, 0x4D, 0x79, 0x59, 0x51, 0x48,
0x69, 0x4E, 0x60, 0x57, 0x65, 0x2D, 0x2A, 0x78, 0x7C, 0x28,
0x70, 0x32, 0x70, 0x4A, 0x71, 0x60, 0x4A, 0x32, 0x22, 0x2C,
0x7C, 0x63, 0x6D, 0x43, 0x25, 0x74, 0x56, 0x57, 0x3E, 0x50,
0x3E, 0x42, 0x6F, 0x28, 0x3D, 0x7C, 0x56, 0x50, 0x3F, 0x33,
0x22, 0x5C, 0x42, 0x33, 0x3A, 0x57, 0x5F, 0x78, 0x6E, 0x4F,
0x2A, 0x51, 0x37, 0x3E, 0x67, 0x79, 0x5D, 0x4A, 0x5C, 0x46,
0x2E, 0x56, 0x7B, 0x40, 0x30, 0x60, 0x3C, 0x21, 0x26, 0x59,
0x4B, 0x75, 0x34, 0x56, 0x4A, 0x5C, 0x27, 0x32, 0x7E, 0x2B,
0x47, 0x6F, 0x50, 0x2B, 0x41, 0x4B, 0x7B, 0x3B, 0x65, 0x78,
0x69, 0x64, 0x58, 0x4E, 0x53, 0x69, 0x41, 0x5B, 0x44, 0x72,
0x62, 0x57, 0x62, 0x44, 0x44, 0x68, 0x5D, 0x3C, 0x29, 0x56,
0x5D, 0x26, 0x55, 0x39, 0x5B, 0x6A, 0x6C, 0x51, 0x67, 0x6A,
0x5B, 0x43, 0x3F, 0x4E, 0x30, 0x55, 0x47, 0x59, 0x65, 0x3C,
0x2F, 0x23, 0x76, 0x54, 0x72, 0x38, 0x48, 0x59, 0x22, 0x5E,
0x35, 0x7E, 0x65, 0x51, 0x4C, 0x7B, 0x70, 0x5C, 0x3A, 0x3F,
0x41, 0x73, 0x7A, 0x55, 0x52, 0x4F, 0x34, 0x6E, 0x6C, 0x27,
0x23, 0x32, 0x69, 0x21, 0x2E, 0x5B, 0x53, 0x29, 0x50, 0x67,
0x28, 0x6A, 0x61, 0x21, 0x2E, 0x4A, 0x51, 0x6C, 0x34, 0x6B,
0x41, 0x31, 0x6C, 0x38, 0x60, 0x59, 0x3C, 0x3C, 0x2E, 0x79,
0x2A, 0x7D, 0x7E, 0x60, 0x4C, 0x4F, 0x27, 0x42, 0x2C, 0x22,
0x31, 0x6A, 0x40, 0x28, 0x42, 0x52, 0x66, 0x3A, 0x62, 0x62,
0x57, 0x6C, 0x7B, 0x56, 0x41, 0x78, 0x38, 0x57, 0x25, 0x77,
0x47, 0x5F, 0x5C, 0x30, 0x41, 0x42, 0x5E, 0x30, 0x4A, 0x5E,
0x71, 0x3F, 0x60, 0x58, 0x4E, 0x38, 0x7B, 0x21, 0x3C, 0x78,
0x74, 0x55, 0x6D, 0x71, 0x37, 0x36, 0x22, 0x62, 0x7E, 0x6A,
0x76, 0x28, 0x4C, 0x25, 0x6A, 0x48, 0x4E, 0x4A, 0x37, 0x39,
0x2C, 0x5C, 0x25, 0x48, 0x62, 0x7E, 0x2E, 0x51, 0x2A, 0x49,
0x2F, 0x22, 0x78, 0x29, 0x2D, 0x47, 0x5E, 0x2A, 0x52, 0x2E,
0x5F, 0x7C, 0x4C, 0x7D, 0x6B, 0x23, 0x4A, 0x30, 0x7C, 0x31,
0x4E, 0x23, 0x59, 0x37, 0x28, 0x4F, 0x64, 0x6F, 0x66, 0x33,
0x34, 0x4E, 0x52, 0x2D, 0x35, 0x7E, 0x35, 0x67, 0x6C, 0x54,
0x22, 0x49, 0x47, 0x31, 0x5C, 0x62, 0x36, 0x57, 0x39, 0x48,
0x7B, 0x67, 0x6F, 0x6F, 0x5F, 0x4B, 0x58, 0x54, 0x38, 0x5F,
0x23, 0x57, 0x5F, 0x59, 0x58, 0x29, 0x2F, 0x38, 0x62, 0x5F,
0x5F, 0x3E, 0x79, 0x6B, 0x2B, 0x7B, 0x65, 0x3D, 0x5E, 0x5C,
0x44, 0x65, 0x50, 0x5D, 0x78, 0x73, 0x4D, 0x4B, 0x42, 0x5F,
0x39, 0x45, 0x5A, 0x5A, 0x6C, 0x2F, 0x59, 0x54, 0x65, 0x51,
0x5F, 0x40, 0x25, 0x49, 0x27, 0x32, 0x2F, 0x53, 0x76, 0x5F,
0x63, 0x50, 0x5A, 0x21, 0x56, 0x69, 0x27, 0x7B, 0x7E, 0x4C,
0x6D, 0x7B, 0x35, 0x4F, 0x6B, 0x75, 0x24, 0x53, 0x6C, 0x77,
0x46, 0x2E, 0x50, 0x77, 0x78, 0x71, 0x43, 0x47, 0x58, 0x5B,
0x41, 0x60, 0x38, 0x72, 0x72, 0x46, 0x78, 0x46, 0x47, 0x3E,
0x39, 0x58, 0x35, 0x35, 0x4C, 0x6F, 0x50, 0x4C, 0x4E, 0x69,
0x36, 0x42, 0x2D, 0x74, 0x57, 0x69, 0x27, 0x28, 0x6D, 0x7B,
0x58, 0x54, 0x38, 0x66, 0x6C, 0x49, 0x42, 0x70, 0x68, 0x37,
0x5A, 0x22, 0x7A, 0x3D, 0x5F, 0x7D, 0x7D, 0x5D, 0x77, 0x4C,
0x57, 0x5C, 0x43, 0x42, 0x5A, 0x7D, 0x73, 0x58, 0x39, 0x59,
0x6B, 0x38, 0x5A, 0x6C, 0x3C, 0x67, 0x76, 0x4A, 0x32, 0x6C,
0x24, 0x5A, 0x32, 0x61, 0x55, 0x62, 0x7B, 0x3A, 0x51, 0x6D,
0x28, 0x22, 0x4B, 0x6E, 0x7B, 0x3F, 0x3E, 0x7C, 0x3B, 0x79,
0x2C, 0x57, 0x68, 0x7D, 0x4E, 0x61, 0x70, 0x3C, 0x7C, 0x5B,
0x64, 0x50, 0x52, 0x44, 0x40, 0x67, 0x44, 0x29, 0x30, 0x3C,
0x39, 0x70, 0x39, 0x4E, 0x58, 0x45, 0x46, 0x51, 0x76, 0x4C,
0x4C, 0x3A, 0x58, 0x7A, 0x28, 0x69, 0x75, 0x33, 0x4D, 0x49,
0x68, 0x26, 0x72, 0x7B, 0x23, 0x60, 0x59, 0x45, 0x46, 0x68,
0x68, 0x78, 0x7C, 0x58, 0x75, 0x4F, 0x76, 0x68, 0x63, 0x7E,
0x37, 0x2D, 0x7C, 0x5A, 0x35, 0x2F, 0x2B, 0x2B, 0x23, 0x3C,
0x40, 0x60, 0x6E, 0x3C, 0x42, 0x21, 0x3A, 0x78, 0x37, 0x4E,
0x75, 0x64, 0x59, 0x53, 0x6B, 0x33, 0x25, 0x35, 0x46, 0x68,
0x3F, 0x27, 0x58, 0x63, 0x3C, 0x39, 0x23, 0x53, 0x5B, 0x55,
0x3F, 0x28, 0x7A, 0x7B, 0x5C, 0x2D, 0x59, 0x5C, 0x31, 0x4C,
0x68, 0x39, 0x4F, 0x21, 0x76, 0x75, 0x24, 0x3C, 0x7A, 0x73,
0x6E, 0x32, 0x26, 0x61, 0x3A, 0x6C, 0x65, 0x74, 0x3C, 0x7C,
0x67, 0x49, 0x51, 0x38, 0x3D, 0x36, 0x5B, 0x42, 0x63, 0x74,
0x3C, 0x30, 0x35, 0x4A, 0x2A, 0x7C, 0x7A, 0x22, 0x40, 0x35,
0x2E, 0x32, 0x22, 0x7E, 0x7E, 0x2A, 0x59, 0x33, 0x7C, 0x52,
0x6A, 0x75, 0x2E, 0x23, 0x61, 0x3D, 0x56, 0x68, 0x3D, 0x6C,
0x21, 0x41, 0x5C, 0x36, 0x44, 0x6A, 0x29, 0x75, 0x7A, 0x22,
0x7E, 0x3E, 0x63, 0x36, 0x4B, 0x23, 0x3F, 0x2E, 0x42, 0x44,
0x7E, 0x77, 0x4E, 0x6F, 0x37, 0x71, 0x26, 0x68, 0x23, 0x55,
0x40, 0x72, 0x5A, 0x48, 0x52, 0x59, 0x5F, 0x32, 0x35, 0x3B,
0x2C, 0x34, 0x4B, 0x6C, 0x50, 0x67, 0x40, 0x39, 0x6D, 0x29,
0x27, 0x78, 0x53, 0x32, 0x45, 0x55, 0x66, 0x40, 0x63, 0x47,
0x43, 0x2B, 0x38, 0x3A, 0x4A, 0x70, 0x48, 0x2C, 0x71, 0x35,
0x57, 0x31, 0x47, 0x54, 0x48, 0x6C, 0x61, 0x4C, 0x3C, 0x39,
0x79, 0x3E, 0x27, 0x64, 0x73, 0x27, 0x55, 0x47, 0x43, 0x27,
0x44, 0x39, 0x66, 0x6C, 0x23, 0x58, 0x6E, 0x7A, 0x4D, 0x37,
0x54, 0x7C, 0x7E, 0x6E, 0x6E, 0x6F, 0x35, 0x5D, 0x59, 0x41,
0x4A, 0x5D, 0x75, 0x59, 0x7B, 0x49, 0x30, 0x3B, 0x48, 0x2A,
0x77, 0x54, 0x29, 0x76, 0x2C, 0x6E, 0x2B, 0x3B, 0x29, 0x3A,
0x4F, 0x4C, 0x36, 0x26, 0x38, 0x4B, 0x26, 0x4E, 0x63, 0x3B,
0x53, 0x50, 0x6B, 0x77, 0x72, 0x2C, 0x27, 0x5E, 0x43, 0x62,
0x73, 0x7E, 0x60, 0x69, 0x60, 0x34, 0x6F, 0x52, 0x6A, 0x75,
0x3E, 0x58, 0x57, 0x2D, 0x40, 0x77, 0x5F, 0x3F, 0x75, 0x7C,
0x46, 0x50, 0x23, 0x3A, 0x6C, 0x2D, 0x49, 0x22, 0x57, 0x6E,
0x72, 0x34, 0x77, 0x6C, 0x76, 0x67, 0x67, 0x75, 0x50, 0x5E,
0x6A, 0x2F, 0x7E, 0x68, 0x69, 0x5D, 0x35, 0x46, 0x2D, 0x59,
0x35, 0x64, 0x4A, 0x74, 0x21, 0x6A, 0x32, 0x2A, 0x54, 0x6C,
0x58, 0x66, 0x36, 0x65, 0x78, 0x55, 0x56, 0x37, 0x65, 0x3C,
0x50, 0x41, 0x42, 0x79, 0x68, 0x41, 0x55, 0x40, 0x25, 0x42,
0x3B, 0x7B, 0x6B, 0x2D, 0x2C, 0x40, 0x4C, 0x49, 0x64, 0x6C,
0x3F, 0x4D, 0x6E, 0x7E, 0x30, 0x76, 0x48, 0x28, 0x76, 0x45,
0x70, 0x60, 0x31, 0x49, 0x40, 0x66, 0x5D, 0x40, 0x5D, 0x42,
0x4B, 0x4D, 0x56, 0x7A, 0x34, 0x3F, 0x5F, 0x29, 0x7A, 0x78,
0x2E, 0x59, 0x7C, 0x6D, 0x75, 0x48, 0x3E, 0x63, 0x62, 0x45,
0x40, 0x4A, 0x71, 0x47, 0x46, 0x42, 0x21, 0x3E, 0x27, 0x25,
0x34, 0x42, 0x7C, 0x7A, 0x21, 0x54, 0x30, 0x70, 0x5C, 0x29,
0x5D, 0x32, 0x67, 0x72, 0x56, 0x23, 0x28, 0x3B, 0x2E, 0x23,
0x31, 0x53, 0x77, 0x28, 0x4E, 0x38, 0x2A, 0x63, 0x71, 0x27,
0x47, 0x23, 0x52, 0x62, 0x32, 0x63, 0x73, 0x59, 0x44, 0x6C,
0x21, 0x53, 0x46, 0x25, 0x53, 0x5F, 0x25, 0x58, 0x24, 0x75,
0x7B, 0x25, 0x42, 0x6C, 0x53, 0x6F, 0x4D, 0x34, 0x28, 0x4E,
0x7D, 0x74, 0x3F, 0x7A, 0x21, 0x7E, 0x3D, 0x69, 0x66, 0x61,
0x7A, 0x2B, 0x37, 0x34, 0x57, 0x79, 0x40, 0x72, 0x7B, 0x4D,
0x29, 0x41, 0x4A, 0x73, 0x6D, 0x4D, 0x6A, 0x60, 0x4C, 0x41,
0x6C, 0x4F, 0x22, 0x7D, 0x5C, 0x62, 0x3E, 0x2B, 0x4A, 0x79,
0x22, 0x5C, 0x4D, 0x56, 0x2F, 0x36, 0x53, 0x61, 0x33, 0x22,
0x31, 0x49, 0x2A, 0x32, 0x5F, 0x61, 0x79, 0x72, 0x2D, 0x2F,
0x2A, 0x5B, 0x2D, 0x29, 0x4F, 0x2C, 0x2F, 0x61, 0x26, 0x4F,
0x78, 0x5A, 0x4A, 0x64, 0x23, 0x40, 0x24, 0x44, 0x22, 0x4C,
0x75, 0x57, 0x42, 0x29, 0x72, 0x50, 0x2F, 0x53, 0x46, 0x29,
0x62, 0x6E, 0x4E, 0x4A, 0x75, 0x29, 0x44, 0x5B, 0x3B, 0x69,
0x50, 0x6C, 0x7E, 0x34, 0x48, 0x76, 0x60, 0x2A, 0x68, 0x6B,
0x2B, 0x25, 0x7B, 0x7D, 0x6D, 0x6E, 0x38, 0x4D, 0x56, 0x65,
0x26, 0x7C, 0x6C, 0x25, 0x39, 0x2A, 0x33, 0x58, 0x73, 0x2B,
0x3A, 0x2C, 0x64, 0x3F, 0x48, 0x6F, 0x3A, 0x78, 0x61, 0x71,
0x60, 0x29, 0x27, 0x5F, 0x28, 0x70, 0x70, 0x76, 0x2F, 0x47,
0x5E, 0x4B, 0x3B, 0x54, 0x65, 0x65, 0x77, 0x2E, 0x57, 0x21,
0x47, 0x76, 0x44, 0x32, 0x49, 0x46, 0x3C, 0x23, 0x29, 0x4D,
0x49, 0x29, 0x43, 0x6D, 0x25, 0x24, 0x51, 0x25, 0x77, 0x62,
0x74, 0x6C, 0x6B, 0x37, 0x4F, 0x6D, 0x5D, 0x5F, 0x22, 0x40,
0x27, 0x32, 0x23, 0x50, 0x22, 0x33, 0x5F, 0x44, 0x71, 0x6B,
0x7B, 0x72, 0x36, 0x4F, 0x58, 0x60, 0x6F, 0x73, 0x44, 0x5E,
0x7A, 0x62, 0x7D, 0x50, 0x3C, 0x4D, 0x7D, 0x7E, 0x54, 0x23,
0x21, 0x60, 0x5A, 0x4B, 0x51, 0x77, 0x49, 0x5E, 0x6D, 0x79,
0x62, 0x4D, 0x3F, 0x6E, 0x62, 0x26, 0x24, 0x47, 0x6E, 0x25,
0x6F, 0x3B, 0x2D, 0x78, 0x77, 0x32, 0x6C, 0x7C, 0x72, 0x64,
0x44, 0x79, 0x4D, 0x42, 0x78, 0x70, 0x71, 0x31, 0x41, 0x48,
0x6C, 0x71, 0x78, 0x35, 0x67, 0x31, 0x4D, 0x5E, 0x38, 0x67,
0x2B, 0x34, 0x7D, 0x24, 0x23, 0x6F, 0x21, 0x5F, 0x32, 0x65,
0x7D, 0x7E, 0x7D, 0x49, 0x25, 0x37, 0x51, 0x2C, 0x64, 0x40,
0x6B, 0x37, 0x44, 0x61, 0x6A, 0x4D, 0x71, 0x28, 0x6F, 0x46,
0x48, 0x51, 0x35, 0x51, 0x29, 0x60, 0x79, 0x4D, 0x53, 0x28,
0x6D, 0x51, 0x7A, 0x2D, 0x5B, 0x52, 0x31, 0x54, 0x4F, 0x49,
0x28, 0x51, 0x43, 0x62, 0x7A, 0x3E, 0x50, 0x4B, 0x57, 0x70,
0x27, 0x3D, 0x27, 0x38, 0x6F, 0x5F, 0x79, 0x3A, 0x33, 0x57,
0x67, 0x61, 0x22, 0x79, 0x7B, 0x2F, 0x5D, 0x63, 0x49, 0x31,
0x28, 0x66, 0x73, 0x3B, 0x3B, 0x31, 0x36, 0x4E, 0x71, 0x4D,
0x40, 0x27, 0x48, 0x44, 0x3A, 0x43, 0x5D, 0x6F, 0x49, 0x75,
0x35, 0x2E, 0x5A, 0x7B, 0x46, 0x29, 0x3F, 0x29, 0x61, 0x6E,
0x5C, 0x35, 0x38, 0x62, 0x4E, 0x57, 0x5A, 0x6D, 0x23, 0x38,
0x69, 0x2E, 0x3F, 0x39, 0x30, 0x44, 0x56, 0x38, 0x5C, 0x41,
0x61, 0x3C, 0x3A, 0x25, 0x47, 0x76, 0x5B, 0x27, 0x4A, 0x7B,
0x54, 0x7A, 0x73, 0x37, 0x55, 0x25, 0x2B, 0x6F, 0x38, 0x52,
0x2F, 0x76, 0x57, 0x56, 0x57, 0x44, 0x5C, 0x7C, 0x29, 0x70,
0x36, 0x77, 0x52, 0x61, 0x21, 0x64, 0x42, 0x6A, 0x6D, 0x2D,
0x39, 0x2C, 0x3B, 0x5F, 0x42, 0x4B, 0x2C, 0x55, 0x7C, 0x5D,
0x2B, 0x4B, 0x30, 0x28, 0x25, 0x64, 0x6F, 0x51, 0x2C, 0x69,
0x34, 0x39, 0x26, 0x4E, 0x40, 0x37, 0x57, 0x26, 0x7A, 0x28,
0x3B, 0x5B, 0x7E, 0x41, 0x2B, 0x7C, 0x61, 0x3F, 0x62, 0x7C,
0x47, 0x4C, 0x34, 0x57, 0x71, 0x65, 0x65, 0x55, 0x46, 0x59,
0x26, 0x52, 0x48, 0x23, 0x31, 0x46, 0x6A, 0x39, 0x47, 0x67,
0x62, 0x32, 0x64, 0x5B, 0x60, 0x5A, 0x7A, 0x25, 0x3A, 0x7D,
0x49, 0x33, 0x3C, 0x6C, 0x41, 0x66, 0x36, 0x41, 0x37, 0x6A,
0x77, 0x79, 0x22, 0x3D, 0x5F, 0x4A, 0x59, 0x63, 0x55, 0x2A,
0x3E, 0x3C, 0x57, 0x45, 0x68, 0x2B, 0x28, 0x66, 0x26, 0x4B,
0x51, 0x71, 0x66, 0x39, 0x78, 0x70, 0x2C, 0x73, 0x50, 0x4C,
0x50, 0x43, 0x57, 0x66, 0x58, 0x6B, 0x2B, 0x57, 0x56, 0x36,
0x5A, 0x57, 0x6A, 0x34, 0x63, 0x7B, 0x65, 0x57, 0x77, 0x4F,
0x5F, 0x77, 0x29, 0x22, 0x37, 0x74, 0x6A, 0x68, 0x7D, 0x5C,
0x53, 0x72, 0x7A, 0x4A, 0x3C, 0x6F, 0x30, 0x75, 0x76, 0x52,
0x65, 0x7A, 0x48, 0x6D, 0x68, 0x7B, 0x25, 0x42, 0x3B, 0x6C,
0x2B, 0x41, 0x59, 0x49, 0x4F, 0x2B, 0x41, 0x5F, 0x5B, 0x60,
0x7C, 0x67, 0x74, 0x2C, 0x4C, 0x2A, 0x7A, 0x3C, 0x2A, 0x4F,
0x74, 0x58, 0x7E, 0x38, 0x32, 0x76, 0x53, 0x7A, 0x5B, 0x4D,
0x55, 0x28, 0x43, 0x72, 0x3C, 0x38, 0x79, 0x7E, 0x3F, 0x5F,
0x34, 0x2D, 0x3B, 0x7D, 0x46, 0x24, 0x2A, 0x7A, 0x3E, 0x7E,
0x33, 0x6D, 0x36, 0x5F, 0x21, 0x67, 0x57, 0x49, 0x66, 0x6C,
0x5C, 0x27, 0x48, 0x49, 0x25, 0x39, 0x23, 0x6B, 0x50, 0x30,
0x70, 0x30, 0x5E, 0x33, 0x46, 0x71, 0x5E, 0x5D, 0x3D, 0x35,
0x69, 0x4A, 0x36, 0x47, 0x7E, 0x3C, 0x61, 0x32, 0x60, 0x2A,
0x24, 0x78, 0x6D, 0x5B, 0x46, 0x3C, 0x2E, 0x27, 0x6B, 0x7B,
0x68, 0x6B, 0x4C, 0x3E, 0x7A, 0x40, 0x4A, 0x72, 0x49, 0x71,
0x77, 0x3D, 0x36, 0x77, 0x76, 0x2C, 0x65, 0x6F, 0x29, 0x72,
0x4D, 0x47, 0x7A, 0x4B, 0x5C, 0x77, 0x62, 0x60, 0x5C, 0x32,
0x32, 0x2F, 0x50, 0x2A, 0x3B, 0x2B, 0x79, 0x58, 0x58, 0x41,
0x3D, 0x72, 0x5D, 0x25, 0x48, 0x23, 0x61, 0x67, 0x61, 0x3C,
0x7B, 0x66, 0x57, 0x2F, 0x34, 0x70, 0x65, 0x7E, 0x6B, 0x43,
0x69, 0x45, 0x79, 0x75, 0x2E, 0x33, 0x39, 0x5D, 0x2F, 0x4B,
0x49, 0x2F, 0x53, 0x73, 0x2E, 0x31, 0x58, 0x55, 0x54, 0x56,
0x7A, 0x4E, 0x62, 0x2E, 0x26, 0x42, 0x2D, 0x26, 0x4E, 0x79,
0x32, 0x7D, 0x53, 0x40, 0x40, 0x3F, 0x71, 0x6B, 0x2D, 0x38,
0x51, 0x33, 0x33, 0x7C, 0x4D, 0x7E, 0x4F, 0x58, 0x6E, 0x6F,
0x5E, 0x50, 0x26, 0x2B, 0x7A, 0x5E, 0x54, 0x4C, 0x71, 0x35,
0x6C, 0x44, 0x6D, 0x49, 0x32, 0x39, 0x4B, 0x71, 0x3B, 0x5F,
0x45, 0x41, 0x39, 0x49, 0x59, 0x2E, 0x45, 0x60, 0x71, 0x67,
0x71, 0x70, 0x62, 0x76, 0x48, 0x4B, 0x68, 0x71, 0x2B, 0x42,
0x67, 0x7A, 0x5F, 0x4B, 0x76, 0x53, 0x78, 0x62, 0x5A, 0x72,
0x55, 0x48, 0x42, 0x48, 0x77, 0x2F, 0x37, 0x3D, 0x3C, 0x56,
0x61, 0x39, 0x56, 0x49, 0x47, 0x23, 0x33, 0x44, 0x29, 0x56,
0x61, 0x51, 0x50, 0x34, 0x6E, 0x5D, 0x4A, 0x4A, 0x37, 0x7A,
0x3F, 0x68, 0x7B, 0x4F, 0x72, 0x2A, 0x26, 0x6D, 0x53, 0x51,
0x45, 0x25, 0x50, 0x64, 0x53, 0x32, 0x24, 0x46, 0x65, 0x78,
0x64, 0x48, 0x7B, 0x70, 0x4E, 0x46, 0x27, 0x2D, 0x3A, 0x5F,
0x3B, 0x4F, 0x38, 0x29, 0x66, 0x70, 0x48, 0x4D, 0x2F, 0x29,
0x6F, 0x63, 0x31, 0x45, 0x42, 0x2B, 0x56, 0x6C, 0x6C, 0x60,
0x28, 0x7E, 0x77, 0x31, 0x21, 0x3F, 0x29, 0x6F, 0x34, 0x4A,
0x5C, 0x4A, 0x4E, 0x5E, 0x55, 0x26, 0x74, 0x44, 0x5B, 0x65,
0x2A, 0x6B, 0x7A, 0x48, 0x27, 0x62, 0x66, 0x3E, 0x5C, 0x36,
0x2A, 0x64, 0x5C, 0x66, 0x27, 0x3F, 0x49, 0x36, 0x75, 0x7D,
0x30, 0x3A, 0x2F, 0x7D, 0x79, 0x33, 0x72, 0x28, 0x4C, 0x61,
0x3A, 0x47, 0x56, 0x36, 0x33, 0x62, 0x4A, 0x70, 0x33, 0x71,
0x62, 0x22, 0x64, 0x39, 0x4F, 0x53, 0x51, 0x69, 0x6F, 0x26,
0x6E, 0x24, 0x39, 0x36, 0x3E, 0x25, 0x6B, 0x7C, 0x52, 0x70,
0x6D, 0x21, 0x36, 0x68, 0x50, 0x78, 0x4D, 0x3A, 0x24, 0x7D,
0x75, 0x6B, 0x40, 0x64, 0x2B, 0x5C, 0x3B, 0x7D, 0x64, 0x3F,
0x2E, 0x23, 0x28, 0x70, 0x77, 0x7D, 0x30, 0x78, 0x5E, 0x31,
0x39, 0x33, 0x3D, 0x56, 0x76, 0x4C, 0x6B, 0x44, 0x56, 0x4D,
0x4D, 0x55, 0x7B, 0x6A, 0x6D, 0x55, 0x62, 0x58, 0x54, 0x77,
0x7B, 0x5F, 0x5D, 0x6F, 0x56, 0x7E, 0x5B, 0x31, 0x65, 0x5E,
0x59, 0x7C, 0x35, 0x64, 0x45, 0x4D, 0x5D, 0x39, 0x2D, 0x44,
0x4A, 0x3B, 0x43, 0x2C, 0x35, 0x3E, 0x75, 0x6C, 0x67, 0x4D,
0x5C, 0x5A, 0x79, 0x5A, 0x6A, 0x22, 0x7D, 0x5B, 0x29, 0x30,
0x72, 0x27, 0x4F, 0x6B, 0x73, 0x51, 0x6B, 0x59, 0x58, 0x69,
0x23, 0x22, 0x47, 0x37, 0x39, 0x2F, 0x3E, 0x70, 0x44, 0x44,
0x67, 0x52, 0x65, 0x59, 0x34, 0x39, 0x26, 0x2B, 0x57, 0x6C,
0x7B, 0x68, 0x70, 0x66, 0x47, 0x41, 0x4A, 0x35, 0x31, 0x6E,
0x70, 0x74, 0x46, 0x48, 0x2E, 0x66, 0x34, 0x2F, 0x3C, 0x26,
0x2E, 0x41, 0x5A, 0x2A, 0x23, 0x54, 0x45, 0x25, 0x44, 0x2E,
0x4A, 0x60, 0x39, 0x52, 0x51, 0x31, 0x71, 0x3A, 0x2D, 0x5E,
0x36, 0x39, 0x5E, 0x61, 0x7C, 0x7D, 0x79, 0x2C, 0x4A, 0x49,
0x7B, 0x60, 0x3F, 0x4D, 0x5F, 0x77, 0x3C, 0x2D, 0x74, 0x42,
0x5B, 0x43, 0x51, 0x44, 0x7E, 0x39, 0x79, 0x2D, 0x72, 0x7A,
0x2C, 0x23, 0x2D, 0x7E, 0x43, 0x70, 0x3C, 0x57, 0x21, 0x30,
0x39, 0x67, 0x74, 0x27, 0x3E, 0x72, 0x4C, 0x77, 0x34, 0x74,
0x4F, 0x34, 0x6C, 0x55, 0x55, 0x61, 0x2A, 0x37, 0x67, 0x62,
0x6F, 0x43, 0x5C, 0x3A, 0x53, 0x46, 0x42, 0x2A, 0x55, 0x78,
0x25, 0x51, 0x3D, 0x31, 0x4C, 0x2B, 0x44, 0x5E, 0x74, 0x6B,
0x2C, 0x74, 0x3A, 0x70, 0x5D, 0x5D, 0x61, 0x34, 0x75, 0x78,
0x5F, 0x6D, 0x42, 0x4A, 0x65, 0x25, 0x2C, 0x2E, 0x76, 0x4A,
0x27, 0x63, 0x43, 0x48, 0x3A, 0x50, 0x4F, 0x40, 0x3A, 0x3D,
0x31, 0x51, 0x31, 0x41, 0x4F, 0x27, 0x6B, 0x5E, 0x7B, 0x2A,
0x7D, 0x6D, 0x4D, 0x7A, 0x3E, 0x2E, 0x2F, 0x4E, 0x42, 0x70,
0x48, 0x45, 0x70, 0x53, 0x23, 0x70, 0x6A, 0x32, 0x4C, 0x69,
0x57, 0x37, 0x32, 0x70, 0x5A, 0x35, 0x3E, 0x37, 0x41, 0x3C,
0x7D, 0x51, 0x34, 0x5E, 0x2F, 0x4C, 0x5E, 0x41, 0x5E, 0x4E,
0x23, 0x46, 0x3B, 0x6C, 0x48, 0x2A, 0x5D, 0x3D, 0x6F, 0x40,
0x7B, 0x24, 0x5B, 0x49, 0x4A, 0x7E, 0x27, 0x72, 0x4D, 0x40,
0x6F, 0x62, 0x37, 0x6E, 0x2C, 0x64, 0x35, 0x3F, 0x39, 0x7E,
0x5D, 0x6A, 0x42, 0x60, 0x44, 0x31, 0x52, 0x37, 0x7D, 0x30,
0x3C, 0x64, 0x53, 0x2C, 0x6B, 0x5B, 0x46, 0x70, 0x49, 0x2F,
0x76, 0x48, 0x71, 0x33, 0x6C, 0x6B, 0x25, 0x35, 0x4B, 0x3C,
0x74, 0x74, 0x54, 0x7D, 0x79, 0x73, 0x57, 0x63, 0x5C, 0x57,
0x67, 0x34, 0x34, 0x33, 0x6E, 0x4E, 0x44, 0x71, 0x7A, 0x7A,
0x2E, 0x24, 0x4F, 0x61, 0x63, 0x3B, 0x53, 0x36, 0x62, 0x7B,
0x23, 0x34, 0x59, 0x6D, 0x48, 0x41, 0x2B, 0x53, 0x4D, 0x51,
0x33, 0x37, 0x6D, 0x23, 0x6C, 0x4B, 0x68, 0x57, 0x56, 0x73,
0x73, 0x74, 0x6E, 0x31, 0x5A, 0x77, 0x4B, 0x39, 0x7B, 0x66,
0x5C, 0x4F, 0x6D, 0x5C, 0x2E, 0x54, 0x64, 0x76, 0x39, 0x48,
0x77, 0x33, 0x3F, 0x3B, 0x57, 0x26, 0x5D, 0x67, 0x6B, 0x37,
0x52, 0x2B, 0x60, 0x65, 0x6A, 0x4A, 0x56, 0x5F, 0x24, 0x5C,
0x7C, 0x36, 0x69, 0x27, 0x6F, 0x66, 0x29, 0x54, 0x45, 0x73,
0x21, 0x25, 0x63, 0x63, 0x66, 0x5B, 0x7C, 0x65, 0x39, 0x6D,
0x7C, 0x25, 0x46, 0x3D, 0x22, 0x55, 0x77, 0x58, 0x4A, 0x5C,
0x30, 0x69, 0x23, 0x64, 0x28, 0x39, 0x3E, 0x62, 0x3A, 0x53,
0x51, 0x51, 0x5D, 0x5B, 0x53, 0x70, 0x4F, 0x53, 0x5B, 0x66,
0x71, 0x45, 0x3D, 0x3C, 0x2D, 0x57, 0x31, 0x52, 0x55, 0x51,
0x6D, 0x3B, 0x4D, 0x79, 0x69, 0x34, 0x6D, 0x55, 0x30, 0x23,
0x71, 0x31, 0x43, 0x22, 0x76, 0x5A, 0x7E, 0x64, 0x4C, 0x6C,
0x4C, 0x48, 0x59, 0x43, 0x3B, 0x2E, 0x6E, 0x6E, 0x37, 0x3F,
0x5E, 0x79, 0x78, 0x49, 0x25, 0x71, 0x3F, 0x23, 0x37, 0x39,
0x40, 0x33, 0x53, 0x31, 0x66, 0x45, 0x38, 0x32, 0x3E, 0x27,
0x68, 0x3B, 0x48, 0x5A, 0x2E, 0x2E, 0x70, 0x69, 0x5B, 0x77,
0x2D, 0x45, 0x22, 0x42, 0x6A, 0x4A, 0x7B, 0x40, 0x56, 0x61,
0x7C, 0x2E, 0x76, 0x39, 0x59, 0x2C, 0x3D, 0x4F, 0x51, 0x31,
0x5A, 0x32, 0x54, 0x5A, 0x41, 0x2E, 0x78, 0x77, 0x6D, 0x21,
0x2E, 0x5A, 0x2C, 0x4A, 0x3C, 0x2E, 0x4B, 0x46, 0x6A, 0x6B,
0x65, 0x6D, 0x30, 0x43, 0x6B, 0x29, 0x2B, 0x7E, 0x7B, 0x29,
0x44, 0x37, 0x56, 0x44, 0x3C, 0x7D, 0x50, 0x3B, 0x58, 0x79,
0x76, 0x51, 0x36, 0x46, 0x69, 0x7C, 0x3A, 0x43, 0x4B, 0x34,
0x2F, 0x61, 0x5B, 0x2E, 0x2E, 0x69, 0x31, 0x2B, 0x39, 0x41,
0x75, 0x41, 0x3F, 0x6D, 0x4F, 0x53, 0x68, 0x38, 0x41, 0x27,
0x65, 0x57, 0x69, 0x22, 0x29, 0x50, 0x64, 0x59, 0x51, 0x61,
0x66, 0x50, 0x47, 0x57, 0x32, 0x22, 0x3E, 0x68, 0x31, 0x65,
0x35, 0x3B, 0x73, 0x63, 0x62, 0x75, 0x5A, 0x42, 0x38, 0x7C,
0x3F, 0x55, 0x65, 0x4D, 0x44, 0x25, 0x37, 0x2A, 0x25, 0x36,
0x6D, 0x48, 0x79, 0x3D, 0x65, 0x34, 0x5B, 0x61, 0x3F, 0x48,
0x79, 0x41, 0x4B, 0x60, 0x5E, 0x59, 0x7A, 0x4F, 0x24, 0x26,
0x21, 0x57, 0x53, 0x66, 0x29, 0x3D, 0x74, 0x48, 0x70, 0x62,
0x64, 0x62, 0x51, 0x66, 0x50, 0x2D, 0x4A, 0x44, 0x5D, 0x26,
0x5D, 0x55, 0x4B, 0x77, 0x7C, 0x27, 0x65, 0x41, 0x4A, 0x4A,
0x70, 0x48, 0x66, 0x3B, 0x40, 0x51, 0x58, 0x4D, 0x66, 0x36,
0x6B, 0x52, 0x41, 0x6B, 0x56, 0x33, 0x64, 0x24, 0x7B, 0x6A,
0x68, 0x65, 0x2C, 0x66, 0x5B, 0x3C, 0x4C, 0x26, 0x34, 0x5E,
0x67, 0x30, 0x56, 0x78, 0x54, 0x73, 0x7A, 0x79, 0x3E, 0x71,
0x40, 0x33, 0x46, 0x3E, 0x73, 0x46, 0x51, 0x4D, 0x72, 0x28,
0x33, 0x27, 0x3E, 0x24, 0x51, 0x2B, 0x32, 0x7C, 0x4C, 0x25,
0x4F, 0x4A, 0x6C, 0x50, 0x49, 0x3E, 0x3D, 0x7B, 0x72, 0x32,
0x58, 0x61, 0x4E, 0x7B, 0x68, 0x7A, 0x7C, 0x48, 0x22, 0x4D,
0x7E, 0x46, 0x42, 0x58, 0x44, 0x25, 0x5A, 0x31, 0x36, 0x70,
0x25, 0x4F, 0x6D, 0x31, 0x3F, 0x6E, 0x51, 0x6B, 0x6E, 0x3F,
0x75, 0x7C, 0x5A, 0x3E, 0x52, 0x57, 0x30, 0x79, 0x35, 0x2A,
0x70, 0x7D, 0x24, 0x24, 0x3C, 0x77, 0x38, 0x6D, 0x65, 0x6F,
0x36, 0x35, 0x23, 0x49, 0x67, 0x73, 0x69, 0x22, 0x65, 0x55,
0x7D, 0x2B, 0x28, 0x7E, 0x3D, 0x7D, 0x70, 0x38, 0x71, 0x6C,
0x7C, 0x49, 0x3A, 0x33, 0x2D, 0x47, 0x5F, 0x71, 0x44, 0x79,
0x4F, 0x59, 0x60, 0x41, 0x7A, 0x36, 0x2C, 0x58, 0x4C, 0x60,
0x79, 0x53, 0x52, 0x68, 0x38, 0x45, 0x2F, 0x40, 0x44, 0x2A,
0x46, 0x7A, 0x56, 0x3C, 0x7A, 0x55, 0x6F, 0x5E, 0x58, 0x79,
0x71, 0x6B, 0x2C, 0x41, 0x51, 0x42, 0x37, 0x41, 0x43, 0x44,
0x26, 0x60, 0x47, 0x2B, 0x5B, 0x35, 0x30, 0x5D, 0x3B, 0x5C,
0x74, 0x56, 0x40, 0x6F, 0x23, 0x41, 0x58, 0x52, 0x30, 0x21,
0x68, 0x3E, 0x26, 0x21, 0x3D, 0x64, 0x2D, 0x3B, 0x2E, 0x4D,
0x41, 0x4F, 0x7A, 0x37, 0x52, 0x63, 0x59, 0x22, 0x41, 0x66,
0x4E, 0x28, 0x6D, 0x25, 0x2C, 0x50, 0x67, 0x3E, 0x66, 0x70,
0x69, 0x49, 0x2A, 0x2E, 0x35, 0x28, 0x78, 0x5C, 0x66, 0x31,
0x55, 0x6A, 0x36, 0x74, 0x28, 0x5D, 0x46, 0x52, 0x69, 0x5A,
0x39, 0x30, 0x38, 0x43, 0x30, 0x39, 0x66, 0x6D, 0x48, 0x5D,
0x2E, 0x63, 0x77, 0x21, 0x68, 0x67, 0x21, 0x6E, 0x5C, 0x6D,
0x2A, 0x4A, 0x38, 0x5B, 0x66, 0x76, 0x25, 0x59, 0x27, 0x7A,
0x6C, 0x7D, 0x70, 0x5D, 0x59, 0x47, 0x36, 0x62, 0x32, 0x52,
0x22, 0x6D, 0x65, 0x41, 0x38, 0x2C, 0x72, 0x7E, 0x2D, 0x50,
0x7A, 0x52, 0x37, 0x50, 0x52, 0x7A, 0x5B, 0x73, 0x23, 0x44,
0x38, 0x46, 0x50, 0x71, 0x59, 0x63, 0x65, 0x33, 0x60, 0x27,
0x3F, 0x23, 0x2F, 0x25, 0x23, 0x7A, 0x53, 0x5A, 0x29, 0x44,
0x31, 0x39, 0x69, 0x46, 0x35, 0x60, 0x48, 0x76, 0x5C, 0x4E,
0x54, 0x31, 0x5A, 0x74, 0x5E, 0x2B, 0x59, 0x63, 0x7A, 0x6D,
0x43, 0x27, 0x27, 0x7A, 0x5C, 0x26, 0x66, 0x48, 0x65, 0x5B,
0x5C, 0x34, 0x3B, 0x74, 0x5A, 0x5A, 0x71, 0x7D, 0x6A, 0x28,
0x23, 0x26, 0x73, 0x79, 0x63, 0x78, 0x61, 0x70, 0x35, 0x2B,
0x50, 0x64, 0x69, 0x60, 0x48, 0x3D, 0x61, 0x3B, 0x71, 0x7A,
0x27, 0x4B, 0x7C, 0x30, 0x77, 0x72, 0x6F, 0x49, 0x60, 0x7D,
0x24, 0x27, 0x60, 0x29, 0x33, 0x3B, 0x32, 0x53, 0x4D, 0x60,
0x27, 0x5F, 0x58, 0x35, 0x63, 0x7E, 0x38, 0x59, 0x49, 0x79,
0x4C, 0x2F, 0x70, 0x30, 0x24, 0x5F, 0x3B, 0x32, 0x49, 0x37,
0x49, 0x7D, 0x5C, 0x4E, 0x47, 0x31, 0x24, 0x26, 0x34, 0x5F,
0x5E, 0x45, 0x52, 0x58, 0x73, 0x73, 0x22, 0x2D, 0x49, 0x27,
0x59, 0x24, 0x21, 0x48, 0x4F, 0x25, 0x2D, 0x59, 0x76, 0x61,
0x6D, 0x29, 0x21, 0x6F, 0x4D, 0x5C, 0x26, 0x78, 0x26, 0x3C,
0x5F, 0x78, 0x49, 0x51, 0x79, 0x2E, 0x50, 0x59, 0x28, 0x74,
0x2E, 0x76, 0x6F, 0x3B, 0x2B, 0x41, 0x56, 0x70, 0x70, 0x76,
0x40, 0x65, 0x58, 0x64, 0x65, 0x45, 0x7A, 0x40, 0x6A, 0x58,
0x5E, 0x53, 0x3D, 0x38, 0x72, 0x75, 0x5E, 0x46, 0x2A, 0x4B,
0x48, 0x5F, 0x77, 0x6B, 0x24, 0x77, 0x57, 0x6C, 0x77, 0x2F,
0x44, 0x2C, 0x27, 0x64, 0x5D, 0x69, 0x3B, 0x45, 0x47, 0x65,
0x75, 0x4F, 0x5B, 0x69, 0x5E, 0x4E, 0x32, 0x22, 0x31, 0x53,
0x56, 0x56, 0x45, 0x77, 0x46, 0x62, 0x6F, 0x2F, 0x48, 0x6A,
0x6B, 0x38, 0x25, 0x7B, 0x22, 0x62, 0x58, 0x45, 0x3B, 0x32,
0x60, 0x38, 0x2F, 0x2B, 0x48, 0x3A, 0x6A, 0x3E, 0x75, 0x22,
0x38, 0x2D, 0x2F, 0x74, 0x5A, 0x32, 0x6F, 0x2B, 0x35, 0x69,
0x6B, 0x33, 0x6C, 0x67, 0x76, 0x39, 0x54, 0x5D, 0x45, 0x44,
0x4E, 0x37, 0x37, 0x31, 0x27, 0x7D, 0x4F, 0x5F, 0x4D, 0x47,
0x7E, 0x24, 0x73, 0x2B, 0x68, 0x54, 0x7D, 0x73, 0x25, 0x65,
0x3A, 0x5C, 0x3C, 0x7E, 0x62, 0x73, 0x6E, 0x7C, 0x40, 0x39,
0x75, 0x6B, 0x59, 0x42, 0x24, 0x21, 0x70, 0x75, 0x58, 0x56,
0x5B, 0x2F, 0x77, 0x6B, 0x5E, 0x67, 0x6D, 0x46, 0x54, 0x7E,
0x38, 0x39, 0x3F, 0x54, 0x7D, 0x40, 0x70, 0x53, 0x77, 0x33,
0x7C, 0x2B, 0x62, 0x4D, 0x41, 0x7A, 0x4A, 0x37, 0x2B, 0x4A,
0x4C, 0x21, 0x60, 0x42, 0x6D, 0x30, 0x59, 0x2B, 0x3D, 0x30,
0x22, 0x28, 0x6E, 0x72, 0x41, 0x41, 0x33, 0x2F, 0x59, 0x6D,
0x64, 0x57, 0x65, 0x70, 0x49, 0x7D, 0x36, 0x56, 0x72, 0x7C,
0x58, 0x71, 0x4C, 0x77, 0x3A, 0x6C, 0x50, 0x61, 0x22, 0x6A,
0x6A, 0x62, 0x64, 0x6A, 0x65, 0x51, 0x43, 0x3F, 0x64, 0x63,
0x5C, 0x6E, 0x5C, 0x50, 0x4F, 0x2B, 0x27, 0x66, 0x69, 0x47,
0x30, 0x3E, 0x77, 0x78, 0x66, 0x2B, 0x67, 0x3A, 0x46, 0x3D,
0x40, 0x78, 0x28, 0x2E, 0x3D, 0x51, 0x56, 0x44, 0x72, 0x2E,
0x6C, 0x25, 0x67, 0x52, 0x64, 0x23, 0x36, 0x69, 0x4A, 0x2A,
0x25, 0x30, 0x61, 0x3B, 0x4D, 0x2C, 0x49, 0x59, 0x72, 0x5F,
0x54, 0x3D, 0x41, 0x23, 0x3C, 0x27, 0x34, 0x59, 0x56, 0x78,
0x4F, 0x64, 0x69, 0x5B, 0x26, 0x69, 0x2B, 0x7B, 0x41, 0x32,
0x60, 0x74, 0x6B, 0x27, 0x38, 0x35, 0x58, 0x6B, 0x45, 0x3A,
0x75, 0x33, 0x57, 0x46, 0x4F, 0x7D, 0x47, 0x5E, 0x45, 0x3E,
0x70, 0x52, 0x4B, 0x3D, 0x47, 0x3A, 0x53, 0x3D, 0x67, 0x34,
0x49, 0x66, 0x33, 0x6A, 0x6A, 0x60, 0x72, 0x6F, 0x4D, 0x2F,
0x6A, 0x35, 0x5F, 0x24, 0x4F, 0x35, 0x3F, 0x43, 0x77, 0x45,
0x34, 0x3F, 0x43, 0x45, 0x7B, 0x6F, 0x7B, 0x25, 0x21, 0x31,
0x44, 0x5B, 0x48, 0x62, 0x51, 0x72, 0x42, 0x73, 0x74, 0x4A,
0x44, 0x5C, 0x7C, 0x68, 0x37, 0x78, 0x47, 0x5D, 0x2C, 0x43,
0x55, 0x21, 0x62, 0x29, 0x50, 0x2F, 0x7B, 0x71, 0x61, 0x74,
0x6C, 0x4A, 0x76, 0x2A, 0x6A, 0x32, 0x3A, 0x6A, 0x63, 0x64,
0x60, 0x61, 0x2F, 0x67, 0x5C, 0x63, 0x26, 0x4E, 0x53, 0x73,
0x76, 0x73, 0x75, 0x23, 0x7E, 0x21, 0x3F, 0x2E, 0x2C, 0x26,
0x40, 0x56, 0x71, 0x2A, 0x6B, 0x62, 0x7D, 0x3C, 0x5D, 0x28,
0x60, 0x27, 0x3B, 0x43, 0x43, 0x51, 0x71, 0x77, 0x44, 0x57,
0x75, 0x42, 0x21, 0x6A, 0x68, 0x41, 0x4C, 0x62, 0x7B, 0x55,
0x35, 0x6B, 0x35, 0x36, 0x77, 0x58, 0x61, 0x2E, 0x21, 0x6D,
0x26, 0x71, 0x3B, 0x46, 0x21, 0x23, 0x4E, 0x59, 0x3C, 0x40,
0x40, 0x46, 0x68, 0x25, 0x37, 0x63, 0x23, 0x37, 0x64, 0x6F,
0x68, 0x4D, 0x7A, 0x27, 0x51, 0x50, 0x6F, 0x3A, 0x62, 0x7A,
0x63, 0x38, 0x38, 0x6B, 0x3D, 0x48, 0x56, 0x76, 0x4A, 0x5B,
0x68, 0x5B, 0x73, 0x59, 0x62, 0x42, 0x77, 0x78, 0x3A, 0x52,
0x44, 0x78, 0x58, 0x53, 0x30, 0x7C, 0x54, 0x2E, 0x33, 0x46,
0x3C, 0x77, 0x43, 0x62, 0x6A, 0x33, 0x4D, 0x37, 0x33, 0x5F,
0x3D, 0x6E, 0x65, 0x21, 0x4C, 0x61, 0x5C, 0x65, 0x55, 0x64,
0x6D, 0x41, 0x37, 0x35, 0x55, 0x3C, 0x71, 0x71, 0x5C, 0x7D,
0x76, 0x33, 0x4C, 0x75, 0x49, 0x25, 0x70, 0x25, 0x34, 0x27,
0x50, 0x76, 0x65, 0x75, 0x55, 0x66, 0x26, 0x62, 0x7B, 0x2D,
0x69, 0x6C, 0x5E, 0x54, 0x2B, 0x7B, 0x3D, 0x40, 0x3D, 0x2F,
0x2B, 0x62, 0x7D, 0x5A, 0x21, 0x6B, 0x3D, 0x3A, 0x72, 0x7D,
0x5F, 0x6C, 0x7E, 0x57, 0x6D, 0x2C, 0x4B, 0x29, 0x31, 0x53,
0x45, 0x49, 0x40, 0x78, 0x6D, 0x7C, 0x46, 0x43, 0x5A, 0x34,
0x31, 0x67, 0x50, 0x28, 0x72, 0x21, 0x2C, 0x5F, 0x52, 0x31,
0x5D, 0x3E, 0x50, 0x5E, 0x40, 0x4F, 0x2B, 0x26, 0x51, 0x43,
0x5B, 0x5D, 0x74, 0x79, 0x3B, 0x2B, 0x2B, 0x2A, 0x25, 0x5D,
0x5A, 0x7E, 0x70, 0x3F, 0x76, 0x26, 0x29, 0x51, 0x69, 0x53,
0x3A, 0x51, 0x72, 0x49, 0x64, 0x63, 0x3E, 0x6C, 0x66, 0x38,
0x6A, 0x68, 0x62, 0x2C, 0x4D, 0x70, 0x7C, 0x25, 0x5D, 0x33,
0x52, 0x7D, 0x5B, 0x4E, 0x6E, 0x4C, 0x45, 0x4D, 0x49, 0x46,
0x3E, 0x23, 0x72, 0x4D, 0x71, 0x5F, 0x40, 0x78, 0x27, 0x21,
0x4E, 0x3E, 0x5A, 0x59, 0x48, 0x6B, 0x30, 0x4A, 0x65, 0x48,
0x4F, 0x65, 0x6C, 0x6F, 0x31, 0x59, 0x2A, 0x34, 0x7E, 0x29,
0x63, 0x48, 0x24, 0x31, 0x7B, 0x25, 0x2B, 0x28, 0x39, 0x21,
0x45, 0x4E, 0x2F, 0x3B, 0x48, 0x62, 0x5A, 0x25, 0x2B, 0x73,
0x5E, 0x46, 0x74, 0x32, 0x5B, 0x4D, 0x3A, 0x7B, 0x50, 0x4F,
0x47, 0x33, 0x6E, 0x79, 0x49, 0x7C, 0x23, 0x23, 0x33, 0x51,
0x23, 0x65, 0x39, 0x32, 0x2B, 0x58, 0x30, 0x49, 0x39, 0x2A,
0x5D, 0x43, 0x2A, 0x62, 0x67, 0x64, 0x66, 0x36, 0x54, 0x41,
0x72, 0x46, 0x7C, 0x67, 0x2B, 0x31, 0x72, 0x43, 0x49, 0x3A,
0x25, 0x6F, 0x78, 0x3E, 0x57, 0x4A, 0x57, 0x2C, 0x55, 0x22,
0x5E, 0x5B, 0x52, 0x56, 0x7B, 0x7C, 0x64, 0x37, 0x72, 0x46,
0x56, 0x51, 0x48, 0x75, 0x22, 0x73, 0x75, 0x34, 0x62, 0x3D,
0x23, 0x48, 0x58, 0x43, 0x5F, 0x45, 0x58, 0x78, 0x73, 0x59,
0x74, 0x3F, 0x41, 0x45, 0x46, 0x3A, 0x3C, 0x5F, 0x3A, 0x79,
0x3F, 0x5B, 0x35, 0x36, 0x4F, 0x34, 0x3B, 0x43, 0x25, 0x54,
0x5B, 0x74, 0x24, 0x49, 0x26, 0x2C, 0x46, 0x59, 0x70, 0x75,
0x5D, 0x24, 0x61, 0x36, 0x5E, 0x4A, 0x27, 0x70, 0x69, 0x7E,
0x60, 0x25, 0x59, 0x42, 0x4D, 0x71, 0x72, 0x3F, 0x54, 0x3B,
0x41, 0x38, 0x57, 0x26, 0x5D, 0x6F, 0x26, 0x70, 0x2C, 0x45,
0x54, 0x3E, 0x4A, 0x65, 0x3C, 0x6B, 0x67, 0x5B, 0x78, 0x24,
0x58, 0x43, 0x51, 0x4E, 0x50, 0x57, 0x76, 0x39, 0x3E, 0x2B,
0x6C, 0x7D, 0x33, 0x76, 0x49, 0x5B, 0x5F, 0x33, 0x6E, 0x3C,
0x64, 0x33, 0x5E, 0x76, 0x22, 0x4C, 0x38, 0x2C, 0x57, 0x23,
0x50, 0x28, 0x76, 0x70, 0x25, 0x6E, 0x68, 0x4F, 0x25, 0x62,
0x4E, 0x49, 0x27, 0x6F, 0x3C, 0x73, 0x6F, 0x59, 0x39, 0x50,
0x4D, 0x7B, 0x60, 0x22, 0x58, 0x49, 0x61, 0x6F, 0x62, 0x77,
0x50, 0x72, 0x68, 0x60, 0x57, 0x35, 0x64, 0x39, 0x2A, 0x63,
0x27, 0x35, 0x52, 0x2E, 0x24, 0x3E, 0x49, 0x26, 0x76, 0x47,
0x57, 0x28, 0x61, 0x6E, 0x2C, 0x5C, 0x7E, 0x40, 0x50, 0x42,
0x31, 0x75, 0x79, 0x63, 0x34, 0x68, 0x2C, 0x4A, 0x42, 0x54,
0x6B, 0x68, 0x6A, 0x6A, 0x35, 0x4E, 0x5E, 0x37, 0x7C, 0x7A,
0x73, 0x45, 0x50, 0x7D, 0x5C, 0x50, 0x6C, 0x23, 0x43, 0x78,
0x6D, 0x25, 0x7A, 0x42, 0x51, 0x31, 0x3B, 0x6D, 0x46, 0x7A,
0x4E, 0x32, 0x59, 0x72, 0x3B, 0x39, 0x35, 0x5B, 0x61, 0x57,
0x45, 0x37, 0x3E, 0x67, 0x27, 0x45, 0x47, 0x6A, 0x26, 0x7E,
0x41, 0x77, 0x30, 0x69, 0x4D, 0x41, 0x69, 0x2B, 0x26, 0x46,
0x4B, 0x37, 0x3B, 0x36, 0x4F, 0x74, 0x52, 0x70, 0x57, 0x33,
0x44, 0x28, 0x45, 0x2B, 0x51, 0x50, 0x24, 0x42, 0x30, 0x70,
0x71, 0x7A, 0x78, 0x3B, 0x7A, 0x60, 0x2A, 0x31, 0x7D, 0x46,
0x33, 0x65, 0x4C, 0x42, 0x40, 0x60, 0x48, 0x78, 0x2D, 0x31,
0x3E, 0x4B, 0x2A, 0x43, 0x7A, 0x53, 0x31, 0x24, 0x5F, 0x5D,
0x31, 0x63, 0x42, 0x2C, 0x7D, 0x40, 0x47, 0x3C, 0x42, 0x2F,
0x2E, 0x36, 0x5A, 0x6E, 0x6D, 0x3D, 0x2D, 0x51, 0x63, 0x79,
0x58, 0x49, 0x68, 0x6F, 0x3D, 0x4E, 0x71, 0x65, 0x73, 0x57,
0x65, 0x72, 0x3A, 0x33, 0x4A, 0x56, 0x73, 0x43, 0x22, 0x5D,
0x46, 0x34, 0x6F, 0x5A, 0x77, 0x40, 0x37, 0x26, 0x73, 0x74,
0x7C, 0x66, 0x32, 0x24, 0x69, 0x2C, 0x4F, 0x61, 0x4F, 0x47,
0x7A, 0x53, 0x6E, 0x2A, 0x4E, 0x5B, 0x4B, 0x4B, 0x2C, 0x42,
0x5F, 0x23, 0x21, 0x32, 0x4F, 0x2F, 0x7D, 0x24, 0x2A, 0x22,
0x3C, 0x3F, 0x7B, 0x38, 0x43, 0x32, 0x4E, 0x45, 0x5E, 0x6A,
0x26, 0x7E, 0x2D, 0x62, 0x50, 0x74, 0x7E, 0x61, 0x3E, 0x2D,
0x55, 0x26, 0x44, 0x7E, 0x39, 0x6F, 0x5E, 0x21, 0x4A, 0x4B,
0x27, 0x6C, 0x2B, 0x4B, 0x22, 0x66, 0x47, 0x62, 0x77, 0x2C,
0x57, 0x5B, 0x74, 0x7A, 0x61, 0x71, 0x6F, 0x3D, 0x41, 0x33,
0x4C, 0x31, 0x7D, 0x25, 0x67, 0x35, 0x44, 0x5D, 0x6D, 0x3D,
0x3E, 0x34, 0x56, 0x41, 0x52, 0x47, 0x6B, 0x7D, 0x78, 0x75,
0x61, 0x44, 0x51, 0x26, 0x29, 0x64, 0x2E, 0x7B, 0x4A, 0x6E,
0x22, 0x23, 0x64, 0x74, 0x50, 0x78, 0x29, 0x66, 0x34, 0x37,
0x4F, 0x40, 0x72, 0x39, 0x54, 0x6F, 0x76, 0x45, 0x53, 0x4D,
0x6C, 0x48, 0x3A, 0x3E, 0x5F, 0x72, 0x69, 0x50, 0x28, 0x56,
0x2F, 0x79, 0x75, 0x2A, 0x41, 0x30, 0x54, 0x3F, 0x58, 0x5F,
0x3A, 0x71, 0x23, 0x34, 0x72, 0x62, 0x39, 0x73, 0x35, 0x28,
0x4A, 0x3C, 0x42, 0x76, 0x7C, 0x6D, 0x39, 0x6F, 0x46, 0x51,
0x27, 0x2D, 0x67, 0x4D, 0x48, 0x5D, 0x59, 0x34, 0x70, 0x4C,
0x73, 0x42, 0x25, 0x39, 0x3D, 0x23, 0x3B, 0x4D, 0x34, 0x72,
0x40, 0x36, 0x41, 0x6C, 0x71, 0x6B, 0x4A, 0x69, 0x7C, 0x39,
0x6D, 0x3A, 0x7E, 0x3C, 0x3A, 0x62, 0x69, 0x42, 0x24, 0x3F,
0x6B, 0x72, 0x66, 0x4F, 0x39, 0x5B, 0x4A, 0x5E, 0x49, 0x6B,
0x42, 0x78, 0x78, 0x29, 0x68, 0x30, 0x78, 0x6C, 0x49, 0x47,
0x78, 0x66, 0x44, 0x3B, 0x22, 0x39, 0x78, 0x5A, 0x79, 0x65,
0x7E, 0x63, 0x54, 0x44, 0x78, 0x65, 0x51, 0x44, 0x64, 0x4D,
0x6C, 0x53, 0x4C, 0x34, 0x75, 0x60, 0x5C, 0x61, 0x7B, 0x6F,
0x22, 0x6B, 0x2A, 0x41, 0x4B, 0x74, 0x7E, 0x31, 0x76, 0x3C,
0x6D, 0x52, 0x66, 0x55, 0x37, 0x46, 0x56, 0x3D, 0x46, 0x23,
0x33, 0x37, 0x56, 0x30, 0x6D, 0x22, 0x62, 0x27, 0x56, 0x58,
0x33, 0x35, 0x41, 0x36, 0x5B, 0x32, 0x41, 0x78, 0x3F, 0x2D,
0x76, 0x3F, 0x29, 0x42, 0x34, 0x31, 0x7B, 0x41, 0x66, 0x69,
0x38, 0x27, 0x7B, 0x36, 0x44, 0x29, 0x46, 0x53, 0x6D, 0x3B,
0x42, 0x38, 0x2E, 0x57, 0x38, 0x6D, 0x53, 0x4B, 0x7C, 0x4A,
0x4F, 0x50, 0x67, 0x36, 0x6E, 0x6E, 0x61, 0x76, 0x4C, 0x22,
0x4E, 0x69, 0x54, 0x48, 0x5B, 0x7E, 0x54, 0x63, 0x7E, 0x36,
0x46, 0x50, 0x66, 0x25, 0x5C, 0x60, 0x22, 0x36, 0x7B, 0x2E,
0x47, 0x5F, 0x3D, 0x73, 0x31, 0x60, 0x56, 0x43, 0x35, 0x25,
0x3D, 0x61, 0x49, 0x23, 0x27, 0x27, 0x54, 0x2E, 0x66, 0x3F,
0x28, 0x71, 0x2F, 0x5D, 0x3F, 0x59, 0x79, 0x57, 0x75, 0x79,
0x76, 0x77, 0x35, 0x35, 0x30, 0x4E, 0x2C, 0x34, 0x2A, 0x26,
0x6F, 0x68, 0x2C, 0x34, 0x4F, 0x6F, 0x62, 0x64, 0x40, 0x41,
0x62, 0x59, 0x68, 0x5D, 0x33, 0x77, 0x61, 0x5C, 0x7D, 0x2C,
0x4C, 0x33, 0x6B, 0x39, 0x6C, 0x2C, 0x4F, 0x2C, 0x22, 0x3E,
0x7E, 0x74, 0x7E, 0x59, 0x6C, 0x39, 0x3E, 0x55, 0x21, 0x4B,
0x72, 0x50, 0x70, 0x2D, 0x64, 0x21, 0x7A, 0x50, 0x22, 0x38,
0x7D, 0x66, 0x79, 0x60, 0x73, 0x50, 0x72, 0x36, 0x4F, 0x50,
0x2E, 0x26, 0x69, 0x4B, 0x5B, 0x58, 0x2F, 0x67, 0x55, 0x66,
0x30, 0x6F, 0x68, 0x5C, 0x5A, 0x46, 0x4F, 0x47, 0x44, 0x21,
0x62, 0x39, 0x56, 0x2C, 0x37, 0x3E, 0x7A, 0x26, 0x74, 0x56,
0x62, 0x28, 0x36, 0x40, 0x2F, 0x3C, 0x68, 0x60, 0x53, 0x6B,
0x45, 0x7E, 0x31, 0x76, 0x4D, 0x5C, 0x39, 0x3C, 0x66, 0x3E,
0x7A, 0x6D, 0x33, 0x74, 0x3D, 0x44, 0x78, 0x29, 0x2B, 0x57,
0x4C, 0x42, 0x50, 0x33, 0x33, 0x31, 0x3B, 0x4F, 0x55, 0x58,
0x6E, 0x74, 0x79, 0x30, 0x3D, 0x65, 0x59, 0x4A, 0x6D, 0x23,
0x38, 0x32, 0x61, 0x5D, 0x52, 0x7B, 0x74, 0x69, 0x36, 0x62,
0x26, 0x30, 0x3A, 0x7E, 0x36, 0x54, 0x24, 0x64, 0x78, 0x2C,
0x44, 0x28, 0x24, 0x49, 0x38, 0x69, 0x25, 0x4C, 0x32, 0x5E,
0x5C, 0x61, 0x76, 0x36, 0x4A, 0x6E, 0x2B, 0x3D, 0x57, 0x21,
0x58, 0x52, 0x48, 0x5A, 0x66, 0x2C, 0x55, 0x3B, 0x23, 0x3B,
0x5F, 0x6D, 0x25, 0x74, 0x40, 0x48, 0x7A, 0x47, 0x4A, 0x48,
0x79, 0x27, 0x6E, 0x22, 0x4E, 0x6E, 0x78, 0x39, 0x24, 0x5F,
0x3D, 0x5C, 0x55, 0x69, 0x74, 0x2E, 0x7B, 0x54, 0x3B, 0x65,
0x74, 0x48, 0x22, 0x27, 0x60, 0x6A, 0x5E, 0x53, 0x73, 0x7E,
0x4D, 0x24, 0x72, 0x72, 0x3F, 0x28, 0x59, 0x3A, 0x6C, 0x7A,
0x6A, 0x7B, 0x4B, 0x3F, 0x6C, 0x41, 0x5B, 0x49, 0x79, 0x79,
0x4B, 0x3B, 0x4C, 0x4C, 0x6B, 0x6F, 0x6C, 0x24, 0x63, 0x2E,
0x34, 0x6A, 0x4E, 0x60, 0x50, 0x39, 0x4D, 0x27, 0x6B, 0x53,
0x2D, 0x59, 0x5B, 0x22, 0x5E, 0x28, 0x72, 0x6A, 0x62, 0x21,
0x39, 0x61, 0x7E, 0x6F, 0x2D, 0x7C, 0x38, 0x4F, 0x37, 0x43,
0x4A, 0x51, 0x49, 0x45, 0x7A, 0x33, 0x29, 0x2A, 0x5F, 0x72,
0x4E, 0x77, 0x50, 0x3A, 0x73, 0x52, 0x6E, 0x4A, 0x41, 0x37,
0x5B, 0x78, 0x2C, 0x71, 0x50, 0x65, 0x25, 0x60, 0x26, 0x7E,
0x23, 0x3D, 0x6E, 0x5B, 0x21, 0x59, 0x6F, 0x29, 0x53, 0x3B,
0x31, 0x72, 0x62, 0x3A, 0x74, 0x57, 0x2E, 0x76, 0x78, 0x4F,
0x46, 0x24, 0x71, 0x64, 0x2D, 0x50, 0x26, 0x50, 0x3C, 0x47,
0x74, 0x36, 0x50, 0x38, 0x37, 0x31, 0x5A, 0x77, 0x21, 0x62,
0x76, 0x73, 0x69, 0x7D, 0x6C, 0x65, 0x4B, 0x55, 0x39, 0x36,
0x69, 0x2D, 0x69, 0x3D, 0x60, 0x50, 0x65, 0x4E, 0x54, 0x49,
0x69, 0x7C, 0x37, 0x54, 0x2D, 0x46, 0x29, 0x65, 0x50, 0x6F,
0x2B, 0x6B, 0x24, 0x30, 0x25, 0x47, 0x41, 0x43, 0x7E, 0x2A,
0x71, 0x5C, 0x58, 0x25, 0x6B, 0x72, 0x58, 0x6A, 0x23, 0x2E,
0x30, 0x45, 0x74, 0x4F, 0x69, 0x6E, 0x65, 0x2F, 0x6B, 0x76,
0x21, 0x52, 0x57, 0x3D, 0x67, 0x4A, 0x6E, 0x45, 0x4F, 0x6E,
0x7D, 0x6B, 0x3A, 0x66, 0x2E, 0x24, 0x24, 0x72, 0x49, 0x60,
0x7A, 0x6E, 0x64, 0x46, 0x31, 0x72, 0x70, 0x2F, 0x6F, 0x73,
0x4A, 0x41, 0x4D, 0x4E, 0x34, 0x4C, 0x46, 0x5D, 0x50, 0x25,
0x25, 0x31, 0x2D, 0x2D, 0x64, 0x44, 0x25, 0x4A, 0x50, 0x4A,
0x59, 0x6A, 0x34, 0x64, 0x7E, 0x55, 0x63, 0x50, 0x22, 0x5C,
0x4C, 0x36, 0x49, 0x73, 0x22, 0x53, 0x62, 0x7D, 0x58, 0x4D,
0x46, 0x33, 0x73, 0x2A, 0x6C, 0x36, 0x4B, 0x23, 0x6C, 0x3B,
0x45, 0x4E, 0x2C, 0x22, 0x45, 0x45, 0x7D, 0x44, 0x5A, 0x61,
0x29, 0x3A, 0x39, 0x6F, 0x7C, 0x7B, 0x34, 0x25, 0x73, 0x48,
0x70, 0x55, 0x38, 0x49, 0x3F, 0x66, 0x45, 0x4E, 0x79, 0x61,
0x69, 0x52, 0x68, 0x23, 0x4D, 0x44, 0x2B, 0x3A, 0x6D, 0x62,
0x69, 0x7D, 0x5A, 0x79, 0x6F, 0x7A, 0x2A, 0x34, 0x2E, 0x61,
0x25, 0x39, 0x6F, 0x3E, 0x33, 0x61, 0x46, 0x37, 0x38, 0x53,
0x38, 0x63, 0x4E, 0x3B, 0x52, 0x5D, 0x67, 0x7C, 0x24, 0x6A,
0x29, 0x49, 0x2B, 0x41, 0x55, 0x6F, 0x56, 0x7C, 0x5C, 0x45,
0x76, 0x5F, 0x3A, 0x6F, 0x3C, 0x54, 0x21, 0x64, 0x6B, 0x35,
0x41, 0x70, 0x28, 0x60, 0x39, 0x7B, 0x75, 0x53, 0x56, 0x47,
0x2D, 0x61, 0x78, 0x69, 0x30, 0x35, 0x35, 0x50, 0x47, 0x36,
0x4C, 0x76, 0x34, 0x35, 0x4E, 0x49, 0x3D, 0x71, 0x33, 0x32,
0x47, 0x3D, 0x68, 0x53, 0x4F, 0x27, 0x2E, 0x29, 0x43, 0x79,
0x2F, 0x35, 0x4B, 0x24, 0x42, 0x29, 0x21, 0x57, 0x5F, 0x2F,
0x66, 0x73, 0x42, 0x57, 0x74, 0x28, 0x3C, 0x78, 0x6D, 0x40,
0x50, 0x58, 0x25, 0x7B, 0x37, 0x29, 0x3F, 0x5C, 0x70, 0x72,
0x33, 0x4D, 0x54, 0x51, 0x22, 0x61, 0x6E, 0x50, 0x4E, 0x67,
0x6E, 0x7D, 0x2E, 0x42, 0x7D, 0x59, 0x6F, 0x28, 0x40, 0x52,
0x3F, 0x26, 0x64, 0x51, 0x4D, 0x3F, 0x4B, 0x23, 0x4E, 0x63,
0x49, 0x50, 0x74, 0x5A, 0x66, 0x57, 0x53, 0x61, 0x5D, 0x46,
0x25, 0x6C, 0x34, 0x5F, 0x7E, 0x2E, 0x5F, 0x68, 0x4C, 0x31,
0x3A, 0x25, 0x2E, 0x4C, 0x7E, 0x55, 0x77, 0x5A, 0x61, 0x55,
0x5E, 0x2A, 0x45, 0x6C, 0x4E, 0x57, 0x3C, 0x65, 0x4A, 0x37,
0x50, 0x4A, 0x34, 0x5B, 0x64, 0x6E, 0x6E, 0x63, 0x7E, 0x6E,
0x64, 0x5F, 0x6F, 0x4A, 0x5E, 0x7B, 0x78, 0x48, 0x64, 0x6C,
0x46, 0x61, 0x48, 0x5D, 0x22, 0x4C, 0x65, 0x6A, 0x61, 0x26,
0x73, 0x2C, 0x37, 0x42, 0x41, 0x42, 0x75, 0x25, 0x25, 0x7C,
0x76, 0x3A, 0x7A, 0x44, 0x4A, 0x58, 0x3C, 0x5A, 0x42, 0x66,
0x3F, 0x57, 0x58, 0x3D, 0x2C, 0x65, 0x6E, 0x47, 0x57, 0x4D,
0x54, 0x78, 0x41, 0x68, 0x30, 0x58, 0x24, 0x50, 0x4F, 0x3C,
0x5B, 0x3A, 0x4F, 0x29, 0x40, 0x6B, 0x67, 0x5C, 0x62, 0x2D,
0x46, 0x54, 0x25, 0x38, 0x50, 0x5F, 0x70, 0x25, 0x30, 0x54,
0x62, 0x6C, 0x28, 0x4B, 0x23, 0x73, 0x61, 0x50, 0x2F, 0x31,
0x71, 0x65, 0x46, 0x21, 0x48, 0x7C, 0x71, 0x6D, 0x4C, 0x34,
0x3C, 0x3B, 0x3B, 0x49, 0x32, 0x69, 0x23, 0x7B, 0x76, 0x7C,
0x4D, 0x7B, 0x64, 0x34, 0x4A, 0x32, 0x53, 0x2C, 0x48, 0x50,
0x73, 0x2E, 0x2B, 0x2B, 0x6E, 0x35, 0x2C, 0x23, 0x60, 0x47,
0x3E, 0x58, 0x22, 0x5E, 0x22, 0x53, 0x62, 0x67, 0x29, 0x7A,
0x28, 0x44, 0x26, 0x53, 0x64, 0x36, 0x47, 0x6C, 0x21, 0x6E,
0x4B, 0x29, 0x27, 0x30, 0x6A, 0x30, 0x6F, 0x27, 0x2D, 0x28,
0x48, 0x69, 0x24, 0x6B, 0x60, 0x74, 0x54, 0x3B, 0x52, 0x5F,
0x2E, 0x46, 0x4B, 0x6E, 0x6A, 0x37, 0x5C, 0x79, 0x58, 0x7B,
0x3B, 0x36, 0x7A, 0x72, 0x29, 0x24, 0x68, 0x4E, 0x3C, 0x4F,
0x4D, 0x5E, 0x53, 0x3F, 0x6B, 0x79, 0x47, 0x65, 0x79, 0x35,
0x65, 0x48, 0x7B, 0x6C, 0x53, 0x7C, 0x4A, 0x37, 0x70, 0x3F,
0x3F, 0x29, 0x78, 0x26, 0x38, 0x65, 0x46, 0x35, 0x60, 0x70,
0x77, 0x37, 0x5B, 0x52, 0x7E, 0x65, 0x77, 0x79, 0x3A, 0x5E,
0x27, 0x5E, 0x2C, 0x77, 0x78, 0x48, 0x22, 0x5B, 0x21, 0x4E,
0x24, 0x33, 0x6C, 0x6B, 0x6B, 0x7D, 0x3A, 0x54, 0x28, 0x68,
0x27, 0x22, 0x21, 0x66, 0x28, 0x6C, 0x4A, 0x39, 0x5F, 0x7B,
0x28, 0x5E, 0x5F, 0x44, 0x3A, 0x6C, 0x3E, 0x29, 0x4B, 0x50,
0x66, 0x54, 0x26, 0x74, 0x7D, 0x2F, 0x4A, 0x70, 0x29, 0x4A,
0x30, 0x69, 0x38, 0x2F, 0x49, 0x5B, 0x33, 0x71, 0x51, 0x55,
0x48, 0x55, 0x44, 0x60, 0x58, 0x50, 0x3F, 0x43, 0x49, 0x6C,
0x5F, 0x38, 0x67, 0x67, 0x46, 0x28, 0x77, 0x4F, 0x56, 0x6A,
0x42, 0x2B, 0x42, 0x5D, 0x61, 0x76, 0x52, 0x43, 0x25, 0x32,
0x4A, 0x47, 0x39, 0x4A, 0x4C, 0x36, 0x29, 0x60, 0x4C, 0x72,
0x2D, 0x30, 0x7E, 0x68, 0x37, 0x4F, 0x7A, 0x4D, 0x4F, 0x2B,
0x7C, 0x79, 0x4C, 0x68, 0x37, 0x23, 0x3C, 0x27, 0x48, 0x4E,
0x6F, 0x77, 0x6B, 0x4F, 0x3B, 0x62, 0x68, 0x77, 0x50, 0x36,
0x21, 0x3E, 0x21, 0x48, 0x58, 0x71, 0x73, 0x79, 0x53, 0x2E,
0x58, 0x2C, 0x79, 0x42, 0x35, 0x2D, 0x67, 0x7B, 0x63, 0x61,
0x6D, 0x2F, 0x7B, 0x22, 0x63, 0x3C, 0x48, 0x7A, 0x6C, 0x79,
0x53, 0x2C, 0x44, 0x40, 0x4D, 0x41, 0x6D, 0x7C, 0x7E, 0x27,
0x5A, 0x2F, 0x37, 0x7B, 0x72, 0x3B, 0x2B, 0x32, 0x74, 0x25,
0x37, 0x3A, 0x53, 0x56, 0x3E, 0x2C, 0x29, 0x52, 0x5F, 0x57,
0x37, 0x56, 0x2D, 0x4E, 0x5E, 0x62, 0x2F, 0x24, 0x26, 0x58,
0x5F, 0x70, 0x67, 0x59, 0x35, 0x46, 0x5D, 0x2E, 0x7D, 0x70,
0x58, 0x76, 0x71, 0x31, 0x36, 0x3F, 0x25, 0x42, 0x6B, 0x3A,
0x75, 0x78, 0x2C, 0x32, 0x79, 0x6C, 0x50, 0x77, 0x2C, 0x29,
0x55, 0x30, 0x33, 0x49, 0x34, 0x61, 0x4B, 0x75, 0x75, 0x6B,
0x7C, 0x5B, 0x36, 0x5B, 0x37, 0x35, 0x30, 0x40, 0x7A, 0x21,
0x58, 0x69, 0x46, 0x63, 0x44, 0x29, 0x6C, 0x28, 0x3A, 0x2B,
0x77, 0x7D, 0x74, 0x33, 0x38, 0x5C, 0x25, 0x5F, 0x41, 0x30,
0x31, 0x43, 0x54, 0x33, 0x66, 0x4B, 0x6D, 0x59, 0x2B, 0x22,
0x41, 0x22, 0x45, 0x3A, 0x50, 0x45, 0x3B, 0x6C, 0x21, 0x71,
0x3E, 0x7E, 0x59, 0x52, 0x6C, 0x53, 0x78, 0x6F, 0x76, 0x2B,
0x6B, 0x4A, 0x63, 0x3E, 0x2C, 0x5D, 0x53, 0x60, 0x71, 0x28,
0x4D, 0x4B, 0x43, 0x37, 0x7C, 0x76, 0x79, 0x6F, 0x62, 0x40,
0x49, 0x51, 0x2B, 0x5A, 0x42, 0x53, 0x22, 0x50, 0x3E, 0x52,
0x5E, 0x63, 0x37, 0x21, 0x66, 0x57, 0x33, 0x4A, 0x58, 0x2E,
0x55, 0x2A, 0x2C, 0x29, 0x7C, 0x4B, 0x66, 0x7C, 0x57, 0x7E,
0x2F, 0x48, 0x33, 0x3D, 0x79, 0x4F, 0x3E, 0x67, 0x50, 0x3F,
0x73, 0x2C, 0x55, 0x76, 0x7D, 0x50, 0x5D, 0x48, 0x22, 0x63,
0x5A, 0x64, 0x61, 0x74, 0x3F, 0x54, 0x38, 0x6B, 0x6B, 0x3E,
0x51, 0x43, 0x51, 0x36, 0x3F, 0x53, 0x4B, 0x3A, 0x50, 0x3D,
0x32, 0x49, 0x67, 0x45, 0x53, 0x29, 0x75, 0x5C, 0x26, 0x4C,
0x3A, 0x35, 0x57, 0x31, 0x65, 0x4A, 0x55, 0x63, 0x22, 0x27,
0x2E, 0x51, 0x25, 0x3D, 0x6A, 0x56, 0x5C, 0x2A, 0x27, 0x3C,
0x2C, 0x74, 0x5C, 0x42, 0x3C, 0x44, 0x2A, 0x5F, 0x2A, 0x28,
0x38, 0x61, 0x30, 0x49, 0x43, 0x4E, 0x71, 0x5C, 0x76, 0x31,
0x5A, 0x57, 0x68, 0x50, 0x78, 0x30, 0x57, 0x2C, 0x50, 0x63,
0x36, 0x24, 0x54, 0x6F, 0x2B, 0x5F, 0x21, 0x59, 0x3F, 0x5F,
0x24, 0x44, 0x3B, 0x57, 0x28, 0x3D, 0x4F, 0x62, 0x50, 0x5C,
0x24, 0x50, 0x76, 0x74, 0x2D, 0x70, 0x3D, 0x2E, 0x42, 0x4E,
0x3E, 0x3B, 0x63, 0x25, 0x2D, 0x77, 0x68, 0x51, 0x43, 0x54,
0x23, 0x21, 0x45, 0x2A, 0x40, 0x2C, 0x30, 0x7E, 0x54, 0x54,
0x25, 0x60, 0x36, 0x27, 0x75, 0x6B, 0x3E, 0x37, 0x24, 0x6A,
0x5E, 0x69, 0x58, 0x53, 0x57, 0x5A, 0x35, 0x23, 0x67, 0x6E,
0x7E, 0x70, 0x45, 0x27, 0x6F, 0x4C, 0x75, 0x69, 0x51, 0x52,
0x5D, 0x5A, 0x5C, 0x32, 0x73, 0x2D, 0x22, 0x49, 0x6E, 0x26,
0x5C, 0x60, 0x68, 0x27, 0x52, 0x48, 0x45, 0x4C, 0x7C, 0x58,
0x21, 0x7C, 0x47, 0x41, 0x49, 0x27, 0x51, 0x27, 0x4D, 0x7A,
0x73, 0x71, 0x51, 0x58, 0x5C, 0x64, 0x4C, 0x6A, 0x24, 0x2B,
0x49, 0x2A, 0x2E, 0x47, 0x56, 0x72, 0x52, 0x3A, 0x21, 0x21,
0x52, 0x55, 0x22, 0x37, 0x60, 0x72, 0x76, 0x5F, 0x5F, 0x31,
0x2E, 0x53, 0x52, 0x37, 0x26, 0x32, 0x57, 0x32, 0x2C, 0x61,
0x56, 0x33, 0x2E, 0x4A, 0x6D, 0x72, 0x5B, 0x5A, 0x58, 0x22,
0x58, 0x25, 0x26, 0x29, 0x2D, 0x64, 0x6C, 0x6A, 0x60, 0x2B,
0x6C, 0x38, 0x7A, 0x34, 0x5E, 0x38, 0x29, 0x4D, 0x39, 0x38,
0x36, 0x58, 0x7D, 0x51, 0x43, 0x28, 0x35, 0x26, 0x73, 0x52,
0x59, 0x76, 0x3D, 0x4A, 0x62, 0x65, 0x57, 0x46, 0x2A, 0x4F,
0x7A, 0x6B, 0x6A, 0x22, 0x37, 0x4F, 0x2B, 0x44, 0x46, 0x40,
0x2C, 0x65, 0x25, 0x4B, 0x68, 0x49, 0x70, 0x64, 0x51, 0x72,
0x64, 0x4F, 0x5B, 0x5E, 0x65, 0x79, 0x46, 0x7D, 0x55, 0x37,
0x53, 0x63, 0x7A, 0x77, 0x7E, 0x4C, 0x76, 0x2D, 0x73, 0x2C,
0x66, 0x6D, 0x72, 0x4C, 0x7A, 0x29, 0x5A, 0x7B, 0x7D, 0x6E,
0x66, 0x38, 0x30, 0x7B, 0x77, 0x79, 0x72, 0x74, 0x59, 0x5F,
0x48, 0x76, 0x4D, 0x76, 0x76, 0x6C, 0x27, 0x49, 0x64, 0x24,
0x5E, 0x44, 0x36, 0x7C, 0x79, 0x3C, 0x21, 0x42, 0x6D, 0x78,
0x2D, 0x3E, 0x28, 0x32, 0x47, 0x4E, 0x5A, 0x6B, 0x77, 0x3F,
0x51, 0x60, 0x43, 0x4A, 0x60, 0x50, 0x53, 0x45, 0x25, 0x32,
0x71, 0x64, 0x36, 0x3E, 0x26, 0x3F, 0x3E, 0x4C, 0x62, 0x5F,
0x2C, 0x54, 0x37, 0x43, 0x7C, 0x64, 0x3D, 0x29, 0x49, 0x32,
0x37, 0x34, 0x71, 0x62, 0x5A, 0x55, 0x77, 0x78, 0x4C, 0x46,
0x61, 0x77, 0x66, 0x2B, 0x3A, 0x30, 0x47, 0x3A, 0x3A, 0x39,
0x75, 0x54, 0x7E, 0x2B, 0x5B, 0x4E, 0x5C, 0x4B, 0x26, 0x34,
0x7E, 0x7C, 0x4C, 0x22, 0x6F, 0x60, 0x3A, 0x7A, 0x54, 0x50,
0x43, 0x4D, 0x68, 0x31, 0x39, 0x4A, 0x7A, 0x7A, 0x4F, 0x39,
0x4C, 0x5A, 0x21, 0x5B, 0x60, 0x54, 0x60, 0x33, 0x5F, 0x51,
0x23, 0x6E, 0x54, 0x48, 0x2A, 0x50, 0x38, 0x54, 0x41, 0x34,
0x30, 0x66, 0x78, 0x49, 0x2B, 0x30, 0x42, 0x46, 0x6E, 0x71,
0x3B, 0x53, 0x37, 0x43, 0x3B, 0x2A, 0x3D, 0x5D, 0x49, 0x36,
0x71, 0x73, 0x43, 0x27, 0x2D, 0x4C, 0x5C, 0x3B, 0x3E, 0x2B,
0x26, 0x5E, 0x77, 0x52, 0x6A, 0x2C, 0x64, 0x53, 0x2F, 0x6C,
0x63, 0x6E, 0x31, 0x75, 0x45, 0x26, 0x24, 0x25, 0x2D, 0x49,
0x63, 0x5A, 0x76, 0x4E, 0x73, 0x3B, 0x24, 0x59, 0x62, 0x70,
0x49, 0x61, 0x2B, 0x5F, 0x72, 0x61, 0x54, 0x52, 0x43, 0x5F,
0x74, 0x75, 0x23, 0x47, 0x33, 0x7C, 0x31, 0x61, 0x6E, 0x6A,
0x76, 0x69, 0x79, 0x74, 0x2A, 0x33, 0x55, 0x7B, 0x38, 0x5E,
0x75, 0x3D, 0x61, 0x34, 0x71, 0x57, 0x6B, 0x3D, 0x35, 0x66,
0x61, 0x2F, 0x31, 0x55, 0x42, 0x68, 0x71, 0x72, 0x24, 0x4F,
0x35, 0x57, 0x26, 0x63, 0x6E, 0x2F, 0x33, 0x31, 0x2B, 0x4C,
0x4E, 0x5F, 0x41, 0x4A, 0x64, 0x41, 0x5B, 0x67, 0x44, 0x78,
0x3B, 0x4B, 0x66, 0x4D, 0x2F, 0x2A, 0x5A, 0x5D, 0x25, 0x7D,
0x32, 0x6B, 0x55, 0x40, 0x52, 0x58, 0x64, 0x6E, 0x79, 0x60,
0x56, 0x6F, 0x68, 0x49, 0x5E, 0x35, 0x43, 0x42, 0x74, 0x78,
0x2A, 0x2A, 0x22, 0x3B, 0x31, 0x27, 0x62, 0x4A, 0x71, 0x67,
0x68, 0x42, 0x27, 0x26, 0x2C, 0x4A, 0x2F, 0x34, 0x52, 0x3A,
0x4B, 0x2B, 0x6B, 0x47, 0x35, 0x6D, 0x5B, 0x4D, 0x7E, 0x22,
0x25, 0x5C, 0x61, 0x39, 0x5D, 0x55, 0x41, 0x7B, 0x67, 0x3F,
0x7B, 0x26, 0x24, 0x48, 0x3E, 0x26, 0x55, 0x28, 0x5A, 0x5B,
0x2E, 0x41, 0x71, 0x55, 0x69, 0x7E, 0x75, 0x29, 0x56, 0x6C,
0x3A, 0x7D, 0x30, 0x52, 0x49, 0x5D, 0x72, 0x6B, 0x32, 0x27,
0x47, 0x39, 0x7E, 0x3B, 0x32, 0x69, 0x44, 0x7C, 0x5A, 0x52,
0x23, 0x58, 0x7C, 0x6B, 0x74, 0x22, 0x38, 0x7E, 0x7C, 0x52,
0x34, 0x6F, 0x5D, 0x70, 0x60, 0x4E, 0x4E, 0x6C, 0x35, 0x22,
0x7B, 0x3C, 0x48, 0x77, 0x54, 0x76, 0x2D, 0x65, 0x4F, 0x24,
0x58, 0x35, 0x37, 0x34, 0x58, 0x28, 0x3C, 0x5C, 0x51, 0x7B,
0x68, 0x2D, 0x28, 0x54, 0x75, 0x70, 0x4B, 0x34, 0x52, 0x74,
0x2D, 0x41, 0x28, 0x3A, 0x77, 0x4F, 0x45, 0x4D, 0x75, 0x24,
0x21, 0x4F, 0x61, 0x75, 0x57, 0x54, 0x21, 0x6A, 0x75, 0x62,
0x2E, 0x2D, 0x61, 0x41, 0x7E, 0x47, 0x30, 0x49, 0x48, 0x53,
0x6C, 0x30, 0x76, 0x4F, 0x58, 0x31, 0x21, 0x2B, 0x3C, 0x37,
0x60, 0x60, 0x30, 0x24, 0x30, 0x4F, 0x5F, 0x39, 0x3D, 0x37,
0x46, 0x5A, 0x3C, 0x27, 0x4A, 0x7C, 0x63, 0x22, 0x75, 0x40,
0x43, 0x46, 0x21, 0x52, 0x59, 0x61, 0x21, 0x38, 0x45, 0x55,
0x42, 0x2A, 0x63, 0x46, 0x42, 0x2F, 0x54, 0x79, 0x74, 0x72,
0x6E, 0x27, 0x3D, 0x45, 0x27, 0x45, 0x5A, 0x77, 0x76, 0x26,
0x7E, 0x40, 0x26, 0x44, 0x39, 0x64, 0x61, 0x67, 0x4E, 0x7C,
0x61, 0x56, 0x59, 0x21, 0x55, 0x70, 0x74, 0x66, 0x5B, 0x5B,
0x64, 0x30, 0x41, 0x49, 0x6E, 0x24, 0x5F, 0x4A, 0x32, 0x48,
0x60, 0x2B, 0x38, 0x41, 0x4E, 0x4B, 0x63, 0x26, 0x38, 0x2B,
0x24, 0x33, 0x3C, 0x32, 0x57, 0x64, 0x25, 0x5D, 0x59, 0x4A,
0x33, 0x44, 0x2C, 0x41, 0x33, 0x76, 0x22, 0x5A, 0x5A, 0x45,
0x41, 0x3C, 0x4E, 0x60, 0x4D, 0x7D, 0x58, 0x28, 0x66, 0x71,
0x7B, 0x3B, 0x6B, 0x60, 0x28, 0x7A, 0x3C, 0x4A, 0x66, 0x5F,
0x35, 0x78, 0x59, 0x70, 0x7B, 0x22, 0x67, 0x62, 0x2C, 0x50,
0x48, 0x2C, 0x23, 0x22, 0x38, 0x6A, 0x6E, 0x3F, 0x6E, 0x5B,
0x3A, 0x37, 0x79, 0x62, 0x77, 0x2B, 0x31, 0x7B, 0x60, 0x3F,
0x69, 0x26, 0x51, 0x68, 0x4A, 0x21, 0x5D, 0x22, 0x30, 0x73,
0x41, 0x7A, 0x44, 0x2E, 0x26, 0x61, 0x7C, 0x27, 0x38, 0x6D,
0x70, 0x4C, 0x7E, 0x69, 0x6F, 0x2C, 0x4A, 0x76, 0x60, 0x30,
0x40, 0x58, 0x52, 0x46, 0x42, 0x3E, 0x61, 0x76, 0x31, 0x43,
0x4A, 0x61, 0x3B, 0x5A, 0x34, 0x21, 0x77, 0x3A, 0x6A, 0x4F,
0x3E, 0x6F, 0x6F, 0x46, 0x3B, 0x42, 0x46, 0x52, 0x5D, 0x3D,
0x50, 0x7A, 0x71, 0x25, 0x3B, 0x37, 0x61, 0x24, 0x44, 0x37,
0x31, 0x49, 0x5F, 0x2E, 0x2D, 0x23, 0x3F, 0x51, 0x58, 0x2F,
0x36, 0x63, 0x70, 0x40, 0x62, 0x5D, 0x36, 0x3F, 0x63, 0x29,
0x39, 0x55, 0x55, 0x21, 0x63, 0x65, 0x4D, 0x66, 0x58, 0x27,
0x37, 0x32, 0x53, 0x26, 0x3D, 0x24, 0x59, 0x46, 0x58, 0x3B,
0x43, 0x5B, 0x78, 0x38, 0x5A, 0x46, 0x3B, 0x2E, 0x42, 0x72,
0x2F, 0x64, 0x4F, 0x3D, 0x42, 0x37, 0x63, 0x35, 0x50, 0x7C,
0x28, 0x73, 0x25, 0x49, 0x2A, 0x31, 0x33, 0x73, 0x31, 0x37,
0x68, 0x58, 0x7A, 0x4F, 0x71, 0x63, 0x61, 0x2A, 0x70, 0x70,
0x4C, 0x31, 0x21, 0x62, 0x41, 0x48, 0x3A, 0x67, 0x21, 0x27,
0x3A, 0x44, 0x2D, 0x55, 0x42, 0x6B, 0x41, 0x4E, 0x76, 0x27,
0x51, 0x58, 0x52, 0x3A, 0x4C, 0x5B, 0x2E, 0x4D, 0x6C, 0x56,
0x57, 0x50, 0x6A, 0x30, 0x44, 0x29, 0x51, 0x60, 0x3B, 0x4F,
0x46, 0x74, 0x6A, 0x74, 0x6A, 0x23, 0x64, 0x6E, 0x79, 0x21,
0x2A, 0x74, 0x3F, 0x36, 0x59, 0x44, 0x75, 0x6E, 0x24, 0x38,
0x28, 0x2F, 0x5A, 0x69, 0x25, 0x75, 0x5A, 0x6B, 0x7C, 0x75,
0x75, 0x66, 0x49, 0x44, 0x5C, 0x6A, 0x36, 0x62, 0x6C, 0x64,
0x30, 0x26, 0x51, 0x5E, 0x62, 0x57, 0x65, 0x6B, 0x2B, 0x43,
0x27, 0x74, 0x6D, 0x6B, 0x24, 0x50, 0x63, 0x4B, 0x32, 0x23,
0x52, 0x77, 0x2A, 0x27, 0x5B, 0x29, 0x5D, 0x27, 0x58, 0x4F,
0x68, 0x3A, 0x6E, 0x76, 0x53, 0x67, 0x42, 0x21, 0x6E, 0x56,
0x22, 0x31, 0x34, 0x77, 0x22, 0x4F, 0x50, 0x6B, 0x35, 0x46,
0x38, 0x2E, 0x6B, 0x3C, 0x7A, 0x7E, 0x60, 0x25, 0x35, 0x41,
0x4E, 0x70, 0x41, 0x43, 0x3C, 0x48, 0x3A, 0x26, 0x3F, 0x50,
0x67, 0x30, 0x39, 0x46, 0x6F, 0x45, 0x5F, 0x34, 0x23, 0x76,
0x33, 0x35, 0x5C, 0x56, 0x78, 0x6E, 0x27, 0x5D, 0x53, 0x5A,
0x47, 0x5B, 0x41, 0x6B, 0x3E, 0x3F, 0x24, 0x6F, 0x60, 0x70,
0x55, 0x6B, 0x47, 0x2C, 0x3E, 0x60, 0x75, 0x57, 0x45, 0x35,
0x5C, 0x63, 0x65, 0x44, 0x7E, 0x51, 0x39, 0x3C, 0x73, 0x24,
0x33, 0x44, 0x53, 0x48, 0x65, 0x45, 0x70, 0x4D, 0x64, 0x68,
0x53, 0x2B, 0x6C, 0x50, 0x6B, 0x62, 0x2C, 0x71, 0x4C, 0x5F,
0x4C, 0x55, 0x35, 0x6B, 0x36, 0x2F, 0x35, 0x40, 0x7C, 0x4A,
0x74, 0x74, 0x5E, 0x2C, 0x62, 0x3A, 0x2E, 0x78, 0x5B, 0x27,
0x39, 0x7E, 0x7D, 0x33, 0x2E, 0x49, 0x49, 0x38, 0x67, 0x75,
0x52, 0x28, 0x33, 0x5E, 0x3A, 0x32, 0x3F, 0x6E, 0x73, 0x68,
0x44, 0x6A, 0x79, 0x6B, 0x55, 0x4A, 0x78, 0x7E, 0x61, 0x3F,
0x3D, 0x78, 0x67, 0x64, 0x77, 0x70, 0x4F, 0x78, 0x24, 0x73,
0x77, 0x78, 0x34, 0x42, 0x23, 0x2E, 0x79, 0x7D, 0x39, 0x79,
0x2D, 0x44, 0x3B, 0x28, 0x56, 0x57, 0x5A, 0x77, 0x46, 0x6D,
0x24, 0x6B, 0x6D, 0x2B, 0x4F, 0x26, 0x28, 0x75, 0x37, 0x5B,
0x73, 0x6F, 0x61, 0x4A, 0x2A, 0x60, 0x3D, 0x62, 0x60, 0x54,
0x2D, 0x7D, 0x76, 0x35, 0x28, 0x3F, 0x41, 0x55, 0x37, 0x28,
0x7E, 0x69, 0x4A, 0x65, 0x3C, 0x2C, 0x3F, 0x35, 0x23, 0x42,
0x50, 0x5B, 0x50, 0x3E, 0x4D, 0x7E, 0x6C, 0x6A, 0x57, 0x79,
0x6D, 0x2B, 0x56, 0x5E, 0x40, 0x52, 0x3C, 0x30, 0x48, 0x3D,
0x24, 0x47, 0x37, 0x40, 0x39, 0x25, 0x28, 0x50, 0x3F, 0x37,
0x43, 0x7E, 0x6F, 0x4F, 0x32, 0x3D, 0x78, 0x58, 0x72, 0x3E,
0x4F, 0x48, 0x60, 0x67, 0x7E, 0x32, 0x77, 0x5F, 0x3D, 0x42,
0x3F, 0x75, 0x32, 0x27, 0x63, 0x5D, 0x45, 0x54, 0x72, 0x65,
0x79, 0x6D, 0x43, 0x3A, 0x3C, 0x2F, 0x7A, 0x4A, 0x3D, 0x28,
0x54, 0x3B, 0x3D, 0x4D, 0x36, 0x50, 0x4A, 0x40, 0x2E, 0x61,
0x42, 0x36, 0x4C, 0x26, 0x6A, 0x7C, 0x7B, 0x39, 0x51, 0x5C,
0x46, 0x73, 0x5A, 0x54, 0x59, 0x55, 0x4B, 0x58, 0x48, 0x30,
0x39, 0x4A, 0x67, 0x72, 0x3C, 0x4C, 0x40, 0x2D, 0x5E, 0x35,
0x5E, 0x7E, 0x44, 0x53, 0x59, 0x7B, 0x2E, 0x75, 0x2E, 0x5D,
0x74, 0x28, 0x4C, 0x4B, 0x22, 0x3E, 0x4C, 0x53, 0x22, 0x7A,
0x6D, 0x24, 0x2D, 0x2F, 0x5A, 0x6D, 0x57, 0x24, 0x3A, 0x66,
0x30, 0x75, 0x30, 0x7A, 0x34, 0x6A, 0x67, 0x5B, 0x4C, 0x4B,
0x2F, 0x60, 0x57, 0x40, 0x6A, 0x52, 0x38, 0x70, 0x79, 0x58,
0x5A, 0x75, 0x66, 0x76, 0x7B, 0x6F, 0x6F, 0x4C, 0x79, 0x42,
0x78, 0x6D, 0x57, 0x41, 0x4E, 0x5B, 0x5B, 0x34, 0x34, 0x41,
0x68, 0x63, 0x2C, 0x7A, 0x21, 0x3C, 0x50, 0x22, 0x43, 0x74,
0x4F, 0x28, 0x5E, 0x77, 0x38, 0x28, 0x5C, 0x68, 0x4C, 0x4D,
0x7A, 0x2C, 0x58, 0x27, 0x43, 0x50, 0x56, 0x4C, 0x61, 0x6B,
0x77, 0x5A, 0x57, 0x76, 0x46, 0x74, 0x24, 0x6B, 0x40, 0x3B,
0x47, 0x4D, 0x4A, 0x73, 0x7D, 0x2E, 0x69, 0x4B, 0x55, 0x67,
0x5F, 0x37, 0x2C, 0x6E, 0x35, 0x46, 0x37, 0x22, 0x4F, 0x2F,
0x57, 0x70, 0x4E, 0x6F, 0x3E, 0x4C, 0x34, 0x44, 0x54, 0x69,
0x2F, 0x40, 0x36, 0x3C, 0x4C, 0x48, 0x55, 0x7B, 0x57, 0x3D,
0x6E, 0x4C, 0x3B, 0x52, 0x52, 0x4C, 0x38, 0x5B, 0x5F, 0x4E,
0x28, 0x31, 0x72, 0x47, 0x2C, 0x43, 0x70, 0x30, 0x37, 0x4A,
0x23, 0x43, 0x74, 0x3F, 0x42, 0x7D, 0x4F, 0x57, 0x53, 0x42,
0x42, 0x3A, 0x44, 0x4D, 0x69, 0x2C, 0x56, 0x28, 0x61, 0x4F,
0x71, 0x4C, 0x32, 0x33, 0x47, 0x45, 0x59, 0x42, 0x37, 0x60,
0x36, 0x2D, 0x3A, 0x50, 0x63, 0x29, 0x2C, 0x7E, 0x7D, 0x5C,
0x34, 0x6E, 0x57, 0x55, 0x5D, 0x77, 0x3F, 0x35, 0x30, 0x6B,
0x3A, 0x6F, 0x49, 0x7D, 0x56, 0x25, 0x37, 0x2E, 0x51, 0x6F,
0x21, 0x2D, 0x64, 0x5F, 0x36, 0x72, 0x67, 0x5A, 0x79, 0x3B,
0x53, 0x4B, 0x32, 0x3C, 0x22, 0x4B, 0x54, 0x6E, 0x56, 0x52,
0x40, 0x23, 0x21, 0x40, 0x68, 0x2F, 0x3F, 0x6F, 0x24, 0x4D,
0x58, 0x29, 0x64, 0x7E, 0x66, 0x23, 0x42, 0x53, 0x4C, 0x6A,
0x38, 0x7A, 0x5D, 0x50, 0x71, 0x30, 0x65, 0x79, 0x23, 0x3A,
0x28, 0x45, 0x2C, 0x56, 0x2C, 0x67, 0x64, 0x49, 0x6C, 0x3C,
0x5F, 0x73, 0x4B, 0x78, 0x70, 0x3D, 0x6B, 0x57, 0x38, 0x39,
0x71, 0x29, 0x78, 0x29, 0x7A, 0x37, 0x6F, 0x2E, 0x53, 0x6D,
0x6A, 0x27, 0x48, 0x72, 0x52, 0x65, 0x73, 0x6C, 0x6D, 0x32,
0x2B, 0x2A, 0x74, 0x4B, 0x2E, 0x78, 0x25, 0x72, 0x36, 0x71,
0x6E, 0x55, 0x7A, 0x51, 0x75, 0x67, 0x3C, 0x39, 0x2C, 0x71,
0x4C, 0x47, 0x50, 0x33, 0x7E, 0x65, 0x75, 0x67, 0x32, 0x7A,
0x26, 0x48, 0x6C, 0x65, 0x47, 0x36, 0x2C, 0x78, 0x31, 0x78,
0x4F, 0x6E, 0x2F, 0x3B, 0x30, 0x22, 0x6C, 0x40, 0x44, 0x7E,
0x7B, 0x34, 0x77, 0x51, 0x39, 0x5C, 0x4D, 0x3A, 0x79, 0x21,
0x69, 0x6C, 0x6E, 0x46, 0x4F, 0x41, 0x6A, 0x4C, 0x4A, 0x5B,
0x76, 0x23, 0x70, 0x4D, 0x72, 0x79, 0x30, 0x41, 0x6C, 0x76,
0x48, 0x4E, 0x55, 0x45, 0x31, 0x2E, 0x46, 0x49, 0x33, 0x48,
0x32, 0x6F, 0x26, 0x6C, 0x58, 0x71, 0x63, 0x46, 0x26, 0x2B,
0x48, 0x51, 0x5A, 0x4E, 0x65, 0x6C, 0x67, 0x3A, 0x49, 0x55,
0x60, 0x22, 0x59, 0x49, 0x32, 0x4E, 0x42, 0x67, 0x28, 0x48,
0x4A, 0x49, 0x50, 0x60, 0x42, 0x4B, 0x26, 0x5F, 0x47, 0x70,
0x2F, 0x4F, 0x59, 0x25, 0x52, 0x2C, 0x7A, 0x38, 0x77, 0x2B,
0x2B, 0x51, 0x4F, 0x6E, 0x5E, 0x4F, 0x67, 0x3F, 0x59, 0x62,
0x6B, 0x7E, 0x7A, 0x2A, 0x79, 0x34, 0x72, 0x34, 0x30, 0x74,
0x65, 0x69, 0x62, 0x6F, 0x41, 0x7B, 0x30, 0x65, 0x7D, 0x60,
0x31, 0x36, 0x3C, 0x2B, 0x62, 0x41, 0x6F, 0x72, 0x48, 0x22,
0x47, 0x23, 0x7A, 0x6E, 0x72, 0x36, 0x65, 0x69, 0x4D, 0x3B,
0x36, 0x62, 0x6D, 0x7E, 0x5D, 0x68, 0x54, 0x30, 0x57, 0x4D,
0x3E, 0x78, 0x6B, 0x24, 0x75, 0x30, 0x79, 0x6C, 0x53, 0x4E,
0x34, 0x78, 0x41, 0x56, 0x46, 0x45, 0x35, 0x3B, 0x4F, 0x54,
0x73, 0x23, 0x76, 0x66, 0x4E, 0x2D, 0x28, 0x73, 0x6D, 0x64,
0x66, 0x67, 0x5B, 0x6C, 0x3F, 0x5C, 0x47, 0x51, 0x2B, 0x36,
0x61, 0x3F, 0x2D, 0x52, 0x7A, 0x50, 0x79, 0x58, 0x36, 0x31,
0x36, 0x39, 0x40, 0x2E, 0x51, 0x44, 0x40, 0x5D, 0x2E, 0x4B,
0x23, 0x70, 0x42, 0x47, 0x2D, 0x3A, 0x2E, 0x2F, 0x78, 0x24,
0x52, 0x2B, 0x7B, 0x6F, 0x2D, 0x2F, 0x54, 0x36, 0x44, 0x4C,
0x2D, 0x34, 0x7E, 0x52, 0x2E, 0x71, 0x7E, 0x4E, 0x54, 0x37,
0x39, 0x31, 0x38, 0x5C, 0x33, 0x78, 0x63, 0x6C, 0x3C, 0x4F,
0x21, 0x5C, 0x63, 0x4F, 0x25, 0x7A, 0x2C, 0x67, 0x45, 0x5F,
0x74, 0x7B, 0x44, 0x60, 0x45, 0x55, 0x7C, 0x2C, 0x6F, 0x78,
0x77, 0x3C, 0x5C, 0x3E, 0x5A, 0x6F, 0x28, 0x64, 0x2F, 0x54,
0x42, 0x5E, 0x36, 0x52, 0x34, 0x25, 0x21, 0x3F, 0x2D, 0x73,
0x71, 0x7B, 0x7A, 0x51, 0x5B, 0x21, 0x48, 0x24, 0x4D, 0x5A,
0x43, 0x78, 0x38, 0x4F, 0x2D, 0x37, 0x77, 0x28, 0x40, 0x65,
0x2F, 0x44, 0x76, 0x32, 0x7B, 0x5A, 0x57, 0x46, 0x28, 0x67,
0x51, 0x3A, 0x4E, 0x6E, 0x27, 0x71, 0x64, 0x30, 0x6B, 0x4C,
0x7A, 0x61, 0x6A, 0x72, 0x2F, 0x22, 0x6E, 0x45, 0x6B, 0x5A,
0x2D, 0x3E, 0x26, 0x25, 0x2E, 0x6D, 0x63, 0x57, 0x67, 0x5B,
0x2C, 0x4F, 0x53, 0x5A, 0x49, 0x55, 0x23, 0x28, 0x54, 0x43,
0x2F, 0x47, 0x42, 0x2B, 0x77, 0x48, 0x40, 0x73, 0x60, 0x25,
0x65, 0x4E, 0x4D, 0x30, 0x28, 0x67, 0x2F, 0x7E, 0x50, 0x68,
0x3F, 0x5B, 0x37, 0x4F, 0x78, 0x70, 0x54, 0x36, 0x5D, 0x77,
0x3E, 0x37, 0x73, 0x53, 0x5C, 0x4F, 0x61, 0x30, 0x66, 0x33,
0x63, 0x3A, 0x74, 0x3B, 0x51, 0x5D, 0x74, 0x4B, 0x52, 0x48,
0x7D, 0x69, 0x35, 0x28, 0x47, 0x4B, 0x2D, 0x3A, 0x4F, 0x62,
0x2C, 0x72, 0x54, 0x46, 0x33, 0x24, 0x60, 0x6F, 0x62, 0x7C,
0x7B, 0x42, 0x29, 0x62, 0x31, 0x63, 0x58, 0x34, 0x26, 0x2C,
0x53, 0x2F, 0x3C, 0x36, 0x6B, 0x48, 0x42, 0x2C, 0x5D, 0x57,
0x5C, 0x61, 0x66, 0x74, 0x2F, 0x51, 0x27, 0x42, 0x73, 0x41,
0x4B, 0x72, 0x76, 0x27, 0x76, 0x31, 0x5E, 0x57, 0x60, 0x29,
0x30, 0x58, 0x43, 0x43, 0x57, 0x6F, 0x36, 0x5D, 0x31, 0x52,
0x47, 0x2D, 0x77, 0x79, 0x21, 0x43, 0x3C, 0x75, 0x61, 0x7D,
0x70, 0x7D, 0x7B, 0x6B, 0x3C, 0x47, 0x30, 0x59, 0x3F, 0x43,
0x4E, 0x2D, 0x6F, 0x38, 0x43, 0x50, 0x4F, 0x51, 0x56, 0x75,
0x3F, 0x72, 0x55, 0x2C, 0x56, 0x5C, 0x3B, 0x7B, 0x37, 0x5E,
0x34, 0x64, 0x7D, 0x70, 0x3F, 0x26, 0x52, 0x40, 0x34, 0x69,
0x26, 0x2E, 0x4D, 0x65, 0x6B, 0x7B, 0x76, 0x70, 0x56, 0x38,
0x35, 0x5E, 0x71, 0x37, 0x6C, 0x6D, 0x48, 0x64, 0x6A, 0x25,
0x5F, 0x5E, 0x61, 0x34, 0x60, 0x50, 0x3E, 0x38, 0x27, 0x43,
0x79, 0x45, 0x4D, 0x4B, 0x51, 0x32, 0x23, 0x4F, 0x72, 0x54,
0x67, 0x5E, 0x5F, 0x38, 0x5F, 0x51, 0x5E, 0x32, 0x71, 0x67,
0x75, 0x31, 0x64, 0x6F, 0x32, 0x30, 0x5F, 0x54, 0x7B, 0x74,
0x40, 0x75, 0x65, 0x73, 0x30, 0x32, 0x33, 0x47, 0x3C, 0x64,
0x3B, 0x48, 0x35, 0x66, 0x6C, 0x79, 0x3D, 0x55, 0x77, 0x78,
0x78, 0x3B, 0x66, 0x6A, 0x25, 0x64, 0x32, 0x2D, 0x67, 0x6F,
0x7D, 0x79, 0x65, 0x4F, 0x2A, 0x3A, 0x2D, 0x75, 0x4B, 0x6B,
0x3C, 0x39, 0x3F, 0x33, 0x42, 0x28, 0x77, 0x5E, 0x7D, 0x78,
0x60, 0x62, 0x4F, 0x6A, 0x32, 0x35, 0x4F, 0x36, 0x61, 0x3C,
0x6E, 0x44, 0x23, 0x78, 0x46, 0x78, 0x3A, 0x62, 0x3A, 0x49,
0x26, 0x23, 0x28, 0x32, 0x42, 0x3C, 0x30, 0x31, 0x49, 0x64,
0x3E, 0x41, 0x27, 0x58, 0x39, 0x58, 0x4A, 0x70, 0x5D, 0x72,
0x64, 0x7C, 0x34, 0x3F, 0x7D, 0x75, 0x2A, 0x34, 0x22, 0x3E,
0x3D, 0x7C, 0x74, 0x41, 0x59, 0x69, 0x4F, 0x5B, 0x6A, 0x40,
0x59, 0x29, 0x6B, 0x6C, 0x2E, 0x47, 0x44, 0x73, 0x6C, 0x26,
0x3A, 0x37, 0x5F, 0x43, 0x57, 0x4C, 0x3F, 0x27, 0x36, 0x47,
0x74, 0x72, 0x77, 0x63, 0x62, 0x49, 0x5A, 0x31, 0x6E, 0x53,
0x2A, 0x23, 0x42, 0x62, 0x3F, 0x27, 0x64, 0x28, 0x45, 0x23,
0x47, 0x23, 0x7E, 0x7D, 0x57, 0x38, 0x66, 0x46, 0x5B, 0x46,
0x71, 0x50, 0x71, 0x67, 0x5C, 0x42, 0x4F, 0x37, 0x21, 0x50,
0x35, 0x28, 0x67, 0x71, 0x2B, 0x21, 0x3C, 0x42, 0x27, 0x70,
0x67, 0x24, 0x39, 0x2F, 0x25, 0x71, 0x51, 0x69, 0x2A, 0x6D,
0x5C, 0x6F, 0x3E, 0x79, 0x77, 0x23, 0x43, 0x60, 0x48, 0x35,
0x29, 0x5F, 0x6E, 0x5A, 0x22, 0x59, 0x5F, 0x60, 0x56, 0x61,
0x3A, 0x51, 0x61, 0x78, 0x3E, 0x3B, 0x3D, 0x6D, 0x6E, 0x70,
0x70, 0x36, 0x33, 0x35, 0x2D, 0x52, 0x46, 0x59, 0x46, 0x4A,
0x29, 0x40, 0x52, 0x54, 0x39, 0x79, 0x23, 0x2C, 0x44, 0x23,
0x65, 0x45, 0x3F, 0x77, 0x3B, 0x2E, 0x38, 0x61, 0x2E, 0x7C,
0x71, 0x4F, 0x27, 0x26, 0x4A, 0x76, 0x72, 0x41, 0x75, 0x7E,
0x28, 0x3A, 0x31, 0x55, 0x58, 0x36, 0x4E, 0x49, 0x7D, 0x31,
0x57, 0x3E, 0x40, 0x42, 0x55, 0x35, 0x30, 0x31, 0x5A, 0x60,
0x73, 0x71, 0x50, 0x36, 0x3D, 0x3D, 0x62, 0x76, 0x68, 0x78,
0x63, 0x35, 0x2C, 0x25, 0x26, 0x3C, 0x57, 0x65, 0x36, 0x41,
0x42, 0x3D, 0x53, 0x7C, 0x50, 0x2F, 0x60, 0x7A, 0x3F, 0x33,
0x41, 0x21, 0x54, 0x57, 0x52, 0x7B, 0x21, 0x72, 0x78, 0x6C,
0x5B, 0x70, 0x32, 0x67, 0x76, 0x3A, 0x3A, 0x50, 0x4E, 0x4D,
0x5C, 0x34, 0x5F, 0x48, 0x23, 0x40, 0x6C, 0x5B, 0x4A, 0x21,
0x73, 0x38, 0x51, 0x70, 0x64, 0x37, 0x54, 0x61, 0x29, 0x4F,
0x70, 0x5F, 0x3E, 0x27, 0x2C, 0x30, 0x7A, 0x49, 0x50, 0x64,
0x6D, 0x68, 0x46, 0x4F, 0x44, 0x4E, 0x60, 0x6E, 0x2B, 0x43,
0x2F, 0x53, 0x55, 0x50, 0x40, 0x3B, 0x65, 0x3B, 0x58, 0x2C,
0x58, 0x7B, 0x7D, 0x34, 0x6B, 0x37, 0x4A, 0x4A, 0x51, 0x24,
0x31, 0x46, 0x6F, 0x77, 0x5F, 0x45, 0x78, 0x6B, 0x22, 0x48,
0x50, 0x41, 0x3E, 0x32, 0x3A, 0x47, 0x2F, 0x6B, 0x43, 0x6C,
0x51, 0x58, 0x68, 0x7A, 0x38, 0x3A, 0x53, 0x2B, 0x2D, 0x4A,
0x67, 0x68, 0x6D, 0x47, 0x78, 0x5D, 0x56, 0x71, 0x6C, 0x73,
0x6F, 0x55, 0x3E, 0x78, 0x23, 0x5E, 0x23, 0x7E, 0x74, 0x2C,
0x4E, 0x73, 0x37, 0x70, 0x53, 0x3E, 0x3A, 0x3C, 0x63, 0x6F,
0x6D, 0x29, 0x53, 0x7E, 0x29, 0x53, 0x7B, 0x32, 0x5C, 0x70,
0x37, 0x60, 0x34, 0x52, 0x23, 0x7D, 0x78, 0x4B, 0x2D, 0x5C,
0x31, 0x45, 0x73, 0x57, 0x50, 0x3A, 0x29, 0x47, 0x65, 0x56,
0x78, 0x32, 0x6B, 0x2D, 0x4C, 0x2D, 0x75, 0x3E, 0x62, 0x79,
0x51, 0x7D, 0x5F, 0x69, 0x47, 0x21, 0x68, 0x48, 0x4E, 0x36,
0x6F, 0x62, 0x7E, 0x42, 0x44, 0x2F, 0x7C, 0x22, 0x46, 0x71,
0x4F, 0x3A, 0x28, 0x4A, 0x40, 0x7C, 0x55, 0x76, 0x5A, 0x55,
0x2D, 0x6F, 0x68, 0x77, 0x43, 0x55, 0x77, 0x41, 0x34, 0x66,
0x33, 0x2F, 0x66, 0x61, 0x4E, 0x2D, 0x7A, 0x4E, 0x3E, 0x6C,
0x64, 0x56, 0x22, 0x2C, 0x6E, 0x52, 0x5C, 0x25, 0x25, 0x3B,
0x44, 0x77, 0x44, 0x70, 0x44, 0x3B, 0x58, 0x24, 0x7E, 0x25,
0x57, 0x73, 0x2F, 0x68, 0x60, 0x70, 0x73, 0x25, 0x5D, 0x55,
0x2C, 0x41, 0x74, 0x4A, 0x26, 0x4B, 0x2E, 0x77, 0x65, 0x2E,
0x54, 0x3A, 0x4E, 0x4D, 0x57, 0x7D, 0x37, 0x69, 0x45, 0x30,
0x30, 0x2A, 0x43, 0x32, 0x7B, 0x5F, 0x66, 0x46, 0x6C, 0x7A,
0x30, 0x67, 0x72, 0x71, 0x4B, 0x25, 0x32, 0x3A, 0x46, 0x30,
0x38, 0x41, 0x52, 0x5A, 0x2E, 0x79, 0x2E, 0x78, 0x58, 0x37,
0x53, 0x78, 0x28, 0x29, 0x7E, 0x5A, 0x52, 0x40, 0x27, 0x50,
0x53, 0x4B, 0x6C, 0x6B, 0x52, 0x5C, 0x36, 0x26, 0x56, 0x7D,
0x7C, 0x2E, 0x43, 0x5F, 0x73, 0x67, 0x37, 0x37, 0x67, 0x53,
0x54, 0x74, 0x33, 0x32, 0x79, 0x59, 0x47, 0x7A, 0x32, 0x4D,
0x55, 0x6E, 0x2C, 0x77, 0x74, 0x3A, 0x32, 0x4A, 0x5F, 0x30,
0x4C, 0x3F, 0x48, 0x3F, 0x3B, 0x21, 0x74, 0x35, 0x6E, 0x4B,
0x3C, 0x55, 0x74, 0x60, 0x4B, 0x5F, 0x5B, 0x66, 0x48, 0x73,
0x4F, 0x55, 0x2C, 0x5B, 0x67, 0x2E, 0x77, 0x2D, 0x5E, 0x66,
0x73, 0x36, 0x34, 0x2B, 0x32, 0x3E, 0x62, 0x65, 0x27, 0x27,
0x7E, 0x48, 0x54, 0x2C, 0x42, 0x32, 0x42, 0x3A, 0x40, 0x5F,
0x48, 0x40, 0x2F, 0x28, 0x41, 0x6E, 0x6F, 0x4B, 0x48, 0x37,
0x31, 0x5D, 0x79, 0x77, 0x37, 0x26, 0x3A, 0x74, 0x24, 0x56,
0x24, 0x5A, 0x63, 0x59, 0x3E, 0x64, 0x47, 0x42, 0x7C, 0x67,
0x3C, 0x37, 0x4C, 0x58, 0x43, 0x40, 0x3E, 0x39, 0x47, 0x5F,
0x3F, 0x52, 0x2B, 0x59, 0x77, 0x3D, 0x50, 0x70, 0x40, 0x57,
0x28, 0x5C, 0x3E, 0x6F, 0x2C, 0x26, 0x5A, 0x65, 0x71, 0x65,
0x76, 0x5E, 0x32, 0x33, 0x7E, 0x67, 0x45, 0x6B, 0x32, 0x42,
0x3A, 0x32, 0x71, 0x4A, 0x3E, 0x32, 0x7B, 0x76, 0x7A, 0x43,
0x5F, 0x3F, 0x49, 0x42, 0x28, 0x62, 0x29, 0x43, 0x30, 0x61,
0x2B, 0x4A, 0x28, 0x3E, 0x68, 0x47, 0x4C, 0x2E, 0x7D, 0x70,
0x23, 0x57, 0x4D, 0x31, 0x6F, 0x55, 0x51, 0x5C, 0x44, 0x61,
0x38, 0x76, 0x2F, 0x3A, 0x62, 0x36, 0x60, 0x6E, 0x21, 0x77,
0x52, 0x2C, 0x66, 0x71, 0x71, 0x33, 0x26, 0x6F, 0x25, 0x59,
0x58, 0x57, 0x75, 0x7A, 0x2A, 0x65, 0x3F, 0x78, 0x3C, 0x41,
0x67, 0x74, 0x33, 0x71, 0x66, 0x27, 0x2C, 0x5E, 0x49, 0x24,
0x6C, 0x38, 0x51, 0x25, 0x52, 0x40, 0x2A, 0x75, 0x7E, 0x5A,
0x3D, 0x6C, 0x58, 0x4F, 0x6A, 0x6C, 0x78, 0x22, 0x47, 0x60,
0x7C, 0x6B, 0x60, 0x50, 0x57, 0x41, 0x78, 0x67, 0x2C, 0x32,
0x6E, 0x74, 0x3C, 0x5C, 0x4B, 0x41, 0x2D, 0x4A, 0x6D, 0x69,
0x6E, 0x31, 0x48, 0x48, 0x54, 0x46, 0x34, 0x67, 0x22, 0x44,
0x69, 0x7D, 0x4E, 0x32, 0x67, 0x75, 0x74, 0x61, 0x6E, 0x53,
0x51, 0x5B, 0x6F, 0x7B, 0x25, 0x4B, 0x3A, 0x65, 0x6E, 0x41,
0x4B, 0x47, 0x25, 0x23, 0x70, 0x3C, 0x31, 0x51, 0x62, 0x2A,
0x56, 0x7E, 0x73, 0x76, 0x66, 0x63, 0x48, 0x3C, 0x6C, 0x29,
0x2B, 0x74, 0x35, 0x77, 0x36, 0x59, 0x5B, 0x5D, 0x79, 0x61,
0x52, 0x40, 0x77, 0x52, 0x6A, 0x36, 0x69, 0x33, 0x56, 0x3C,
0x49, 0x44, 0x54, 0x36, 0x78, 0x34, 0x59, 0x62, 0x60, 0x73,
0x22, 0x2D, 0x50, 0x7E, 0x36, 0x25, 0x68, 0x4F, 0x50, 0x73,
0x74, 0x2E, 0x57, 0x48, 0x5C, 0x35, 0x4E, 0x5D, 0x40, 0x37,
0x5C, 0x5A, 0x48, 0x35, 0x57, 0x36, 0x43, 0x62, 0x6F, 0x7B,
0x21, 0x6E, 0x5A, 0x7E, 0x6B, 0x56, 0x5E, 0x34, 0x34, 0x6A,
0x2A, 0x65, 0x4D, 0x5C, 0x69, 0x45, 0x2B, 0x7A, 0x36, 0x36,
0x69, 0x7E, 0x25, 0x36, 0x6E, 0x2C, 0x21, 0x42, 0x29, 0x41,
0x22, 0x35, 0x77, 0x2C, 0x78, 0x4E, 0x4C, 0x37, 0x4F, 0x4E,
0x5B, 0x28, 0x50, 0x48, 0x6D, 0x60, 0x67, 0x41, 0x44, 0x50,
0x73, 0x58, 0x5D, 0x7B, 0x34, 0x48, 0x4F, 0x33, 0x50, 0x4B,
0x25, 0x4B, 0x73, 0x51, 0x4B, 0x5E, 0x68, 0x71, 0x2D, 0x72,
0x31, 0x51, 0x3E, 0x73, 0x4C, 0x63, 0x23, 0x64, 0x5B, 0x2A,
0x6E, 0x2C, 0x79, 0x3F, 0x4D, 0x74, 0x7C, 0x4A, 0x34, 0x36,
0x65, 0x6F, 0x31, 0x62, 0x32, 0x21, 0x60, 0x77, 0x54, 0x49,
0x6D, 0x77, 0x78, 0x61, 0x25, 0x7E, 0x5E, 0x31, 0x49, 0x61,
0x2F, 0x22, 0x4D, 0x23, 0x48, 0x32, 0x77, 0x3A, 0x40, 0x42,
0x6A, 0x5E, 0x6E, 0x42, 0x35, 0x6E, 0x38, 0x41, 0x32, 0x30,
0x41, 0x51, 0x25, 0x5E, 0x67, 0x6A, 0x26, 0x4A, 0x61, 0x69,
0x41, 0x45, 0x72, 0x42, 0x22, 0x7C, 0x33, 0x39, 0x39, 0x5F,
0x48, 0x75, 0x60, 0x23, 0x21, 0x3C, 0x6C, 0x51, 0x76, 0x76,
0x53, 0x3F, 0x44, 0x31, 0x77, 0x3D, 0x3A, 0x63, 0x77, 0x68,
0x41, 0x31, 0x6F, 0x34, 0x3B, 0x76, 0x38, 0x6E, 0x67, 0x24,
0x39, 0x7D, 0x50, 0x41, 0x75, 0x7E, 0x41, 0x48, 0x36, 0x60,
0x6A, 0x6C, 0x66, 0x6F, 0x52, 0x47, 0x70, 0x77, 0x38, 0x57,
0x40, 0x33, 0x53, 0x3B, 0x47, 0x4B, 0x70, 0x5A, 0x6E, 0x26,
0x7A, 0x6E, 0x47, 0x62, 0x26, 0x41, 0x5F, 0x57, 0x6A, 0x29,
0x3C, 0x35, 0x6A, 0x64, 0x42, 0x34, 0x43, 0x2E, 0x64, 0x34,
0x53, 0x4C, 0x6C, 0x24, 0x3C, 0x35, 0x4B, 0x4E, 0x55, 0x3F,
0x47, 0x35, 0x2F, 0x3A, 0x4A, 0x4E, 0x50, 0x2C, 0x23, 0x4D,
0x54, 0x26, 0x41, 0x54, 0x44, 0x74, 0x23, 0x6D, 0x51, 0x4C,
0x4F, 0x62, 0x2D, 0x7B, 0x40, 0x50, 0x42, 0x68, 0x2A, 0x2C,
0x4A, 0x62, 0x67, 0x3E, 0x60, 0x2E, 0x30, 0x2B, 0x6E, 0x28,
0x5D, 0x30, 0x29, 0x4A, 0x41, 0x6F, 0x4D, 0x45, 0x5D, 0x56,
0x4D, 0x66, 0x64, 0x3F, 0x50, 0x71, 0x3F, 0x75, 0x33, 0x48,
0x64, 0x40, 0x2F, 0x33, 0x60, 0x55, 0x37, 0x2B, 0x6A, 0x2D,
0x63, 0x5A, 0x2D, 0x47, 0x78, 0x5B, 0x62, 0x5C, 0x28, 0x39,
0x4B, 0x79, 0x66, 0x3F, 0x38, 0x75, 0x2F, 0x2E, 0x40, 0x2B,
0x24, 0x3B, 0x72, 0x61, 0x5F, 0x7E, 0x4B, 0x6E, 0x3D, 0x62,
0x38, 0x48, 0x38, 0x75, 0x5C, 0x40, 0x26, 0x61, 0x2B, 0x39,
0x70, 0x2C, 0x51, 0x65, 0x6D, 0x58, 0x75, 0x42, 0x52, 0x4F,
0x3A, 0x34, 0x4D, 0x4F, 0x58, 0x78, 0x37, 0x2A, 0x3D, 0x52,
0x4D, 0x4A, 0x4B, 0x39, 0x29, 0x26, 0x5E, 0x3E, 0x2E, 0x61,
0x31, 0x54, 0x70, 0x58, 0x7D, 0x4B, 0x66, 0x5F, 0x29, 0x35,
0x74, 0x52, 0x57, 0x62, 0x4C, 0x5B, 0x7B, 0x62, 0x27, 0x28,
0x27, 0x2F, 0x47, 0x3E, 0x71, 0x4A, 0x53, 0x5C, 0x7E, 0x50,
0x31, 0x34, 0x65, 0x53, 0x53, 0x36, 0x36, 0x7D, 0x74, 0x44,
0x26, 0x52, 0x5C, 0x78, 0x2E, 0x71, 0x4F, 0x64, 0x33, 0x5F,
0x56, 0x4F, 0x59, 0x74, 0x59, 0x4B, 0x5D, 0x30, 0x65, 0x3F,
0x2B, 0x6F, 0x6D, 0x5D, 0x67, 0x79, 0x27, 0x66, 0x54, 0x54,
0x36, 0x43, 0x73, 0x33, 0x5A, 0x7D, 0x77, 0x54, 0x27, 0x2D,
0x6C, 0x74, 0x7E, 0x58, 0x62, 0x46, 0x3E, 0x38, 0x2D, 0x50,
0x52, 0x55, 0x55, 0x63, 0x31, 0x74, 0x3F, 0x63, 0x36, 0x3A,
0x6E, 0x36, 0x7E, 0x33, 0x28, 0x7B, 0x66, 0x77, 0x28, 0x6A,
0x46, 0x7B, 0x39, 0x75, 0x70, 0x48, 0x2B, 0x72, 0x50, 0x30,
0x6B, 0x57, 0x63, 0x50, 0x49, 0x43, 0x2E, 0x25, 0x42, 0x52,
0x52, 0x63, 0x5D, 0x7A, 0x5E, 0x21, 0x47, 0x2C, 0x5D, 0x2C,
0x75, 0x53, 0x69, 0x6F, 0x44, 0x28, 0x30, 0x54, 0x29, 0x64,
0x2D, 0x38, 0x6C, 0x24, 0x24, 0x6A, 0x2B, 0x34, 0x46, 0x44,
0x49, 0x6D, 0x7A, 0x72, 0x67, 0x62, 0x4C, 0x6C, 0x75, 0x3D,
0x35, 0x35, 0x6B, 0x3B, 0x7C, 0x69, 0x61, 0x61, 0x58, 0x53,
0x50, 0x46, 0x33, 0x2C, 0x3F, 0x2A, 0x4B, 0x44, 0x72, 0x3E,
0x51, 0x51, 0x5B, 0x35, 0x2B, 0x55, 0x45, 0x53, 0x77, 0x28,
0x65, 0x3C, 0x36, 0x56, 0x60, 0x28, 0x6F, 0x21, 0x5D, 0x7A,
0x62, 0x33, 0x49, 0x40, 0x5D, 0x43, 0x2E, 0x67, 0x66, 0x3C,
0x4F, 0x7C, 0x40, 0x7C, 0x3A, 0x4E, 0x56, 0x72, 0x44, 0x5C,
0x21, 0x78, 0x29, 0x41, 0x52, 0x70, 0x46, 0x27, 0x7E, 0x2A,
0x61, 0x36, 0x2D, 0x33, 0x33, 0x47, 0x50, 0x2F, 0x47, 0x5A,
0x7A, 0x6F, 0x30, 0x3D, 0x27, 0x52, 0x27, 0x24, 0x4F, 0x4E,
0x54, 0x2C, 0x79, 0x3E, 0x23, 0x39, 0x59, 0x65, 0x3F, 0x55,
0x75, 0x24, 0x6B, 0x40, 0x71, 0x4F, 0x61, 0x79, 0x3F, 0x3E,
0x35, 0x2A, 0x21, 0x76, 0x2D, 0x22, 0x6B, 0x5B, 0x55, 0x36,
0x34, 0x5E, 0x2E, 0x24, 0x23, 0x5D, 0x7E, 0x24, 0x64, 0x3D,
0x3C, 0x69, 0x64, 0x65, 0x5A, 0x6C, 0x40, 0x6A, 0x41, 0x52,
0x50, 0x7C, 0x61, 0x3D, 0x61, 0x31, 0x7B, 0x2B, 0x74, 0x3A,
0x75, 0x35, 0x4C, 0x2E, 0x79, 0x57, 0x68, 0x56, 0x57, 0x3C,
0x65, 0x4A, 0x46, 0x75, 0x70, 0x7C, 0x67, 0x7A, 0x6F, 0x36,
0x4F, 0x7A, 0x69, 0x5A, 0x2E, 0x51, 0x72, 0x28, 0x50, 0x69,
0x46, 0x2A, 0x3D, 0x24, 0x46, 0x3C, 0x35, 0x5C, 0x59, 0x3A,
0x76, 0x24, 0x64, 0x25, 0x79, 0x32, 0x49, 0x4E, 0x6C, 0x73,
0x29, 0x68, 0x2B, 0x5F, 0x55, 0x31, 0x72, 0x4B, 0x2B, 0x76,
0x38, 0x5D, 0x68, 0x72, 0x6E, 0x2C, 0x45, 0x79, 0x7C, 0x67,
0x58, 0x45, 0x2E, 0x47, 0x61, 0x69, 0x2C, 0x7E, 0x69, 0x47,
0x34, 0x2B, 0x36, 0x24, 0x39, 0x21, 0x43, 0x42, 0x48, 0x62,
0x7E, 0x59, 0x5F, 0x69, 0x66, 0x7E, 0x43, 0x5C, 0x5A, 0x35,
0x64, 0x5C, 0x55, 0x7D, 0x3B, 0x3F, 0x4A, 0x21, 0x4B, 0x2E,
0x31, 0x68, 0x4D, 0x65, 0x6E, 0x2D, 0x7B, 0x57, 0x73, 0x43,
0x76, 0x69, 0x3E, 0x6E, 0x69, 0x66, 0x31, 0x4D, 0x52, 0x58,
0x2B, 0x60, 0x48, 0x43, 0x5B, 0x79, 0x7E, 0x29, 0x3B, 0x70,
0x4E, 0x43, 0x55, 0x42, 0x53, 0x68, 0x2A, 0x53, 0x4C, 0x68,
0x46, 0x72, 0x24, 0x23, 0x44, 0x45, 0x6D, 0x41, 0x35, 0x7D,
0x37, 0x39, 0x7B, 0x7E, 0x5B, 0x74, 0x5D, 0x36, 0x74, 0x52,
0x6C, 0x68, 0x5D, 0x27, 0x3A, 0x41, 0x75, 0x3A, 0x60, 0x25,
0x57, 0x53, 0x3F, 0x76, 0x6A, 0x3B, 0x37, 0x21, 0x63, 0x7B,
0x2D, 0x56, 0x69, 0x6E, 0x2B, 0x35, 0x4E, 0x60, 0x7E, 0x51,
0x43, 0x38, 0x4C, 0x38, 0x68, 0x5A, 0x46, 0x48, 0x63, 0x72,
0x65, 0x24, 0x6E, 0x62, 0x5A, 0x25, 0x72, 0x26, 0x27, 0x39,
0x6A, 0x34, 0x6C, 0x24, 0x35, 0x2D, 0x53, 0x27, 0x74, 0x51,
0x2D, 0x73, 0x4F, 0x53, 0x47, 0x74, 0x3F, 0x6F, 0x3F, 0x32,
0x56, 0x24, 0x78, 0x35, 0x4A, 0x53, 0x31, 0x22, 0x4D, 0x76,
0x33, 0x30, 0x54, 0x60, 0x2B, 0x53, 0x66, 0x5A, 0x32, 0x74,
0x3D, 0x41, 0x5A, 0x28, 0x58, 0x57, 0x23, 0x70, 0x6F, 0x2F,
0x78, 0x57, 0x5E, 0x34, 0x6A, 0x24, 0x6E, 0x32, 0x78, 0x4B,
0x25, 0x5D, 0x74, 0x35, 0x4A, 0x61, 0x4D, 0x77, 0x6D, 0x3F,
0x22, 0x52, 0x53, 0x6D, 0x57, 0x63, 0x32, 0x2B, 0x71, 0x41,
0x49, 0x3E, 0x3A, 0x50, 0x5C, 0x3A, 0x51, 0x79, 0x59, 0x70,
0x6B, 0x50, 0x58, 0x62, 0x37, 0x43, 0x4C, 0x6C, 0x3D, 0x39,
0x2A, 0x71, 0x79, 0x69, 0x5D, 0x26, 0x43, 0x44, 0x24, 0x7D,
0x6C, 0x63, 0x51, 0x49, 0x6D, 0x6D, 0x7D, 0x4F, 0x25, 0x4E,
0x43, 0x4A, 0x60, 0x28, 0x34, 0x34, 0x56, 0x74, 0x7C, 0x77,
0x2D, 0x66, 0x21, 0x21, 0x69, 0x34, 0x5D, 0x2B, 0x4C, 0x54,
0x77, 0x34, 0x55, 0x48, 0x38, 0x61, 0x65, 0x36, 0x25, 0x7D,
0x24, 0x71, 0x38, 0x39, 0x28, 0x28, 0x4F, 0x60, 0x6D, 0x4C,
0x70, 0x5F, 0x64, 0x21, 0x5C, 0x39, 0x52, 0x53, 0x26, 0x7B,
0x66, 0x76, 0x54, 0x23, 0x6F, 0x3D, 0x30, 0x45, 0x23, 0x25,
0x60, 0x6F, 0x61, 0x59, 0x55, 0x4B, 0x39, 0x37, 0x4E, 0x5B,
0x53, 0x75, 0x4A, 0x23, 0x78, 0x73, 0x49, 0x48, 0x78, 0x4A,
0x3B, 0x32, 0x58, 0x76, 0x77, 0x34, 0x47, 0x4C, 0x45, 0x47,
0x69, 0x51, 0x7E, 0x40, 0x36, 0x2A, 0x66, 0x5D, 0x75, 0x34,
0x54, 0x3E, 0x35, 0x49, 0x29, 0x3C, 0x50, 0x3C, 0x44, 0x5B,
0x3D, 0x25, 0x29, 0x6A, 0x23, 0x57, 0x67, 0x41, 0x3D, 0x6C,
0x62, 0x4A, 0x26, 0x3F, 0x76, 0x6D, 0x3B, 0x36, 0x26, 0x3D,
0x2F, 0x31, 0x34, 0x54, 0x45, 0x39, 0x7E, 0x52, 0x27, 0x65,
0x54, 0x51, 0x7A, 0x53, 0x48, 0x5B, 0x45, 0x30, 0x59, 0x4D,
0x68, 0x2C, 0x5E, 0x3C, 0x24, 0x40, 0x5E, 0x57, 0x65, 0x7D,
0x43, 0x21, 0x57, 0x65, 0x35, 0x32, 0x58, 0x3E, 0x56, 0x75,
0x23, 0x7E, 0x6B, 0x3A, 0x5E, 0x6E, 0x46, 0x53, 0x4E, 0x71,
0x4D, 0x74, 0x22, 0x2F, 0x2F, 0x34, 0x45, 0x42, 0x58, 0x42,
0x62, 0x5B, 0x7B, 0x62, 0x27, 0x2D, 0x68, 0x5F, 0x46, 0x6F,
0x21, 0x3E, 0x79, 0x63, 0x62, 0x4F, 0x5A, 0x34, 0x5F, 0x3C,
0x31, 0x33, 0x75, 0x49, 0x77, 0x2B, 0x77, 0x29, 0x7B, 0x54,
0x27, 0x3A, 0x7B, 0x3E, 0x76, 0x78, 0x2E, 0x40, 0x3F, 0x3D,
0x26, 0x3C, 0x63, 0x7B, 0x2D, 0x74, 0x47, 0x60, 0x2E, 0x75,
0x51, 0x22, 0x2E, 0x6A, 0x6C, 0x67, 0x37, 0x6B, 0x2F, 0x61,
0x59, 0x74, 0x5D, 0x60, 0x50, 0x72, 0x42, 0x57, 0x58, 0x4E,
0x79, 0x4E, 0x24, 0x41, 0x60, 0x4A, 0x77, 0x7E, 0x3A, 0x3E,
0x49, 0x3B, 0x7B, 0x4B, 0x53, 0x5D, 0x2C, 0x7D, 0x69, 0x3A,
0x70, 0x25, 0x7B, 0x76, 0x42, 0x2E, 0x41, 0x6C, 0x5F, 0x4C,
0x2D, 0x62, 0x36, 0x7A, 0x63, 0x5E, 0x55, 0x3E, 0x3C, 0x3B,
0x76, 0x6A, 0x71, 0x59, 0x41, 0x61, 0x3F, 0x3C, 0x41, 0x5A,
0x56, 0x53, 0x4A, 0x38, 0x3C, 0x66, 0x50, 0x41, 0x6B, 0x3A,
0x54, 0x24, 0x7D, 0x4B, 0x58, 0x5E, 0x29, 0x68, 0x30, 0x54,
0x5F, 0x33, 0x4B, 0x76, 0x59, 0x2C, 0x5C, 0x62, 0x38, 0x3C,
0x7D, 0x3C, 0x44, 0x61, 0x4F, 0x4E, 0x3A, 0x4F, 0x38, 0x4D,
0x52, 0x5E, 0x42, 0x23, 0x5F, 0x34, 0x33, 0x2B, 0x21, 0x4D,
0x24, 0x4F, 0x67, 0x77, 0x7B, 0x52, 0x44, 0x44, 0x58, 0x37,
0x44, 0x6A, 0x4E, 0x3D, 0x73, 0x49, 0x51, 0x25, 0x3C, 0x65,
0x5A, 0x38, 0x26, 0x64, 0x7D, 0x55, 0x49, 0x60, 0x61, 0x46,
0x45, 0x23, 0x52, 0x28, 0x58, 0x57, 0x3C, 0x21, 0x27, 0x5B,
0x2A, 0x6B, 0x2A, 0x2B, 0x48, 0x38, 0x78, 0x47, 0x31, 0x3C,
0x7C, 0x2F, 0x36, 0x34, 0x3A, 0x2F, 0x24, 0x78, 0x7C, 0x65,
0x63, 0x7A, 0x26, 0x23, 0x40, 0x7C, 0x25, 0x28, 0x61, 0x7B,
0x33, 0x50, 0x4F, 0x30, 0x22, 0x23, 0x5B, 0x39, 0x5F, 0x4A,
0x60, 0x65, 0x34, 0x4C, 0x62, 0x23, 0x2F, 0x42, 0x39, 0x74,
0x71, 0x4B, 0x74, 0x51, 0x5C, 0x27, 0x5B, 0x66, 0x3E, 0x40,
0x25, 0x35, 0x21, 0x4D, 0x5E, 0x4C, 0x33, 0x72, 0x72, 0x6D,
0x64, 0x5E, 0x27, 0x4D, 0x5A, 0x3C, 0x56, 0x62, 0x55, 0x27,
0x3D, 0x62, 0x7C, 0x2A, 0x4E, 0x52, 0x60, 0x6C, 0x3F, 0x5A,
0x46, 0x2A, 0x73, 0x49, 0x5E, 0x69, 0x5C, 0x3C, 0x42, 0x55,
0x44, 0x7B, 0x33, 0x21, 0x75, 0x26, 0x68, 0x53, 0x22, 0x56,
0x7D, 0x49, 0x51, 0x48, 0x42, 0x78, 0x5A, 0x74, 0x58, 0x55,
0x63, 0x6A, 0x75, 0x4D, 0x37, 0x59, 0x7B, 0x2D, 0x26, 0x39,
0x34, 0x62, 0x73, 0x44, 0x5A, 0x2F, 0x5B, 0x31, 0x21, 0x43,
0x27, 0x22, 0x69, 0x34, 0x3D, 0x57, 0x36, 0x40, 0x3E, 0x36,
0x45, 0x4D, 0x7A, 0x52, 0x64, 0x6F, 0x65, 0x2E, 0x7A, 0x55,
0x7D, 0x26, 0x7E, 0x4F, 0x35, 0x2C, 0x25, 0x50, 0x62, 0x39,
0x6E, 0x6C, 0x7B, 0x7E, 0x5E, 0x7A, 0x52, 0x7E, 0x42, 0x77,
0x45, 0x48, 0x22, 0x61, 0x7A, 0x6B, 0x72, 0x21, 0x35, 0x3A,
0x35, 0x5D, 0x24, 0x69, 0x23, 0x56, 0x59, 0x5C, 0x5F, 0x4A,
0x72, 0x32, 0x4C, 0x53, 0x34, 0x49, 0x5E, 0x39, 0x71, 0x38,
0x29, 0x5C, 0x23, 0x6D, 0x44, 0x3A, 0x55, 0x4C, 0x2A, 0x5F,
0x54, 0x6A, 0x53, 0x30, 0x7B, 0x49, 0x34, 0x2F, 0x74, 0x61,
0x3E, 0x29, 0x67, 0x35, 0x76, 0x2C, 0x6F, 0x32, 0x4C, 0x2A,
0x63, 0x2E, 0x31, 0x6F, 0x56, 0x31, 0x21, 0x29, 0x61, 0x3A,
0x70, 0x4B, 0x26, 0x44, 0x79, 0x22, 0x6B, 0x22, 0x36, 0x72,
0x2F, 0x36, 0x59, 0x56, 0x67, 0x57, 0x3C, 0x39, 0x59, 0x40,
0x64, 0x7E, 0x5D, 0x2C, 0x72, 0x66, 0x52, 0x64, 0x2B, 0x3B,
0x4B, 0x6E, 0x2A, 0x4F, 0x63, 0x4C, 0x55, 0x33, 0x70, 0x4C,
0x55, 0x43, 0x28, 0x69, 0x69, 0x38, 0x69, 0x69, 0x5C, 0x7D,
0x30, 0x34, 0x6D, 0x39, 0x78, 0x6B, 0x6C, 0x48, 0x39, 0x44,
0x56, 0x51, 0x6C, 0x5D, 0x62, 0x6F, 0x67, 0x70, 0x4B, 0x2C,
0x37, 0x6E, 0x71, 0x77, 0x75, 0x74, 0x38, 0x42, 0x26, 0x33,
0x2B, 0x7A, 0x22, 0x2C, 0x35, 0x42, 0x67, 0x3A, 0x32, 0x2F,
0x35, 0x35, 0x49, 0x2D, 0x2A, 0x44, 0x67, 0x22, 0x5C, 0x52,
0x4C, 0x56, 0x78, 0x21, 0x35, 0x57, 0x7C, 0x47, 0x6F, 0x54,
0x6F, 0x6F, 0x60, 0x7E, 0x2B, 0x46, 0x33, 0x40, 0x21, 0x51,
0x45, 0x36, 0x63, 0x48, 0x7C, 0x78, 0x33, 0x4C, 0x3D, 0x42,
0x68, 0x26, 0x77, 0x7E, 0x6B, 0x3F, 0x72, 0x28, 0x3E, 0x7D,
0x6C, 0x59, 0x56, 0x45, 0x40, 0x38, 0x26, 0x74, 0x7A, 0x3C,
0x33, 0x60, 0x41, 0x74, 0x35, 0x28, 0x6E, 0x6E, 0x7B, 0x61,
0x31, 0x3F, 0x62, 0x6E, 0x76, 0x6B, 0x54, 0x39, 0x48, 0x29,
0x61, 0x25, 0x22, 0x3A, 0x22, 0x35, 0x66, 0x74, 0x49, 0x4C,
0x58, 0x39, 0x28, 0x24, 0x3E, 0x2F, 0x71, 0x59, 0x53, 0x69,
0x78, 0x70, 0x5A, 0x3D, 0x6A, 0x48, 0x2A, 0x67, 0x41, 0x39,
0x3B, 0x7C, 0x3D, 0x51, 0x70, 0x2C, 0x66, 0x2A, 0x56, 0x5F,
0x73, 0x5C, 0x6A, 0x60, 0x2C, 0x41, 0x28, 0x27, 0x34, 0x73,
0x7B, 0x5D, 0x74, 0x2C, 0x47, 0x65, 0x62, 0x3B, 0x66, 0x6C,
0x31, 0x4E, 0x26, 0x72, 0x2D, 0x47, 0x58, 0x42, 0x7E, 0x4E,
0x5E, 0x30, 0x43, 0x30, 0x3D, 0x4E, 0x60, 0x53, 0x32, 0x44,
0x5F, 0x4A, 0x71, 0x70, 0x74, 0x5B, 0x75, 0x48, 0x53, 0x2F,
0x73, 0x48, 0x69, 0x28, 0x24, 0x41, 0x57, 0x58, 0x73, 0x77,
0x32, 0x53, 0x66, 0x73, 0x5D, 0x5C, 0x5B, 0x6C, 0x22, 0x5C,
0x78, 0x55, 0x31, 0x62, 0x6A, 0x7D, 0x21, 0x2C, 0x49, 0x2A,
0x7A, 0x6D, 0x64, 0x43, 0x6A, 0x63, 0x48, 0x5C, 0x45, 0x40,
0x7C, 0x31, 0x51, 0x6D, 0x77, 0x42, 0x7B, 0x28, 0x64, 0x64,
0x47, 0x41, 0x6E, 0x37, 0x2E, 0x38, 0x5D, 0x6F, 0x21, 0x65,
0x59, 0x6C, 0x68, 0x28, 0x6F, 0x65, 0x54, 0x76, 0x37, 0x33,
0x59, 0x7B, 0x55, 0x52, 0x4E, 0x41, 0x43, 0x40, 0x45, 0x74,
0x60, 0x38, 0x2D, 0x4D, 0x68, 0x62, 0x52, 0x5B, 0x64, 0x53,
0x78, 0x24, 0x3F, 0x4A, 0x75, 0x2B, 0x5E, 0x69, 0x5D, 0x54,
0x27, 0x57, 0x72, 0x66, 0x7A, 0x27, 0x35, 0x7C, 0x75, 0x73,
0x28, 0x61, 0x75, 0x49, 0x40, 0x44, 0x26, 0x6B, 0x7E, 0x40,
0x73, 0x3A, 0x21, 0x2E, 0x6C, 0x3D, 0x29, 0x48, 0x69, 0x30,
0x3E, 0x2C, 0x46, 0x4D, 0x25, 0x45, 0x64, 0x6C, 0x45, 0x4B,
0x2E, 0x3F, 0x78, 0x44, 0x48, 0x23, 0x7A, 0x5F, 0x4B, 0x36,
0x4F, 0x5E, 0x2F, 0x4A, 0x27, 0x6D, 0x46, 0x64, 0x6C, 0x78,
0x6D, 0x53, 0x59, 0x7C, 0x36, 0x46, 0x77, 0x53, 0x3A, 0x6C,
0x57, 0x79, 0x55, 0x7D, 0x4D, 0x72, 0x33, 0x73, 0x5E, 0x69,
0x7D, 0x6F, 0x53, 0x7E, 0x78, 0x78, 0x70, 0x7C, 0x4E, 0x4D,
0x27, 0x49, 0x26, 0x38, 0x7C, 0x55, 0x28, 0x67, 0x28, 0x24,
0x55, 0x4E, 0x36, 0x4F, 0x7D, 0x75, 0x65, 0x6B, 0x57, 0x64,
0x46, 0x22, 0x3A, 0x2E, 0x61, 0x4C, 0x3C, 0x6D, 0x2E, 0x74,
0x27, 0x5C, 0x7D, 0x57, 0x63, 0x5C, 0x3A, 0x57, 0x55, 0x57,
0x71, 0x67, 0x32, 0x38, 0x27, 0x5A, 0x63, 0x2F, 0x70, 0x33,
0x41, 0x32, 0x77, 0x60, 0x73, 0x55, 0x22, 0x29, 0x2A, 0x41,
0x6D, 0x57, 0x72, 0x5D, 0x4C, 0x4F, 0x46, 0x6B, 0x79, 0x4A,
0x28, 0x5A, 0x3B, 0x5A, 0x26, 0x73, 0x6A, 0x3D, 0x7C, 0x74,
0x27, 0x72, 0x30, 0x72, 0x2D, 0x41, 0x75, 0x58, 0x50, 0x2A,
0x4B, 0x29, 0x53, 0x5D, 0x26, 0x7C, 0x25, 0x6A, 0x3F, 0x30,
0x35, 0x51, 0x58, 0x31, 0x3A, 0x6F, 0x4E, 0x25, 0x42, 0x7E,
0x2D, 0x6A, 0x3E, 0x75, 0x30, 0x5F, 0x4B, 0x26, 0x58, 0x73,
0x29, 0x7B, 0x32, 0x73, 0x5E, 0x3D, 0x4B, 0x66, 0x27, 0x6C,
0x7B, 0x5D, 0x35, 0x31, 0x53, 0x65, 0x44, 0x49, 0x22, 0x57,
0x60, 0x35, 0x3A, 0x4E, 0x26, 0x70, 0x51, 0x3D, 0x72, 0x5B,
0x2F, 0x31, 0x41, 0x5F, 0x4C, 0x23, 0x6E, 0x7C, 0x46, 0x7D,
0x5E, 0x29, 0x48, 0x58, 0x3C, 0x47, 0x38, 0x58, 0x51, 0x6E,
0x27, 0x79, 0x2F, 0x65, 0x33, 0x4D, 0x6A, 0x3E, 0x3C, 0x3A,
0x5F, 0x55, 0x3A, 0x60, 0x7D, 0x27, 0x6E, 0x46, 0x22, 0x4D,
0x7D, 0x5D, 0x36, 0x47, 0x6B, 0x22, 0x7E, 0x53, 0x23, 0x55,
0x52, 0x28, 0x79, 0x7D, 0x65, 0x3C, 0x7B, 0x28, 0x7D, 0x5A,
0x73, 0x40, 0x51, 0x34, 0x39, 0x3F, 0x49, 0x27, 0x40, 0x45,
0x42, 0x35, 0x75, 0x79, 0x3B, 0x4E, 0x62, 0x35, 0x67, 0x23,
0x35, 0x5C, 0x30, 0x7C, 0x43, 0x3C, 0x6F, 0x24, 0x36, 0x7D,
0x60, 0x54, 0x2D, 0x57, 0x49, 0x59, 0x71, 0x35, 0x21, 0x56,
0x69, 0x7E, 0x2B, 0x4C, 0x39, 0x71, 0x37, 0x67, 0x6A, 0x6F,
0x5E, 0x64, 0x58, 0x45, 0x6F, 0x3F, 0x69, 0x49, 0x50, 0x39,
0x57, 0x50, 0x6A, 0x5F, 0x44, 0x74, 0x41, 0x44, 0x69, 0x54,
0x34, 0x4D, 0x2B, 0x39, 0x32, 0x63, 0x59, 0x23, 0x55, 0x6C,
0x79, 0x77, 0x2B, 0x57, 0x40, 0x37, 0x27, 0x6F, 0x5D, 0x79,
0x52, 0x50, 0x73, 0x74, 0x74, 0x7C, 0x26, 0x6D, 0x57, 0x69,
0x6F, 0x4A, 0x2F, 0x21, 0x77, 0x41, 0x3E, 0x40, 0x26, 0x49,
0x32, 0x2E, 0x68, 0x7D, 0x65, 0x45, 0x34, 0x21, 0x29, 0x58,
0x48, 0x6A, 0x7E, 0x7E, 0x7D, 0x68, 0x22, 0x78, 0x48, 0x73,
0x48, 0x2B, 0x5E, 0x2F, 0x3E, 0x3F, 0x50, 0x66, 0x33, 0x3F,
0x7A, 0x69, 0x2E, 0x7E, 0x36, 0x4E, 0x5F, 0x41, 0x23, 0x3B,
0x75, 0x32, 0x7C, 0x60, 0x31, 0x79, 0x3C, 0x2A, 0x54, 0x65,
0x70, 0x29, 0x51, 0x2E, 0x64, 0x4B, 0x51, 0x25, 0x60, 0x7E,
0x75, 0x2C, 0x7E, 0x21, 0x71, 0x70, 0x32, 0x59, 0x7D, 0x65,
0x4E, 0x50, 0x29, 0x79, 0x5F, 0x76, 0x71, 0x26, 0x7C, 0x5A,
0x5D, 0x48, 0x36, 0x75, 0x5F, 0x7A, 0x48, 0x7B, 0x79, 0x47,
0x56, 0x65, 0x2E, 0x34, 0x58, 0x4D, 0x3B, 0x5F, 0x52, 0x66,
0x69, 0x49, 0x38, 0x58, 0x4C, 0x69, 0x7B, 0x23, 0x4D, 0x41,
0x60, 0x4C, 0x52, 0x49, 0x66, 0x7A, 0x56, 0x70, 0x6D, 0x54,
0x26, 0x2C, 0x3D, 0x24, 0x6B, 0x73, 0x6B, 0x2F, 0x29, 0x7C,
0x36, 0x5D, 0x4F, 0x38, 0x4E, 0x77, 0x5C, 0x61, 0x47, 0x36,
0x3D, 0x24, 0x41, 0x37, 0x5B, 0x79, 0x73, 0x25, 0x3C, 0x7B,
0x37, 0x62, 0x44, 0x51, 0x2B, 0x5D, 0x6A, 0x69, 0x50, 0x63,
0x4B, 0x48, 0x70, 0x56, 0x27, 0x32, 0x66, 0x40, 0x46, 0x40,
0x79, 0x6B, 0x50, 0x31, 0x30, 0x3F, 0x4D, 0x5D, 0x22, 0x57,
0x68, 0x41, 0x50, 0x5E, 0x7C, 0x44, 0x7D, 0x62, 0x46, 0x74,
0x52, 0x47, 0x3C, 0x47, 0x40, 0x34, 0x6D, 0x68, 0x4E, 0x2B,
0x25, 0x3E, 0x54, 0x6B, 0x21, 0x21, 0x75, 0x2A, 0x5C, 0x45,
0x39, 0x52, 0x43, 0x54, 0x55, 0x7C, 0x6D, 0x70, 0x49, 0x4F,
0x63, 0x50, 0x38, 0x3E, 0x71, 0x6D, 0x49, 0x3D, 0x45, 0x6F,
0x47, 0x7B, 0x2B, 0x54, 0x29, 0x57, 0x25, 0x33, 0x4C, 0x68,
0x4E, 0x5F, 0x5F, 0x56, 0x58, 0x25, 0x31, 0x5E, 0x49, 0x72,
0x79, 0x72, 0x7C, 0x7E, 0x24, 0x52, 0x7C, 0x74, 0x49, 0x7B,
0x76, 0x25, 0x22, 0x27, 0x59, 0x53, 0x5B, 0x3B, 0x3F, 0x6B,
0x69, 0x60, 0x54, 0x28, 0x5C, 0x21, 0x71, 0x49, 0x6F, 0x3D,
0x5C, 0x73, 0x44, 0x69, 0x54, 0x71, 0x7A, 0x5B, 0x57, 0x55,
0x75, 0x62, 0x24, 0x3F, 0x2E, 0x54, 0x40, 0x74, 0x45, 0x4F,
0x5D, 0x77, 0x37, 0x58, 0x5C, 0x4F, 0x40, 0x3A, 0x6A, 0x6C,
0x34, 0x72, 0x66, 0x6C, 0x4D, 0x26, 0x47, 0x54, 0x73, 0x71,
0x26, 0x5C, 0x4F, 0x64, 0x6F, 0x3F, 0x35, 0x56, 0x7D, 0x5B,
0x4F, 0x58, 0x40, 0x50, 0x4A, 0x2B, 0x43, 0x4E, 0x4F, 0x6B,
0x6B, 0x48, 0x79, 0x3E, 0x68, 0x6B, 0x53, 0x68, 0x4F, 0x24,
0x5B, 0x6C, 0x7B, 0x3C, 0x4D, 0x71, 0x79, 0x60, 0x72, 0x32,
0x36, 0x6A, 0x2B, 0x74, 0x49, 0x6F, 0x54, 0x23, 0x3F, 0x6D,
0x25, 0x39, 0x2B, 0x33, 0x22, 0x7E, 0x3F, 0x74, 0x47, 0x2D,
0x55, 0x28, 0x5D, 0x33, 0x45, 0x7C, 0x3A, 0x51, 0x6C, 0x5A,
0x48, 0x2A, 0x75, 0x53, 0x7B, 0x5A, 0x74, 0x69, 0x27, 0x71,
0x2E, 0x79, 0x5F, 0x35, 0x36, 0x5A, 0x3D, 0x4E, 0x24, 0x7A,
0x3C, 0x64, 0x2E, 0x49, 0x44, 0x62, 0x37, 0x4E, 0x45, 0x35,
0x27, 0x2D, 0x5C, 0x66, 0x2D, 0x38, 0x5E, 0x4F, 0x2E, 0x65,
0x60, 0x2E, 0x6F, 0x4E, 0x4A, 0x5A, 0x2C, 0x70, 0x45, 0x5A,
0x45, 0x43, 0x6B, 0x3F, 0x59, 0x3B, 0x78, 0x5D, 0x6C, 0x66,
0x71, 0x22, 0x27, 0x6E, 0x27, 0x36, 0x2D, 0x5B, 0x23, 0x4D,
0x48, 0x32, 0x33, 0x32, 0x4A, 0x78, 0x38, 0x73, 0x37, 0x6B,
0x72, 0x4E, 0x3B, 0x76, 0x4E, 0x77, 0x40, 0x48, 0x40, 0x40,
0x50, 0x42, 0x26, 0x68, 0x5C, 0x70, 0x5D, 0x47, 0x3F, 0x34,
0x5B, 0x53, 0x34, 0x66, 0x73, 0x43, 0x6D, 0x5C, 0x79, 0x6E,
0x47, 0x2B, 0x6E, 0x7A, 0x24, 0x3D, 0x74, 0x42, 0x34, 0x4B,
0x3C, 0x3F, 0x7B, 0x4E, 0x35, 0x6C, 0x56, 0x7A, 0x32, 0x73,
0x4F, 0x5B, 0x51, 0x69, 0x43, 0x38, 0x6B, 0x28, 0x45, 0x74,
0x32, 0x3C, 0x29, 0x2E, 0x31, 0x5D, 0x74, 0x45, 0x44, 0x22,
0x31, 0x40, 0x44, 0x4E, 0x38, 0x71, 0x5D, 0x79, 0x58, 0x5C,
0x46, 0x42, 0x5D, 0x5A, 0x22, 0x4F, 0x2A, 0x48, 0x39, 0x7A,
0x4D, 0x62, 0x69, 0x50, 0x23, 0x75, 0x25, 0x40, 0x65, 0x7B,
0x2E, 0x3B, 0x74, 0x78, 0x71, 0x37, 0x4C, 0x7B, 0x79, 0x30,
0x3D, 0x44, 0x46, 0x4B, 0x40, 0x40, 0x38, 0x7D, 0x3C, 0x49,
0x2B, 0x47, 0x63, 0x64, 0x4C, 0x31, 0x67, 0x26, 0x5B, 0x35,
0x4D, 0x38, 0x62, 0x3D, 0x21, 0x56, 0x78, 0x2F, 0x4F, 0x63,
0x59, 0x5A, 0x35, 0x50, 0x3D, 0x5B, 0x52, 0x53, 0x68, 0x60,
0x28, 0x44, 0x68, 0x22, 0x56, 0x26, 0x73, 0x56, 0x79, 0x76,
0x3E, 0x5A, 0x66, 0x69, 0x46, 0x57, 0x45, 0x71, 0x77, 0x34,
0x3E, 0x5C, 0x6D, 0x61, 0x3E, 0x55, 0x6D, 0x4A, 0x75, 0x34,
0x6B, 0x42, 0x43, 0x43, 0x78, 0x62, 0x59, 0x23, 0x44, 0x5D,
0x74, 0x3C, 0x33, 0x57, 0x2A, 0x66, 0x76, 0x4A, 0x4D, 0x38,
0x39, 0x32, 0x3B, 0x7D, 0x6F, 0x22, 0x49, 0x56, 0x2E, 0x2D,
0x5B, 0x71, 0x75, 0x6A, 0x41, 0x3C, 0x38, 0x65, 0x2E, 0x23,
0x25, 0x26, 0x47, 0x61, 0x39, 0x65, 0x3F, 0x6F, 0x66, 0x6B,
0x7D, 0x48, 0x77, 0x42, 0x52, 0x66, 0x5E, 0x63, 0x28, 0x72,
0x7A, 0x32, 0x62, 0x6E, 0x39, 0x66, 0x5D, 0x65, 0x22, 0x27,
0x4C, 0x6C, 0x6C, 0x29, 0x26, 0x68, 0x71, 0x71, 0x55, 0x60,
0x55, 0x5A, 0x7C, 0x45, 0x4A, 0x5E, 0x33, 0x7B, 0x2C, 0x46,
0x39, 0x6F, 0x7D, 0x65, 0x44, 0x73, 0x38, 0x23, 0x55, 0x78,
0x36, 0x24, 0x29, 0x23, 0x2F, 0x53, 0x70, 0x3B, 0x21, 0x49,
0x21, 0x67, 0x33, 0x46, 0x38, 0x2B, 0x34, 0x31, 0x2F, 0x28,
0x6E, 0x27, 0x62, 0x6A, 0x7C, 0x78, 0x60, 0x4E, 0x7D, 0x6D,
0x49, 0x36, 0x59, 0x37, 0x2F, 0x32, 0x33, 0x60, 0x76, 0x2B,
0x52, 0x7D, 0x5A, 0x74, 0x5C, 0x6A, 0x54, 0x47, 0x51, 0x76,
0x70, 0x4B, 0x46, 0x77, 0x25, 0x32, 0x7E, 0x7B, 0x53, 0x75,
0x63, 0x45, 0x6E, 0x62, 0x4B, 0x6B, 0x61, 0x46, 0x7C, 0x36,
0x77, 0x5A, 0x7A, 0x27, 0x41, 0x3F, 0x6C, 0x5F, 0x6F, 0x44,
0x6B, 0x36, 0x3B, 0x6C, 0x3F, 0x49, 0x24, 0x6C, 0x53, 0x24,
0x30, 0x79, 0x27, 0x4B, 0x6B, 0x66, 0x69, 0x70, 0x67, 0x34,
0x2E, 0x31, 0x6B, 0x5B, 0x63, 0x27, 0x5B, 0x52, 0x78, 0x3F,
0x3C, 0x2F, 0x53, 0x74, 0x2B, 0x5C, 0x7D, 0x22, 0x38, 0x6C,
0x28, 0x2C, 0x22, 0x71, 0x2C, 0x64, 0x4A, 0x36, 0x30, 0x68,
0x59, 0x71, 0x38, 0x7C, 0x3C, 0x61, 0x64, 0x72, 0x78, 0x55,
0x5D, 0x2E, 0x3D, 0x79, 0x4E, 0x66, 0x3E, 0x78, 0x28, 0x2D,
0x3E, 0x5F, 0x65, 0x64, 0x31, 0x59, 0x24, 0x4F, 0x42, 0x6B,
0x47, 0x67, 0x24, 0x2A, 0x3C, 0x61, 0x25, 0x23, 0x5A, 0x42,
0x3C, 0x27, 0x4A, 0x3F, 0x41, 0x74, 0x2A, 0x27, 0x7B, 0x54,
0x6A, 0x5C, 0x24, 0x42, 0x25, 0x62, 0x54, 0x7E, 0x2C, 0x45,
0x45, 0x26, 0x77, 0x71, 0x3A, 0x3B, 0x55, 0x6F, 0x62, 0x26,
0x46, 0x5E, 0x6D, 0x39, 0x2E, 0x4E, 0x43, 0x3F, 0x67, 0x35,
0x4E, 0x64, 0x5D, 0x4F, 0x39, 0x56, 0x2A, 0x6F, 0x57, 0x3F,
0x6C, 0x43, 0x53, 0x7C, 0x5E, 0x65, 0x21, 0x51, 0x27, 0x62,
0x22, 0x58, 0x77, 0x7D, 0x67, 0x51, 0x2C, 0x70, 0x75, 0x35,
0x50, 0x5C, 0x56, 0x71, 0x5A, 0x72, 0x54, 0x6D, 0x3A, 0x2D,
0x56, 0x45, 0x66, 0x4A, 0x44, 0x40, 0x64, 0x3C, 0x7C, 0x79,
0x6F, 0x46, 0x35, 0x72, 0x6D, 0x7A, 0x58, 0x70, 0x23, 0x71,
0x7B, 0x36, 0x69, 0x58, 0x71, 0x6F, 0x40, 0x52, 0x32, 0x7E,
0x67, 0x38, 0x28, 0x5C, 0x68, 0x6B, 0x68, 0x45, 0x7E, 0x57,
0x73, 0x70, 0x3E, 0x66, 0x77, 0x6D, 0x4E, 0x75, 0x4A, 0x54,
0x40, 0x7D, 0x42, 0x48, 0x57, 0x69, 0x41, 0x6D, 0x25, 0x27,
0x24, 0x39, 0x4E, 0x27, 0x4F, 0x22, 0x2F, 0x7D, 0x5A, 0x2E,
0x62, 0x4D, 0x74, 0x42, 0x41, 0x5F, 0x72, 0x65, 0x29, 0x54,
0x6A, 0x33, 0x46, 0x3E, 0x4D, 0x26, 0x74, 0x55, 0x3C, 0x62,
0x79, 0x45, 0x66, 0x40, 0x2C, 0x72, 0x49, 0x3F, 0x57, 0x23,
0x53, 0x38, 0x35, 0x69, 0x28, 0x55, 0x32, 0x7C, 0x45, 0x69,
0x7D, 0x55, 0x37, 0x4E, 0x76, 0x34, 0x66, 0x6F, 0x67, 0x30,
0x6A, 0x5A, 0x5B, 0x3F, 0x38, 0x7E, 0x6A, 0x24, 0x63, 0x70,
0x70, 0x33, 0x3C, 0x29, 0x5A, 0x4A, 0x4C, 0x2A, 0x31, 0x4A,
0x5A, 0x65, 0x53, 0x41, 0x2D, 0x4A, 0x6E, 0x55, 0x3F, 0x75,
0x2D, 0x22, 0x30, 0x2A, 0x79, 0x56, 0x6B, 0x55, 0x55, 0x72,
0x5C, 0x55, 0x4F, 0x72, 0x2F, 0x54, 0x59, 0x2A, 0x51, 0x48,
0x22, 0x7B, 0x6E, 0x29, 0x68, 0x42, 0x41, 0x72, 0x61, 0x3F,
0x27, 0x72, 0x72, 0x64, 0x27, 0x45, 0x3D, 0x31, 0x78, 0x3C,
0x79, 0x6F, 0x7C, 0x5B, 0x33, 0x2D, 0x54, 0x46, 0x33, 0x66,
0x53, 0x55, 0x69, 0x50, 0x55, 0x39, 0x4D, 0x5E, 0x43, 0x44,
0x6E, 0x33, 0x72, 0x49, 0x3E, 0x79, 0x32, 0x31, 0x74, 0x27,
0x3D, 0x61, 0x40, 0x66, 0x79, 0x23, 0x3C, 0x42, 0x6A, 0x56,
0x62, 0x2D, 0x72, 0x37, 0x66, 0x7E, 0x61, 0x34, 0x5B, 0x37,
0x23, 0x55, 0x62, 0x34, 0x4F, 0x7D, 0x57, 0x2D, 0x69, 0x57,
0x2C, 0x57, 0x46, 0x21, 0x7D, 0x3F, 0x4B, 0x5A, 0x51, 0x7D,
0x46, 0x42, 0x21, 0x33, 0x21, 0x33, 0x2C, 0x70, 0x59, 0x3B,
0x42, 0x37, 0x36, 0x3F, 0x6D, 0x78, 0x54, 0x66, 0x50, 0x78,
0x7E, 0x60, 0x5D, 0x49, 0x6F, 0x61, 0x2F, 0x40, 0x37, 0x7C,
0x38, 0x27, 0x3A, 0x62, 0x59, 0x7C, 0x4B, 0x73, 0x66, 0x77,
0x2C, 0x58, 0x45, 0x45, 0x3F, 0x30, 0x2E, 0x33, 0x21, 0x4F,
0x44, 0x7A, 0x35, 0x2E, 0x4C, 0x5B, 0x73, 0x47, 0x7C, 0x36,
0x2A, 0x4A, 0x46, 0x64, 0x35, 0x31, 0x67, 0x24, 0x61, 0x24,
0x57, 0x78, 0x63, 0x42, 0x69, 0x2F, 0x48, 0x62, 0x48, 0x37,
0x58, 0x4F, 0x55, 0x4F, 0x4A, 0x67, 0x24, 0x3F, 0x67, 0x32,
0x36, 0x56, 0x79, 0x3A, 0x69, 0x5C, 0x3F, 0x4C, 0x53, 0x6D,
0x40, 0x50, 0x71, 0x7A, 0x5B, 0x27, 0x5C, 0x63, 0x71, 0x2B,
0x63, 0x6C, 0x4B, 0x22, 0x75, 0x44, 0x31, 0x5E, 0x65, 0x6F,
0x49, 0x3D, 0x46, 0x70, 0x48, 0x61, 0x61, 0x21, 0x29, 0x72,
0x40, 0x22, 0x62, 0x5F, 0x65, 0x5F, 0x3B, 0x76, 0x3A, 0x63,
0x36, 0x4B, 0x46, 0x4B, 0x62, 0x2D, 0x40, 0x43, 0x22, 0x3B,
0x2A, 0x63, 0x45, 0x3D, 0x63, 0x32, 0x64, 0x64, 0x46, 0x7D,
0x3F, 0x4D, 0x33, 0x3A, 0x7B, 0x6F, 0x43, 0x3F, 0x51, 0x45,
0x6F, 0x65, 0x41, 0x37, 0x65, 0x64, 0x72, 0x25, 0x3C, 0x76,
0x33, 0x33, 0x75, 0x5D, 0x6D, 0x21, 0x25, 0x32, 0x42, 0x65,
0x7C, 0x6B, 0x25, 0x3D, 0x52, 0x7E, 0x21, 0x7D, 0x6F, 0x4D,
0x5C, 0x4A, 0x3F, 0x7E, 0x61, 0x6F, 0x37, 0x7E, 0x6E, 0x52,
0x26, 0x30, 0x37, 0x27, 0x5F, 0x75, 0x33, 0x46, 0x72, 0x40,
0x29, 0x5B, 0x3C, 0x28, 0x57, 0x35, 0x43, 0x6B, 0x4E, 0x63,
0x44, 0x76, 0x54, 0x64, 0x68, 0x76, 0x6F, 0x47, 0x7C, 0x26,
0x36, 0x69, 0x5A, 0x44, 0x69, 0x68, 0x33, 0x5A, 0x29, 0x66,
0x49, 0x72, 0x31, 0x77, 0x28, 0x44, 0x49, 0x7C, 0x4D, 0x5A,
0x33, 0x7C, 0x64, 0x49, 0x25, 0x57, 0x5A, 0x2A, 0x63, 0x7B,
0x7D, 0x3F, 0x50, 0x36, 0x76, 0x6F, 0x73, 0x2B, 0x2B, 0x21,
0x36, 0x2C, 0x7B, 0x62, 0x4B, 0x3D, 0x59, 0x70, 0x26, 0x3F,
0x33, 0x4D, 0x30, 0x50, 0x54, 0x57, 0x66, 0x4F, 0x3A, 0x46,
0x6A, 0x51, 0x6D, 0x60, 0x50, 0x49, 0x68, 0x66, 0x6A, 0x68,
0x4B, 0x79, 0x63, 0x21, 0x5F, 0x33, 0x76, 0x3F, 0x7A, 0x6B,
0x5B, 0x67, 0x26, 0x5C, 0x6A, 0x45, 0x37, 0x41, 0x64, 0x6B,
0x5E, 0x46, 0x58, 0x75, 0x44, 0x23, 0x57, 0x77, 0x65, 0x5A,
0x70, 0x79, 0x74, 0x7E, 0x53, 0x60, 0x6E, 0x2F, 0x26, 0x36,
0x64, 0x3A, 0x6B, 0x6E, 0x70, 0x6A, 0x62, 0x71, 0x4A, 0x6E,
0x41, 0x68, 0x40, 0x23, 0x79, 0x4C, 0x45, 0x37, 0x28, 0x2F,
0x44, 0x3A, 0x53, 0x79, 0x4F, 0x51, 0x40, 0x7E, 0x4C, 0x47,
0x47, 0x33, 0x75, 0x2B, 0x4B, 0x69, 0x46, 0x41, 0x7C, 0x5B,
0x4C, 0x60, 0x61, 0x6F, 0x47, 0x6B, 0x77, 0x4E, 0x3F, 0x55,
0x34, 0x6E, 0x33, 0x32, 0x7C, 0x35, 0x55, 0x5A, 0x4C, 0x50,
0x5B, 0x61, 0x2A, 0x59, 0x6C, 0x4F, 0x59, 0x2F, 0x3F, 0x2B,
0x4A, 0x3C, 0x4C, 0x7D, 0x65, 0x63, 0x75, 0x41, 0x56, 0x67,
0x21, 0x3D, 0x30, 0x3D, 0x76, 0x5B, 0x52, 0x4C, 0x27, 0x2B,
0x46, 0x3E, 0x23, 0x2E, 0x35, 0x51, 0x67, 0x59, 0x42, 0x24,
0x34, 0x48, 0x53, 0x65, 0x5B, 0x7D, 0x7D, 0x58, 0x76, 0x5F,
0x29, 0x2F, 0x2F, 0x76, 0x66, 0x34, 0x3D, 0x34, 0x2E, 0x75,
0x3D, 0x41, 0x47, 0x2F, 0x5A, 0x79, 0x6D, 0x5F, 0x79, 0x78,
0x70, 0x26, 0x54, 0x42, 0x78, 0x36, 0x54, 0x51, 0x30, 0x31,
0x29, 0x6F, 0x2D, 0x38, 0x36, 0x31, 0x3F, 0x32, 0x73, 0x21,
0x6D, 0x4C, 0x2C, 0x7A, 0x6B, 0x7D, 0x68, 0x53, 0x3D, 0x79,
0x75, 0x62, 0x36, 0x79, 0x22, 0x41, 0x2A, 0x4C, 0x60, 0x35,
0x2F, 0x40, 0x39, 0x27, 0x72, 0x7B, 0x7A, 0x6C, 0x3C, 0x5D,
0x26, 0x2E, 0x3F, 0x21, 0x2C, 0x58, 0x6D, 0x38, 0x47, 0x2E,
0x3F, 0x5B, 0x2D, 0x5F, 0x5D, 0x5F, 0x23, 0x21, 0x6C, 0x40,
0x29, 0x2B, 0x40, 0x38, 0x46, 0x32, 0x4B, 0x35, 0x75, 0x22,
0x61, 0x24, 0x4C, 0x63, 0x69, 0x32, 0x63, 0x55, 0x5F, 0x34,
0x2B, 0x27, 0x68, 0x52, 0x46, 0x4E, 0x3B, 0x72, 0x7B, 0x3E,
0x70, 0x51, 0x42, 0x2B, 0x2B, 0x63, 0x6C, 0x7D, 0x5A, 0x29,
0x69, 0x27, 0x31, 0x3C, 0x5C, 0x42, 0x75, 0x7E, 0x6E, 0x74,
0x38, 0x43, 0x5F, 0x61, 0x68, 0x41, 0x55, 0x5B, 0x78, 0x35,
0x6C, 0x6E, 0x3F, 0x3A, 0x75, 0x7B, 0x56, 0x4A, 0x41, 0x76,
0x56, 0x2E, 0x3F, 0x27, 0x52, 0x49, 0x4A, 0x74, 0x2A, 0x24,
0x38, 0x41, 0x75, 0x45, 0x71, 0x6B, 0x63, 0x66, 0x75, 0x43,
0x65, 0x4D, 0x78, 0x5D, 0x79, 0x59, 0x5E, 0x58, 0x6F, 0x4C,
0x42, 0x57, 0x51, 0x52, 0x5E, 0x63, 0x31, 0x6A, 0x69, 0x59,
0x2C, 0x6B, 0x46, 0x7E, 0x6F, 0x27, 0x30, 0x65, 0x63, 0x2C,
0x4D, 0x32, 0x2A, 0x3D, 0x44, 0x3F, 0x61, 0x67, 0x55, 0x2D,
0x2F, 0x7D, 0x42, 0x58, 0x51, 0x69, 0x70, 0x31, 0x5F, 0x6A,
0x3B, 0x7D, 0x2A, 0x59, 0x67, 0x22, 0x67, 0x3E, 0x4B, 0x60,
0x3A, 0x63, 0x7D, 0x44, 0x3A, 0x51, 0x59, 0x4E, 0x5A, 0x60,
0x68, 0x46, 0x72, 0x52, 0x79, 0x39, 0x31, 0x57, 0x60, 0x39,
0x71, 0x27, 0x60, 0x52, 0x64, 0x2B, 0x79, 0x6F, 0x23, 0x33,
0x54, 0x27, 0x2E, 0x2A, 0x70, 0x4C, 0x7A, 0x25, 0x79, 0x3D,
0x76, 0x56, 0x6C, 0x3E, 0x76, 0x74, 0x49, 0x3C, 0x49, 0x6B,
0x26, 0x7B, 0x28, 0x3F, 0x2C, 0x32, 0x2B, 0x69, 0x22, 0x64,
0x5F, 0x7E, 0x67, 0x29, 0x7A, 0x35, 0x66, 0x27, 0x4A, 0x66,
0x76, 0x3C, 0x2A, 0x2A, 0x34, 0x2B, 0x39, 0x53, 0x42, 0x4E,
0x41, 0x66, 0x47, 0x67, 0x2D, 0x3A, 0x31, 0x6D, 0x23, 0x3D,
0x62, 0x74, 0x7B, 0x2C, 0x28, 0x35, 0x6F, 0x27, 0x6E, 0x5F,
0x4C, 0x5A, 0x4C, 0x52, 0x2F, 0x5A, 0x74, 0x73, 0x5F, 0x49,
0x7E, 0x63, 0x33, 0x6E, 0x4A, 0x75, 0x6B, 0x70, 0x54, 0x5C,
0x6E, 0x49, 0x5B, 0x6D, 0x3E, 0x3B, 0x35, 0x4F, 0x2D, 0x36,
0x32, 0x66, 0x4B, 0x7C, 0x46, 0x3D, 0x6C, 0x7A, 0x49, 0x5D,
0x69, 0x23, 0x26, 0x57, 0x33, 0x28, 0x25, 0x6D, 0x48, 0x48,
0x42, 0x4A, 0x79, 0x26, 0x4D, 0x49, 0x4C, 0x4A, 0x67, 0x76,
0x39, 0x26, 0x6E, 0x6D, 0x79, 0x7B, 0x26, 0x78, 0x79, 0x5B,
0x39, 0x74, 0x51, 0x5B, 0x30, 0x6E, 0x4C, 0x57, 0x34, 0x51,
0x60, 0x74, 0x4D, 0x6F, 0x6A, 0x3F, 0x4F, 0x50, 0x55, 0x56,
0x72, 0x42, 0x71, 0x69, 0x2C, 0x61, 0x4B, 0x2E, 0x2A, 0x26,
0x35, 0x71, 0x63, 0x25, 0x4E, 0x69, 0x5A, 0x22, 0x39, 0x3F,
0x25, 0x47, 0x45, 0x33, 0x64, 0x5F, 0x50, 0x7A, 0x3F, 0x21,
0x2C, 0x33, 0x3C, 0x6C, 0x57, 0x5B, 0x48, 0x65, 0x2B, 0x68,
0x53, 0x36, 0x27, 0x7D, 0x65, 0x5F, 0x2B, 0x23, 0x3C, 0x7A,
0x2B, 0x43, 0x74, 0x34, 0x7A, 0x6C, 0x4D, 0x6F, 0x60, 0x71,
0x43, 0x2C, 0x72, 0x53, 0x2A, 0x36, 0x7E, 0x58, 0x39, 0x4E,
0x2B, 0x30, 0x7C, 0x7C, 0x79, 0x33, 0x79, 0x7C, 0x77, 0x61,
0x45, 0x4F, 0x4E, 0x44, 0x4B, 0x29, 0x35, 0x41, 0x4E, 0x31,
0x67, 0x60, 0x33, 0x4A, 0x2A, 0x34, 0x63, 0x45, 0x3C, 0x59,
0x74, 0x60, 0x4B, 0x62, 0x2D, 0x3E, 0x58, 0x51, 0x44, 0x32,
0x3A, 0x54, 0x62, 0x44, 0x66, 0x71, 0x59, 0x3F, 0x3A, 0x50,
0x4E, 0x5A, 0x3D, 0x65, 0x23, 0x5C, 0x55, 0x37, 0x4D, 0x5F,
0x43, 0x3B, 0x64, 0x48, 0x52, 0x2E, 0x21, 0x2D, 0x60, 0x4F,
0x68, 0x33, 0x57, 0x41, 0x6D, 0x27, 0x78, 0x67, 0x31, 0x48,
0x4C, 0x74, 0x44, 0x49, 0x6D, 0x70, 0x7A, 0x23, 0x5F, 0x23,
0x68, 0x46, 0x2F, 0x59, 0x7C, 0x32, 0x4F, 0x2D, 0x43, 0x41,
0x30, 0x22, 0x5A, 0x71, 0x5F, 0x54, 0x24, 0x24, 0x6A, 0x79,
0x66, 0x54, 0x55, 0x6B, 0x55, 0x6A, 0x74, 0x68, 0x22, 0x59,
0x43, 0x43, 0x3D, 0x55, 0x2E, 0x75, 0x65, 0x49, 0x38, 0x3C,
0x7D, 0x4F, 0x62, 0x28, 0x39, 0x3B, 0x6C, 0x6F, 0x3F, 0x49,
0x40, 0x5E, 0x4E, 0x6E, 0x3A, 0x3A, 0x48, 0x29, 0x56, 0x3A,
0x69, 0x3A, 0x3E, 0x3C, 0x4F, 0x46, 0x23, 0x47, 0x5A, 0x29,
0x36, 0x77, 0x49, 0x2B, 0x4E, 0x47, 0x28, 0x67, 0x59, 0x7C,
0x68, 0x7D, 0x41, 0x77, 0x37, 0x67, 0x2B, 0x68, 0x39, 0x5A,
0x6B, 0x51, 0x4D, 0x6E, 0x7D, 0x67, 0x4B, 0x26, 0x6B, 0x3F,
0x31, 0x2A, 0x68, 0x24, 0x56, 0x3D, 0x59, 0x64, 0x39, 0x5E,
0x4B, 0x23, 0x67, 0x67, 0x6D, 0x2F, 0x3E, 0x46, 0x6A, 0x2A,
0x63, 0x3F, 0x37, 0x4B, 0x75, 0x5F, 0x60, 0x56, 0x4F, 0x21,
0x39, 0x57, 0x72, 0x3E, 0x46, 0x77, 0x53, 0x6E, 0x38, 0x48,
0x35, 0x5A, 0x6C, 0x21, 0x3A, 0x46, 0x2C, 0x7D, 0x3C, 0x2C,
0x70, 0x73, 0x5D, 0x49, 0x28, 0x42, 0x4B, 0x40, 0x75, 0x7E,
0x42, 0x6A, 0x67, 0x4B, 0x40, 0x25, 0x6A, 0x43, 0x59, 0x6A,
0x73, 0x21, 0x6A, 0x45, 0x5B, 0x67, 0x21, 0x4B, 0x25, 0x77,
0x5F, 0x46, 0x4B, 0x32, 0x51, 0x72, 0x41, 0x2E, 0x7A, 0x30,
0x69, 0x54, 0x7B, 0x5B, 0x26, 0x41, 0x21, 0x45, 0x27, 0x5C,
0x39, 0x74, 0x52, 0x60, 0x3E, 0x60, 0x33, 0x59, 0x2F, 0x3B,
0x58, 0x64, 0x2A, 0x78, 0x70, 0x56, 0x22, 0x5B, 0x25, 0x6F,
0x2D, 0x22, 0x62, 0x44, 0x47, 0x54, 0x45, 0x52, 0x71, 0x68,
0x29, 0x7B, 0x26, 0x44, 0x70, 0x44, 0x49, 0x63, 0x4A, 0x54,
0x29, 0x65, 0x2B, 0x32, 0x34, 0x66, 0x4A, 0x72, 0x79, 0x7B,
0x4C, 0x7C, 0x5B, 0x36, 0x2B, 0x6B, 0x33, 0x4C, 0x33, 0x7D,
0x50, 0x36, 0x79, 0x48, 0x7C, 0x56, 0x4A, 0x48, 0x45, 0x5C,
0x47, 0x4F, 0x5E, 0x72, 0x36, 0x7D, 0x27, 0x57, 0x5E, 0x72,
0x76, 0x36, 0x6C, 0x26, 0x3E, 0x35, 0x2D, 0x26, 0x40, 0x5E,
0x44, 0x32, 0x74, 0x56, 0x4B, 0x6A, 0x4E, 0x58, 0x4B, 0x44,
0x34, 0x59, 0x6C, 0x50, 0x71, 0x41, 0x3C, 0x69, 0x3A, 0x28,
0x69, 0x39, 0x25, 0x4A, 0x42, 0x39, 0x63, 0x67, 0x2C, 0x7E,
0x4E, 0x59, 0x6D, 0x74, 0x31, 0x76, 0x22, 0x62, 0x78, 0x48,
0x55, 0x77, 0x6C, 0x24, 0x2D, 0x55, 0x59, 0x66, 0x68, 0x21,
0x41, 0x31, 0x77, 0x21, 0x5F, 0x5D, 0x44, 0x48, 0x45, 0x3A,
0x64, 0x4A, 0x56, 0x33, 0x47, 0x40, 0x5E, 0x4B, 0x63, 0x29,
0x51, 0x71, 0x57, 0x67, 0x2B, 0x79, 0x57, 0x30, 0x26, 0x26,
0x55, 0x4E, 0x44, 0x69, 0x78, 0x2C, 0x5E, 0x6D, 0x2E, 0x5A,
0x68, 0x59, 0x32, 0x5F, 0x2F, 0x29, 0x4D, 0x74, 0x5C, 0x27,
0x67, 0x2A, 0x52, 0x3F, 0x60, 0x70, 0x37, 0x5A, 0x3C, 0x5C,
0x4B, 0x7A, 0x46, 0x58, 0x50, 0x6D, 0x55, 0x7E, 0x78, 0x5A,
0x29, 0x6F, 0x48, 0x76, 0x2B, 0x67, 0x4D, 0x4E, 0x34, 0x64,
0x37, 0x2F, 0x41, 0x69, 0x4E, 0x4A, 0x30, 0x70, 0x2C, 0x61,
0x24, 0x61, 0x4F, 0x3D, 0x79, 0x22, 0x3A, 0x32, 0x56, 0x55,
0x68, 0x36, 0x75, 0x5E, 0x33, 0x42, 0x72, 0x26, 0x56, 0x2E,
0x3A, 0x49, 0x33, 0x62, 0x63, 0x3C, 0x55, 0x27, 0x63, 0x69,
0x41, 0x5B, 0x3C, 0x76, 0x7D, 0x31, 0x7B, 0x30, 0x78, 0x4E,
0x52, 0x50, 0x3A, 0x72, 0x7D, 0x5A, 0x53, 0x79, 0x67, 0x55,
0x2E, 0x72, 0x71, 0x45, 0x29, 0x6F, 0x63, 0x68, 0x6E, 0x57,
0x3E, 0x2E, 0x5C, 0x6D, 0x3E, 0x71, 0x62, 0x6F, 0x27, 0x54,
0x2D, 0x44, 0x43, 0x3C, 0x2E, 0x7D, 0x57, 0x68, 0x42, 0x3D,
0x50, 0x54, 0x59, 0x38, 0x22, 0x3D, 0x5B, 0x7D, 0x28, 0x41,
0x2E, 0x58, 0x36, 0x58, 0x5A, 0x56, 0x63, 0x24, 0x43, 0x49,
0x64, 0x59, 0x4C, 0x52, 0x3B, 0x57, 0x57, 0x21, 0x60, 0x6E,
0x52, 0x3A, 0x67, 0x47, 0x70, 0x3B, 0x49, 0x64, 0x58, 0x44,
0x24, 0x7E, 0x5B, 0x59, 0x5D, 0x4E, 0x4C, 0x79, 0x43, 0x43,
0x35, 0x3C, 0x59, 0x6B, 0x72, 0x61, 0x53, 0x37, 0x2C, 0x30,
0x49, 0x45, 0x31, 0x24, 0x62, 0x7A, 0x37, 0x2B, 0x4B, 0x3C,
0x40, 0x59, 0x6B, 0x3D, 0x63, 0x2D, 0x47, 0x4E, 0x3E, 0x45,
0x24, 0x4A, 0x49, 0x52, 0x2E, 0x6D, 0x2F, 0x63, 0x70, 0x65,
0x4E, 0x4C, 0x29, 0x5C, 0x23, 0x27, 0x6C, 0x4A, 0x57, 0x36,
0x59, 0x52, 0x2E, 0x50, 0x3F, 0x67, 0x79, 0x72, 0x61, 0x62,
0x41, 0x2C, 0x2C, 0x74, 0x52, 0x3F, 0x4D, 0x67, 0x49, 0x63,
0x43, 0x2E, 0x3C, 0x3B, 0x3E, 0x45, 0x7A, 0x5D, 0x5F, 0x4F,
0x64, 0x65, 0x6F, 0x74, 0x5D, 0x43, 0x23, 0x7B, 0x59, 0x61,
0x25, 0x71, 0x32, 0x75, 0x4A, 0x37, 0x63, 0x61, 0x2A, 0x4B,
0x6C, 0x64, 0x63, 0x43, 0x4D, 0x79, 0x39, 0x25, 0x43, 0x31,
0x4E, 0x31, 0x50, 0x72, 0x67, 0x52, 0x60, 0x64, 0x2E, 0x74,
0x6E, 0x2A, 0x36, 0x3B, 0x6B, 0x53, 0x79, 0x5C, 0x62, 0x3F,
0x54, 0x7C, 0x32, 0x7B, 0x74, 0x57, 0x64, 0x68, 0x43, 0x28,
0x2B, 0x4B, 0x4A, 0x48, 0x21, 0x38, 0x5F, 0x28, 0x22, 0x66,
0x5E, 0x29, 0x43, 0x5C, 0x6D, 0x75, 0x24, 0x4C, 0x38, 0x53,
0x44, 0x79, 0x5B, 0x4C, 0x46, 0x76, 0x4F, 0x56, 0x46, 0x26,
0x6A, 0x61, 0x42, 0x62, 0x4D, 0x33, 0x3F, 0x69, 0x44, 0x66,
0x66, 0x2F, 0x52, 0x3B, 0x65, 0x3A, 0x30, 0x46, 0x2F, 0x5C,
0x36, 0x47, 0x5A, 0x5C, 0x66, 0x6D, 0x5D, 0x32, 0x2E, 0x76,
0x21, 0x79, 0x37, 0x2F, 0x36, 0x21, 0x33, 0x47, 0x44, 0x4C,
0x59, 0x72, 0x74, 0x62, 0x43, 0x7E, 0x40, 0x43, 0x37, 0x23,
0x39, 0x6C, 0x64, 0x34, 0x58, 0x39, 0x6E, 0x51, 0x5B, 0x3B,
0x6E, 0x50, 0x69, 0x51, 0x71, 0x5E, 0x62, 0x31, 0x4A, 0x7B,
0x2C, 0x58, 0x50, 0x71, 0x5A, 0x3B, 0x4F, 0x5D, 0x40, 0x76,
0x5A, 0x41, 0x5F, 0x29, 0x42, 0x66, 0x36, 0x4C, 0x39, 0x3D,
0x4E, 0x4F, 0x33, 0x45, 0x64, 0x7B, 0x34, 0x21, 0x5E, 0x37,
0x63, 0x58, 0x39, 0x2D, 0x58, 0x24, 0x32, 0x38, 0x7E, 0x66,
0x58, 0x51, 0x71, 0x77, 0x34, 0x3C, 0x6A, 0x69, 0x25, 0x73,
0x5F, 0x34, 0x3B, 0x2F, 0x69, 0x37, 0x27, 0x46, 0x29, 0x40,
0x65, 0x7B, 0x74, 0x39, 0x35, 0x5A, 0x7D, 0x45, 0x5E, 0x57,
0x72, 0x50, 0x39, 0x54, 0x58, 0x2D, 0x60, 0x29, 0x41, 0x72,
0x23, 0x32, 0x73, 0x78, 0x21, 0x2B, 0x3D, 0x39, 0x32, 0x31,
0x47, 0x52, 0x51, 0x71, 0x4C, 0x26, 0x35, 0x4E, 0x7A, 0x29,
0x34, 0x62, 0x63, 0x74, 0x4C, 0x61, 0x5F, 0x76, 0x39, 0x62,
0x30, 0x70, 0x6E, 0x4A, 0x73, 0x58, 0x41, 0x4E, 0x71, 0x2F,
0x67, 0x55, 0x55, 0x2A, 0x6F, 0x6B, 0x3D, 0x70, 0x33, 0x6B,
0x2A, 0x24, 0x3C, 0x31, 0x34, 0x3F, 0x77, 0x59, 0x37, 0x37,
0x6C, 0x4C, 0x5F, 0x7D, 0x59, 0x41, 0x48, 0x2B, 0x2B, 0x46,
0x35, 0x4D, 0x3F, 0x57, 0x29, 0x78, 0x3C, 0x69, 0x5C, 0x40,
0x40, 0x4A, 0x32, 0x58, 0x32, 0x21, 0x4E, 0x2D, 0x32, 0x6C,
0x2F, 0x4B, 0x3B, 0x67, 0x29, 0x56, 0x5F, 0x4E, 0x51, 0x66,
0x2C, 0x37, 0x4A, 0x7A, 0x53, 0x3A, 0x75, 0x2F, 0x22, 0x4C,
0x64, 0x3C, 0x3D, 0x5C, 0x44, 0x5D, 0x4D, 0x63, 0x4B, 0x32,
0x3F, 0x40, 0x74, 0x6A, 0x2B, 0x5F, 0x35, 0x23, 0x3A, 0x5E,
0x7D, 0x5B, 0x33, 0x25, 0x5D, 0x5F, 0x6F, 0x25, 0x68, 0x21,
0x5F, 0x2A, 0x22, 0x42, 0x40, 0x55, 0x66, 0x76, 0x77, 0x60,
0x24, 0x6C, 0x6C, 0x77, 0x5D, 0x3D, 0x4A, 0x27, 0x41, 0x47,
0x5E, 0x72, 0x62, 0x6D, 0x3B, 0x61, 0x65, 0x7E, 0x67, 0x39,
0x34, 0x56, 0x5D, 0x30, 0x62, 0x6E, 0x28, 0x5D, 0x5F, 0x3E,
0x68, 0x78, 0x64, 0x64, 0x56, 0x6B, 0x77, 0x3C, 0x70, 0x58,
0x46, 0x33, 0x32, 0x65, 0x42, 0x6F, 0x7A, 0x30, 0x40, 0x2B,
0x42, 0x55, 0x46, 0x48, 0x6B, 0x4B, 0x51, 0x3C, 0x72, 0x78,
0x21, 0x5F, 0x21, 0x6A, 0x6E, 0x47, 0x51, 0x73, 0x26, 0x3B,
0x4A, 0x64, 0x21, 0x2D, 0x42, 0x50, 0x46, 0x4C, 0x5F, 0x37,
0x39, 0x4C, 0x2B, 0x4A, 0x79, 0x5B, 0x2F, 0x21, 0x61, 0x78,
0x4E, 0x56, 0x32, 0x55, 0x2D, 0x61, 0x47, 0x5C, 0x6A, 0x4A,
0x6B, 0x2D, 0x6A, 0x32, 0x4C, 0x3C, 0x76, 0x2E, 0x6F, 0x3A,
0x43, 0x6D, 0x27, 0x7D, 0x76, 0x44, 0x26, 0x56, 0x24, 0x77,
0x3D, 0x5D, 0x22, 0x5D, 0x40, 0x51, 0x41, 0x39, 0x70, 0x7C,
0x4D, 0x41, 0x36, 0x28, 0x33, 0x4B, 0x5A, 0x5E, 0x25, 0x75,
0x2C, 0x72, 0x5D, 0x6E, 0x3E, 0x44, 0x76, 0x42, 0x56, 0x3D,
0x7E, 0x2F, 0x30, 0x38, 0x23, 0x54, 0x6B, 0x5B, 0x5E, 0x66,
0x42, 0x6E, 0x45, 0x24, 0x31, 0x6E, 0x6C, 0x48, 0x43, 0x28,
0x24, 0x47, 0x6D, 0x26, 0x21, 0x6C, 0x47, 0x4D, 0x5E, 0x44,
0x44, 0x58, 0x25, 0x4D, 0x39, 0x75, 0x67, 0x3C, 0x2D, 0x56,
0x35, 0x35, 0x32, 0x3C, 0x35, 0x73, 0x6C, 0x34, 0x7B, 0x73,
0x5A, 0x40, 0x61, 0x5A, 0x59, 0x6E, 0x5C, 0x36, 0x72, 0x24,
0x70, 0x29, 0x33, 0x72, 0x6D, 0x75, 0x4F, 0x47, 0x68, 0x30,
0x4C, 0x5C, 0x6D, 0x22, 0x59, 0x21, 0x61, 0x5A, 0x56, 0x7D,
0x63, 0x42, 0x59, 0x35, 0x56, 0x22, 0x3F, 0x6C, 0x42, 0x2A,
0x72, 0x25, 0x32, 0x71, 0x58, 0x6C, 0x58, 0x33, 0x35, 0x4B,
0x7D, 0x38, 0x5C, 0x3B, 0x36, 0x34, 0x52, 0x3E, 0x67, 0x24,
0x4E, 0x4D, 0x5F, 0x6F, 0x65, 0x77, 0x71, 0x49, 0x5D, 0x57,
0x6C, 0x43, 0x42, 0x7B, 0x77, 0x6C, 0x66, 0x68, 0x49, 0x46,
0x63, 0x44, 0x50, 0x33, 0x79, 0x64, 0x24, 0x67, 0x44, 0x6F,
0x28, 0x49, 0x2A, 0x34, 0x5D, 0x7B, 0x6A, 0x72, 0x4A, 0x31,
0x52, 0x3E, 0x39, 0x39, 0x33, 0x3B, 0x23, 0x51, 0x5F, 0x52,
0x74, 0x28, 0x7E, 0x60, 0x4C, 0x47, 0x7C, 0x28, 0x28, 0x2A,
0x57, 0x7D, 0x22, 0x2E, 0x50, 0x7D, 0x59, 0x33, 0x7C, 0x35,
0x2A, 0x76, 0x2D, 0x3B, 0x50, 0x22, 0x76, 0x66, 0x7A, 0x2D,
0x66, 0x5E, 0x65, 0x7C, 0x58, 0x34, 0x67, 0x7D, 0x6A, 0x2E,
0x43, 0x67, 0x79, 0x3A, 0x50, 0x74, 0x65, 0x35, 0x38, 0x57,
0x71, 0x31, 0x38, 0x75, 0x7B, 0x36, 0x6A, 0x42, 0x4A, 0x45,
0x40, 0x6B, 0x5B, 0x74, 0x63, 0x36, 0x49, 0x7B, 0x2E, 0x47,
0x41, 0x57, 0x6A, 0x6D, 0x43, 0x34, 0x2A, 0x3A, 0x6C, 0x2A,
0x3F, 0x36, 0x7E, 0x75, 0x2E, 0x69, 0x69, 0x32, 0x67, 0x27,
0x33, 0x67, 0x49, 0x59, 0x2B, 0x4B, 0x45, 0x45, 0x5A, 0x77,
0x4E, 0x33, 0x48, 0x53, 0x3F, 0x63, 0x27, 0x47, 0x3E, 0x77,
0x24, 0x3E, 0x55, 0x42, 0x78, 0x23, 0x68, 0x36, 0x2B, 0x2F,
0x7B, 0x22, 0x78, 0x79, 0x2D, 0x6A, 0x37, 0x72, 0x37, 0x2B,
0x45, 0x3E, 0x37, 0x4D, 0x5C, 0x4B, 0x61, 0x2C, 0x5B, 0x70,
0x77, 0x5C, 0x52, 0x2D, 0x3F, 0x2D, 0x46, 0x54, 0x39, 0x4D,
0x33, 0x78, 0x47, 0x6D, 0x27, 0x25, 0x7E, 0x7D, 0x76, 0x60,
0x63, 0x54, 0x65, 0x7B, 0x70, 0x47, 0x7C, 0x51, 0x52, 0x3F,
0x72, 0x21, 0x51, 0x3E, 0x2F, 0x70, 0x70, 0x39, 0x5E, 0x7B,
0x79, 0x32, 0x6F, 0x7B, 0x76, 0x72, 0x2B, 0x65, 0x65, 0x37,
0x64, 0x34, 0x3D, 0x52, 0x5E, 0x23, 0x41, 0x6F, 0x72, 0x74,
0x3C, 0x7C, 0x5F, 0x4C, 0x6A, 0x76, 0x39, 0x5C, 0x75, 0x42,
0x3E, 0x35, 0x59, 0x2C, 0x39, 0x67, 0x78, 0x49, 0x79, 0x47,
0x37, 0x3E, 0x79, 0x61, 0x4D, 0x2E, 0x28, 0x49, 0x4E, 0x34,
0x71, 0x5A, 0x5B, 0x33, 0x45, 0x2C, 0x2B, 0x69, 0x33, 0x32,
0x2A, 0x5C, 0x5A, 0x6B, 0x65, 0x54, 0x26, 0x7D, 0x63, 0x31,
0x4E, 0x33, 0x7E, 0x6B, 0x67, 0x5F, 0x3D, 0x67, 0x4D, 0x54,
0x5E, 0x45, 0x45, 0x75, 0x49, 0x28, 0x4B, 0x33, 0x34, 0x7A,
0x72, 0x5D, 0x72, 0x49, 0x48, 0x27, 0x75, 0x7C, 0x6B, 0x4A,
0x60, 0x25, 0x62, 0x6D, 0x29, 0x30, 0x6C, 0x3F, 0x34, 0x49,
0x2B, 0x45, 0x2D, 0x58, 0x54, 0x35, 0x42, 0x41, 0x7D, 0x31,
0x4A, 0x64, 0x67, 0x6D, 0x5A, 0x34, 0x46, 0x4E, 0x39, 0x3C,
0x59, 0x34, 0x35, 0x2C, 0x3E, 0x7E, 0x5F, 0x32, 0x37, 0x68,
0x7B, 0x29, 0x57, 0x44, 0x5E, 0x24, 0x72, 0x50, 0x44, 0x37,
0x68, 0x7E, 0x45, 0x56, 0x60, 0x4A, 0x38, 0x77, 0x53, 0x43,
0x50, 0x4A, 0x37, 0x62, 0x22, 0x45, 0x4B, 0x7C, 0x57, 0x7A,
0x63, 0x44, 0x4D, 0x55, 0x2F, 0x50, 0x44, 0x65, 0x6C, 0x78,
0x49, 0x34, 0x61, 0x5E, 0x65, 0x34, 0x25, 0x3C, 0x3A, 0x43,
0x63, 0x40, 0x7D, 0x24, 0x7C, 0x75, 0x59, 0x7A, 0x33, 0x4C,
0x59, 0x47, 0x65, 0x78, 0x3B, 0x30, 0x25, 0x66, 0x33, 0x32,
0x58, 0x48, 0x7C, 0x7C, 0x3E, 0x32, 0x70, 0x26, 0x66, 0x35,
0x24, 0x30, 0x2C, 0x44, 0x76, 0x72, 0x28, 0x54, 0x64, 0x3D,
0x5E, 0x42, 0x51, 0x36, 0x76, 0x40, 0x54, 0x50, 0x51, 0x3F,
0x46, 0x55, 0x44, 0x53, 0x5F, 0x4D, 0x4B, 0x26, 0x78, 0x78,
0x5A, 0x2B, 0x34, 0x31, 0x63, 0x3B, 0x41, 0x56, 0x62, 0x32,
0x58, 0x54, 0x26, 0x32, 0x28, 0x57, 0x49, 0x31, 0x5B, 0x46,
0x71, 0x40, 0x42, 0x55, 0x7C, 0x33, 0x40, 0x2D, 0x3D, 0x3D,
0x49, 0x73, 0x2B, 0x5D, 0x32, 0x2A, 0x5C, 0x2A, 0x5E, 0x71,
0x62, 0x53, 0x26, 0x26, 0x53, 0x2B, 0x56, 0x74, 0x6A, 0x5E,
0x4B, 0x68, 0x62, 0x2A, 0x67, 0x5C, 0x3B, 0x31, 0x2F, 0x5E,
0x4E, 0x6D, 0x57, 0x6E, 0x6E, 0x73, 0x53, 0x56, 0x35, 0x3A,
0x22, 0x5A, 0x6F, 0x44, 0x39, 0x2D, 0x23, 0x21, 0x42, 0x74,
0x4B, 0x3C, 0x74, 0x61, 0x38, 0x24, 0x70
]
# -------------------------------------------------------------------------------------------------
if __name__ == "__main__":
print '[+] Simple machine side channel attack started.'
for di in xrange(0xdead*0xbeef % 33):
disk_data = disk_data[0x200:0x200*33] + disk_data[:0x200]
key = ''
for cx in xrange(2, 0x23):
idx = ((cx - 2)*0xD + 1) & 0x1FF
key += chr(disk_data[0x200*(cx-2) + idx])
print '[+] Final key:', key
print '[+] Program finished successfully. Bye bye :)'
# -------------------------------------------------------------------------------------------------
'''
ispo@leet:~/ctf/codegate_2020/malicious$ ./malicious_mbr_crack.py
[+] Simple machine side channel attack started.
[+] Final key: 8_bits_per_byte_1_byte_per_sector
[+] Program finished successfully. Bye bye :)
'''
# ------------------------------------------------------------------------------------------------- | Gathered CTF writeups/codegate_quals_2020/malicious/malicious_mbr_crack.py | import struct
import sys
import os
disk_data = [
0x4A, 0x57, 0x5E, 0x75, 0x38, 0x66, 0x3B, 0x79, 0x3A, 0x60,
0x75, 0x61, 0x26, 0x38, 0x68, 0x5E, 0x28, 0x68, 0x6C, 0x6C,
0x72, 0x76, 0x71, 0x7E, 0x55, 0x47, 0x38, 0x42, 0x7A, 0x4A,
0x6B, 0x4D, 0x4D, 0x65, 0x37, 0x79, 0x45, 0x62, 0x2E, 0x70,
0x4C, 0x63, 0x38, 0x74, 0x79, 0x3D, 0x3D, 0x36, 0x50, 0x62,
0x5F, 0x77, 0x66, 0x55, 0x6E, 0x33, 0x79, 0x6C, 0x56, 0x29,
0x41, 0x36, 0x75, 0x65, 0x6A, 0x2E, 0x4F, 0x68, 0x54, 0x5B,
0x5F, 0x47, 0x6C, 0x76, 0x64, 0x6E, 0x47, 0x60, 0x47, 0x69,
0x71, 0x74, 0x4A, 0x66, 0x63, 0x78, 0x3C, 0x66, 0x5F, 0x5C,
0x3B, 0x7A, 0x55, 0x4B, 0x75, 0x2D, 0x60, 0x3E, 0x25, 0x3A,
0x2A, 0x27, 0x2B, 0x7A, 0x5D, 0x39, 0x48, 0x28, 0x65, 0x62,
0x5A, 0x44, 0x4B, 0x6B, 0x60, 0x37, 0x3B, 0x6F, 0x69, 0x3B,
0x6B, 0x74, 0x77, 0x7C, 0x44, 0x4B, 0x49, 0x77, 0x31, 0x50,
0x52, 0x39, 0x63, 0x3E, 0x50, 0x3F, 0x4C, 0x61, 0x46, 0x5E,
0x71, 0x7E, 0x55, 0x41, 0x2C, 0x63, 0x5E, 0x43, 0x31, 0x37,
0x24, 0x4B, 0x5F, 0x42, 0x39, 0x2A, 0x48, 0x32, 0x47, 0x28,
0x5B, 0x6A, 0x3D, 0x72, 0x79, 0x64, 0x29, 0x25, 0x4B, 0x4C,
0x6A, 0x49, 0x65, 0x4A, 0x23, 0x4C, 0x74, 0x63, 0x54, 0x32,
0x60, 0x41, 0x6B, 0x39, 0x47, 0x4F, 0x69, 0x48, 0x6B, 0x31,
0x72, 0x5E, 0x22, 0x29, 0x4E, 0x3A, 0x5F, 0x23, 0x5E, 0x36,
0x72, 0x37, 0x25, 0x46, 0x25, 0x62, 0x4D, 0x73, 0x32, 0x62,
0x3C, 0x78, 0x70, 0x28, 0x6A, 0x44, 0x7B, 0x40, 0x7D, 0x5C,
0x68, 0x50, 0x64, 0x64, 0x39, 0x58, 0x79, 0x73, 0x57, 0x32,
0x39, 0x58, 0x6B, 0x5E, 0x3B, 0x79, 0x22, 0x66, 0x67, 0x47,
0x33, 0x4B, 0x3B, 0x23, 0x23, 0x2A, 0x46, 0x49, 0x61, 0x64,
0x26, 0x35, 0x2C, 0x27, 0x39, 0x50, 0x62, 0x3A, 0x7A, 0x54,
0x7E, 0x63, 0x4D, 0x69, 0x3F, 0x51, 0x43, 0x65, 0x51, 0x2B,
0x4B, 0x7B, 0x76, 0x5B, 0x30, 0x42, 0x4A, 0x38, 0x54, 0x43,
0x73, 0x6F, 0x76, 0x38, 0x5E, 0x3F, 0x21, 0x2F, 0x7B, 0x24,
0x4B, 0x36, 0x52, 0x46, 0x78, 0x70, 0x32, 0x38, 0x35, 0x7C,
0x4E, 0x7E, 0x5D, 0x22, 0x79, 0x40, 0x26, 0x71, 0x6D, 0x78,
0x23, 0x57, 0x7B, 0x48, 0x5D, 0x23, 0x79, 0x72, 0x38, 0x56,
0x24, 0x27, 0x74, 0x54, 0x31, 0x6F, 0x73, 0x51, 0x3D, 0x24,
0x5E, 0x4A, 0x4C, 0x7B, 0x26, 0x57, 0x36, 0x6B, 0x6A, 0x2B,
0x4B, 0x67, 0x33, 0x72, 0x5D, 0x72, 0x6A, 0x78, 0x23, 0x29,
0x5F, 0x51, 0x39, 0x6A, 0x5D, 0x54, 0x4A, 0x54, 0x4A, 0x5D,
0x27, 0x38, 0x32, 0x5D, 0x79, 0x2D, 0x5B, 0x5D, 0x48, 0x48,
0x23, 0x21, 0x53, 0x4B, 0x60, 0x6C, 0x21, 0x72, 0x42, 0x77,
0x54, 0x60, 0x3C, 0x4B, 0x5A, 0x7E, 0x4B, 0x5D, 0x22, 0x34,
0x48, 0x5C, 0x4A, 0x65, 0x52, 0x43, 0x60, 0x26, 0x7A, 0x45,
0x51, 0x61, 0x35, 0x3A, 0x7E, 0x7B, 0x6A, 0x63, 0x68, 0x53,
0x72, 0x3C, 0x38, 0x4B, 0x6C, 0x3C, 0x30, 0x71, 0x73, 0x60,
0x4F, 0x7D, 0x2B, 0x6B, 0x54, 0x2E, 0x76, 0x53, 0x25, 0x5C,
0x6E, 0x31, 0x2B, 0x37, 0x25, 0x65, 0x52, 0x21, 0x24, 0x67,
0x59, 0x3E, 0x53, 0x5B, 0x4A, 0x6B, 0x4D, 0x36, 0x30, 0x7B,
0x21, 0x21, 0x5D, 0x3C, 0x30, 0x66, 0x68, 0x53, 0x7E, 0x77,
0x33, 0x7B, 0x65, 0x3C, 0x6A, 0x62, 0x50, 0x53, 0x38, 0x55,
0x49, 0x48, 0x70, 0x6A, 0x52, 0x79, 0x29, 0x71, 0x34, 0x3B,
0x6C, 0x72, 0x38, 0x5B, 0x45, 0x7D, 0x70, 0x7B, 0x51, 0x3C,
0x7B, 0x65, 0x2C, 0x41, 0x71, 0x3C, 0x6D, 0x2C, 0x7E, 0x72,
0x61, 0x56, 0x65, 0x48, 0x55, 0x64, 0x64, 0x53, 0x37, 0x31,
0x67, 0x51, 0x57, 0x48, 0x5E, 0x37, 0x26, 0x4A, 0x52, 0x4D,
0x65, 0x25, 0x71, 0x35, 0x2C, 0x71, 0x69, 0x7E, 0x45, 0x70,
0x7A, 0x6D, 0x79, 0x76, 0x71, 0x4D, 0x79, 0x59, 0x51, 0x48,
0x69, 0x4E, 0x60, 0x57, 0x65, 0x2D, 0x2A, 0x78, 0x7C, 0x28,
0x70, 0x32, 0x70, 0x4A, 0x71, 0x60, 0x4A, 0x32, 0x22, 0x2C,
0x7C, 0x63, 0x6D, 0x43, 0x25, 0x74, 0x56, 0x57, 0x3E, 0x50,
0x3E, 0x42, 0x6F, 0x28, 0x3D, 0x7C, 0x56, 0x50, 0x3F, 0x33,
0x22, 0x5C, 0x42, 0x33, 0x3A, 0x57, 0x5F, 0x78, 0x6E, 0x4F,
0x2A, 0x51, 0x37, 0x3E, 0x67, 0x79, 0x5D, 0x4A, 0x5C, 0x46,
0x2E, 0x56, 0x7B, 0x40, 0x30, 0x60, 0x3C, 0x21, 0x26, 0x59,
0x4B, 0x75, 0x34, 0x56, 0x4A, 0x5C, 0x27, 0x32, 0x7E, 0x2B,
0x47, 0x6F, 0x50, 0x2B, 0x41, 0x4B, 0x7B, 0x3B, 0x65, 0x78,
0x69, 0x64, 0x58, 0x4E, 0x53, 0x69, 0x41, 0x5B, 0x44, 0x72,
0x62, 0x57, 0x62, 0x44, 0x44, 0x68, 0x5D, 0x3C, 0x29, 0x56,
0x5D, 0x26, 0x55, 0x39, 0x5B, 0x6A, 0x6C, 0x51, 0x67, 0x6A,
0x5B, 0x43, 0x3F, 0x4E, 0x30, 0x55, 0x47, 0x59, 0x65, 0x3C,
0x2F, 0x23, 0x76, 0x54, 0x72, 0x38, 0x48, 0x59, 0x22, 0x5E,
0x35, 0x7E, 0x65, 0x51, 0x4C, 0x7B, 0x70, 0x5C, 0x3A, 0x3F,
0x41, 0x73, 0x7A, 0x55, 0x52, 0x4F, 0x34, 0x6E, 0x6C, 0x27,
0x23, 0x32, 0x69, 0x21, 0x2E, 0x5B, 0x53, 0x29, 0x50, 0x67,
0x28, 0x6A, 0x61, 0x21, 0x2E, 0x4A, 0x51, 0x6C, 0x34, 0x6B,
0x41, 0x31, 0x6C, 0x38, 0x60, 0x59, 0x3C, 0x3C, 0x2E, 0x79,
0x2A, 0x7D, 0x7E, 0x60, 0x4C, 0x4F, 0x27, 0x42, 0x2C, 0x22,
0x31, 0x6A, 0x40, 0x28, 0x42, 0x52, 0x66, 0x3A, 0x62, 0x62,
0x57, 0x6C, 0x7B, 0x56, 0x41, 0x78, 0x38, 0x57, 0x25, 0x77,
0x47, 0x5F, 0x5C, 0x30, 0x41, 0x42, 0x5E, 0x30, 0x4A, 0x5E,
0x71, 0x3F, 0x60, 0x58, 0x4E, 0x38, 0x7B, 0x21, 0x3C, 0x78,
0x74, 0x55, 0x6D, 0x71, 0x37, 0x36, 0x22, 0x62, 0x7E, 0x6A,
0x76, 0x28, 0x4C, 0x25, 0x6A, 0x48, 0x4E, 0x4A, 0x37, 0x39,
0x2C, 0x5C, 0x25, 0x48, 0x62, 0x7E, 0x2E, 0x51, 0x2A, 0x49,
0x2F, 0x22, 0x78, 0x29, 0x2D, 0x47, 0x5E, 0x2A, 0x52, 0x2E,
0x5F, 0x7C, 0x4C, 0x7D, 0x6B, 0x23, 0x4A, 0x30, 0x7C, 0x31,
0x4E, 0x23, 0x59, 0x37, 0x28, 0x4F, 0x64, 0x6F, 0x66, 0x33,
0x34, 0x4E, 0x52, 0x2D, 0x35, 0x7E, 0x35, 0x67, 0x6C, 0x54,
0x22, 0x49, 0x47, 0x31, 0x5C, 0x62, 0x36, 0x57, 0x39, 0x48,
0x7B, 0x67, 0x6F, 0x6F, 0x5F, 0x4B, 0x58, 0x54, 0x38, 0x5F,
0x23, 0x57, 0x5F, 0x59, 0x58, 0x29, 0x2F, 0x38, 0x62, 0x5F,
0x5F, 0x3E, 0x79, 0x6B, 0x2B, 0x7B, 0x65, 0x3D, 0x5E, 0x5C,
0x44, 0x65, 0x50, 0x5D, 0x78, 0x73, 0x4D, 0x4B, 0x42, 0x5F,
0x39, 0x45, 0x5A, 0x5A, 0x6C, 0x2F, 0x59, 0x54, 0x65, 0x51,
0x5F, 0x40, 0x25, 0x49, 0x27, 0x32, 0x2F, 0x53, 0x76, 0x5F,
0x63, 0x50, 0x5A, 0x21, 0x56, 0x69, 0x27, 0x7B, 0x7E, 0x4C,
0x6D, 0x7B, 0x35, 0x4F, 0x6B, 0x75, 0x24, 0x53, 0x6C, 0x77,
0x46, 0x2E, 0x50, 0x77, 0x78, 0x71, 0x43, 0x47, 0x58, 0x5B,
0x41, 0x60, 0x38, 0x72, 0x72, 0x46, 0x78, 0x46, 0x47, 0x3E,
0x39, 0x58, 0x35, 0x35, 0x4C, 0x6F, 0x50, 0x4C, 0x4E, 0x69,
0x36, 0x42, 0x2D, 0x74, 0x57, 0x69, 0x27, 0x28, 0x6D, 0x7B,
0x58, 0x54, 0x38, 0x66, 0x6C, 0x49, 0x42, 0x70, 0x68, 0x37,
0x5A, 0x22, 0x7A, 0x3D, 0x5F, 0x7D, 0x7D, 0x5D, 0x77, 0x4C,
0x57, 0x5C, 0x43, 0x42, 0x5A, 0x7D, 0x73, 0x58, 0x39, 0x59,
0x6B, 0x38, 0x5A, 0x6C, 0x3C, 0x67, 0x76, 0x4A, 0x32, 0x6C,
0x24, 0x5A, 0x32, 0x61, 0x55, 0x62, 0x7B, 0x3A, 0x51, 0x6D,
0x28, 0x22, 0x4B, 0x6E, 0x7B, 0x3F, 0x3E, 0x7C, 0x3B, 0x79,
0x2C, 0x57, 0x68, 0x7D, 0x4E, 0x61, 0x70, 0x3C, 0x7C, 0x5B,
0x64, 0x50, 0x52, 0x44, 0x40, 0x67, 0x44, 0x29, 0x30, 0x3C,
0x39, 0x70, 0x39, 0x4E, 0x58, 0x45, 0x46, 0x51, 0x76, 0x4C,
0x4C, 0x3A, 0x58, 0x7A, 0x28, 0x69, 0x75, 0x33, 0x4D, 0x49,
0x68, 0x26, 0x72, 0x7B, 0x23, 0x60, 0x59, 0x45, 0x46, 0x68,
0x68, 0x78, 0x7C, 0x58, 0x75, 0x4F, 0x76, 0x68, 0x63, 0x7E,
0x37, 0x2D, 0x7C, 0x5A, 0x35, 0x2F, 0x2B, 0x2B, 0x23, 0x3C,
0x40, 0x60, 0x6E, 0x3C, 0x42, 0x21, 0x3A, 0x78, 0x37, 0x4E,
0x75, 0x64, 0x59, 0x53, 0x6B, 0x33, 0x25, 0x35, 0x46, 0x68,
0x3F, 0x27, 0x58, 0x63, 0x3C, 0x39, 0x23, 0x53, 0x5B, 0x55,
0x3F, 0x28, 0x7A, 0x7B, 0x5C, 0x2D, 0x59, 0x5C, 0x31, 0x4C,
0x68, 0x39, 0x4F, 0x21, 0x76, 0x75, 0x24, 0x3C, 0x7A, 0x73,
0x6E, 0x32, 0x26, 0x61, 0x3A, 0x6C, 0x65, 0x74, 0x3C, 0x7C,
0x67, 0x49, 0x51, 0x38, 0x3D, 0x36, 0x5B, 0x42, 0x63, 0x74,
0x3C, 0x30, 0x35, 0x4A, 0x2A, 0x7C, 0x7A, 0x22, 0x40, 0x35,
0x2E, 0x32, 0x22, 0x7E, 0x7E, 0x2A, 0x59, 0x33, 0x7C, 0x52,
0x6A, 0x75, 0x2E, 0x23, 0x61, 0x3D, 0x56, 0x68, 0x3D, 0x6C,
0x21, 0x41, 0x5C, 0x36, 0x44, 0x6A, 0x29, 0x75, 0x7A, 0x22,
0x7E, 0x3E, 0x63, 0x36, 0x4B, 0x23, 0x3F, 0x2E, 0x42, 0x44,
0x7E, 0x77, 0x4E, 0x6F, 0x37, 0x71, 0x26, 0x68, 0x23, 0x55,
0x40, 0x72, 0x5A, 0x48, 0x52, 0x59, 0x5F, 0x32, 0x35, 0x3B,
0x2C, 0x34, 0x4B, 0x6C, 0x50, 0x67, 0x40, 0x39, 0x6D, 0x29,
0x27, 0x78, 0x53, 0x32, 0x45, 0x55, 0x66, 0x40, 0x63, 0x47,
0x43, 0x2B, 0x38, 0x3A, 0x4A, 0x70, 0x48, 0x2C, 0x71, 0x35,
0x57, 0x31, 0x47, 0x54, 0x48, 0x6C, 0x61, 0x4C, 0x3C, 0x39,
0x79, 0x3E, 0x27, 0x64, 0x73, 0x27, 0x55, 0x47, 0x43, 0x27,
0x44, 0x39, 0x66, 0x6C, 0x23, 0x58, 0x6E, 0x7A, 0x4D, 0x37,
0x54, 0x7C, 0x7E, 0x6E, 0x6E, 0x6F, 0x35, 0x5D, 0x59, 0x41,
0x4A, 0x5D, 0x75, 0x59, 0x7B, 0x49, 0x30, 0x3B, 0x48, 0x2A,
0x77, 0x54, 0x29, 0x76, 0x2C, 0x6E, 0x2B, 0x3B, 0x29, 0x3A,
0x4F, 0x4C, 0x36, 0x26, 0x38, 0x4B, 0x26, 0x4E, 0x63, 0x3B,
0x53, 0x50, 0x6B, 0x77, 0x72, 0x2C, 0x27, 0x5E, 0x43, 0x62,
0x73, 0x7E, 0x60, 0x69, 0x60, 0x34, 0x6F, 0x52, 0x6A, 0x75,
0x3E, 0x58, 0x57, 0x2D, 0x40, 0x77, 0x5F, 0x3F, 0x75, 0x7C,
0x46, 0x50, 0x23, 0x3A, 0x6C, 0x2D, 0x49, 0x22, 0x57, 0x6E,
0x72, 0x34, 0x77, 0x6C, 0x76, 0x67, 0x67, 0x75, 0x50, 0x5E,
0x6A, 0x2F, 0x7E, 0x68, 0x69, 0x5D, 0x35, 0x46, 0x2D, 0x59,
0x35, 0x64, 0x4A, 0x74, 0x21, 0x6A, 0x32, 0x2A, 0x54, 0x6C,
0x58, 0x66, 0x36, 0x65, 0x78, 0x55, 0x56, 0x37, 0x65, 0x3C,
0x50, 0x41, 0x42, 0x79, 0x68, 0x41, 0x55, 0x40, 0x25, 0x42,
0x3B, 0x7B, 0x6B, 0x2D, 0x2C, 0x40, 0x4C, 0x49, 0x64, 0x6C,
0x3F, 0x4D, 0x6E, 0x7E, 0x30, 0x76, 0x48, 0x28, 0x76, 0x45,
0x70, 0x60, 0x31, 0x49, 0x40, 0x66, 0x5D, 0x40, 0x5D, 0x42,
0x4B, 0x4D, 0x56, 0x7A, 0x34, 0x3F, 0x5F, 0x29, 0x7A, 0x78,
0x2E, 0x59, 0x7C, 0x6D, 0x75, 0x48, 0x3E, 0x63, 0x62, 0x45,
0x40, 0x4A, 0x71, 0x47, 0x46, 0x42, 0x21, 0x3E, 0x27, 0x25,
0x34, 0x42, 0x7C, 0x7A, 0x21, 0x54, 0x30, 0x70, 0x5C, 0x29,
0x5D, 0x32, 0x67, 0x72, 0x56, 0x23, 0x28, 0x3B, 0x2E, 0x23,
0x31, 0x53, 0x77, 0x28, 0x4E, 0x38, 0x2A, 0x63, 0x71, 0x27,
0x47, 0x23, 0x52, 0x62, 0x32, 0x63, 0x73, 0x59, 0x44, 0x6C,
0x21, 0x53, 0x46, 0x25, 0x53, 0x5F, 0x25, 0x58, 0x24, 0x75,
0x7B, 0x25, 0x42, 0x6C, 0x53, 0x6F, 0x4D, 0x34, 0x28, 0x4E,
0x7D, 0x74, 0x3F, 0x7A, 0x21, 0x7E, 0x3D, 0x69, 0x66, 0x61,
0x7A, 0x2B, 0x37, 0x34, 0x57, 0x79, 0x40, 0x72, 0x7B, 0x4D,
0x29, 0x41, 0x4A, 0x73, 0x6D, 0x4D, 0x6A, 0x60, 0x4C, 0x41,
0x6C, 0x4F, 0x22, 0x7D, 0x5C, 0x62, 0x3E, 0x2B, 0x4A, 0x79,
0x22, 0x5C, 0x4D, 0x56, 0x2F, 0x36, 0x53, 0x61, 0x33, 0x22,
0x31, 0x49, 0x2A, 0x32, 0x5F, 0x61, 0x79, 0x72, 0x2D, 0x2F,
0x2A, 0x5B, 0x2D, 0x29, 0x4F, 0x2C, 0x2F, 0x61, 0x26, 0x4F,
0x78, 0x5A, 0x4A, 0x64, 0x23, 0x40, 0x24, 0x44, 0x22, 0x4C,
0x75, 0x57, 0x42, 0x29, 0x72, 0x50, 0x2F, 0x53, 0x46, 0x29,
0x62, 0x6E, 0x4E, 0x4A, 0x75, 0x29, 0x44, 0x5B, 0x3B, 0x69,
0x50, 0x6C, 0x7E, 0x34, 0x48, 0x76, 0x60, 0x2A, 0x68, 0x6B,
0x2B, 0x25, 0x7B, 0x7D, 0x6D, 0x6E, 0x38, 0x4D, 0x56, 0x65,
0x26, 0x7C, 0x6C, 0x25, 0x39, 0x2A, 0x33, 0x58, 0x73, 0x2B,
0x3A, 0x2C, 0x64, 0x3F, 0x48, 0x6F, 0x3A, 0x78, 0x61, 0x71,
0x60, 0x29, 0x27, 0x5F, 0x28, 0x70, 0x70, 0x76, 0x2F, 0x47,
0x5E, 0x4B, 0x3B, 0x54, 0x65, 0x65, 0x77, 0x2E, 0x57, 0x21,
0x47, 0x76, 0x44, 0x32, 0x49, 0x46, 0x3C, 0x23, 0x29, 0x4D,
0x49, 0x29, 0x43, 0x6D, 0x25, 0x24, 0x51, 0x25, 0x77, 0x62,
0x74, 0x6C, 0x6B, 0x37, 0x4F, 0x6D, 0x5D, 0x5F, 0x22, 0x40,
0x27, 0x32, 0x23, 0x50, 0x22, 0x33, 0x5F, 0x44, 0x71, 0x6B,
0x7B, 0x72, 0x36, 0x4F, 0x58, 0x60, 0x6F, 0x73, 0x44, 0x5E,
0x7A, 0x62, 0x7D, 0x50, 0x3C, 0x4D, 0x7D, 0x7E, 0x54, 0x23,
0x21, 0x60, 0x5A, 0x4B, 0x51, 0x77, 0x49, 0x5E, 0x6D, 0x79,
0x62, 0x4D, 0x3F, 0x6E, 0x62, 0x26, 0x24, 0x47, 0x6E, 0x25,
0x6F, 0x3B, 0x2D, 0x78, 0x77, 0x32, 0x6C, 0x7C, 0x72, 0x64,
0x44, 0x79, 0x4D, 0x42, 0x78, 0x70, 0x71, 0x31, 0x41, 0x48,
0x6C, 0x71, 0x78, 0x35, 0x67, 0x31, 0x4D, 0x5E, 0x38, 0x67,
0x2B, 0x34, 0x7D, 0x24, 0x23, 0x6F, 0x21, 0x5F, 0x32, 0x65,
0x7D, 0x7E, 0x7D, 0x49, 0x25, 0x37, 0x51, 0x2C, 0x64, 0x40,
0x6B, 0x37, 0x44, 0x61, 0x6A, 0x4D, 0x71, 0x28, 0x6F, 0x46,
0x48, 0x51, 0x35, 0x51, 0x29, 0x60, 0x79, 0x4D, 0x53, 0x28,
0x6D, 0x51, 0x7A, 0x2D, 0x5B, 0x52, 0x31, 0x54, 0x4F, 0x49,
0x28, 0x51, 0x43, 0x62, 0x7A, 0x3E, 0x50, 0x4B, 0x57, 0x70,
0x27, 0x3D, 0x27, 0x38, 0x6F, 0x5F, 0x79, 0x3A, 0x33, 0x57,
0x67, 0x61, 0x22, 0x79, 0x7B, 0x2F, 0x5D, 0x63, 0x49, 0x31,
0x28, 0x66, 0x73, 0x3B, 0x3B, 0x31, 0x36, 0x4E, 0x71, 0x4D,
0x40, 0x27, 0x48, 0x44, 0x3A, 0x43, 0x5D, 0x6F, 0x49, 0x75,
0x35, 0x2E, 0x5A, 0x7B, 0x46, 0x29, 0x3F, 0x29, 0x61, 0x6E,
0x5C, 0x35, 0x38, 0x62, 0x4E, 0x57, 0x5A, 0x6D, 0x23, 0x38,
0x69, 0x2E, 0x3F, 0x39, 0x30, 0x44, 0x56, 0x38, 0x5C, 0x41,
0x61, 0x3C, 0x3A, 0x25, 0x47, 0x76, 0x5B, 0x27, 0x4A, 0x7B,
0x54, 0x7A, 0x73, 0x37, 0x55, 0x25, 0x2B, 0x6F, 0x38, 0x52,
0x2F, 0x76, 0x57, 0x56, 0x57, 0x44, 0x5C, 0x7C, 0x29, 0x70,
0x36, 0x77, 0x52, 0x61, 0x21, 0x64, 0x42, 0x6A, 0x6D, 0x2D,
0x39, 0x2C, 0x3B, 0x5F, 0x42, 0x4B, 0x2C, 0x55, 0x7C, 0x5D,
0x2B, 0x4B, 0x30, 0x28, 0x25, 0x64, 0x6F, 0x51, 0x2C, 0x69,
0x34, 0x39, 0x26, 0x4E, 0x40, 0x37, 0x57, 0x26, 0x7A, 0x28,
0x3B, 0x5B, 0x7E, 0x41, 0x2B, 0x7C, 0x61, 0x3F, 0x62, 0x7C,
0x47, 0x4C, 0x34, 0x57, 0x71, 0x65, 0x65, 0x55, 0x46, 0x59,
0x26, 0x52, 0x48, 0x23, 0x31, 0x46, 0x6A, 0x39, 0x47, 0x67,
0x62, 0x32, 0x64, 0x5B, 0x60, 0x5A, 0x7A, 0x25, 0x3A, 0x7D,
0x49, 0x33, 0x3C, 0x6C, 0x41, 0x66, 0x36, 0x41, 0x37, 0x6A,
0x77, 0x79, 0x22, 0x3D, 0x5F, 0x4A, 0x59, 0x63, 0x55, 0x2A,
0x3E, 0x3C, 0x57, 0x45, 0x68, 0x2B, 0x28, 0x66, 0x26, 0x4B,
0x51, 0x71, 0x66, 0x39, 0x78, 0x70, 0x2C, 0x73, 0x50, 0x4C,
0x50, 0x43, 0x57, 0x66, 0x58, 0x6B, 0x2B, 0x57, 0x56, 0x36,
0x5A, 0x57, 0x6A, 0x34, 0x63, 0x7B, 0x65, 0x57, 0x77, 0x4F,
0x5F, 0x77, 0x29, 0x22, 0x37, 0x74, 0x6A, 0x68, 0x7D, 0x5C,
0x53, 0x72, 0x7A, 0x4A, 0x3C, 0x6F, 0x30, 0x75, 0x76, 0x52,
0x65, 0x7A, 0x48, 0x6D, 0x68, 0x7B, 0x25, 0x42, 0x3B, 0x6C,
0x2B, 0x41, 0x59, 0x49, 0x4F, 0x2B, 0x41, 0x5F, 0x5B, 0x60,
0x7C, 0x67, 0x74, 0x2C, 0x4C, 0x2A, 0x7A, 0x3C, 0x2A, 0x4F,
0x74, 0x58, 0x7E, 0x38, 0x32, 0x76, 0x53, 0x7A, 0x5B, 0x4D,
0x55, 0x28, 0x43, 0x72, 0x3C, 0x38, 0x79, 0x7E, 0x3F, 0x5F,
0x34, 0x2D, 0x3B, 0x7D, 0x46, 0x24, 0x2A, 0x7A, 0x3E, 0x7E,
0x33, 0x6D, 0x36, 0x5F, 0x21, 0x67, 0x57, 0x49, 0x66, 0x6C,
0x5C, 0x27, 0x48, 0x49, 0x25, 0x39, 0x23, 0x6B, 0x50, 0x30,
0x70, 0x30, 0x5E, 0x33, 0x46, 0x71, 0x5E, 0x5D, 0x3D, 0x35,
0x69, 0x4A, 0x36, 0x47, 0x7E, 0x3C, 0x61, 0x32, 0x60, 0x2A,
0x24, 0x78, 0x6D, 0x5B, 0x46, 0x3C, 0x2E, 0x27, 0x6B, 0x7B,
0x68, 0x6B, 0x4C, 0x3E, 0x7A, 0x40, 0x4A, 0x72, 0x49, 0x71,
0x77, 0x3D, 0x36, 0x77, 0x76, 0x2C, 0x65, 0x6F, 0x29, 0x72,
0x4D, 0x47, 0x7A, 0x4B, 0x5C, 0x77, 0x62, 0x60, 0x5C, 0x32,
0x32, 0x2F, 0x50, 0x2A, 0x3B, 0x2B, 0x79, 0x58, 0x58, 0x41,
0x3D, 0x72, 0x5D, 0x25, 0x48, 0x23, 0x61, 0x67, 0x61, 0x3C,
0x7B, 0x66, 0x57, 0x2F, 0x34, 0x70, 0x65, 0x7E, 0x6B, 0x43,
0x69, 0x45, 0x79, 0x75, 0x2E, 0x33, 0x39, 0x5D, 0x2F, 0x4B,
0x49, 0x2F, 0x53, 0x73, 0x2E, 0x31, 0x58, 0x55, 0x54, 0x56,
0x7A, 0x4E, 0x62, 0x2E, 0x26, 0x42, 0x2D, 0x26, 0x4E, 0x79,
0x32, 0x7D, 0x53, 0x40, 0x40, 0x3F, 0x71, 0x6B, 0x2D, 0x38,
0x51, 0x33, 0x33, 0x7C, 0x4D, 0x7E, 0x4F, 0x58, 0x6E, 0x6F,
0x5E, 0x50, 0x26, 0x2B, 0x7A, 0x5E, 0x54, 0x4C, 0x71, 0x35,
0x6C, 0x44, 0x6D, 0x49, 0x32, 0x39, 0x4B, 0x71, 0x3B, 0x5F,
0x45, 0x41, 0x39, 0x49, 0x59, 0x2E, 0x45, 0x60, 0x71, 0x67,
0x71, 0x70, 0x62, 0x76, 0x48, 0x4B, 0x68, 0x71, 0x2B, 0x42,
0x67, 0x7A, 0x5F, 0x4B, 0x76, 0x53, 0x78, 0x62, 0x5A, 0x72,
0x55, 0x48, 0x42, 0x48, 0x77, 0x2F, 0x37, 0x3D, 0x3C, 0x56,
0x61, 0x39, 0x56, 0x49, 0x47, 0x23, 0x33, 0x44, 0x29, 0x56,
0x61, 0x51, 0x50, 0x34, 0x6E, 0x5D, 0x4A, 0x4A, 0x37, 0x7A,
0x3F, 0x68, 0x7B, 0x4F, 0x72, 0x2A, 0x26, 0x6D, 0x53, 0x51,
0x45, 0x25, 0x50, 0x64, 0x53, 0x32, 0x24, 0x46, 0x65, 0x78,
0x64, 0x48, 0x7B, 0x70, 0x4E, 0x46, 0x27, 0x2D, 0x3A, 0x5F,
0x3B, 0x4F, 0x38, 0x29, 0x66, 0x70, 0x48, 0x4D, 0x2F, 0x29,
0x6F, 0x63, 0x31, 0x45, 0x42, 0x2B, 0x56, 0x6C, 0x6C, 0x60,
0x28, 0x7E, 0x77, 0x31, 0x21, 0x3F, 0x29, 0x6F, 0x34, 0x4A,
0x5C, 0x4A, 0x4E, 0x5E, 0x55, 0x26, 0x74, 0x44, 0x5B, 0x65,
0x2A, 0x6B, 0x7A, 0x48, 0x27, 0x62, 0x66, 0x3E, 0x5C, 0x36,
0x2A, 0x64, 0x5C, 0x66, 0x27, 0x3F, 0x49, 0x36, 0x75, 0x7D,
0x30, 0x3A, 0x2F, 0x7D, 0x79, 0x33, 0x72, 0x28, 0x4C, 0x61,
0x3A, 0x47, 0x56, 0x36, 0x33, 0x62, 0x4A, 0x70, 0x33, 0x71,
0x62, 0x22, 0x64, 0x39, 0x4F, 0x53, 0x51, 0x69, 0x6F, 0x26,
0x6E, 0x24, 0x39, 0x36, 0x3E, 0x25, 0x6B, 0x7C, 0x52, 0x70,
0x6D, 0x21, 0x36, 0x68, 0x50, 0x78, 0x4D, 0x3A, 0x24, 0x7D,
0x75, 0x6B, 0x40, 0x64, 0x2B, 0x5C, 0x3B, 0x7D, 0x64, 0x3F,
0x2E, 0x23, 0x28, 0x70, 0x77, 0x7D, 0x30, 0x78, 0x5E, 0x31,
0x39, 0x33, 0x3D, 0x56, 0x76, 0x4C, 0x6B, 0x44, 0x56, 0x4D,
0x4D, 0x55, 0x7B, 0x6A, 0x6D, 0x55, 0x62, 0x58, 0x54, 0x77,
0x7B, 0x5F, 0x5D, 0x6F, 0x56, 0x7E, 0x5B, 0x31, 0x65, 0x5E,
0x59, 0x7C, 0x35, 0x64, 0x45, 0x4D, 0x5D, 0x39, 0x2D, 0x44,
0x4A, 0x3B, 0x43, 0x2C, 0x35, 0x3E, 0x75, 0x6C, 0x67, 0x4D,
0x5C, 0x5A, 0x79, 0x5A, 0x6A, 0x22, 0x7D, 0x5B, 0x29, 0x30,
0x72, 0x27, 0x4F, 0x6B, 0x73, 0x51, 0x6B, 0x59, 0x58, 0x69,
0x23, 0x22, 0x47, 0x37, 0x39, 0x2F, 0x3E, 0x70, 0x44, 0x44,
0x67, 0x52, 0x65, 0x59, 0x34, 0x39, 0x26, 0x2B, 0x57, 0x6C,
0x7B, 0x68, 0x70, 0x66, 0x47, 0x41, 0x4A, 0x35, 0x31, 0x6E,
0x70, 0x74, 0x46, 0x48, 0x2E, 0x66, 0x34, 0x2F, 0x3C, 0x26,
0x2E, 0x41, 0x5A, 0x2A, 0x23, 0x54, 0x45, 0x25, 0x44, 0x2E,
0x4A, 0x60, 0x39, 0x52, 0x51, 0x31, 0x71, 0x3A, 0x2D, 0x5E,
0x36, 0x39, 0x5E, 0x61, 0x7C, 0x7D, 0x79, 0x2C, 0x4A, 0x49,
0x7B, 0x60, 0x3F, 0x4D, 0x5F, 0x77, 0x3C, 0x2D, 0x74, 0x42,
0x5B, 0x43, 0x51, 0x44, 0x7E, 0x39, 0x79, 0x2D, 0x72, 0x7A,
0x2C, 0x23, 0x2D, 0x7E, 0x43, 0x70, 0x3C, 0x57, 0x21, 0x30,
0x39, 0x67, 0x74, 0x27, 0x3E, 0x72, 0x4C, 0x77, 0x34, 0x74,
0x4F, 0x34, 0x6C, 0x55, 0x55, 0x61, 0x2A, 0x37, 0x67, 0x62,
0x6F, 0x43, 0x5C, 0x3A, 0x53, 0x46, 0x42, 0x2A, 0x55, 0x78,
0x25, 0x51, 0x3D, 0x31, 0x4C, 0x2B, 0x44, 0x5E, 0x74, 0x6B,
0x2C, 0x74, 0x3A, 0x70, 0x5D, 0x5D, 0x61, 0x34, 0x75, 0x78,
0x5F, 0x6D, 0x42, 0x4A, 0x65, 0x25, 0x2C, 0x2E, 0x76, 0x4A,
0x27, 0x63, 0x43, 0x48, 0x3A, 0x50, 0x4F, 0x40, 0x3A, 0x3D,
0x31, 0x51, 0x31, 0x41, 0x4F, 0x27, 0x6B, 0x5E, 0x7B, 0x2A,
0x7D, 0x6D, 0x4D, 0x7A, 0x3E, 0x2E, 0x2F, 0x4E, 0x42, 0x70,
0x48, 0x45, 0x70, 0x53, 0x23, 0x70, 0x6A, 0x32, 0x4C, 0x69,
0x57, 0x37, 0x32, 0x70, 0x5A, 0x35, 0x3E, 0x37, 0x41, 0x3C,
0x7D, 0x51, 0x34, 0x5E, 0x2F, 0x4C, 0x5E, 0x41, 0x5E, 0x4E,
0x23, 0x46, 0x3B, 0x6C, 0x48, 0x2A, 0x5D, 0x3D, 0x6F, 0x40,
0x7B, 0x24, 0x5B, 0x49, 0x4A, 0x7E, 0x27, 0x72, 0x4D, 0x40,
0x6F, 0x62, 0x37, 0x6E, 0x2C, 0x64, 0x35, 0x3F, 0x39, 0x7E,
0x5D, 0x6A, 0x42, 0x60, 0x44, 0x31, 0x52, 0x37, 0x7D, 0x30,
0x3C, 0x64, 0x53, 0x2C, 0x6B, 0x5B, 0x46, 0x70, 0x49, 0x2F,
0x76, 0x48, 0x71, 0x33, 0x6C, 0x6B, 0x25, 0x35, 0x4B, 0x3C,
0x74, 0x74, 0x54, 0x7D, 0x79, 0x73, 0x57, 0x63, 0x5C, 0x57,
0x67, 0x34, 0x34, 0x33, 0x6E, 0x4E, 0x44, 0x71, 0x7A, 0x7A,
0x2E, 0x24, 0x4F, 0x61, 0x63, 0x3B, 0x53, 0x36, 0x62, 0x7B,
0x23, 0x34, 0x59, 0x6D, 0x48, 0x41, 0x2B, 0x53, 0x4D, 0x51,
0x33, 0x37, 0x6D, 0x23, 0x6C, 0x4B, 0x68, 0x57, 0x56, 0x73,
0x73, 0x74, 0x6E, 0x31, 0x5A, 0x77, 0x4B, 0x39, 0x7B, 0x66,
0x5C, 0x4F, 0x6D, 0x5C, 0x2E, 0x54, 0x64, 0x76, 0x39, 0x48,
0x77, 0x33, 0x3F, 0x3B, 0x57, 0x26, 0x5D, 0x67, 0x6B, 0x37,
0x52, 0x2B, 0x60, 0x65, 0x6A, 0x4A, 0x56, 0x5F, 0x24, 0x5C,
0x7C, 0x36, 0x69, 0x27, 0x6F, 0x66, 0x29, 0x54, 0x45, 0x73,
0x21, 0x25, 0x63, 0x63, 0x66, 0x5B, 0x7C, 0x65, 0x39, 0x6D,
0x7C, 0x25, 0x46, 0x3D, 0x22, 0x55, 0x77, 0x58, 0x4A, 0x5C,
0x30, 0x69, 0x23, 0x64, 0x28, 0x39, 0x3E, 0x62, 0x3A, 0x53,
0x51, 0x51, 0x5D, 0x5B, 0x53, 0x70, 0x4F, 0x53, 0x5B, 0x66,
0x71, 0x45, 0x3D, 0x3C, 0x2D, 0x57, 0x31, 0x52, 0x55, 0x51,
0x6D, 0x3B, 0x4D, 0x79, 0x69, 0x34, 0x6D, 0x55, 0x30, 0x23,
0x71, 0x31, 0x43, 0x22, 0x76, 0x5A, 0x7E, 0x64, 0x4C, 0x6C,
0x4C, 0x48, 0x59, 0x43, 0x3B, 0x2E, 0x6E, 0x6E, 0x37, 0x3F,
0x5E, 0x79, 0x78, 0x49, 0x25, 0x71, 0x3F, 0x23, 0x37, 0x39,
0x40, 0x33, 0x53, 0x31, 0x66, 0x45, 0x38, 0x32, 0x3E, 0x27,
0x68, 0x3B, 0x48, 0x5A, 0x2E, 0x2E, 0x70, 0x69, 0x5B, 0x77,
0x2D, 0x45, 0x22, 0x42, 0x6A, 0x4A, 0x7B, 0x40, 0x56, 0x61,
0x7C, 0x2E, 0x76, 0x39, 0x59, 0x2C, 0x3D, 0x4F, 0x51, 0x31,
0x5A, 0x32, 0x54, 0x5A, 0x41, 0x2E, 0x78, 0x77, 0x6D, 0x21,
0x2E, 0x5A, 0x2C, 0x4A, 0x3C, 0x2E, 0x4B, 0x46, 0x6A, 0x6B,
0x65, 0x6D, 0x30, 0x43, 0x6B, 0x29, 0x2B, 0x7E, 0x7B, 0x29,
0x44, 0x37, 0x56, 0x44, 0x3C, 0x7D, 0x50, 0x3B, 0x58, 0x79,
0x76, 0x51, 0x36, 0x46, 0x69, 0x7C, 0x3A, 0x43, 0x4B, 0x34,
0x2F, 0x61, 0x5B, 0x2E, 0x2E, 0x69, 0x31, 0x2B, 0x39, 0x41,
0x75, 0x41, 0x3F, 0x6D, 0x4F, 0x53, 0x68, 0x38, 0x41, 0x27,
0x65, 0x57, 0x69, 0x22, 0x29, 0x50, 0x64, 0x59, 0x51, 0x61,
0x66, 0x50, 0x47, 0x57, 0x32, 0x22, 0x3E, 0x68, 0x31, 0x65,
0x35, 0x3B, 0x73, 0x63, 0x62, 0x75, 0x5A, 0x42, 0x38, 0x7C,
0x3F, 0x55, 0x65, 0x4D, 0x44, 0x25, 0x37, 0x2A, 0x25, 0x36,
0x6D, 0x48, 0x79, 0x3D, 0x65, 0x34, 0x5B, 0x61, 0x3F, 0x48,
0x79, 0x41, 0x4B, 0x60, 0x5E, 0x59, 0x7A, 0x4F, 0x24, 0x26,
0x21, 0x57, 0x53, 0x66, 0x29, 0x3D, 0x74, 0x48, 0x70, 0x62,
0x64, 0x62, 0x51, 0x66, 0x50, 0x2D, 0x4A, 0x44, 0x5D, 0x26,
0x5D, 0x55, 0x4B, 0x77, 0x7C, 0x27, 0x65, 0x41, 0x4A, 0x4A,
0x70, 0x48, 0x66, 0x3B, 0x40, 0x51, 0x58, 0x4D, 0x66, 0x36,
0x6B, 0x52, 0x41, 0x6B, 0x56, 0x33, 0x64, 0x24, 0x7B, 0x6A,
0x68, 0x65, 0x2C, 0x66, 0x5B, 0x3C, 0x4C, 0x26, 0x34, 0x5E,
0x67, 0x30, 0x56, 0x78, 0x54, 0x73, 0x7A, 0x79, 0x3E, 0x71,
0x40, 0x33, 0x46, 0x3E, 0x73, 0x46, 0x51, 0x4D, 0x72, 0x28,
0x33, 0x27, 0x3E, 0x24, 0x51, 0x2B, 0x32, 0x7C, 0x4C, 0x25,
0x4F, 0x4A, 0x6C, 0x50, 0x49, 0x3E, 0x3D, 0x7B, 0x72, 0x32,
0x58, 0x61, 0x4E, 0x7B, 0x68, 0x7A, 0x7C, 0x48, 0x22, 0x4D,
0x7E, 0x46, 0x42, 0x58, 0x44, 0x25, 0x5A, 0x31, 0x36, 0x70,
0x25, 0x4F, 0x6D, 0x31, 0x3F, 0x6E, 0x51, 0x6B, 0x6E, 0x3F,
0x75, 0x7C, 0x5A, 0x3E, 0x52, 0x57, 0x30, 0x79, 0x35, 0x2A,
0x70, 0x7D, 0x24, 0x24, 0x3C, 0x77, 0x38, 0x6D, 0x65, 0x6F,
0x36, 0x35, 0x23, 0x49, 0x67, 0x73, 0x69, 0x22, 0x65, 0x55,
0x7D, 0x2B, 0x28, 0x7E, 0x3D, 0x7D, 0x70, 0x38, 0x71, 0x6C,
0x7C, 0x49, 0x3A, 0x33, 0x2D, 0x47, 0x5F, 0x71, 0x44, 0x79,
0x4F, 0x59, 0x60, 0x41, 0x7A, 0x36, 0x2C, 0x58, 0x4C, 0x60,
0x79, 0x53, 0x52, 0x68, 0x38, 0x45, 0x2F, 0x40, 0x44, 0x2A,
0x46, 0x7A, 0x56, 0x3C, 0x7A, 0x55, 0x6F, 0x5E, 0x58, 0x79,
0x71, 0x6B, 0x2C, 0x41, 0x51, 0x42, 0x37, 0x41, 0x43, 0x44,
0x26, 0x60, 0x47, 0x2B, 0x5B, 0x35, 0x30, 0x5D, 0x3B, 0x5C,
0x74, 0x56, 0x40, 0x6F, 0x23, 0x41, 0x58, 0x52, 0x30, 0x21,
0x68, 0x3E, 0x26, 0x21, 0x3D, 0x64, 0x2D, 0x3B, 0x2E, 0x4D,
0x41, 0x4F, 0x7A, 0x37, 0x52, 0x63, 0x59, 0x22, 0x41, 0x66,
0x4E, 0x28, 0x6D, 0x25, 0x2C, 0x50, 0x67, 0x3E, 0x66, 0x70,
0x69, 0x49, 0x2A, 0x2E, 0x35, 0x28, 0x78, 0x5C, 0x66, 0x31,
0x55, 0x6A, 0x36, 0x74, 0x28, 0x5D, 0x46, 0x52, 0x69, 0x5A,
0x39, 0x30, 0x38, 0x43, 0x30, 0x39, 0x66, 0x6D, 0x48, 0x5D,
0x2E, 0x63, 0x77, 0x21, 0x68, 0x67, 0x21, 0x6E, 0x5C, 0x6D,
0x2A, 0x4A, 0x38, 0x5B, 0x66, 0x76, 0x25, 0x59, 0x27, 0x7A,
0x6C, 0x7D, 0x70, 0x5D, 0x59, 0x47, 0x36, 0x62, 0x32, 0x52,
0x22, 0x6D, 0x65, 0x41, 0x38, 0x2C, 0x72, 0x7E, 0x2D, 0x50,
0x7A, 0x52, 0x37, 0x50, 0x52, 0x7A, 0x5B, 0x73, 0x23, 0x44,
0x38, 0x46, 0x50, 0x71, 0x59, 0x63, 0x65, 0x33, 0x60, 0x27,
0x3F, 0x23, 0x2F, 0x25, 0x23, 0x7A, 0x53, 0x5A, 0x29, 0x44,
0x31, 0x39, 0x69, 0x46, 0x35, 0x60, 0x48, 0x76, 0x5C, 0x4E,
0x54, 0x31, 0x5A, 0x74, 0x5E, 0x2B, 0x59, 0x63, 0x7A, 0x6D,
0x43, 0x27, 0x27, 0x7A, 0x5C, 0x26, 0x66, 0x48, 0x65, 0x5B,
0x5C, 0x34, 0x3B, 0x74, 0x5A, 0x5A, 0x71, 0x7D, 0x6A, 0x28,
0x23, 0x26, 0x73, 0x79, 0x63, 0x78, 0x61, 0x70, 0x35, 0x2B,
0x50, 0x64, 0x69, 0x60, 0x48, 0x3D, 0x61, 0x3B, 0x71, 0x7A,
0x27, 0x4B, 0x7C, 0x30, 0x77, 0x72, 0x6F, 0x49, 0x60, 0x7D,
0x24, 0x27, 0x60, 0x29, 0x33, 0x3B, 0x32, 0x53, 0x4D, 0x60,
0x27, 0x5F, 0x58, 0x35, 0x63, 0x7E, 0x38, 0x59, 0x49, 0x79,
0x4C, 0x2F, 0x70, 0x30, 0x24, 0x5F, 0x3B, 0x32, 0x49, 0x37,
0x49, 0x7D, 0x5C, 0x4E, 0x47, 0x31, 0x24, 0x26, 0x34, 0x5F,
0x5E, 0x45, 0x52, 0x58, 0x73, 0x73, 0x22, 0x2D, 0x49, 0x27,
0x59, 0x24, 0x21, 0x48, 0x4F, 0x25, 0x2D, 0x59, 0x76, 0x61,
0x6D, 0x29, 0x21, 0x6F, 0x4D, 0x5C, 0x26, 0x78, 0x26, 0x3C,
0x5F, 0x78, 0x49, 0x51, 0x79, 0x2E, 0x50, 0x59, 0x28, 0x74,
0x2E, 0x76, 0x6F, 0x3B, 0x2B, 0x41, 0x56, 0x70, 0x70, 0x76,
0x40, 0x65, 0x58, 0x64, 0x65, 0x45, 0x7A, 0x40, 0x6A, 0x58,
0x5E, 0x53, 0x3D, 0x38, 0x72, 0x75, 0x5E, 0x46, 0x2A, 0x4B,
0x48, 0x5F, 0x77, 0x6B, 0x24, 0x77, 0x57, 0x6C, 0x77, 0x2F,
0x44, 0x2C, 0x27, 0x64, 0x5D, 0x69, 0x3B, 0x45, 0x47, 0x65,
0x75, 0x4F, 0x5B, 0x69, 0x5E, 0x4E, 0x32, 0x22, 0x31, 0x53,
0x56, 0x56, 0x45, 0x77, 0x46, 0x62, 0x6F, 0x2F, 0x48, 0x6A,
0x6B, 0x38, 0x25, 0x7B, 0x22, 0x62, 0x58, 0x45, 0x3B, 0x32,
0x60, 0x38, 0x2F, 0x2B, 0x48, 0x3A, 0x6A, 0x3E, 0x75, 0x22,
0x38, 0x2D, 0x2F, 0x74, 0x5A, 0x32, 0x6F, 0x2B, 0x35, 0x69,
0x6B, 0x33, 0x6C, 0x67, 0x76, 0x39, 0x54, 0x5D, 0x45, 0x44,
0x4E, 0x37, 0x37, 0x31, 0x27, 0x7D, 0x4F, 0x5F, 0x4D, 0x47,
0x7E, 0x24, 0x73, 0x2B, 0x68, 0x54, 0x7D, 0x73, 0x25, 0x65,
0x3A, 0x5C, 0x3C, 0x7E, 0x62, 0x73, 0x6E, 0x7C, 0x40, 0x39,
0x75, 0x6B, 0x59, 0x42, 0x24, 0x21, 0x70, 0x75, 0x58, 0x56,
0x5B, 0x2F, 0x77, 0x6B, 0x5E, 0x67, 0x6D, 0x46, 0x54, 0x7E,
0x38, 0x39, 0x3F, 0x54, 0x7D, 0x40, 0x70, 0x53, 0x77, 0x33,
0x7C, 0x2B, 0x62, 0x4D, 0x41, 0x7A, 0x4A, 0x37, 0x2B, 0x4A,
0x4C, 0x21, 0x60, 0x42, 0x6D, 0x30, 0x59, 0x2B, 0x3D, 0x30,
0x22, 0x28, 0x6E, 0x72, 0x41, 0x41, 0x33, 0x2F, 0x59, 0x6D,
0x64, 0x57, 0x65, 0x70, 0x49, 0x7D, 0x36, 0x56, 0x72, 0x7C,
0x58, 0x71, 0x4C, 0x77, 0x3A, 0x6C, 0x50, 0x61, 0x22, 0x6A,
0x6A, 0x62, 0x64, 0x6A, 0x65, 0x51, 0x43, 0x3F, 0x64, 0x63,
0x5C, 0x6E, 0x5C, 0x50, 0x4F, 0x2B, 0x27, 0x66, 0x69, 0x47,
0x30, 0x3E, 0x77, 0x78, 0x66, 0x2B, 0x67, 0x3A, 0x46, 0x3D,
0x40, 0x78, 0x28, 0x2E, 0x3D, 0x51, 0x56, 0x44, 0x72, 0x2E,
0x6C, 0x25, 0x67, 0x52, 0x64, 0x23, 0x36, 0x69, 0x4A, 0x2A,
0x25, 0x30, 0x61, 0x3B, 0x4D, 0x2C, 0x49, 0x59, 0x72, 0x5F,
0x54, 0x3D, 0x41, 0x23, 0x3C, 0x27, 0x34, 0x59, 0x56, 0x78,
0x4F, 0x64, 0x69, 0x5B, 0x26, 0x69, 0x2B, 0x7B, 0x41, 0x32,
0x60, 0x74, 0x6B, 0x27, 0x38, 0x35, 0x58, 0x6B, 0x45, 0x3A,
0x75, 0x33, 0x57, 0x46, 0x4F, 0x7D, 0x47, 0x5E, 0x45, 0x3E,
0x70, 0x52, 0x4B, 0x3D, 0x47, 0x3A, 0x53, 0x3D, 0x67, 0x34,
0x49, 0x66, 0x33, 0x6A, 0x6A, 0x60, 0x72, 0x6F, 0x4D, 0x2F,
0x6A, 0x35, 0x5F, 0x24, 0x4F, 0x35, 0x3F, 0x43, 0x77, 0x45,
0x34, 0x3F, 0x43, 0x45, 0x7B, 0x6F, 0x7B, 0x25, 0x21, 0x31,
0x44, 0x5B, 0x48, 0x62, 0x51, 0x72, 0x42, 0x73, 0x74, 0x4A,
0x44, 0x5C, 0x7C, 0x68, 0x37, 0x78, 0x47, 0x5D, 0x2C, 0x43,
0x55, 0x21, 0x62, 0x29, 0x50, 0x2F, 0x7B, 0x71, 0x61, 0x74,
0x6C, 0x4A, 0x76, 0x2A, 0x6A, 0x32, 0x3A, 0x6A, 0x63, 0x64,
0x60, 0x61, 0x2F, 0x67, 0x5C, 0x63, 0x26, 0x4E, 0x53, 0x73,
0x76, 0x73, 0x75, 0x23, 0x7E, 0x21, 0x3F, 0x2E, 0x2C, 0x26,
0x40, 0x56, 0x71, 0x2A, 0x6B, 0x62, 0x7D, 0x3C, 0x5D, 0x28,
0x60, 0x27, 0x3B, 0x43, 0x43, 0x51, 0x71, 0x77, 0x44, 0x57,
0x75, 0x42, 0x21, 0x6A, 0x68, 0x41, 0x4C, 0x62, 0x7B, 0x55,
0x35, 0x6B, 0x35, 0x36, 0x77, 0x58, 0x61, 0x2E, 0x21, 0x6D,
0x26, 0x71, 0x3B, 0x46, 0x21, 0x23, 0x4E, 0x59, 0x3C, 0x40,
0x40, 0x46, 0x68, 0x25, 0x37, 0x63, 0x23, 0x37, 0x64, 0x6F,
0x68, 0x4D, 0x7A, 0x27, 0x51, 0x50, 0x6F, 0x3A, 0x62, 0x7A,
0x63, 0x38, 0x38, 0x6B, 0x3D, 0x48, 0x56, 0x76, 0x4A, 0x5B,
0x68, 0x5B, 0x73, 0x59, 0x62, 0x42, 0x77, 0x78, 0x3A, 0x52,
0x44, 0x78, 0x58, 0x53, 0x30, 0x7C, 0x54, 0x2E, 0x33, 0x46,
0x3C, 0x77, 0x43, 0x62, 0x6A, 0x33, 0x4D, 0x37, 0x33, 0x5F,
0x3D, 0x6E, 0x65, 0x21, 0x4C, 0x61, 0x5C, 0x65, 0x55, 0x64,
0x6D, 0x41, 0x37, 0x35, 0x55, 0x3C, 0x71, 0x71, 0x5C, 0x7D,
0x76, 0x33, 0x4C, 0x75, 0x49, 0x25, 0x70, 0x25, 0x34, 0x27,
0x50, 0x76, 0x65, 0x75, 0x55, 0x66, 0x26, 0x62, 0x7B, 0x2D,
0x69, 0x6C, 0x5E, 0x54, 0x2B, 0x7B, 0x3D, 0x40, 0x3D, 0x2F,
0x2B, 0x62, 0x7D, 0x5A, 0x21, 0x6B, 0x3D, 0x3A, 0x72, 0x7D,
0x5F, 0x6C, 0x7E, 0x57, 0x6D, 0x2C, 0x4B, 0x29, 0x31, 0x53,
0x45, 0x49, 0x40, 0x78, 0x6D, 0x7C, 0x46, 0x43, 0x5A, 0x34,
0x31, 0x67, 0x50, 0x28, 0x72, 0x21, 0x2C, 0x5F, 0x52, 0x31,
0x5D, 0x3E, 0x50, 0x5E, 0x40, 0x4F, 0x2B, 0x26, 0x51, 0x43,
0x5B, 0x5D, 0x74, 0x79, 0x3B, 0x2B, 0x2B, 0x2A, 0x25, 0x5D,
0x5A, 0x7E, 0x70, 0x3F, 0x76, 0x26, 0x29, 0x51, 0x69, 0x53,
0x3A, 0x51, 0x72, 0x49, 0x64, 0x63, 0x3E, 0x6C, 0x66, 0x38,
0x6A, 0x68, 0x62, 0x2C, 0x4D, 0x70, 0x7C, 0x25, 0x5D, 0x33,
0x52, 0x7D, 0x5B, 0x4E, 0x6E, 0x4C, 0x45, 0x4D, 0x49, 0x46,
0x3E, 0x23, 0x72, 0x4D, 0x71, 0x5F, 0x40, 0x78, 0x27, 0x21,
0x4E, 0x3E, 0x5A, 0x59, 0x48, 0x6B, 0x30, 0x4A, 0x65, 0x48,
0x4F, 0x65, 0x6C, 0x6F, 0x31, 0x59, 0x2A, 0x34, 0x7E, 0x29,
0x63, 0x48, 0x24, 0x31, 0x7B, 0x25, 0x2B, 0x28, 0x39, 0x21,
0x45, 0x4E, 0x2F, 0x3B, 0x48, 0x62, 0x5A, 0x25, 0x2B, 0x73,
0x5E, 0x46, 0x74, 0x32, 0x5B, 0x4D, 0x3A, 0x7B, 0x50, 0x4F,
0x47, 0x33, 0x6E, 0x79, 0x49, 0x7C, 0x23, 0x23, 0x33, 0x51,
0x23, 0x65, 0x39, 0x32, 0x2B, 0x58, 0x30, 0x49, 0x39, 0x2A,
0x5D, 0x43, 0x2A, 0x62, 0x67, 0x64, 0x66, 0x36, 0x54, 0x41,
0x72, 0x46, 0x7C, 0x67, 0x2B, 0x31, 0x72, 0x43, 0x49, 0x3A,
0x25, 0x6F, 0x78, 0x3E, 0x57, 0x4A, 0x57, 0x2C, 0x55, 0x22,
0x5E, 0x5B, 0x52, 0x56, 0x7B, 0x7C, 0x64, 0x37, 0x72, 0x46,
0x56, 0x51, 0x48, 0x75, 0x22, 0x73, 0x75, 0x34, 0x62, 0x3D,
0x23, 0x48, 0x58, 0x43, 0x5F, 0x45, 0x58, 0x78, 0x73, 0x59,
0x74, 0x3F, 0x41, 0x45, 0x46, 0x3A, 0x3C, 0x5F, 0x3A, 0x79,
0x3F, 0x5B, 0x35, 0x36, 0x4F, 0x34, 0x3B, 0x43, 0x25, 0x54,
0x5B, 0x74, 0x24, 0x49, 0x26, 0x2C, 0x46, 0x59, 0x70, 0x75,
0x5D, 0x24, 0x61, 0x36, 0x5E, 0x4A, 0x27, 0x70, 0x69, 0x7E,
0x60, 0x25, 0x59, 0x42, 0x4D, 0x71, 0x72, 0x3F, 0x54, 0x3B,
0x41, 0x38, 0x57, 0x26, 0x5D, 0x6F, 0x26, 0x70, 0x2C, 0x45,
0x54, 0x3E, 0x4A, 0x65, 0x3C, 0x6B, 0x67, 0x5B, 0x78, 0x24,
0x58, 0x43, 0x51, 0x4E, 0x50, 0x57, 0x76, 0x39, 0x3E, 0x2B,
0x6C, 0x7D, 0x33, 0x76, 0x49, 0x5B, 0x5F, 0x33, 0x6E, 0x3C,
0x64, 0x33, 0x5E, 0x76, 0x22, 0x4C, 0x38, 0x2C, 0x57, 0x23,
0x50, 0x28, 0x76, 0x70, 0x25, 0x6E, 0x68, 0x4F, 0x25, 0x62,
0x4E, 0x49, 0x27, 0x6F, 0x3C, 0x73, 0x6F, 0x59, 0x39, 0x50,
0x4D, 0x7B, 0x60, 0x22, 0x58, 0x49, 0x61, 0x6F, 0x62, 0x77,
0x50, 0x72, 0x68, 0x60, 0x57, 0x35, 0x64, 0x39, 0x2A, 0x63,
0x27, 0x35, 0x52, 0x2E, 0x24, 0x3E, 0x49, 0x26, 0x76, 0x47,
0x57, 0x28, 0x61, 0x6E, 0x2C, 0x5C, 0x7E, 0x40, 0x50, 0x42,
0x31, 0x75, 0x79, 0x63, 0x34, 0x68, 0x2C, 0x4A, 0x42, 0x54,
0x6B, 0x68, 0x6A, 0x6A, 0x35, 0x4E, 0x5E, 0x37, 0x7C, 0x7A,
0x73, 0x45, 0x50, 0x7D, 0x5C, 0x50, 0x6C, 0x23, 0x43, 0x78,
0x6D, 0x25, 0x7A, 0x42, 0x51, 0x31, 0x3B, 0x6D, 0x46, 0x7A,
0x4E, 0x32, 0x59, 0x72, 0x3B, 0x39, 0x35, 0x5B, 0x61, 0x57,
0x45, 0x37, 0x3E, 0x67, 0x27, 0x45, 0x47, 0x6A, 0x26, 0x7E,
0x41, 0x77, 0x30, 0x69, 0x4D, 0x41, 0x69, 0x2B, 0x26, 0x46,
0x4B, 0x37, 0x3B, 0x36, 0x4F, 0x74, 0x52, 0x70, 0x57, 0x33,
0x44, 0x28, 0x45, 0x2B, 0x51, 0x50, 0x24, 0x42, 0x30, 0x70,
0x71, 0x7A, 0x78, 0x3B, 0x7A, 0x60, 0x2A, 0x31, 0x7D, 0x46,
0x33, 0x65, 0x4C, 0x42, 0x40, 0x60, 0x48, 0x78, 0x2D, 0x31,
0x3E, 0x4B, 0x2A, 0x43, 0x7A, 0x53, 0x31, 0x24, 0x5F, 0x5D,
0x31, 0x63, 0x42, 0x2C, 0x7D, 0x40, 0x47, 0x3C, 0x42, 0x2F,
0x2E, 0x36, 0x5A, 0x6E, 0x6D, 0x3D, 0x2D, 0x51, 0x63, 0x79,
0x58, 0x49, 0x68, 0x6F, 0x3D, 0x4E, 0x71, 0x65, 0x73, 0x57,
0x65, 0x72, 0x3A, 0x33, 0x4A, 0x56, 0x73, 0x43, 0x22, 0x5D,
0x46, 0x34, 0x6F, 0x5A, 0x77, 0x40, 0x37, 0x26, 0x73, 0x74,
0x7C, 0x66, 0x32, 0x24, 0x69, 0x2C, 0x4F, 0x61, 0x4F, 0x47,
0x7A, 0x53, 0x6E, 0x2A, 0x4E, 0x5B, 0x4B, 0x4B, 0x2C, 0x42,
0x5F, 0x23, 0x21, 0x32, 0x4F, 0x2F, 0x7D, 0x24, 0x2A, 0x22,
0x3C, 0x3F, 0x7B, 0x38, 0x43, 0x32, 0x4E, 0x45, 0x5E, 0x6A,
0x26, 0x7E, 0x2D, 0x62, 0x50, 0x74, 0x7E, 0x61, 0x3E, 0x2D,
0x55, 0x26, 0x44, 0x7E, 0x39, 0x6F, 0x5E, 0x21, 0x4A, 0x4B,
0x27, 0x6C, 0x2B, 0x4B, 0x22, 0x66, 0x47, 0x62, 0x77, 0x2C,
0x57, 0x5B, 0x74, 0x7A, 0x61, 0x71, 0x6F, 0x3D, 0x41, 0x33,
0x4C, 0x31, 0x7D, 0x25, 0x67, 0x35, 0x44, 0x5D, 0x6D, 0x3D,
0x3E, 0x34, 0x56, 0x41, 0x52, 0x47, 0x6B, 0x7D, 0x78, 0x75,
0x61, 0x44, 0x51, 0x26, 0x29, 0x64, 0x2E, 0x7B, 0x4A, 0x6E,
0x22, 0x23, 0x64, 0x74, 0x50, 0x78, 0x29, 0x66, 0x34, 0x37,
0x4F, 0x40, 0x72, 0x39, 0x54, 0x6F, 0x76, 0x45, 0x53, 0x4D,
0x6C, 0x48, 0x3A, 0x3E, 0x5F, 0x72, 0x69, 0x50, 0x28, 0x56,
0x2F, 0x79, 0x75, 0x2A, 0x41, 0x30, 0x54, 0x3F, 0x58, 0x5F,
0x3A, 0x71, 0x23, 0x34, 0x72, 0x62, 0x39, 0x73, 0x35, 0x28,
0x4A, 0x3C, 0x42, 0x76, 0x7C, 0x6D, 0x39, 0x6F, 0x46, 0x51,
0x27, 0x2D, 0x67, 0x4D, 0x48, 0x5D, 0x59, 0x34, 0x70, 0x4C,
0x73, 0x42, 0x25, 0x39, 0x3D, 0x23, 0x3B, 0x4D, 0x34, 0x72,
0x40, 0x36, 0x41, 0x6C, 0x71, 0x6B, 0x4A, 0x69, 0x7C, 0x39,
0x6D, 0x3A, 0x7E, 0x3C, 0x3A, 0x62, 0x69, 0x42, 0x24, 0x3F,
0x6B, 0x72, 0x66, 0x4F, 0x39, 0x5B, 0x4A, 0x5E, 0x49, 0x6B,
0x42, 0x78, 0x78, 0x29, 0x68, 0x30, 0x78, 0x6C, 0x49, 0x47,
0x78, 0x66, 0x44, 0x3B, 0x22, 0x39, 0x78, 0x5A, 0x79, 0x65,
0x7E, 0x63, 0x54, 0x44, 0x78, 0x65, 0x51, 0x44, 0x64, 0x4D,
0x6C, 0x53, 0x4C, 0x34, 0x75, 0x60, 0x5C, 0x61, 0x7B, 0x6F,
0x22, 0x6B, 0x2A, 0x41, 0x4B, 0x74, 0x7E, 0x31, 0x76, 0x3C,
0x6D, 0x52, 0x66, 0x55, 0x37, 0x46, 0x56, 0x3D, 0x46, 0x23,
0x33, 0x37, 0x56, 0x30, 0x6D, 0x22, 0x62, 0x27, 0x56, 0x58,
0x33, 0x35, 0x41, 0x36, 0x5B, 0x32, 0x41, 0x78, 0x3F, 0x2D,
0x76, 0x3F, 0x29, 0x42, 0x34, 0x31, 0x7B, 0x41, 0x66, 0x69,
0x38, 0x27, 0x7B, 0x36, 0x44, 0x29, 0x46, 0x53, 0x6D, 0x3B,
0x42, 0x38, 0x2E, 0x57, 0x38, 0x6D, 0x53, 0x4B, 0x7C, 0x4A,
0x4F, 0x50, 0x67, 0x36, 0x6E, 0x6E, 0x61, 0x76, 0x4C, 0x22,
0x4E, 0x69, 0x54, 0x48, 0x5B, 0x7E, 0x54, 0x63, 0x7E, 0x36,
0x46, 0x50, 0x66, 0x25, 0x5C, 0x60, 0x22, 0x36, 0x7B, 0x2E,
0x47, 0x5F, 0x3D, 0x73, 0x31, 0x60, 0x56, 0x43, 0x35, 0x25,
0x3D, 0x61, 0x49, 0x23, 0x27, 0x27, 0x54, 0x2E, 0x66, 0x3F,
0x28, 0x71, 0x2F, 0x5D, 0x3F, 0x59, 0x79, 0x57, 0x75, 0x79,
0x76, 0x77, 0x35, 0x35, 0x30, 0x4E, 0x2C, 0x34, 0x2A, 0x26,
0x6F, 0x68, 0x2C, 0x34, 0x4F, 0x6F, 0x62, 0x64, 0x40, 0x41,
0x62, 0x59, 0x68, 0x5D, 0x33, 0x77, 0x61, 0x5C, 0x7D, 0x2C,
0x4C, 0x33, 0x6B, 0x39, 0x6C, 0x2C, 0x4F, 0x2C, 0x22, 0x3E,
0x7E, 0x74, 0x7E, 0x59, 0x6C, 0x39, 0x3E, 0x55, 0x21, 0x4B,
0x72, 0x50, 0x70, 0x2D, 0x64, 0x21, 0x7A, 0x50, 0x22, 0x38,
0x7D, 0x66, 0x79, 0x60, 0x73, 0x50, 0x72, 0x36, 0x4F, 0x50,
0x2E, 0x26, 0x69, 0x4B, 0x5B, 0x58, 0x2F, 0x67, 0x55, 0x66,
0x30, 0x6F, 0x68, 0x5C, 0x5A, 0x46, 0x4F, 0x47, 0x44, 0x21,
0x62, 0x39, 0x56, 0x2C, 0x37, 0x3E, 0x7A, 0x26, 0x74, 0x56,
0x62, 0x28, 0x36, 0x40, 0x2F, 0x3C, 0x68, 0x60, 0x53, 0x6B,
0x45, 0x7E, 0x31, 0x76, 0x4D, 0x5C, 0x39, 0x3C, 0x66, 0x3E,
0x7A, 0x6D, 0x33, 0x74, 0x3D, 0x44, 0x78, 0x29, 0x2B, 0x57,
0x4C, 0x42, 0x50, 0x33, 0x33, 0x31, 0x3B, 0x4F, 0x55, 0x58,
0x6E, 0x74, 0x79, 0x30, 0x3D, 0x65, 0x59, 0x4A, 0x6D, 0x23,
0x38, 0x32, 0x61, 0x5D, 0x52, 0x7B, 0x74, 0x69, 0x36, 0x62,
0x26, 0x30, 0x3A, 0x7E, 0x36, 0x54, 0x24, 0x64, 0x78, 0x2C,
0x44, 0x28, 0x24, 0x49, 0x38, 0x69, 0x25, 0x4C, 0x32, 0x5E,
0x5C, 0x61, 0x76, 0x36, 0x4A, 0x6E, 0x2B, 0x3D, 0x57, 0x21,
0x58, 0x52, 0x48, 0x5A, 0x66, 0x2C, 0x55, 0x3B, 0x23, 0x3B,
0x5F, 0x6D, 0x25, 0x74, 0x40, 0x48, 0x7A, 0x47, 0x4A, 0x48,
0x79, 0x27, 0x6E, 0x22, 0x4E, 0x6E, 0x78, 0x39, 0x24, 0x5F,
0x3D, 0x5C, 0x55, 0x69, 0x74, 0x2E, 0x7B, 0x54, 0x3B, 0x65,
0x74, 0x48, 0x22, 0x27, 0x60, 0x6A, 0x5E, 0x53, 0x73, 0x7E,
0x4D, 0x24, 0x72, 0x72, 0x3F, 0x28, 0x59, 0x3A, 0x6C, 0x7A,
0x6A, 0x7B, 0x4B, 0x3F, 0x6C, 0x41, 0x5B, 0x49, 0x79, 0x79,
0x4B, 0x3B, 0x4C, 0x4C, 0x6B, 0x6F, 0x6C, 0x24, 0x63, 0x2E,
0x34, 0x6A, 0x4E, 0x60, 0x50, 0x39, 0x4D, 0x27, 0x6B, 0x53,
0x2D, 0x59, 0x5B, 0x22, 0x5E, 0x28, 0x72, 0x6A, 0x62, 0x21,
0x39, 0x61, 0x7E, 0x6F, 0x2D, 0x7C, 0x38, 0x4F, 0x37, 0x43,
0x4A, 0x51, 0x49, 0x45, 0x7A, 0x33, 0x29, 0x2A, 0x5F, 0x72,
0x4E, 0x77, 0x50, 0x3A, 0x73, 0x52, 0x6E, 0x4A, 0x41, 0x37,
0x5B, 0x78, 0x2C, 0x71, 0x50, 0x65, 0x25, 0x60, 0x26, 0x7E,
0x23, 0x3D, 0x6E, 0x5B, 0x21, 0x59, 0x6F, 0x29, 0x53, 0x3B,
0x31, 0x72, 0x62, 0x3A, 0x74, 0x57, 0x2E, 0x76, 0x78, 0x4F,
0x46, 0x24, 0x71, 0x64, 0x2D, 0x50, 0x26, 0x50, 0x3C, 0x47,
0x74, 0x36, 0x50, 0x38, 0x37, 0x31, 0x5A, 0x77, 0x21, 0x62,
0x76, 0x73, 0x69, 0x7D, 0x6C, 0x65, 0x4B, 0x55, 0x39, 0x36,
0x69, 0x2D, 0x69, 0x3D, 0x60, 0x50, 0x65, 0x4E, 0x54, 0x49,
0x69, 0x7C, 0x37, 0x54, 0x2D, 0x46, 0x29, 0x65, 0x50, 0x6F,
0x2B, 0x6B, 0x24, 0x30, 0x25, 0x47, 0x41, 0x43, 0x7E, 0x2A,
0x71, 0x5C, 0x58, 0x25, 0x6B, 0x72, 0x58, 0x6A, 0x23, 0x2E,
0x30, 0x45, 0x74, 0x4F, 0x69, 0x6E, 0x65, 0x2F, 0x6B, 0x76,
0x21, 0x52, 0x57, 0x3D, 0x67, 0x4A, 0x6E, 0x45, 0x4F, 0x6E,
0x7D, 0x6B, 0x3A, 0x66, 0x2E, 0x24, 0x24, 0x72, 0x49, 0x60,
0x7A, 0x6E, 0x64, 0x46, 0x31, 0x72, 0x70, 0x2F, 0x6F, 0x73,
0x4A, 0x41, 0x4D, 0x4E, 0x34, 0x4C, 0x46, 0x5D, 0x50, 0x25,
0x25, 0x31, 0x2D, 0x2D, 0x64, 0x44, 0x25, 0x4A, 0x50, 0x4A,
0x59, 0x6A, 0x34, 0x64, 0x7E, 0x55, 0x63, 0x50, 0x22, 0x5C,
0x4C, 0x36, 0x49, 0x73, 0x22, 0x53, 0x62, 0x7D, 0x58, 0x4D,
0x46, 0x33, 0x73, 0x2A, 0x6C, 0x36, 0x4B, 0x23, 0x6C, 0x3B,
0x45, 0x4E, 0x2C, 0x22, 0x45, 0x45, 0x7D, 0x44, 0x5A, 0x61,
0x29, 0x3A, 0x39, 0x6F, 0x7C, 0x7B, 0x34, 0x25, 0x73, 0x48,
0x70, 0x55, 0x38, 0x49, 0x3F, 0x66, 0x45, 0x4E, 0x79, 0x61,
0x69, 0x52, 0x68, 0x23, 0x4D, 0x44, 0x2B, 0x3A, 0x6D, 0x62,
0x69, 0x7D, 0x5A, 0x79, 0x6F, 0x7A, 0x2A, 0x34, 0x2E, 0x61,
0x25, 0x39, 0x6F, 0x3E, 0x33, 0x61, 0x46, 0x37, 0x38, 0x53,
0x38, 0x63, 0x4E, 0x3B, 0x52, 0x5D, 0x67, 0x7C, 0x24, 0x6A,
0x29, 0x49, 0x2B, 0x41, 0x55, 0x6F, 0x56, 0x7C, 0x5C, 0x45,
0x76, 0x5F, 0x3A, 0x6F, 0x3C, 0x54, 0x21, 0x64, 0x6B, 0x35,
0x41, 0x70, 0x28, 0x60, 0x39, 0x7B, 0x75, 0x53, 0x56, 0x47,
0x2D, 0x61, 0x78, 0x69, 0x30, 0x35, 0x35, 0x50, 0x47, 0x36,
0x4C, 0x76, 0x34, 0x35, 0x4E, 0x49, 0x3D, 0x71, 0x33, 0x32,
0x47, 0x3D, 0x68, 0x53, 0x4F, 0x27, 0x2E, 0x29, 0x43, 0x79,
0x2F, 0x35, 0x4B, 0x24, 0x42, 0x29, 0x21, 0x57, 0x5F, 0x2F,
0x66, 0x73, 0x42, 0x57, 0x74, 0x28, 0x3C, 0x78, 0x6D, 0x40,
0x50, 0x58, 0x25, 0x7B, 0x37, 0x29, 0x3F, 0x5C, 0x70, 0x72,
0x33, 0x4D, 0x54, 0x51, 0x22, 0x61, 0x6E, 0x50, 0x4E, 0x67,
0x6E, 0x7D, 0x2E, 0x42, 0x7D, 0x59, 0x6F, 0x28, 0x40, 0x52,
0x3F, 0x26, 0x64, 0x51, 0x4D, 0x3F, 0x4B, 0x23, 0x4E, 0x63,
0x49, 0x50, 0x74, 0x5A, 0x66, 0x57, 0x53, 0x61, 0x5D, 0x46,
0x25, 0x6C, 0x34, 0x5F, 0x7E, 0x2E, 0x5F, 0x68, 0x4C, 0x31,
0x3A, 0x25, 0x2E, 0x4C, 0x7E, 0x55, 0x77, 0x5A, 0x61, 0x55,
0x5E, 0x2A, 0x45, 0x6C, 0x4E, 0x57, 0x3C, 0x65, 0x4A, 0x37,
0x50, 0x4A, 0x34, 0x5B, 0x64, 0x6E, 0x6E, 0x63, 0x7E, 0x6E,
0x64, 0x5F, 0x6F, 0x4A, 0x5E, 0x7B, 0x78, 0x48, 0x64, 0x6C,
0x46, 0x61, 0x48, 0x5D, 0x22, 0x4C, 0x65, 0x6A, 0x61, 0x26,
0x73, 0x2C, 0x37, 0x42, 0x41, 0x42, 0x75, 0x25, 0x25, 0x7C,
0x76, 0x3A, 0x7A, 0x44, 0x4A, 0x58, 0x3C, 0x5A, 0x42, 0x66,
0x3F, 0x57, 0x58, 0x3D, 0x2C, 0x65, 0x6E, 0x47, 0x57, 0x4D,
0x54, 0x78, 0x41, 0x68, 0x30, 0x58, 0x24, 0x50, 0x4F, 0x3C,
0x5B, 0x3A, 0x4F, 0x29, 0x40, 0x6B, 0x67, 0x5C, 0x62, 0x2D,
0x46, 0x54, 0x25, 0x38, 0x50, 0x5F, 0x70, 0x25, 0x30, 0x54,
0x62, 0x6C, 0x28, 0x4B, 0x23, 0x73, 0x61, 0x50, 0x2F, 0x31,
0x71, 0x65, 0x46, 0x21, 0x48, 0x7C, 0x71, 0x6D, 0x4C, 0x34,
0x3C, 0x3B, 0x3B, 0x49, 0x32, 0x69, 0x23, 0x7B, 0x76, 0x7C,
0x4D, 0x7B, 0x64, 0x34, 0x4A, 0x32, 0x53, 0x2C, 0x48, 0x50,
0x73, 0x2E, 0x2B, 0x2B, 0x6E, 0x35, 0x2C, 0x23, 0x60, 0x47,
0x3E, 0x58, 0x22, 0x5E, 0x22, 0x53, 0x62, 0x67, 0x29, 0x7A,
0x28, 0x44, 0x26, 0x53, 0x64, 0x36, 0x47, 0x6C, 0x21, 0x6E,
0x4B, 0x29, 0x27, 0x30, 0x6A, 0x30, 0x6F, 0x27, 0x2D, 0x28,
0x48, 0x69, 0x24, 0x6B, 0x60, 0x74, 0x54, 0x3B, 0x52, 0x5F,
0x2E, 0x46, 0x4B, 0x6E, 0x6A, 0x37, 0x5C, 0x79, 0x58, 0x7B,
0x3B, 0x36, 0x7A, 0x72, 0x29, 0x24, 0x68, 0x4E, 0x3C, 0x4F,
0x4D, 0x5E, 0x53, 0x3F, 0x6B, 0x79, 0x47, 0x65, 0x79, 0x35,
0x65, 0x48, 0x7B, 0x6C, 0x53, 0x7C, 0x4A, 0x37, 0x70, 0x3F,
0x3F, 0x29, 0x78, 0x26, 0x38, 0x65, 0x46, 0x35, 0x60, 0x70,
0x77, 0x37, 0x5B, 0x52, 0x7E, 0x65, 0x77, 0x79, 0x3A, 0x5E,
0x27, 0x5E, 0x2C, 0x77, 0x78, 0x48, 0x22, 0x5B, 0x21, 0x4E,
0x24, 0x33, 0x6C, 0x6B, 0x6B, 0x7D, 0x3A, 0x54, 0x28, 0x68,
0x27, 0x22, 0x21, 0x66, 0x28, 0x6C, 0x4A, 0x39, 0x5F, 0x7B,
0x28, 0x5E, 0x5F, 0x44, 0x3A, 0x6C, 0x3E, 0x29, 0x4B, 0x50,
0x66, 0x54, 0x26, 0x74, 0x7D, 0x2F, 0x4A, 0x70, 0x29, 0x4A,
0x30, 0x69, 0x38, 0x2F, 0x49, 0x5B, 0x33, 0x71, 0x51, 0x55,
0x48, 0x55, 0x44, 0x60, 0x58, 0x50, 0x3F, 0x43, 0x49, 0x6C,
0x5F, 0x38, 0x67, 0x67, 0x46, 0x28, 0x77, 0x4F, 0x56, 0x6A,
0x42, 0x2B, 0x42, 0x5D, 0x61, 0x76, 0x52, 0x43, 0x25, 0x32,
0x4A, 0x47, 0x39, 0x4A, 0x4C, 0x36, 0x29, 0x60, 0x4C, 0x72,
0x2D, 0x30, 0x7E, 0x68, 0x37, 0x4F, 0x7A, 0x4D, 0x4F, 0x2B,
0x7C, 0x79, 0x4C, 0x68, 0x37, 0x23, 0x3C, 0x27, 0x48, 0x4E,
0x6F, 0x77, 0x6B, 0x4F, 0x3B, 0x62, 0x68, 0x77, 0x50, 0x36,
0x21, 0x3E, 0x21, 0x48, 0x58, 0x71, 0x73, 0x79, 0x53, 0x2E,
0x58, 0x2C, 0x79, 0x42, 0x35, 0x2D, 0x67, 0x7B, 0x63, 0x61,
0x6D, 0x2F, 0x7B, 0x22, 0x63, 0x3C, 0x48, 0x7A, 0x6C, 0x79,
0x53, 0x2C, 0x44, 0x40, 0x4D, 0x41, 0x6D, 0x7C, 0x7E, 0x27,
0x5A, 0x2F, 0x37, 0x7B, 0x72, 0x3B, 0x2B, 0x32, 0x74, 0x25,
0x37, 0x3A, 0x53, 0x56, 0x3E, 0x2C, 0x29, 0x52, 0x5F, 0x57,
0x37, 0x56, 0x2D, 0x4E, 0x5E, 0x62, 0x2F, 0x24, 0x26, 0x58,
0x5F, 0x70, 0x67, 0x59, 0x35, 0x46, 0x5D, 0x2E, 0x7D, 0x70,
0x58, 0x76, 0x71, 0x31, 0x36, 0x3F, 0x25, 0x42, 0x6B, 0x3A,
0x75, 0x78, 0x2C, 0x32, 0x79, 0x6C, 0x50, 0x77, 0x2C, 0x29,
0x55, 0x30, 0x33, 0x49, 0x34, 0x61, 0x4B, 0x75, 0x75, 0x6B,
0x7C, 0x5B, 0x36, 0x5B, 0x37, 0x35, 0x30, 0x40, 0x7A, 0x21,
0x58, 0x69, 0x46, 0x63, 0x44, 0x29, 0x6C, 0x28, 0x3A, 0x2B,
0x77, 0x7D, 0x74, 0x33, 0x38, 0x5C, 0x25, 0x5F, 0x41, 0x30,
0x31, 0x43, 0x54, 0x33, 0x66, 0x4B, 0x6D, 0x59, 0x2B, 0x22,
0x41, 0x22, 0x45, 0x3A, 0x50, 0x45, 0x3B, 0x6C, 0x21, 0x71,
0x3E, 0x7E, 0x59, 0x52, 0x6C, 0x53, 0x78, 0x6F, 0x76, 0x2B,
0x6B, 0x4A, 0x63, 0x3E, 0x2C, 0x5D, 0x53, 0x60, 0x71, 0x28,
0x4D, 0x4B, 0x43, 0x37, 0x7C, 0x76, 0x79, 0x6F, 0x62, 0x40,
0x49, 0x51, 0x2B, 0x5A, 0x42, 0x53, 0x22, 0x50, 0x3E, 0x52,
0x5E, 0x63, 0x37, 0x21, 0x66, 0x57, 0x33, 0x4A, 0x58, 0x2E,
0x55, 0x2A, 0x2C, 0x29, 0x7C, 0x4B, 0x66, 0x7C, 0x57, 0x7E,
0x2F, 0x48, 0x33, 0x3D, 0x79, 0x4F, 0x3E, 0x67, 0x50, 0x3F,
0x73, 0x2C, 0x55, 0x76, 0x7D, 0x50, 0x5D, 0x48, 0x22, 0x63,
0x5A, 0x64, 0x61, 0x74, 0x3F, 0x54, 0x38, 0x6B, 0x6B, 0x3E,
0x51, 0x43, 0x51, 0x36, 0x3F, 0x53, 0x4B, 0x3A, 0x50, 0x3D,
0x32, 0x49, 0x67, 0x45, 0x53, 0x29, 0x75, 0x5C, 0x26, 0x4C,
0x3A, 0x35, 0x57, 0x31, 0x65, 0x4A, 0x55, 0x63, 0x22, 0x27,
0x2E, 0x51, 0x25, 0x3D, 0x6A, 0x56, 0x5C, 0x2A, 0x27, 0x3C,
0x2C, 0x74, 0x5C, 0x42, 0x3C, 0x44, 0x2A, 0x5F, 0x2A, 0x28,
0x38, 0x61, 0x30, 0x49, 0x43, 0x4E, 0x71, 0x5C, 0x76, 0x31,
0x5A, 0x57, 0x68, 0x50, 0x78, 0x30, 0x57, 0x2C, 0x50, 0x63,
0x36, 0x24, 0x54, 0x6F, 0x2B, 0x5F, 0x21, 0x59, 0x3F, 0x5F,
0x24, 0x44, 0x3B, 0x57, 0x28, 0x3D, 0x4F, 0x62, 0x50, 0x5C,
0x24, 0x50, 0x76, 0x74, 0x2D, 0x70, 0x3D, 0x2E, 0x42, 0x4E,
0x3E, 0x3B, 0x63, 0x25, 0x2D, 0x77, 0x68, 0x51, 0x43, 0x54,
0x23, 0x21, 0x45, 0x2A, 0x40, 0x2C, 0x30, 0x7E, 0x54, 0x54,
0x25, 0x60, 0x36, 0x27, 0x75, 0x6B, 0x3E, 0x37, 0x24, 0x6A,
0x5E, 0x69, 0x58, 0x53, 0x57, 0x5A, 0x35, 0x23, 0x67, 0x6E,
0x7E, 0x70, 0x45, 0x27, 0x6F, 0x4C, 0x75, 0x69, 0x51, 0x52,
0x5D, 0x5A, 0x5C, 0x32, 0x73, 0x2D, 0x22, 0x49, 0x6E, 0x26,
0x5C, 0x60, 0x68, 0x27, 0x52, 0x48, 0x45, 0x4C, 0x7C, 0x58,
0x21, 0x7C, 0x47, 0x41, 0x49, 0x27, 0x51, 0x27, 0x4D, 0x7A,
0x73, 0x71, 0x51, 0x58, 0x5C, 0x64, 0x4C, 0x6A, 0x24, 0x2B,
0x49, 0x2A, 0x2E, 0x47, 0x56, 0x72, 0x52, 0x3A, 0x21, 0x21,
0x52, 0x55, 0x22, 0x37, 0x60, 0x72, 0x76, 0x5F, 0x5F, 0x31,
0x2E, 0x53, 0x52, 0x37, 0x26, 0x32, 0x57, 0x32, 0x2C, 0x61,
0x56, 0x33, 0x2E, 0x4A, 0x6D, 0x72, 0x5B, 0x5A, 0x58, 0x22,
0x58, 0x25, 0x26, 0x29, 0x2D, 0x64, 0x6C, 0x6A, 0x60, 0x2B,
0x6C, 0x38, 0x7A, 0x34, 0x5E, 0x38, 0x29, 0x4D, 0x39, 0x38,
0x36, 0x58, 0x7D, 0x51, 0x43, 0x28, 0x35, 0x26, 0x73, 0x52,
0x59, 0x76, 0x3D, 0x4A, 0x62, 0x65, 0x57, 0x46, 0x2A, 0x4F,
0x7A, 0x6B, 0x6A, 0x22, 0x37, 0x4F, 0x2B, 0x44, 0x46, 0x40,
0x2C, 0x65, 0x25, 0x4B, 0x68, 0x49, 0x70, 0x64, 0x51, 0x72,
0x64, 0x4F, 0x5B, 0x5E, 0x65, 0x79, 0x46, 0x7D, 0x55, 0x37,
0x53, 0x63, 0x7A, 0x77, 0x7E, 0x4C, 0x76, 0x2D, 0x73, 0x2C,
0x66, 0x6D, 0x72, 0x4C, 0x7A, 0x29, 0x5A, 0x7B, 0x7D, 0x6E,
0x66, 0x38, 0x30, 0x7B, 0x77, 0x79, 0x72, 0x74, 0x59, 0x5F,
0x48, 0x76, 0x4D, 0x76, 0x76, 0x6C, 0x27, 0x49, 0x64, 0x24,
0x5E, 0x44, 0x36, 0x7C, 0x79, 0x3C, 0x21, 0x42, 0x6D, 0x78,
0x2D, 0x3E, 0x28, 0x32, 0x47, 0x4E, 0x5A, 0x6B, 0x77, 0x3F,
0x51, 0x60, 0x43, 0x4A, 0x60, 0x50, 0x53, 0x45, 0x25, 0x32,
0x71, 0x64, 0x36, 0x3E, 0x26, 0x3F, 0x3E, 0x4C, 0x62, 0x5F,
0x2C, 0x54, 0x37, 0x43, 0x7C, 0x64, 0x3D, 0x29, 0x49, 0x32,
0x37, 0x34, 0x71, 0x62, 0x5A, 0x55, 0x77, 0x78, 0x4C, 0x46,
0x61, 0x77, 0x66, 0x2B, 0x3A, 0x30, 0x47, 0x3A, 0x3A, 0x39,
0x75, 0x54, 0x7E, 0x2B, 0x5B, 0x4E, 0x5C, 0x4B, 0x26, 0x34,
0x7E, 0x7C, 0x4C, 0x22, 0x6F, 0x60, 0x3A, 0x7A, 0x54, 0x50,
0x43, 0x4D, 0x68, 0x31, 0x39, 0x4A, 0x7A, 0x7A, 0x4F, 0x39,
0x4C, 0x5A, 0x21, 0x5B, 0x60, 0x54, 0x60, 0x33, 0x5F, 0x51,
0x23, 0x6E, 0x54, 0x48, 0x2A, 0x50, 0x38, 0x54, 0x41, 0x34,
0x30, 0x66, 0x78, 0x49, 0x2B, 0x30, 0x42, 0x46, 0x6E, 0x71,
0x3B, 0x53, 0x37, 0x43, 0x3B, 0x2A, 0x3D, 0x5D, 0x49, 0x36,
0x71, 0x73, 0x43, 0x27, 0x2D, 0x4C, 0x5C, 0x3B, 0x3E, 0x2B,
0x26, 0x5E, 0x77, 0x52, 0x6A, 0x2C, 0x64, 0x53, 0x2F, 0x6C,
0x63, 0x6E, 0x31, 0x75, 0x45, 0x26, 0x24, 0x25, 0x2D, 0x49,
0x63, 0x5A, 0x76, 0x4E, 0x73, 0x3B, 0x24, 0x59, 0x62, 0x70,
0x49, 0x61, 0x2B, 0x5F, 0x72, 0x61, 0x54, 0x52, 0x43, 0x5F,
0x74, 0x75, 0x23, 0x47, 0x33, 0x7C, 0x31, 0x61, 0x6E, 0x6A,
0x76, 0x69, 0x79, 0x74, 0x2A, 0x33, 0x55, 0x7B, 0x38, 0x5E,
0x75, 0x3D, 0x61, 0x34, 0x71, 0x57, 0x6B, 0x3D, 0x35, 0x66,
0x61, 0x2F, 0x31, 0x55, 0x42, 0x68, 0x71, 0x72, 0x24, 0x4F,
0x35, 0x57, 0x26, 0x63, 0x6E, 0x2F, 0x33, 0x31, 0x2B, 0x4C,
0x4E, 0x5F, 0x41, 0x4A, 0x64, 0x41, 0x5B, 0x67, 0x44, 0x78,
0x3B, 0x4B, 0x66, 0x4D, 0x2F, 0x2A, 0x5A, 0x5D, 0x25, 0x7D,
0x32, 0x6B, 0x55, 0x40, 0x52, 0x58, 0x64, 0x6E, 0x79, 0x60,
0x56, 0x6F, 0x68, 0x49, 0x5E, 0x35, 0x43, 0x42, 0x74, 0x78,
0x2A, 0x2A, 0x22, 0x3B, 0x31, 0x27, 0x62, 0x4A, 0x71, 0x67,
0x68, 0x42, 0x27, 0x26, 0x2C, 0x4A, 0x2F, 0x34, 0x52, 0x3A,
0x4B, 0x2B, 0x6B, 0x47, 0x35, 0x6D, 0x5B, 0x4D, 0x7E, 0x22,
0x25, 0x5C, 0x61, 0x39, 0x5D, 0x55, 0x41, 0x7B, 0x67, 0x3F,
0x7B, 0x26, 0x24, 0x48, 0x3E, 0x26, 0x55, 0x28, 0x5A, 0x5B,
0x2E, 0x41, 0x71, 0x55, 0x69, 0x7E, 0x75, 0x29, 0x56, 0x6C,
0x3A, 0x7D, 0x30, 0x52, 0x49, 0x5D, 0x72, 0x6B, 0x32, 0x27,
0x47, 0x39, 0x7E, 0x3B, 0x32, 0x69, 0x44, 0x7C, 0x5A, 0x52,
0x23, 0x58, 0x7C, 0x6B, 0x74, 0x22, 0x38, 0x7E, 0x7C, 0x52,
0x34, 0x6F, 0x5D, 0x70, 0x60, 0x4E, 0x4E, 0x6C, 0x35, 0x22,
0x7B, 0x3C, 0x48, 0x77, 0x54, 0x76, 0x2D, 0x65, 0x4F, 0x24,
0x58, 0x35, 0x37, 0x34, 0x58, 0x28, 0x3C, 0x5C, 0x51, 0x7B,
0x68, 0x2D, 0x28, 0x54, 0x75, 0x70, 0x4B, 0x34, 0x52, 0x74,
0x2D, 0x41, 0x28, 0x3A, 0x77, 0x4F, 0x45, 0x4D, 0x75, 0x24,
0x21, 0x4F, 0x61, 0x75, 0x57, 0x54, 0x21, 0x6A, 0x75, 0x62,
0x2E, 0x2D, 0x61, 0x41, 0x7E, 0x47, 0x30, 0x49, 0x48, 0x53,
0x6C, 0x30, 0x76, 0x4F, 0x58, 0x31, 0x21, 0x2B, 0x3C, 0x37,
0x60, 0x60, 0x30, 0x24, 0x30, 0x4F, 0x5F, 0x39, 0x3D, 0x37,
0x46, 0x5A, 0x3C, 0x27, 0x4A, 0x7C, 0x63, 0x22, 0x75, 0x40,
0x43, 0x46, 0x21, 0x52, 0x59, 0x61, 0x21, 0x38, 0x45, 0x55,
0x42, 0x2A, 0x63, 0x46, 0x42, 0x2F, 0x54, 0x79, 0x74, 0x72,
0x6E, 0x27, 0x3D, 0x45, 0x27, 0x45, 0x5A, 0x77, 0x76, 0x26,
0x7E, 0x40, 0x26, 0x44, 0x39, 0x64, 0x61, 0x67, 0x4E, 0x7C,
0x61, 0x56, 0x59, 0x21, 0x55, 0x70, 0x74, 0x66, 0x5B, 0x5B,
0x64, 0x30, 0x41, 0x49, 0x6E, 0x24, 0x5F, 0x4A, 0x32, 0x48,
0x60, 0x2B, 0x38, 0x41, 0x4E, 0x4B, 0x63, 0x26, 0x38, 0x2B,
0x24, 0x33, 0x3C, 0x32, 0x57, 0x64, 0x25, 0x5D, 0x59, 0x4A,
0x33, 0x44, 0x2C, 0x41, 0x33, 0x76, 0x22, 0x5A, 0x5A, 0x45,
0x41, 0x3C, 0x4E, 0x60, 0x4D, 0x7D, 0x58, 0x28, 0x66, 0x71,
0x7B, 0x3B, 0x6B, 0x60, 0x28, 0x7A, 0x3C, 0x4A, 0x66, 0x5F,
0x35, 0x78, 0x59, 0x70, 0x7B, 0x22, 0x67, 0x62, 0x2C, 0x50,
0x48, 0x2C, 0x23, 0x22, 0x38, 0x6A, 0x6E, 0x3F, 0x6E, 0x5B,
0x3A, 0x37, 0x79, 0x62, 0x77, 0x2B, 0x31, 0x7B, 0x60, 0x3F,
0x69, 0x26, 0x51, 0x68, 0x4A, 0x21, 0x5D, 0x22, 0x30, 0x73,
0x41, 0x7A, 0x44, 0x2E, 0x26, 0x61, 0x7C, 0x27, 0x38, 0x6D,
0x70, 0x4C, 0x7E, 0x69, 0x6F, 0x2C, 0x4A, 0x76, 0x60, 0x30,
0x40, 0x58, 0x52, 0x46, 0x42, 0x3E, 0x61, 0x76, 0x31, 0x43,
0x4A, 0x61, 0x3B, 0x5A, 0x34, 0x21, 0x77, 0x3A, 0x6A, 0x4F,
0x3E, 0x6F, 0x6F, 0x46, 0x3B, 0x42, 0x46, 0x52, 0x5D, 0x3D,
0x50, 0x7A, 0x71, 0x25, 0x3B, 0x37, 0x61, 0x24, 0x44, 0x37,
0x31, 0x49, 0x5F, 0x2E, 0x2D, 0x23, 0x3F, 0x51, 0x58, 0x2F,
0x36, 0x63, 0x70, 0x40, 0x62, 0x5D, 0x36, 0x3F, 0x63, 0x29,
0x39, 0x55, 0x55, 0x21, 0x63, 0x65, 0x4D, 0x66, 0x58, 0x27,
0x37, 0x32, 0x53, 0x26, 0x3D, 0x24, 0x59, 0x46, 0x58, 0x3B,
0x43, 0x5B, 0x78, 0x38, 0x5A, 0x46, 0x3B, 0x2E, 0x42, 0x72,
0x2F, 0x64, 0x4F, 0x3D, 0x42, 0x37, 0x63, 0x35, 0x50, 0x7C,
0x28, 0x73, 0x25, 0x49, 0x2A, 0x31, 0x33, 0x73, 0x31, 0x37,
0x68, 0x58, 0x7A, 0x4F, 0x71, 0x63, 0x61, 0x2A, 0x70, 0x70,
0x4C, 0x31, 0x21, 0x62, 0x41, 0x48, 0x3A, 0x67, 0x21, 0x27,
0x3A, 0x44, 0x2D, 0x55, 0x42, 0x6B, 0x41, 0x4E, 0x76, 0x27,
0x51, 0x58, 0x52, 0x3A, 0x4C, 0x5B, 0x2E, 0x4D, 0x6C, 0x56,
0x57, 0x50, 0x6A, 0x30, 0x44, 0x29, 0x51, 0x60, 0x3B, 0x4F,
0x46, 0x74, 0x6A, 0x74, 0x6A, 0x23, 0x64, 0x6E, 0x79, 0x21,
0x2A, 0x74, 0x3F, 0x36, 0x59, 0x44, 0x75, 0x6E, 0x24, 0x38,
0x28, 0x2F, 0x5A, 0x69, 0x25, 0x75, 0x5A, 0x6B, 0x7C, 0x75,
0x75, 0x66, 0x49, 0x44, 0x5C, 0x6A, 0x36, 0x62, 0x6C, 0x64,
0x30, 0x26, 0x51, 0x5E, 0x62, 0x57, 0x65, 0x6B, 0x2B, 0x43,
0x27, 0x74, 0x6D, 0x6B, 0x24, 0x50, 0x63, 0x4B, 0x32, 0x23,
0x52, 0x77, 0x2A, 0x27, 0x5B, 0x29, 0x5D, 0x27, 0x58, 0x4F,
0x68, 0x3A, 0x6E, 0x76, 0x53, 0x67, 0x42, 0x21, 0x6E, 0x56,
0x22, 0x31, 0x34, 0x77, 0x22, 0x4F, 0x50, 0x6B, 0x35, 0x46,
0x38, 0x2E, 0x6B, 0x3C, 0x7A, 0x7E, 0x60, 0x25, 0x35, 0x41,
0x4E, 0x70, 0x41, 0x43, 0x3C, 0x48, 0x3A, 0x26, 0x3F, 0x50,
0x67, 0x30, 0x39, 0x46, 0x6F, 0x45, 0x5F, 0x34, 0x23, 0x76,
0x33, 0x35, 0x5C, 0x56, 0x78, 0x6E, 0x27, 0x5D, 0x53, 0x5A,
0x47, 0x5B, 0x41, 0x6B, 0x3E, 0x3F, 0x24, 0x6F, 0x60, 0x70,
0x55, 0x6B, 0x47, 0x2C, 0x3E, 0x60, 0x75, 0x57, 0x45, 0x35,
0x5C, 0x63, 0x65, 0x44, 0x7E, 0x51, 0x39, 0x3C, 0x73, 0x24,
0x33, 0x44, 0x53, 0x48, 0x65, 0x45, 0x70, 0x4D, 0x64, 0x68,
0x53, 0x2B, 0x6C, 0x50, 0x6B, 0x62, 0x2C, 0x71, 0x4C, 0x5F,
0x4C, 0x55, 0x35, 0x6B, 0x36, 0x2F, 0x35, 0x40, 0x7C, 0x4A,
0x74, 0x74, 0x5E, 0x2C, 0x62, 0x3A, 0x2E, 0x78, 0x5B, 0x27,
0x39, 0x7E, 0x7D, 0x33, 0x2E, 0x49, 0x49, 0x38, 0x67, 0x75,
0x52, 0x28, 0x33, 0x5E, 0x3A, 0x32, 0x3F, 0x6E, 0x73, 0x68,
0x44, 0x6A, 0x79, 0x6B, 0x55, 0x4A, 0x78, 0x7E, 0x61, 0x3F,
0x3D, 0x78, 0x67, 0x64, 0x77, 0x70, 0x4F, 0x78, 0x24, 0x73,
0x77, 0x78, 0x34, 0x42, 0x23, 0x2E, 0x79, 0x7D, 0x39, 0x79,
0x2D, 0x44, 0x3B, 0x28, 0x56, 0x57, 0x5A, 0x77, 0x46, 0x6D,
0x24, 0x6B, 0x6D, 0x2B, 0x4F, 0x26, 0x28, 0x75, 0x37, 0x5B,
0x73, 0x6F, 0x61, 0x4A, 0x2A, 0x60, 0x3D, 0x62, 0x60, 0x54,
0x2D, 0x7D, 0x76, 0x35, 0x28, 0x3F, 0x41, 0x55, 0x37, 0x28,
0x7E, 0x69, 0x4A, 0x65, 0x3C, 0x2C, 0x3F, 0x35, 0x23, 0x42,
0x50, 0x5B, 0x50, 0x3E, 0x4D, 0x7E, 0x6C, 0x6A, 0x57, 0x79,
0x6D, 0x2B, 0x56, 0x5E, 0x40, 0x52, 0x3C, 0x30, 0x48, 0x3D,
0x24, 0x47, 0x37, 0x40, 0x39, 0x25, 0x28, 0x50, 0x3F, 0x37,
0x43, 0x7E, 0x6F, 0x4F, 0x32, 0x3D, 0x78, 0x58, 0x72, 0x3E,
0x4F, 0x48, 0x60, 0x67, 0x7E, 0x32, 0x77, 0x5F, 0x3D, 0x42,
0x3F, 0x75, 0x32, 0x27, 0x63, 0x5D, 0x45, 0x54, 0x72, 0x65,
0x79, 0x6D, 0x43, 0x3A, 0x3C, 0x2F, 0x7A, 0x4A, 0x3D, 0x28,
0x54, 0x3B, 0x3D, 0x4D, 0x36, 0x50, 0x4A, 0x40, 0x2E, 0x61,
0x42, 0x36, 0x4C, 0x26, 0x6A, 0x7C, 0x7B, 0x39, 0x51, 0x5C,
0x46, 0x73, 0x5A, 0x54, 0x59, 0x55, 0x4B, 0x58, 0x48, 0x30,
0x39, 0x4A, 0x67, 0x72, 0x3C, 0x4C, 0x40, 0x2D, 0x5E, 0x35,
0x5E, 0x7E, 0x44, 0x53, 0x59, 0x7B, 0x2E, 0x75, 0x2E, 0x5D,
0x74, 0x28, 0x4C, 0x4B, 0x22, 0x3E, 0x4C, 0x53, 0x22, 0x7A,
0x6D, 0x24, 0x2D, 0x2F, 0x5A, 0x6D, 0x57, 0x24, 0x3A, 0x66,
0x30, 0x75, 0x30, 0x7A, 0x34, 0x6A, 0x67, 0x5B, 0x4C, 0x4B,
0x2F, 0x60, 0x57, 0x40, 0x6A, 0x52, 0x38, 0x70, 0x79, 0x58,
0x5A, 0x75, 0x66, 0x76, 0x7B, 0x6F, 0x6F, 0x4C, 0x79, 0x42,
0x78, 0x6D, 0x57, 0x41, 0x4E, 0x5B, 0x5B, 0x34, 0x34, 0x41,
0x68, 0x63, 0x2C, 0x7A, 0x21, 0x3C, 0x50, 0x22, 0x43, 0x74,
0x4F, 0x28, 0x5E, 0x77, 0x38, 0x28, 0x5C, 0x68, 0x4C, 0x4D,
0x7A, 0x2C, 0x58, 0x27, 0x43, 0x50, 0x56, 0x4C, 0x61, 0x6B,
0x77, 0x5A, 0x57, 0x76, 0x46, 0x74, 0x24, 0x6B, 0x40, 0x3B,
0x47, 0x4D, 0x4A, 0x73, 0x7D, 0x2E, 0x69, 0x4B, 0x55, 0x67,
0x5F, 0x37, 0x2C, 0x6E, 0x35, 0x46, 0x37, 0x22, 0x4F, 0x2F,
0x57, 0x70, 0x4E, 0x6F, 0x3E, 0x4C, 0x34, 0x44, 0x54, 0x69,
0x2F, 0x40, 0x36, 0x3C, 0x4C, 0x48, 0x55, 0x7B, 0x57, 0x3D,
0x6E, 0x4C, 0x3B, 0x52, 0x52, 0x4C, 0x38, 0x5B, 0x5F, 0x4E,
0x28, 0x31, 0x72, 0x47, 0x2C, 0x43, 0x70, 0x30, 0x37, 0x4A,
0x23, 0x43, 0x74, 0x3F, 0x42, 0x7D, 0x4F, 0x57, 0x53, 0x42,
0x42, 0x3A, 0x44, 0x4D, 0x69, 0x2C, 0x56, 0x28, 0x61, 0x4F,
0x71, 0x4C, 0x32, 0x33, 0x47, 0x45, 0x59, 0x42, 0x37, 0x60,
0x36, 0x2D, 0x3A, 0x50, 0x63, 0x29, 0x2C, 0x7E, 0x7D, 0x5C,
0x34, 0x6E, 0x57, 0x55, 0x5D, 0x77, 0x3F, 0x35, 0x30, 0x6B,
0x3A, 0x6F, 0x49, 0x7D, 0x56, 0x25, 0x37, 0x2E, 0x51, 0x6F,
0x21, 0x2D, 0x64, 0x5F, 0x36, 0x72, 0x67, 0x5A, 0x79, 0x3B,
0x53, 0x4B, 0x32, 0x3C, 0x22, 0x4B, 0x54, 0x6E, 0x56, 0x52,
0x40, 0x23, 0x21, 0x40, 0x68, 0x2F, 0x3F, 0x6F, 0x24, 0x4D,
0x58, 0x29, 0x64, 0x7E, 0x66, 0x23, 0x42, 0x53, 0x4C, 0x6A,
0x38, 0x7A, 0x5D, 0x50, 0x71, 0x30, 0x65, 0x79, 0x23, 0x3A,
0x28, 0x45, 0x2C, 0x56, 0x2C, 0x67, 0x64, 0x49, 0x6C, 0x3C,
0x5F, 0x73, 0x4B, 0x78, 0x70, 0x3D, 0x6B, 0x57, 0x38, 0x39,
0x71, 0x29, 0x78, 0x29, 0x7A, 0x37, 0x6F, 0x2E, 0x53, 0x6D,
0x6A, 0x27, 0x48, 0x72, 0x52, 0x65, 0x73, 0x6C, 0x6D, 0x32,
0x2B, 0x2A, 0x74, 0x4B, 0x2E, 0x78, 0x25, 0x72, 0x36, 0x71,
0x6E, 0x55, 0x7A, 0x51, 0x75, 0x67, 0x3C, 0x39, 0x2C, 0x71,
0x4C, 0x47, 0x50, 0x33, 0x7E, 0x65, 0x75, 0x67, 0x32, 0x7A,
0x26, 0x48, 0x6C, 0x65, 0x47, 0x36, 0x2C, 0x78, 0x31, 0x78,
0x4F, 0x6E, 0x2F, 0x3B, 0x30, 0x22, 0x6C, 0x40, 0x44, 0x7E,
0x7B, 0x34, 0x77, 0x51, 0x39, 0x5C, 0x4D, 0x3A, 0x79, 0x21,
0x69, 0x6C, 0x6E, 0x46, 0x4F, 0x41, 0x6A, 0x4C, 0x4A, 0x5B,
0x76, 0x23, 0x70, 0x4D, 0x72, 0x79, 0x30, 0x41, 0x6C, 0x76,
0x48, 0x4E, 0x55, 0x45, 0x31, 0x2E, 0x46, 0x49, 0x33, 0x48,
0x32, 0x6F, 0x26, 0x6C, 0x58, 0x71, 0x63, 0x46, 0x26, 0x2B,
0x48, 0x51, 0x5A, 0x4E, 0x65, 0x6C, 0x67, 0x3A, 0x49, 0x55,
0x60, 0x22, 0x59, 0x49, 0x32, 0x4E, 0x42, 0x67, 0x28, 0x48,
0x4A, 0x49, 0x50, 0x60, 0x42, 0x4B, 0x26, 0x5F, 0x47, 0x70,
0x2F, 0x4F, 0x59, 0x25, 0x52, 0x2C, 0x7A, 0x38, 0x77, 0x2B,
0x2B, 0x51, 0x4F, 0x6E, 0x5E, 0x4F, 0x67, 0x3F, 0x59, 0x62,
0x6B, 0x7E, 0x7A, 0x2A, 0x79, 0x34, 0x72, 0x34, 0x30, 0x74,
0x65, 0x69, 0x62, 0x6F, 0x41, 0x7B, 0x30, 0x65, 0x7D, 0x60,
0x31, 0x36, 0x3C, 0x2B, 0x62, 0x41, 0x6F, 0x72, 0x48, 0x22,
0x47, 0x23, 0x7A, 0x6E, 0x72, 0x36, 0x65, 0x69, 0x4D, 0x3B,
0x36, 0x62, 0x6D, 0x7E, 0x5D, 0x68, 0x54, 0x30, 0x57, 0x4D,
0x3E, 0x78, 0x6B, 0x24, 0x75, 0x30, 0x79, 0x6C, 0x53, 0x4E,
0x34, 0x78, 0x41, 0x56, 0x46, 0x45, 0x35, 0x3B, 0x4F, 0x54,
0x73, 0x23, 0x76, 0x66, 0x4E, 0x2D, 0x28, 0x73, 0x6D, 0x64,
0x66, 0x67, 0x5B, 0x6C, 0x3F, 0x5C, 0x47, 0x51, 0x2B, 0x36,
0x61, 0x3F, 0x2D, 0x52, 0x7A, 0x50, 0x79, 0x58, 0x36, 0x31,
0x36, 0x39, 0x40, 0x2E, 0x51, 0x44, 0x40, 0x5D, 0x2E, 0x4B,
0x23, 0x70, 0x42, 0x47, 0x2D, 0x3A, 0x2E, 0x2F, 0x78, 0x24,
0x52, 0x2B, 0x7B, 0x6F, 0x2D, 0x2F, 0x54, 0x36, 0x44, 0x4C,
0x2D, 0x34, 0x7E, 0x52, 0x2E, 0x71, 0x7E, 0x4E, 0x54, 0x37,
0x39, 0x31, 0x38, 0x5C, 0x33, 0x78, 0x63, 0x6C, 0x3C, 0x4F,
0x21, 0x5C, 0x63, 0x4F, 0x25, 0x7A, 0x2C, 0x67, 0x45, 0x5F,
0x74, 0x7B, 0x44, 0x60, 0x45, 0x55, 0x7C, 0x2C, 0x6F, 0x78,
0x77, 0x3C, 0x5C, 0x3E, 0x5A, 0x6F, 0x28, 0x64, 0x2F, 0x54,
0x42, 0x5E, 0x36, 0x52, 0x34, 0x25, 0x21, 0x3F, 0x2D, 0x73,
0x71, 0x7B, 0x7A, 0x51, 0x5B, 0x21, 0x48, 0x24, 0x4D, 0x5A,
0x43, 0x78, 0x38, 0x4F, 0x2D, 0x37, 0x77, 0x28, 0x40, 0x65,
0x2F, 0x44, 0x76, 0x32, 0x7B, 0x5A, 0x57, 0x46, 0x28, 0x67,
0x51, 0x3A, 0x4E, 0x6E, 0x27, 0x71, 0x64, 0x30, 0x6B, 0x4C,
0x7A, 0x61, 0x6A, 0x72, 0x2F, 0x22, 0x6E, 0x45, 0x6B, 0x5A,
0x2D, 0x3E, 0x26, 0x25, 0x2E, 0x6D, 0x63, 0x57, 0x67, 0x5B,
0x2C, 0x4F, 0x53, 0x5A, 0x49, 0x55, 0x23, 0x28, 0x54, 0x43,
0x2F, 0x47, 0x42, 0x2B, 0x77, 0x48, 0x40, 0x73, 0x60, 0x25,
0x65, 0x4E, 0x4D, 0x30, 0x28, 0x67, 0x2F, 0x7E, 0x50, 0x68,
0x3F, 0x5B, 0x37, 0x4F, 0x78, 0x70, 0x54, 0x36, 0x5D, 0x77,
0x3E, 0x37, 0x73, 0x53, 0x5C, 0x4F, 0x61, 0x30, 0x66, 0x33,
0x63, 0x3A, 0x74, 0x3B, 0x51, 0x5D, 0x74, 0x4B, 0x52, 0x48,
0x7D, 0x69, 0x35, 0x28, 0x47, 0x4B, 0x2D, 0x3A, 0x4F, 0x62,
0x2C, 0x72, 0x54, 0x46, 0x33, 0x24, 0x60, 0x6F, 0x62, 0x7C,
0x7B, 0x42, 0x29, 0x62, 0x31, 0x63, 0x58, 0x34, 0x26, 0x2C,
0x53, 0x2F, 0x3C, 0x36, 0x6B, 0x48, 0x42, 0x2C, 0x5D, 0x57,
0x5C, 0x61, 0x66, 0x74, 0x2F, 0x51, 0x27, 0x42, 0x73, 0x41,
0x4B, 0x72, 0x76, 0x27, 0x76, 0x31, 0x5E, 0x57, 0x60, 0x29,
0x30, 0x58, 0x43, 0x43, 0x57, 0x6F, 0x36, 0x5D, 0x31, 0x52,
0x47, 0x2D, 0x77, 0x79, 0x21, 0x43, 0x3C, 0x75, 0x61, 0x7D,
0x70, 0x7D, 0x7B, 0x6B, 0x3C, 0x47, 0x30, 0x59, 0x3F, 0x43,
0x4E, 0x2D, 0x6F, 0x38, 0x43, 0x50, 0x4F, 0x51, 0x56, 0x75,
0x3F, 0x72, 0x55, 0x2C, 0x56, 0x5C, 0x3B, 0x7B, 0x37, 0x5E,
0x34, 0x64, 0x7D, 0x70, 0x3F, 0x26, 0x52, 0x40, 0x34, 0x69,
0x26, 0x2E, 0x4D, 0x65, 0x6B, 0x7B, 0x76, 0x70, 0x56, 0x38,
0x35, 0x5E, 0x71, 0x37, 0x6C, 0x6D, 0x48, 0x64, 0x6A, 0x25,
0x5F, 0x5E, 0x61, 0x34, 0x60, 0x50, 0x3E, 0x38, 0x27, 0x43,
0x79, 0x45, 0x4D, 0x4B, 0x51, 0x32, 0x23, 0x4F, 0x72, 0x54,
0x67, 0x5E, 0x5F, 0x38, 0x5F, 0x51, 0x5E, 0x32, 0x71, 0x67,
0x75, 0x31, 0x64, 0x6F, 0x32, 0x30, 0x5F, 0x54, 0x7B, 0x74,
0x40, 0x75, 0x65, 0x73, 0x30, 0x32, 0x33, 0x47, 0x3C, 0x64,
0x3B, 0x48, 0x35, 0x66, 0x6C, 0x79, 0x3D, 0x55, 0x77, 0x78,
0x78, 0x3B, 0x66, 0x6A, 0x25, 0x64, 0x32, 0x2D, 0x67, 0x6F,
0x7D, 0x79, 0x65, 0x4F, 0x2A, 0x3A, 0x2D, 0x75, 0x4B, 0x6B,
0x3C, 0x39, 0x3F, 0x33, 0x42, 0x28, 0x77, 0x5E, 0x7D, 0x78,
0x60, 0x62, 0x4F, 0x6A, 0x32, 0x35, 0x4F, 0x36, 0x61, 0x3C,
0x6E, 0x44, 0x23, 0x78, 0x46, 0x78, 0x3A, 0x62, 0x3A, 0x49,
0x26, 0x23, 0x28, 0x32, 0x42, 0x3C, 0x30, 0x31, 0x49, 0x64,
0x3E, 0x41, 0x27, 0x58, 0x39, 0x58, 0x4A, 0x70, 0x5D, 0x72,
0x64, 0x7C, 0x34, 0x3F, 0x7D, 0x75, 0x2A, 0x34, 0x22, 0x3E,
0x3D, 0x7C, 0x74, 0x41, 0x59, 0x69, 0x4F, 0x5B, 0x6A, 0x40,
0x59, 0x29, 0x6B, 0x6C, 0x2E, 0x47, 0x44, 0x73, 0x6C, 0x26,
0x3A, 0x37, 0x5F, 0x43, 0x57, 0x4C, 0x3F, 0x27, 0x36, 0x47,
0x74, 0x72, 0x77, 0x63, 0x62, 0x49, 0x5A, 0x31, 0x6E, 0x53,
0x2A, 0x23, 0x42, 0x62, 0x3F, 0x27, 0x64, 0x28, 0x45, 0x23,
0x47, 0x23, 0x7E, 0x7D, 0x57, 0x38, 0x66, 0x46, 0x5B, 0x46,
0x71, 0x50, 0x71, 0x67, 0x5C, 0x42, 0x4F, 0x37, 0x21, 0x50,
0x35, 0x28, 0x67, 0x71, 0x2B, 0x21, 0x3C, 0x42, 0x27, 0x70,
0x67, 0x24, 0x39, 0x2F, 0x25, 0x71, 0x51, 0x69, 0x2A, 0x6D,
0x5C, 0x6F, 0x3E, 0x79, 0x77, 0x23, 0x43, 0x60, 0x48, 0x35,
0x29, 0x5F, 0x6E, 0x5A, 0x22, 0x59, 0x5F, 0x60, 0x56, 0x61,
0x3A, 0x51, 0x61, 0x78, 0x3E, 0x3B, 0x3D, 0x6D, 0x6E, 0x70,
0x70, 0x36, 0x33, 0x35, 0x2D, 0x52, 0x46, 0x59, 0x46, 0x4A,
0x29, 0x40, 0x52, 0x54, 0x39, 0x79, 0x23, 0x2C, 0x44, 0x23,
0x65, 0x45, 0x3F, 0x77, 0x3B, 0x2E, 0x38, 0x61, 0x2E, 0x7C,
0x71, 0x4F, 0x27, 0x26, 0x4A, 0x76, 0x72, 0x41, 0x75, 0x7E,
0x28, 0x3A, 0x31, 0x55, 0x58, 0x36, 0x4E, 0x49, 0x7D, 0x31,
0x57, 0x3E, 0x40, 0x42, 0x55, 0x35, 0x30, 0x31, 0x5A, 0x60,
0x73, 0x71, 0x50, 0x36, 0x3D, 0x3D, 0x62, 0x76, 0x68, 0x78,
0x63, 0x35, 0x2C, 0x25, 0x26, 0x3C, 0x57, 0x65, 0x36, 0x41,
0x42, 0x3D, 0x53, 0x7C, 0x50, 0x2F, 0x60, 0x7A, 0x3F, 0x33,
0x41, 0x21, 0x54, 0x57, 0x52, 0x7B, 0x21, 0x72, 0x78, 0x6C,
0x5B, 0x70, 0x32, 0x67, 0x76, 0x3A, 0x3A, 0x50, 0x4E, 0x4D,
0x5C, 0x34, 0x5F, 0x48, 0x23, 0x40, 0x6C, 0x5B, 0x4A, 0x21,
0x73, 0x38, 0x51, 0x70, 0x64, 0x37, 0x54, 0x61, 0x29, 0x4F,
0x70, 0x5F, 0x3E, 0x27, 0x2C, 0x30, 0x7A, 0x49, 0x50, 0x64,
0x6D, 0x68, 0x46, 0x4F, 0x44, 0x4E, 0x60, 0x6E, 0x2B, 0x43,
0x2F, 0x53, 0x55, 0x50, 0x40, 0x3B, 0x65, 0x3B, 0x58, 0x2C,
0x58, 0x7B, 0x7D, 0x34, 0x6B, 0x37, 0x4A, 0x4A, 0x51, 0x24,
0x31, 0x46, 0x6F, 0x77, 0x5F, 0x45, 0x78, 0x6B, 0x22, 0x48,
0x50, 0x41, 0x3E, 0x32, 0x3A, 0x47, 0x2F, 0x6B, 0x43, 0x6C,
0x51, 0x58, 0x68, 0x7A, 0x38, 0x3A, 0x53, 0x2B, 0x2D, 0x4A,
0x67, 0x68, 0x6D, 0x47, 0x78, 0x5D, 0x56, 0x71, 0x6C, 0x73,
0x6F, 0x55, 0x3E, 0x78, 0x23, 0x5E, 0x23, 0x7E, 0x74, 0x2C,
0x4E, 0x73, 0x37, 0x70, 0x53, 0x3E, 0x3A, 0x3C, 0x63, 0x6F,
0x6D, 0x29, 0x53, 0x7E, 0x29, 0x53, 0x7B, 0x32, 0x5C, 0x70,
0x37, 0x60, 0x34, 0x52, 0x23, 0x7D, 0x78, 0x4B, 0x2D, 0x5C,
0x31, 0x45, 0x73, 0x57, 0x50, 0x3A, 0x29, 0x47, 0x65, 0x56,
0x78, 0x32, 0x6B, 0x2D, 0x4C, 0x2D, 0x75, 0x3E, 0x62, 0x79,
0x51, 0x7D, 0x5F, 0x69, 0x47, 0x21, 0x68, 0x48, 0x4E, 0x36,
0x6F, 0x62, 0x7E, 0x42, 0x44, 0x2F, 0x7C, 0x22, 0x46, 0x71,
0x4F, 0x3A, 0x28, 0x4A, 0x40, 0x7C, 0x55, 0x76, 0x5A, 0x55,
0x2D, 0x6F, 0x68, 0x77, 0x43, 0x55, 0x77, 0x41, 0x34, 0x66,
0x33, 0x2F, 0x66, 0x61, 0x4E, 0x2D, 0x7A, 0x4E, 0x3E, 0x6C,
0x64, 0x56, 0x22, 0x2C, 0x6E, 0x52, 0x5C, 0x25, 0x25, 0x3B,
0x44, 0x77, 0x44, 0x70, 0x44, 0x3B, 0x58, 0x24, 0x7E, 0x25,
0x57, 0x73, 0x2F, 0x68, 0x60, 0x70, 0x73, 0x25, 0x5D, 0x55,
0x2C, 0x41, 0x74, 0x4A, 0x26, 0x4B, 0x2E, 0x77, 0x65, 0x2E,
0x54, 0x3A, 0x4E, 0x4D, 0x57, 0x7D, 0x37, 0x69, 0x45, 0x30,
0x30, 0x2A, 0x43, 0x32, 0x7B, 0x5F, 0x66, 0x46, 0x6C, 0x7A,
0x30, 0x67, 0x72, 0x71, 0x4B, 0x25, 0x32, 0x3A, 0x46, 0x30,
0x38, 0x41, 0x52, 0x5A, 0x2E, 0x79, 0x2E, 0x78, 0x58, 0x37,
0x53, 0x78, 0x28, 0x29, 0x7E, 0x5A, 0x52, 0x40, 0x27, 0x50,
0x53, 0x4B, 0x6C, 0x6B, 0x52, 0x5C, 0x36, 0x26, 0x56, 0x7D,
0x7C, 0x2E, 0x43, 0x5F, 0x73, 0x67, 0x37, 0x37, 0x67, 0x53,
0x54, 0x74, 0x33, 0x32, 0x79, 0x59, 0x47, 0x7A, 0x32, 0x4D,
0x55, 0x6E, 0x2C, 0x77, 0x74, 0x3A, 0x32, 0x4A, 0x5F, 0x30,
0x4C, 0x3F, 0x48, 0x3F, 0x3B, 0x21, 0x74, 0x35, 0x6E, 0x4B,
0x3C, 0x55, 0x74, 0x60, 0x4B, 0x5F, 0x5B, 0x66, 0x48, 0x73,
0x4F, 0x55, 0x2C, 0x5B, 0x67, 0x2E, 0x77, 0x2D, 0x5E, 0x66,
0x73, 0x36, 0x34, 0x2B, 0x32, 0x3E, 0x62, 0x65, 0x27, 0x27,
0x7E, 0x48, 0x54, 0x2C, 0x42, 0x32, 0x42, 0x3A, 0x40, 0x5F,
0x48, 0x40, 0x2F, 0x28, 0x41, 0x6E, 0x6F, 0x4B, 0x48, 0x37,
0x31, 0x5D, 0x79, 0x77, 0x37, 0x26, 0x3A, 0x74, 0x24, 0x56,
0x24, 0x5A, 0x63, 0x59, 0x3E, 0x64, 0x47, 0x42, 0x7C, 0x67,
0x3C, 0x37, 0x4C, 0x58, 0x43, 0x40, 0x3E, 0x39, 0x47, 0x5F,
0x3F, 0x52, 0x2B, 0x59, 0x77, 0x3D, 0x50, 0x70, 0x40, 0x57,
0x28, 0x5C, 0x3E, 0x6F, 0x2C, 0x26, 0x5A, 0x65, 0x71, 0x65,
0x76, 0x5E, 0x32, 0x33, 0x7E, 0x67, 0x45, 0x6B, 0x32, 0x42,
0x3A, 0x32, 0x71, 0x4A, 0x3E, 0x32, 0x7B, 0x76, 0x7A, 0x43,
0x5F, 0x3F, 0x49, 0x42, 0x28, 0x62, 0x29, 0x43, 0x30, 0x61,
0x2B, 0x4A, 0x28, 0x3E, 0x68, 0x47, 0x4C, 0x2E, 0x7D, 0x70,
0x23, 0x57, 0x4D, 0x31, 0x6F, 0x55, 0x51, 0x5C, 0x44, 0x61,
0x38, 0x76, 0x2F, 0x3A, 0x62, 0x36, 0x60, 0x6E, 0x21, 0x77,
0x52, 0x2C, 0x66, 0x71, 0x71, 0x33, 0x26, 0x6F, 0x25, 0x59,
0x58, 0x57, 0x75, 0x7A, 0x2A, 0x65, 0x3F, 0x78, 0x3C, 0x41,
0x67, 0x74, 0x33, 0x71, 0x66, 0x27, 0x2C, 0x5E, 0x49, 0x24,
0x6C, 0x38, 0x51, 0x25, 0x52, 0x40, 0x2A, 0x75, 0x7E, 0x5A,
0x3D, 0x6C, 0x58, 0x4F, 0x6A, 0x6C, 0x78, 0x22, 0x47, 0x60,
0x7C, 0x6B, 0x60, 0x50, 0x57, 0x41, 0x78, 0x67, 0x2C, 0x32,
0x6E, 0x74, 0x3C, 0x5C, 0x4B, 0x41, 0x2D, 0x4A, 0x6D, 0x69,
0x6E, 0x31, 0x48, 0x48, 0x54, 0x46, 0x34, 0x67, 0x22, 0x44,
0x69, 0x7D, 0x4E, 0x32, 0x67, 0x75, 0x74, 0x61, 0x6E, 0x53,
0x51, 0x5B, 0x6F, 0x7B, 0x25, 0x4B, 0x3A, 0x65, 0x6E, 0x41,
0x4B, 0x47, 0x25, 0x23, 0x70, 0x3C, 0x31, 0x51, 0x62, 0x2A,
0x56, 0x7E, 0x73, 0x76, 0x66, 0x63, 0x48, 0x3C, 0x6C, 0x29,
0x2B, 0x74, 0x35, 0x77, 0x36, 0x59, 0x5B, 0x5D, 0x79, 0x61,
0x52, 0x40, 0x77, 0x52, 0x6A, 0x36, 0x69, 0x33, 0x56, 0x3C,
0x49, 0x44, 0x54, 0x36, 0x78, 0x34, 0x59, 0x62, 0x60, 0x73,
0x22, 0x2D, 0x50, 0x7E, 0x36, 0x25, 0x68, 0x4F, 0x50, 0x73,
0x74, 0x2E, 0x57, 0x48, 0x5C, 0x35, 0x4E, 0x5D, 0x40, 0x37,
0x5C, 0x5A, 0x48, 0x35, 0x57, 0x36, 0x43, 0x62, 0x6F, 0x7B,
0x21, 0x6E, 0x5A, 0x7E, 0x6B, 0x56, 0x5E, 0x34, 0x34, 0x6A,
0x2A, 0x65, 0x4D, 0x5C, 0x69, 0x45, 0x2B, 0x7A, 0x36, 0x36,
0x69, 0x7E, 0x25, 0x36, 0x6E, 0x2C, 0x21, 0x42, 0x29, 0x41,
0x22, 0x35, 0x77, 0x2C, 0x78, 0x4E, 0x4C, 0x37, 0x4F, 0x4E,
0x5B, 0x28, 0x50, 0x48, 0x6D, 0x60, 0x67, 0x41, 0x44, 0x50,
0x73, 0x58, 0x5D, 0x7B, 0x34, 0x48, 0x4F, 0x33, 0x50, 0x4B,
0x25, 0x4B, 0x73, 0x51, 0x4B, 0x5E, 0x68, 0x71, 0x2D, 0x72,
0x31, 0x51, 0x3E, 0x73, 0x4C, 0x63, 0x23, 0x64, 0x5B, 0x2A,
0x6E, 0x2C, 0x79, 0x3F, 0x4D, 0x74, 0x7C, 0x4A, 0x34, 0x36,
0x65, 0x6F, 0x31, 0x62, 0x32, 0x21, 0x60, 0x77, 0x54, 0x49,
0x6D, 0x77, 0x78, 0x61, 0x25, 0x7E, 0x5E, 0x31, 0x49, 0x61,
0x2F, 0x22, 0x4D, 0x23, 0x48, 0x32, 0x77, 0x3A, 0x40, 0x42,
0x6A, 0x5E, 0x6E, 0x42, 0x35, 0x6E, 0x38, 0x41, 0x32, 0x30,
0x41, 0x51, 0x25, 0x5E, 0x67, 0x6A, 0x26, 0x4A, 0x61, 0x69,
0x41, 0x45, 0x72, 0x42, 0x22, 0x7C, 0x33, 0x39, 0x39, 0x5F,
0x48, 0x75, 0x60, 0x23, 0x21, 0x3C, 0x6C, 0x51, 0x76, 0x76,
0x53, 0x3F, 0x44, 0x31, 0x77, 0x3D, 0x3A, 0x63, 0x77, 0x68,
0x41, 0x31, 0x6F, 0x34, 0x3B, 0x76, 0x38, 0x6E, 0x67, 0x24,
0x39, 0x7D, 0x50, 0x41, 0x75, 0x7E, 0x41, 0x48, 0x36, 0x60,
0x6A, 0x6C, 0x66, 0x6F, 0x52, 0x47, 0x70, 0x77, 0x38, 0x57,
0x40, 0x33, 0x53, 0x3B, 0x47, 0x4B, 0x70, 0x5A, 0x6E, 0x26,
0x7A, 0x6E, 0x47, 0x62, 0x26, 0x41, 0x5F, 0x57, 0x6A, 0x29,
0x3C, 0x35, 0x6A, 0x64, 0x42, 0x34, 0x43, 0x2E, 0x64, 0x34,
0x53, 0x4C, 0x6C, 0x24, 0x3C, 0x35, 0x4B, 0x4E, 0x55, 0x3F,
0x47, 0x35, 0x2F, 0x3A, 0x4A, 0x4E, 0x50, 0x2C, 0x23, 0x4D,
0x54, 0x26, 0x41, 0x54, 0x44, 0x74, 0x23, 0x6D, 0x51, 0x4C,
0x4F, 0x62, 0x2D, 0x7B, 0x40, 0x50, 0x42, 0x68, 0x2A, 0x2C,
0x4A, 0x62, 0x67, 0x3E, 0x60, 0x2E, 0x30, 0x2B, 0x6E, 0x28,
0x5D, 0x30, 0x29, 0x4A, 0x41, 0x6F, 0x4D, 0x45, 0x5D, 0x56,
0x4D, 0x66, 0x64, 0x3F, 0x50, 0x71, 0x3F, 0x75, 0x33, 0x48,
0x64, 0x40, 0x2F, 0x33, 0x60, 0x55, 0x37, 0x2B, 0x6A, 0x2D,
0x63, 0x5A, 0x2D, 0x47, 0x78, 0x5B, 0x62, 0x5C, 0x28, 0x39,
0x4B, 0x79, 0x66, 0x3F, 0x38, 0x75, 0x2F, 0x2E, 0x40, 0x2B,
0x24, 0x3B, 0x72, 0x61, 0x5F, 0x7E, 0x4B, 0x6E, 0x3D, 0x62,
0x38, 0x48, 0x38, 0x75, 0x5C, 0x40, 0x26, 0x61, 0x2B, 0x39,
0x70, 0x2C, 0x51, 0x65, 0x6D, 0x58, 0x75, 0x42, 0x52, 0x4F,
0x3A, 0x34, 0x4D, 0x4F, 0x58, 0x78, 0x37, 0x2A, 0x3D, 0x52,
0x4D, 0x4A, 0x4B, 0x39, 0x29, 0x26, 0x5E, 0x3E, 0x2E, 0x61,
0x31, 0x54, 0x70, 0x58, 0x7D, 0x4B, 0x66, 0x5F, 0x29, 0x35,
0x74, 0x52, 0x57, 0x62, 0x4C, 0x5B, 0x7B, 0x62, 0x27, 0x28,
0x27, 0x2F, 0x47, 0x3E, 0x71, 0x4A, 0x53, 0x5C, 0x7E, 0x50,
0x31, 0x34, 0x65, 0x53, 0x53, 0x36, 0x36, 0x7D, 0x74, 0x44,
0x26, 0x52, 0x5C, 0x78, 0x2E, 0x71, 0x4F, 0x64, 0x33, 0x5F,
0x56, 0x4F, 0x59, 0x74, 0x59, 0x4B, 0x5D, 0x30, 0x65, 0x3F,
0x2B, 0x6F, 0x6D, 0x5D, 0x67, 0x79, 0x27, 0x66, 0x54, 0x54,
0x36, 0x43, 0x73, 0x33, 0x5A, 0x7D, 0x77, 0x54, 0x27, 0x2D,
0x6C, 0x74, 0x7E, 0x58, 0x62, 0x46, 0x3E, 0x38, 0x2D, 0x50,
0x52, 0x55, 0x55, 0x63, 0x31, 0x74, 0x3F, 0x63, 0x36, 0x3A,
0x6E, 0x36, 0x7E, 0x33, 0x28, 0x7B, 0x66, 0x77, 0x28, 0x6A,
0x46, 0x7B, 0x39, 0x75, 0x70, 0x48, 0x2B, 0x72, 0x50, 0x30,
0x6B, 0x57, 0x63, 0x50, 0x49, 0x43, 0x2E, 0x25, 0x42, 0x52,
0x52, 0x63, 0x5D, 0x7A, 0x5E, 0x21, 0x47, 0x2C, 0x5D, 0x2C,
0x75, 0x53, 0x69, 0x6F, 0x44, 0x28, 0x30, 0x54, 0x29, 0x64,
0x2D, 0x38, 0x6C, 0x24, 0x24, 0x6A, 0x2B, 0x34, 0x46, 0x44,
0x49, 0x6D, 0x7A, 0x72, 0x67, 0x62, 0x4C, 0x6C, 0x75, 0x3D,
0x35, 0x35, 0x6B, 0x3B, 0x7C, 0x69, 0x61, 0x61, 0x58, 0x53,
0x50, 0x46, 0x33, 0x2C, 0x3F, 0x2A, 0x4B, 0x44, 0x72, 0x3E,
0x51, 0x51, 0x5B, 0x35, 0x2B, 0x55, 0x45, 0x53, 0x77, 0x28,
0x65, 0x3C, 0x36, 0x56, 0x60, 0x28, 0x6F, 0x21, 0x5D, 0x7A,
0x62, 0x33, 0x49, 0x40, 0x5D, 0x43, 0x2E, 0x67, 0x66, 0x3C,
0x4F, 0x7C, 0x40, 0x7C, 0x3A, 0x4E, 0x56, 0x72, 0x44, 0x5C,
0x21, 0x78, 0x29, 0x41, 0x52, 0x70, 0x46, 0x27, 0x7E, 0x2A,
0x61, 0x36, 0x2D, 0x33, 0x33, 0x47, 0x50, 0x2F, 0x47, 0x5A,
0x7A, 0x6F, 0x30, 0x3D, 0x27, 0x52, 0x27, 0x24, 0x4F, 0x4E,
0x54, 0x2C, 0x79, 0x3E, 0x23, 0x39, 0x59, 0x65, 0x3F, 0x55,
0x75, 0x24, 0x6B, 0x40, 0x71, 0x4F, 0x61, 0x79, 0x3F, 0x3E,
0x35, 0x2A, 0x21, 0x76, 0x2D, 0x22, 0x6B, 0x5B, 0x55, 0x36,
0x34, 0x5E, 0x2E, 0x24, 0x23, 0x5D, 0x7E, 0x24, 0x64, 0x3D,
0x3C, 0x69, 0x64, 0x65, 0x5A, 0x6C, 0x40, 0x6A, 0x41, 0x52,
0x50, 0x7C, 0x61, 0x3D, 0x61, 0x31, 0x7B, 0x2B, 0x74, 0x3A,
0x75, 0x35, 0x4C, 0x2E, 0x79, 0x57, 0x68, 0x56, 0x57, 0x3C,
0x65, 0x4A, 0x46, 0x75, 0x70, 0x7C, 0x67, 0x7A, 0x6F, 0x36,
0x4F, 0x7A, 0x69, 0x5A, 0x2E, 0x51, 0x72, 0x28, 0x50, 0x69,
0x46, 0x2A, 0x3D, 0x24, 0x46, 0x3C, 0x35, 0x5C, 0x59, 0x3A,
0x76, 0x24, 0x64, 0x25, 0x79, 0x32, 0x49, 0x4E, 0x6C, 0x73,
0x29, 0x68, 0x2B, 0x5F, 0x55, 0x31, 0x72, 0x4B, 0x2B, 0x76,
0x38, 0x5D, 0x68, 0x72, 0x6E, 0x2C, 0x45, 0x79, 0x7C, 0x67,
0x58, 0x45, 0x2E, 0x47, 0x61, 0x69, 0x2C, 0x7E, 0x69, 0x47,
0x34, 0x2B, 0x36, 0x24, 0x39, 0x21, 0x43, 0x42, 0x48, 0x62,
0x7E, 0x59, 0x5F, 0x69, 0x66, 0x7E, 0x43, 0x5C, 0x5A, 0x35,
0x64, 0x5C, 0x55, 0x7D, 0x3B, 0x3F, 0x4A, 0x21, 0x4B, 0x2E,
0x31, 0x68, 0x4D, 0x65, 0x6E, 0x2D, 0x7B, 0x57, 0x73, 0x43,
0x76, 0x69, 0x3E, 0x6E, 0x69, 0x66, 0x31, 0x4D, 0x52, 0x58,
0x2B, 0x60, 0x48, 0x43, 0x5B, 0x79, 0x7E, 0x29, 0x3B, 0x70,
0x4E, 0x43, 0x55, 0x42, 0x53, 0x68, 0x2A, 0x53, 0x4C, 0x68,
0x46, 0x72, 0x24, 0x23, 0x44, 0x45, 0x6D, 0x41, 0x35, 0x7D,
0x37, 0x39, 0x7B, 0x7E, 0x5B, 0x74, 0x5D, 0x36, 0x74, 0x52,
0x6C, 0x68, 0x5D, 0x27, 0x3A, 0x41, 0x75, 0x3A, 0x60, 0x25,
0x57, 0x53, 0x3F, 0x76, 0x6A, 0x3B, 0x37, 0x21, 0x63, 0x7B,
0x2D, 0x56, 0x69, 0x6E, 0x2B, 0x35, 0x4E, 0x60, 0x7E, 0x51,
0x43, 0x38, 0x4C, 0x38, 0x68, 0x5A, 0x46, 0x48, 0x63, 0x72,
0x65, 0x24, 0x6E, 0x62, 0x5A, 0x25, 0x72, 0x26, 0x27, 0x39,
0x6A, 0x34, 0x6C, 0x24, 0x35, 0x2D, 0x53, 0x27, 0x74, 0x51,
0x2D, 0x73, 0x4F, 0x53, 0x47, 0x74, 0x3F, 0x6F, 0x3F, 0x32,
0x56, 0x24, 0x78, 0x35, 0x4A, 0x53, 0x31, 0x22, 0x4D, 0x76,
0x33, 0x30, 0x54, 0x60, 0x2B, 0x53, 0x66, 0x5A, 0x32, 0x74,
0x3D, 0x41, 0x5A, 0x28, 0x58, 0x57, 0x23, 0x70, 0x6F, 0x2F,
0x78, 0x57, 0x5E, 0x34, 0x6A, 0x24, 0x6E, 0x32, 0x78, 0x4B,
0x25, 0x5D, 0x74, 0x35, 0x4A, 0x61, 0x4D, 0x77, 0x6D, 0x3F,
0x22, 0x52, 0x53, 0x6D, 0x57, 0x63, 0x32, 0x2B, 0x71, 0x41,
0x49, 0x3E, 0x3A, 0x50, 0x5C, 0x3A, 0x51, 0x79, 0x59, 0x70,
0x6B, 0x50, 0x58, 0x62, 0x37, 0x43, 0x4C, 0x6C, 0x3D, 0x39,
0x2A, 0x71, 0x79, 0x69, 0x5D, 0x26, 0x43, 0x44, 0x24, 0x7D,
0x6C, 0x63, 0x51, 0x49, 0x6D, 0x6D, 0x7D, 0x4F, 0x25, 0x4E,
0x43, 0x4A, 0x60, 0x28, 0x34, 0x34, 0x56, 0x74, 0x7C, 0x77,
0x2D, 0x66, 0x21, 0x21, 0x69, 0x34, 0x5D, 0x2B, 0x4C, 0x54,
0x77, 0x34, 0x55, 0x48, 0x38, 0x61, 0x65, 0x36, 0x25, 0x7D,
0x24, 0x71, 0x38, 0x39, 0x28, 0x28, 0x4F, 0x60, 0x6D, 0x4C,
0x70, 0x5F, 0x64, 0x21, 0x5C, 0x39, 0x52, 0x53, 0x26, 0x7B,
0x66, 0x76, 0x54, 0x23, 0x6F, 0x3D, 0x30, 0x45, 0x23, 0x25,
0x60, 0x6F, 0x61, 0x59, 0x55, 0x4B, 0x39, 0x37, 0x4E, 0x5B,
0x53, 0x75, 0x4A, 0x23, 0x78, 0x73, 0x49, 0x48, 0x78, 0x4A,
0x3B, 0x32, 0x58, 0x76, 0x77, 0x34, 0x47, 0x4C, 0x45, 0x47,
0x69, 0x51, 0x7E, 0x40, 0x36, 0x2A, 0x66, 0x5D, 0x75, 0x34,
0x54, 0x3E, 0x35, 0x49, 0x29, 0x3C, 0x50, 0x3C, 0x44, 0x5B,
0x3D, 0x25, 0x29, 0x6A, 0x23, 0x57, 0x67, 0x41, 0x3D, 0x6C,
0x62, 0x4A, 0x26, 0x3F, 0x76, 0x6D, 0x3B, 0x36, 0x26, 0x3D,
0x2F, 0x31, 0x34, 0x54, 0x45, 0x39, 0x7E, 0x52, 0x27, 0x65,
0x54, 0x51, 0x7A, 0x53, 0x48, 0x5B, 0x45, 0x30, 0x59, 0x4D,
0x68, 0x2C, 0x5E, 0x3C, 0x24, 0x40, 0x5E, 0x57, 0x65, 0x7D,
0x43, 0x21, 0x57, 0x65, 0x35, 0x32, 0x58, 0x3E, 0x56, 0x75,
0x23, 0x7E, 0x6B, 0x3A, 0x5E, 0x6E, 0x46, 0x53, 0x4E, 0x71,
0x4D, 0x74, 0x22, 0x2F, 0x2F, 0x34, 0x45, 0x42, 0x58, 0x42,
0x62, 0x5B, 0x7B, 0x62, 0x27, 0x2D, 0x68, 0x5F, 0x46, 0x6F,
0x21, 0x3E, 0x79, 0x63, 0x62, 0x4F, 0x5A, 0x34, 0x5F, 0x3C,
0x31, 0x33, 0x75, 0x49, 0x77, 0x2B, 0x77, 0x29, 0x7B, 0x54,
0x27, 0x3A, 0x7B, 0x3E, 0x76, 0x78, 0x2E, 0x40, 0x3F, 0x3D,
0x26, 0x3C, 0x63, 0x7B, 0x2D, 0x74, 0x47, 0x60, 0x2E, 0x75,
0x51, 0x22, 0x2E, 0x6A, 0x6C, 0x67, 0x37, 0x6B, 0x2F, 0x61,
0x59, 0x74, 0x5D, 0x60, 0x50, 0x72, 0x42, 0x57, 0x58, 0x4E,
0x79, 0x4E, 0x24, 0x41, 0x60, 0x4A, 0x77, 0x7E, 0x3A, 0x3E,
0x49, 0x3B, 0x7B, 0x4B, 0x53, 0x5D, 0x2C, 0x7D, 0x69, 0x3A,
0x70, 0x25, 0x7B, 0x76, 0x42, 0x2E, 0x41, 0x6C, 0x5F, 0x4C,
0x2D, 0x62, 0x36, 0x7A, 0x63, 0x5E, 0x55, 0x3E, 0x3C, 0x3B,
0x76, 0x6A, 0x71, 0x59, 0x41, 0x61, 0x3F, 0x3C, 0x41, 0x5A,
0x56, 0x53, 0x4A, 0x38, 0x3C, 0x66, 0x50, 0x41, 0x6B, 0x3A,
0x54, 0x24, 0x7D, 0x4B, 0x58, 0x5E, 0x29, 0x68, 0x30, 0x54,
0x5F, 0x33, 0x4B, 0x76, 0x59, 0x2C, 0x5C, 0x62, 0x38, 0x3C,
0x7D, 0x3C, 0x44, 0x61, 0x4F, 0x4E, 0x3A, 0x4F, 0x38, 0x4D,
0x52, 0x5E, 0x42, 0x23, 0x5F, 0x34, 0x33, 0x2B, 0x21, 0x4D,
0x24, 0x4F, 0x67, 0x77, 0x7B, 0x52, 0x44, 0x44, 0x58, 0x37,
0x44, 0x6A, 0x4E, 0x3D, 0x73, 0x49, 0x51, 0x25, 0x3C, 0x65,
0x5A, 0x38, 0x26, 0x64, 0x7D, 0x55, 0x49, 0x60, 0x61, 0x46,
0x45, 0x23, 0x52, 0x28, 0x58, 0x57, 0x3C, 0x21, 0x27, 0x5B,
0x2A, 0x6B, 0x2A, 0x2B, 0x48, 0x38, 0x78, 0x47, 0x31, 0x3C,
0x7C, 0x2F, 0x36, 0x34, 0x3A, 0x2F, 0x24, 0x78, 0x7C, 0x65,
0x63, 0x7A, 0x26, 0x23, 0x40, 0x7C, 0x25, 0x28, 0x61, 0x7B,
0x33, 0x50, 0x4F, 0x30, 0x22, 0x23, 0x5B, 0x39, 0x5F, 0x4A,
0x60, 0x65, 0x34, 0x4C, 0x62, 0x23, 0x2F, 0x42, 0x39, 0x74,
0x71, 0x4B, 0x74, 0x51, 0x5C, 0x27, 0x5B, 0x66, 0x3E, 0x40,
0x25, 0x35, 0x21, 0x4D, 0x5E, 0x4C, 0x33, 0x72, 0x72, 0x6D,
0x64, 0x5E, 0x27, 0x4D, 0x5A, 0x3C, 0x56, 0x62, 0x55, 0x27,
0x3D, 0x62, 0x7C, 0x2A, 0x4E, 0x52, 0x60, 0x6C, 0x3F, 0x5A,
0x46, 0x2A, 0x73, 0x49, 0x5E, 0x69, 0x5C, 0x3C, 0x42, 0x55,
0x44, 0x7B, 0x33, 0x21, 0x75, 0x26, 0x68, 0x53, 0x22, 0x56,
0x7D, 0x49, 0x51, 0x48, 0x42, 0x78, 0x5A, 0x74, 0x58, 0x55,
0x63, 0x6A, 0x75, 0x4D, 0x37, 0x59, 0x7B, 0x2D, 0x26, 0x39,
0x34, 0x62, 0x73, 0x44, 0x5A, 0x2F, 0x5B, 0x31, 0x21, 0x43,
0x27, 0x22, 0x69, 0x34, 0x3D, 0x57, 0x36, 0x40, 0x3E, 0x36,
0x45, 0x4D, 0x7A, 0x52, 0x64, 0x6F, 0x65, 0x2E, 0x7A, 0x55,
0x7D, 0x26, 0x7E, 0x4F, 0x35, 0x2C, 0x25, 0x50, 0x62, 0x39,
0x6E, 0x6C, 0x7B, 0x7E, 0x5E, 0x7A, 0x52, 0x7E, 0x42, 0x77,
0x45, 0x48, 0x22, 0x61, 0x7A, 0x6B, 0x72, 0x21, 0x35, 0x3A,
0x35, 0x5D, 0x24, 0x69, 0x23, 0x56, 0x59, 0x5C, 0x5F, 0x4A,
0x72, 0x32, 0x4C, 0x53, 0x34, 0x49, 0x5E, 0x39, 0x71, 0x38,
0x29, 0x5C, 0x23, 0x6D, 0x44, 0x3A, 0x55, 0x4C, 0x2A, 0x5F,
0x54, 0x6A, 0x53, 0x30, 0x7B, 0x49, 0x34, 0x2F, 0x74, 0x61,
0x3E, 0x29, 0x67, 0x35, 0x76, 0x2C, 0x6F, 0x32, 0x4C, 0x2A,
0x63, 0x2E, 0x31, 0x6F, 0x56, 0x31, 0x21, 0x29, 0x61, 0x3A,
0x70, 0x4B, 0x26, 0x44, 0x79, 0x22, 0x6B, 0x22, 0x36, 0x72,
0x2F, 0x36, 0x59, 0x56, 0x67, 0x57, 0x3C, 0x39, 0x59, 0x40,
0x64, 0x7E, 0x5D, 0x2C, 0x72, 0x66, 0x52, 0x64, 0x2B, 0x3B,
0x4B, 0x6E, 0x2A, 0x4F, 0x63, 0x4C, 0x55, 0x33, 0x70, 0x4C,
0x55, 0x43, 0x28, 0x69, 0x69, 0x38, 0x69, 0x69, 0x5C, 0x7D,
0x30, 0x34, 0x6D, 0x39, 0x78, 0x6B, 0x6C, 0x48, 0x39, 0x44,
0x56, 0x51, 0x6C, 0x5D, 0x62, 0x6F, 0x67, 0x70, 0x4B, 0x2C,
0x37, 0x6E, 0x71, 0x77, 0x75, 0x74, 0x38, 0x42, 0x26, 0x33,
0x2B, 0x7A, 0x22, 0x2C, 0x35, 0x42, 0x67, 0x3A, 0x32, 0x2F,
0x35, 0x35, 0x49, 0x2D, 0x2A, 0x44, 0x67, 0x22, 0x5C, 0x52,
0x4C, 0x56, 0x78, 0x21, 0x35, 0x57, 0x7C, 0x47, 0x6F, 0x54,
0x6F, 0x6F, 0x60, 0x7E, 0x2B, 0x46, 0x33, 0x40, 0x21, 0x51,
0x45, 0x36, 0x63, 0x48, 0x7C, 0x78, 0x33, 0x4C, 0x3D, 0x42,
0x68, 0x26, 0x77, 0x7E, 0x6B, 0x3F, 0x72, 0x28, 0x3E, 0x7D,
0x6C, 0x59, 0x56, 0x45, 0x40, 0x38, 0x26, 0x74, 0x7A, 0x3C,
0x33, 0x60, 0x41, 0x74, 0x35, 0x28, 0x6E, 0x6E, 0x7B, 0x61,
0x31, 0x3F, 0x62, 0x6E, 0x76, 0x6B, 0x54, 0x39, 0x48, 0x29,
0x61, 0x25, 0x22, 0x3A, 0x22, 0x35, 0x66, 0x74, 0x49, 0x4C,
0x58, 0x39, 0x28, 0x24, 0x3E, 0x2F, 0x71, 0x59, 0x53, 0x69,
0x78, 0x70, 0x5A, 0x3D, 0x6A, 0x48, 0x2A, 0x67, 0x41, 0x39,
0x3B, 0x7C, 0x3D, 0x51, 0x70, 0x2C, 0x66, 0x2A, 0x56, 0x5F,
0x73, 0x5C, 0x6A, 0x60, 0x2C, 0x41, 0x28, 0x27, 0x34, 0x73,
0x7B, 0x5D, 0x74, 0x2C, 0x47, 0x65, 0x62, 0x3B, 0x66, 0x6C,
0x31, 0x4E, 0x26, 0x72, 0x2D, 0x47, 0x58, 0x42, 0x7E, 0x4E,
0x5E, 0x30, 0x43, 0x30, 0x3D, 0x4E, 0x60, 0x53, 0x32, 0x44,
0x5F, 0x4A, 0x71, 0x70, 0x74, 0x5B, 0x75, 0x48, 0x53, 0x2F,
0x73, 0x48, 0x69, 0x28, 0x24, 0x41, 0x57, 0x58, 0x73, 0x77,
0x32, 0x53, 0x66, 0x73, 0x5D, 0x5C, 0x5B, 0x6C, 0x22, 0x5C,
0x78, 0x55, 0x31, 0x62, 0x6A, 0x7D, 0x21, 0x2C, 0x49, 0x2A,
0x7A, 0x6D, 0x64, 0x43, 0x6A, 0x63, 0x48, 0x5C, 0x45, 0x40,
0x7C, 0x31, 0x51, 0x6D, 0x77, 0x42, 0x7B, 0x28, 0x64, 0x64,
0x47, 0x41, 0x6E, 0x37, 0x2E, 0x38, 0x5D, 0x6F, 0x21, 0x65,
0x59, 0x6C, 0x68, 0x28, 0x6F, 0x65, 0x54, 0x76, 0x37, 0x33,
0x59, 0x7B, 0x55, 0x52, 0x4E, 0x41, 0x43, 0x40, 0x45, 0x74,
0x60, 0x38, 0x2D, 0x4D, 0x68, 0x62, 0x52, 0x5B, 0x64, 0x53,
0x78, 0x24, 0x3F, 0x4A, 0x75, 0x2B, 0x5E, 0x69, 0x5D, 0x54,
0x27, 0x57, 0x72, 0x66, 0x7A, 0x27, 0x35, 0x7C, 0x75, 0x73,
0x28, 0x61, 0x75, 0x49, 0x40, 0x44, 0x26, 0x6B, 0x7E, 0x40,
0x73, 0x3A, 0x21, 0x2E, 0x6C, 0x3D, 0x29, 0x48, 0x69, 0x30,
0x3E, 0x2C, 0x46, 0x4D, 0x25, 0x45, 0x64, 0x6C, 0x45, 0x4B,
0x2E, 0x3F, 0x78, 0x44, 0x48, 0x23, 0x7A, 0x5F, 0x4B, 0x36,
0x4F, 0x5E, 0x2F, 0x4A, 0x27, 0x6D, 0x46, 0x64, 0x6C, 0x78,
0x6D, 0x53, 0x59, 0x7C, 0x36, 0x46, 0x77, 0x53, 0x3A, 0x6C,
0x57, 0x79, 0x55, 0x7D, 0x4D, 0x72, 0x33, 0x73, 0x5E, 0x69,
0x7D, 0x6F, 0x53, 0x7E, 0x78, 0x78, 0x70, 0x7C, 0x4E, 0x4D,
0x27, 0x49, 0x26, 0x38, 0x7C, 0x55, 0x28, 0x67, 0x28, 0x24,
0x55, 0x4E, 0x36, 0x4F, 0x7D, 0x75, 0x65, 0x6B, 0x57, 0x64,
0x46, 0x22, 0x3A, 0x2E, 0x61, 0x4C, 0x3C, 0x6D, 0x2E, 0x74,
0x27, 0x5C, 0x7D, 0x57, 0x63, 0x5C, 0x3A, 0x57, 0x55, 0x57,
0x71, 0x67, 0x32, 0x38, 0x27, 0x5A, 0x63, 0x2F, 0x70, 0x33,
0x41, 0x32, 0x77, 0x60, 0x73, 0x55, 0x22, 0x29, 0x2A, 0x41,
0x6D, 0x57, 0x72, 0x5D, 0x4C, 0x4F, 0x46, 0x6B, 0x79, 0x4A,
0x28, 0x5A, 0x3B, 0x5A, 0x26, 0x73, 0x6A, 0x3D, 0x7C, 0x74,
0x27, 0x72, 0x30, 0x72, 0x2D, 0x41, 0x75, 0x58, 0x50, 0x2A,
0x4B, 0x29, 0x53, 0x5D, 0x26, 0x7C, 0x25, 0x6A, 0x3F, 0x30,
0x35, 0x51, 0x58, 0x31, 0x3A, 0x6F, 0x4E, 0x25, 0x42, 0x7E,
0x2D, 0x6A, 0x3E, 0x75, 0x30, 0x5F, 0x4B, 0x26, 0x58, 0x73,
0x29, 0x7B, 0x32, 0x73, 0x5E, 0x3D, 0x4B, 0x66, 0x27, 0x6C,
0x7B, 0x5D, 0x35, 0x31, 0x53, 0x65, 0x44, 0x49, 0x22, 0x57,
0x60, 0x35, 0x3A, 0x4E, 0x26, 0x70, 0x51, 0x3D, 0x72, 0x5B,
0x2F, 0x31, 0x41, 0x5F, 0x4C, 0x23, 0x6E, 0x7C, 0x46, 0x7D,
0x5E, 0x29, 0x48, 0x58, 0x3C, 0x47, 0x38, 0x58, 0x51, 0x6E,
0x27, 0x79, 0x2F, 0x65, 0x33, 0x4D, 0x6A, 0x3E, 0x3C, 0x3A,
0x5F, 0x55, 0x3A, 0x60, 0x7D, 0x27, 0x6E, 0x46, 0x22, 0x4D,
0x7D, 0x5D, 0x36, 0x47, 0x6B, 0x22, 0x7E, 0x53, 0x23, 0x55,
0x52, 0x28, 0x79, 0x7D, 0x65, 0x3C, 0x7B, 0x28, 0x7D, 0x5A,
0x73, 0x40, 0x51, 0x34, 0x39, 0x3F, 0x49, 0x27, 0x40, 0x45,
0x42, 0x35, 0x75, 0x79, 0x3B, 0x4E, 0x62, 0x35, 0x67, 0x23,
0x35, 0x5C, 0x30, 0x7C, 0x43, 0x3C, 0x6F, 0x24, 0x36, 0x7D,
0x60, 0x54, 0x2D, 0x57, 0x49, 0x59, 0x71, 0x35, 0x21, 0x56,
0x69, 0x7E, 0x2B, 0x4C, 0x39, 0x71, 0x37, 0x67, 0x6A, 0x6F,
0x5E, 0x64, 0x58, 0x45, 0x6F, 0x3F, 0x69, 0x49, 0x50, 0x39,
0x57, 0x50, 0x6A, 0x5F, 0x44, 0x74, 0x41, 0x44, 0x69, 0x54,
0x34, 0x4D, 0x2B, 0x39, 0x32, 0x63, 0x59, 0x23, 0x55, 0x6C,
0x79, 0x77, 0x2B, 0x57, 0x40, 0x37, 0x27, 0x6F, 0x5D, 0x79,
0x52, 0x50, 0x73, 0x74, 0x74, 0x7C, 0x26, 0x6D, 0x57, 0x69,
0x6F, 0x4A, 0x2F, 0x21, 0x77, 0x41, 0x3E, 0x40, 0x26, 0x49,
0x32, 0x2E, 0x68, 0x7D, 0x65, 0x45, 0x34, 0x21, 0x29, 0x58,
0x48, 0x6A, 0x7E, 0x7E, 0x7D, 0x68, 0x22, 0x78, 0x48, 0x73,
0x48, 0x2B, 0x5E, 0x2F, 0x3E, 0x3F, 0x50, 0x66, 0x33, 0x3F,
0x7A, 0x69, 0x2E, 0x7E, 0x36, 0x4E, 0x5F, 0x41, 0x23, 0x3B,
0x75, 0x32, 0x7C, 0x60, 0x31, 0x79, 0x3C, 0x2A, 0x54, 0x65,
0x70, 0x29, 0x51, 0x2E, 0x64, 0x4B, 0x51, 0x25, 0x60, 0x7E,
0x75, 0x2C, 0x7E, 0x21, 0x71, 0x70, 0x32, 0x59, 0x7D, 0x65,
0x4E, 0x50, 0x29, 0x79, 0x5F, 0x76, 0x71, 0x26, 0x7C, 0x5A,
0x5D, 0x48, 0x36, 0x75, 0x5F, 0x7A, 0x48, 0x7B, 0x79, 0x47,
0x56, 0x65, 0x2E, 0x34, 0x58, 0x4D, 0x3B, 0x5F, 0x52, 0x66,
0x69, 0x49, 0x38, 0x58, 0x4C, 0x69, 0x7B, 0x23, 0x4D, 0x41,
0x60, 0x4C, 0x52, 0x49, 0x66, 0x7A, 0x56, 0x70, 0x6D, 0x54,
0x26, 0x2C, 0x3D, 0x24, 0x6B, 0x73, 0x6B, 0x2F, 0x29, 0x7C,
0x36, 0x5D, 0x4F, 0x38, 0x4E, 0x77, 0x5C, 0x61, 0x47, 0x36,
0x3D, 0x24, 0x41, 0x37, 0x5B, 0x79, 0x73, 0x25, 0x3C, 0x7B,
0x37, 0x62, 0x44, 0x51, 0x2B, 0x5D, 0x6A, 0x69, 0x50, 0x63,
0x4B, 0x48, 0x70, 0x56, 0x27, 0x32, 0x66, 0x40, 0x46, 0x40,
0x79, 0x6B, 0x50, 0x31, 0x30, 0x3F, 0x4D, 0x5D, 0x22, 0x57,
0x68, 0x41, 0x50, 0x5E, 0x7C, 0x44, 0x7D, 0x62, 0x46, 0x74,
0x52, 0x47, 0x3C, 0x47, 0x40, 0x34, 0x6D, 0x68, 0x4E, 0x2B,
0x25, 0x3E, 0x54, 0x6B, 0x21, 0x21, 0x75, 0x2A, 0x5C, 0x45,
0x39, 0x52, 0x43, 0x54, 0x55, 0x7C, 0x6D, 0x70, 0x49, 0x4F,
0x63, 0x50, 0x38, 0x3E, 0x71, 0x6D, 0x49, 0x3D, 0x45, 0x6F,
0x47, 0x7B, 0x2B, 0x54, 0x29, 0x57, 0x25, 0x33, 0x4C, 0x68,
0x4E, 0x5F, 0x5F, 0x56, 0x58, 0x25, 0x31, 0x5E, 0x49, 0x72,
0x79, 0x72, 0x7C, 0x7E, 0x24, 0x52, 0x7C, 0x74, 0x49, 0x7B,
0x76, 0x25, 0x22, 0x27, 0x59, 0x53, 0x5B, 0x3B, 0x3F, 0x6B,
0x69, 0x60, 0x54, 0x28, 0x5C, 0x21, 0x71, 0x49, 0x6F, 0x3D,
0x5C, 0x73, 0x44, 0x69, 0x54, 0x71, 0x7A, 0x5B, 0x57, 0x55,
0x75, 0x62, 0x24, 0x3F, 0x2E, 0x54, 0x40, 0x74, 0x45, 0x4F,
0x5D, 0x77, 0x37, 0x58, 0x5C, 0x4F, 0x40, 0x3A, 0x6A, 0x6C,
0x34, 0x72, 0x66, 0x6C, 0x4D, 0x26, 0x47, 0x54, 0x73, 0x71,
0x26, 0x5C, 0x4F, 0x64, 0x6F, 0x3F, 0x35, 0x56, 0x7D, 0x5B,
0x4F, 0x58, 0x40, 0x50, 0x4A, 0x2B, 0x43, 0x4E, 0x4F, 0x6B,
0x6B, 0x48, 0x79, 0x3E, 0x68, 0x6B, 0x53, 0x68, 0x4F, 0x24,
0x5B, 0x6C, 0x7B, 0x3C, 0x4D, 0x71, 0x79, 0x60, 0x72, 0x32,
0x36, 0x6A, 0x2B, 0x74, 0x49, 0x6F, 0x54, 0x23, 0x3F, 0x6D,
0x25, 0x39, 0x2B, 0x33, 0x22, 0x7E, 0x3F, 0x74, 0x47, 0x2D,
0x55, 0x28, 0x5D, 0x33, 0x45, 0x7C, 0x3A, 0x51, 0x6C, 0x5A,
0x48, 0x2A, 0x75, 0x53, 0x7B, 0x5A, 0x74, 0x69, 0x27, 0x71,
0x2E, 0x79, 0x5F, 0x35, 0x36, 0x5A, 0x3D, 0x4E, 0x24, 0x7A,
0x3C, 0x64, 0x2E, 0x49, 0x44, 0x62, 0x37, 0x4E, 0x45, 0x35,
0x27, 0x2D, 0x5C, 0x66, 0x2D, 0x38, 0x5E, 0x4F, 0x2E, 0x65,
0x60, 0x2E, 0x6F, 0x4E, 0x4A, 0x5A, 0x2C, 0x70, 0x45, 0x5A,
0x45, 0x43, 0x6B, 0x3F, 0x59, 0x3B, 0x78, 0x5D, 0x6C, 0x66,
0x71, 0x22, 0x27, 0x6E, 0x27, 0x36, 0x2D, 0x5B, 0x23, 0x4D,
0x48, 0x32, 0x33, 0x32, 0x4A, 0x78, 0x38, 0x73, 0x37, 0x6B,
0x72, 0x4E, 0x3B, 0x76, 0x4E, 0x77, 0x40, 0x48, 0x40, 0x40,
0x50, 0x42, 0x26, 0x68, 0x5C, 0x70, 0x5D, 0x47, 0x3F, 0x34,
0x5B, 0x53, 0x34, 0x66, 0x73, 0x43, 0x6D, 0x5C, 0x79, 0x6E,
0x47, 0x2B, 0x6E, 0x7A, 0x24, 0x3D, 0x74, 0x42, 0x34, 0x4B,
0x3C, 0x3F, 0x7B, 0x4E, 0x35, 0x6C, 0x56, 0x7A, 0x32, 0x73,
0x4F, 0x5B, 0x51, 0x69, 0x43, 0x38, 0x6B, 0x28, 0x45, 0x74,
0x32, 0x3C, 0x29, 0x2E, 0x31, 0x5D, 0x74, 0x45, 0x44, 0x22,
0x31, 0x40, 0x44, 0x4E, 0x38, 0x71, 0x5D, 0x79, 0x58, 0x5C,
0x46, 0x42, 0x5D, 0x5A, 0x22, 0x4F, 0x2A, 0x48, 0x39, 0x7A,
0x4D, 0x62, 0x69, 0x50, 0x23, 0x75, 0x25, 0x40, 0x65, 0x7B,
0x2E, 0x3B, 0x74, 0x78, 0x71, 0x37, 0x4C, 0x7B, 0x79, 0x30,
0x3D, 0x44, 0x46, 0x4B, 0x40, 0x40, 0x38, 0x7D, 0x3C, 0x49,
0x2B, 0x47, 0x63, 0x64, 0x4C, 0x31, 0x67, 0x26, 0x5B, 0x35,
0x4D, 0x38, 0x62, 0x3D, 0x21, 0x56, 0x78, 0x2F, 0x4F, 0x63,
0x59, 0x5A, 0x35, 0x50, 0x3D, 0x5B, 0x52, 0x53, 0x68, 0x60,
0x28, 0x44, 0x68, 0x22, 0x56, 0x26, 0x73, 0x56, 0x79, 0x76,
0x3E, 0x5A, 0x66, 0x69, 0x46, 0x57, 0x45, 0x71, 0x77, 0x34,
0x3E, 0x5C, 0x6D, 0x61, 0x3E, 0x55, 0x6D, 0x4A, 0x75, 0x34,
0x6B, 0x42, 0x43, 0x43, 0x78, 0x62, 0x59, 0x23, 0x44, 0x5D,
0x74, 0x3C, 0x33, 0x57, 0x2A, 0x66, 0x76, 0x4A, 0x4D, 0x38,
0x39, 0x32, 0x3B, 0x7D, 0x6F, 0x22, 0x49, 0x56, 0x2E, 0x2D,
0x5B, 0x71, 0x75, 0x6A, 0x41, 0x3C, 0x38, 0x65, 0x2E, 0x23,
0x25, 0x26, 0x47, 0x61, 0x39, 0x65, 0x3F, 0x6F, 0x66, 0x6B,
0x7D, 0x48, 0x77, 0x42, 0x52, 0x66, 0x5E, 0x63, 0x28, 0x72,
0x7A, 0x32, 0x62, 0x6E, 0x39, 0x66, 0x5D, 0x65, 0x22, 0x27,
0x4C, 0x6C, 0x6C, 0x29, 0x26, 0x68, 0x71, 0x71, 0x55, 0x60,
0x55, 0x5A, 0x7C, 0x45, 0x4A, 0x5E, 0x33, 0x7B, 0x2C, 0x46,
0x39, 0x6F, 0x7D, 0x65, 0x44, 0x73, 0x38, 0x23, 0x55, 0x78,
0x36, 0x24, 0x29, 0x23, 0x2F, 0x53, 0x70, 0x3B, 0x21, 0x49,
0x21, 0x67, 0x33, 0x46, 0x38, 0x2B, 0x34, 0x31, 0x2F, 0x28,
0x6E, 0x27, 0x62, 0x6A, 0x7C, 0x78, 0x60, 0x4E, 0x7D, 0x6D,
0x49, 0x36, 0x59, 0x37, 0x2F, 0x32, 0x33, 0x60, 0x76, 0x2B,
0x52, 0x7D, 0x5A, 0x74, 0x5C, 0x6A, 0x54, 0x47, 0x51, 0x76,
0x70, 0x4B, 0x46, 0x77, 0x25, 0x32, 0x7E, 0x7B, 0x53, 0x75,
0x63, 0x45, 0x6E, 0x62, 0x4B, 0x6B, 0x61, 0x46, 0x7C, 0x36,
0x77, 0x5A, 0x7A, 0x27, 0x41, 0x3F, 0x6C, 0x5F, 0x6F, 0x44,
0x6B, 0x36, 0x3B, 0x6C, 0x3F, 0x49, 0x24, 0x6C, 0x53, 0x24,
0x30, 0x79, 0x27, 0x4B, 0x6B, 0x66, 0x69, 0x70, 0x67, 0x34,
0x2E, 0x31, 0x6B, 0x5B, 0x63, 0x27, 0x5B, 0x52, 0x78, 0x3F,
0x3C, 0x2F, 0x53, 0x74, 0x2B, 0x5C, 0x7D, 0x22, 0x38, 0x6C,
0x28, 0x2C, 0x22, 0x71, 0x2C, 0x64, 0x4A, 0x36, 0x30, 0x68,
0x59, 0x71, 0x38, 0x7C, 0x3C, 0x61, 0x64, 0x72, 0x78, 0x55,
0x5D, 0x2E, 0x3D, 0x79, 0x4E, 0x66, 0x3E, 0x78, 0x28, 0x2D,
0x3E, 0x5F, 0x65, 0x64, 0x31, 0x59, 0x24, 0x4F, 0x42, 0x6B,
0x47, 0x67, 0x24, 0x2A, 0x3C, 0x61, 0x25, 0x23, 0x5A, 0x42,
0x3C, 0x27, 0x4A, 0x3F, 0x41, 0x74, 0x2A, 0x27, 0x7B, 0x54,
0x6A, 0x5C, 0x24, 0x42, 0x25, 0x62, 0x54, 0x7E, 0x2C, 0x45,
0x45, 0x26, 0x77, 0x71, 0x3A, 0x3B, 0x55, 0x6F, 0x62, 0x26,
0x46, 0x5E, 0x6D, 0x39, 0x2E, 0x4E, 0x43, 0x3F, 0x67, 0x35,
0x4E, 0x64, 0x5D, 0x4F, 0x39, 0x56, 0x2A, 0x6F, 0x57, 0x3F,
0x6C, 0x43, 0x53, 0x7C, 0x5E, 0x65, 0x21, 0x51, 0x27, 0x62,
0x22, 0x58, 0x77, 0x7D, 0x67, 0x51, 0x2C, 0x70, 0x75, 0x35,
0x50, 0x5C, 0x56, 0x71, 0x5A, 0x72, 0x54, 0x6D, 0x3A, 0x2D,
0x56, 0x45, 0x66, 0x4A, 0x44, 0x40, 0x64, 0x3C, 0x7C, 0x79,
0x6F, 0x46, 0x35, 0x72, 0x6D, 0x7A, 0x58, 0x70, 0x23, 0x71,
0x7B, 0x36, 0x69, 0x58, 0x71, 0x6F, 0x40, 0x52, 0x32, 0x7E,
0x67, 0x38, 0x28, 0x5C, 0x68, 0x6B, 0x68, 0x45, 0x7E, 0x57,
0x73, 0x70, 0x3E, 0x66, 0x77, 0x6D, 0x4E, 0x75, 0x4A, 0x54,
0x40, 0x7D, 0x42, 0x48, 0x57, 0x69, 0x41, 0x6D, 0x25, 0x27,
0x24, 0x39, 0x4E, 0x27, 0x4F, 0x22, 0x2F, 0x7D, 0x5A, 0x2E,
0x62, 0x4D, 0x74, 0x42, 0x41, 0x5F, 0x72, 0x65, 0x29, 0x54,
0x6A, 0x33, 0x46, 0x3E, 0x4D, 0x26, 0x74, 0x55, 0x3C, 0x62,
0x79, 0x45, 0x66, 0x40, 0x2C, 0x72, 0x49, 0x3F, 0x57, 0x23,
0x53, 0x38, 0x35, 0x69, 0x28, 0x55, 0x32, 0x7C, 0x45, 0x69,
0x7D, 0x55, 0x37, 0x4E, 0x76, 0x34, 0x66, 0x6F, 0x67, 0x30,
0x6A, 0x5A, 0x5B, 0x3F, 0x38, 0x7E, 0x6A, 0x24, 0x63, 0x70,
0x70, 0x33, 0x3C, 0x29, 0x5A, 0x4A, 0x4C, 0x2A, 0x31, 0x4A,
0x5A, 0x65, 0x53, 0x41, 0x2D, 0x4A, 0x6E, 0x55, 0x3F, 0x75,
0x2D, 0x22, 0x30, 0x2A, 0x79, 0x56, 0x6B, 0x55, 0x55, 0x72,
0x5C, 0x55, 0x4F, 0x72, 0x2F, 0x54, 0x59, 0x2A, 0x51, 0x48,
0x22, 0x7B, 0x6E, 0x29, 0x68, 0x42, 0x41, 0x72, 0x61, 0x3F,
0x27, 0x72, 0x72, 0x64, 0x27, 0x45, 0x3D, 0x31, 0x78, 0x3C,
0x79, 0x6F, 0x7C, 0x5B, 0x33, 0x2D, 0x54, 0x46, 0x33, 0x66,
0x53, 0x55, 0x69, 0x50, 0x55, 0x39, 0x4D, 0x5E, 0x43, 0x44,
0x6E, 0x33, 0x72, 0x49, 0x3E, 0x79, 0x32, 0x31, 0x74, 0x27,
0x3D, 0x61, 0x40, 0x66, 0x79, 0x23, 0x3C, 0x42, 0x6A, 0x56,
0x62, 0x2D, 0x72, 0x37, 0x66, 0x7E, 0x61, 0x34, 0x5B, 0x37,
0x23, 0x55, 0x62, 0x34, 0x4F, 0x7D, 0x57, 0x2D, 0x69, 0x57,
0x2C, 0x57, 0x46, 0x21, 0x7D, 0x3F, 0x4B, 0x5A, 0x51, 0x7D,
0x46, 0x42, 0x21, 0x33, 0x21, 0x33, 0x2C, 0x70, 0x59, 0x3B,
0x42, 0x37, 0x36, 0x3F, 0x6D, 0x78, 0x54, 0x66, 0x50, 0x78,
0x7E, 0x60, 0x5D, 0x49, 0x6F, 0x61, 0x2F, 0x40, 0x37, 0x7C,
0x38, 0x27, 0x3A, 0x62, 0x59, 0x7C, 0x4B, 0x73, 0x66, 0x77,
0x2C, 0x58, 0x45, 0x45, 0x3F, 0x30, 0x2E, 0x33, 0x21, 0x4F,
0x44, 0x7A, 0x35, 0x2E, 0x4C, 0x5B, 0x73, 0x47, 0x7C, 0x36,
0x2A, 0x4A, 0x46, 0x64, 0x35, 0x31, 0x67, 0x24, 0x61, 0x24,
0x57, 0x78, 0x63, 0x42, 0x69, 0x2F, 0x48, 0x62, 0x48, 0x37,
0x58, 0x4F, 0x55, 0x4F, 0x4A, 0x67, 0x24, 0x3F, 0x67, 0x32,
0x36, 0x56, 0x79, 0x3A, 0x69, 0x5C, 0x3F, 0x4C, 0x53, 0x6D,
0x40, 0x50, 0x71, 0x7A, 0x5B, 0x27, 0x5C, 0x63, 0x71, 0x2B,
0x63, 0x6C, 0x4B, 0x22, 0x75, 0x44, 0x31, 0x5E, 0x65, 0x6F,
0x49, 0x3D, 0x46, 0x70, 0x48, 0x61, 0x61, 0x21, 0x29, 0x72,
0x40, 0x22, 0x62, 0x5F, 0x65, 0x5F, 0x3B, 0x76, 0x3A, 0x63,
0x36, 0x4B, 0x46, 0x4B, 0x62, 0x2D, 0x40, 0x43, 0x22, 0x3B,
0x2A, 0x63, 0x45, 0x3D, 0x63, 0x32, 0x64, 0x64, 0x46, 0x7D,
0x3F, 0x4D, 0x33, 0x3A, 0x7B, 0x6F, 0x43, 0x3F, 0x51, 0x45,
0x6F, 0x65, 0x41, 0x37, 0x65, 0x64, 0x72, 0x25, 0x3C, 0x76,
0x33, 0x33, 0x75, 0x5D, 0x6D, 0x21, 0x25, 0x32, 0x42, 0x65,
0x7C, 0x6B, 0x25, 0x3D, 0x52, 0x7E, 0x21, 0x7D, 0x6F, 0x4D,
0x5C, 0x4A, 0x3F, 0x7E, 0x61, 0x6F, 0x37, 0x7E, 0x6E, 0x52,
0x26, 0x30, 0x37, 0x27, 0x5F, 0x75, 0x33, 0x46, 0x72, 0x40,
0x29, 0x5B, 0x3C, 0x28, 0x57, 0x35, 0x43, 0x6B, 0x4E, 0x63,
0x44, 0x76, 0x54, 0x64, 0x68, 0x76, 0x6F, 0x47, 0x7C, 0x26,
0x36, 0x69, 0x5A, 0x44, 0x69, 0x68, 0x33, 0x5A, 0x29, 0x66,
0x49, 0x72, 0x31, 0x77, 0x28, 0x44, 0x49, 0x7C, 0x4D, 0x5A,
0x33, 0x7C, 0x64, 0x49, 0x25, 0x57, 0x5A, 0x2A, 0x63, 0x7B,
0x7D, 0x3F, 0x50, 0x36, 0x76, 0x6F, 0x73, 0x2B, 0x2B, 0x21,
0x36, 0x2C, 0x7B, 0x62, 0x4B, 0x3D, 0x59, 0x70, 0x26, 0x3F,
0x33, 0x4D, 0x30, 0x50, 0x54, 0x57, 0x66, 0x4F, 0x3A, 0x46,
0x6A, 0x51, 0x6D, 0x60, 0x50, 0x49, 0x68, 0x66, 0x6A, 0x68,
0x4B, 0x79, 0x63, 0x21, 0x5F, 0x33, 0x76, 0x3F, 0x7A, 0x6B,
0x5B, 0x67, 0x26, 0x5C, 0x6A, 0x45, 0x37, 0x41, 0x64, 0x6B,
0x5E, 0x46, 0x58, 0x75, 0x44, 0x23, 0x57, 0x77, 0x65, 0x5A,
0x70, 0x79, 0x74, 0x7E, 0x53, 0x60, 0x6E, 0x2F, 0x26, 0x36,
0x64, 0x3A, 0x6B, 0x6E, 0x70, 0x6A, 0x62, 0x71, 0x4A, 0x6E,
0x41, 0x68, 0x40, 0x23, 0x79, 0x4C, 0x45, 0x37, 0x28, 0x2F,
0x44, 0x3A, 0x53, 0x79, 0x4F, 0x51, 0x40, 0x7E, 0x4C, 0x47,
0x47, 0x33, 0x75, 0x2B, 0x4B, 0x69, 0x46, 0x41, 0x7C, 0x5B,
0x4C, 0x60, 0x61, 0x6F, 0x47, 0x6B, 0x77, 0x4E, 0x3F, 0x55,
0x34, 0x6E, 0x33, 0x32, 0x7C, 0x35, 0x55, 0x5A, 0x4C, 0x50,
0x5B, 0x61, 0x2A, 0x59, 0x6C, 0x4F, 0x59, 0x2F, 0x3F, 0x2B,
0x4A, 0x3C, 0x4C, 0x7D, 0x65, 0x63, 0x75, 0x41, 0x56, 0x67,
0x21, 0x3D, 0x30, 0x3D, 0x76, 0x5B, 0x52, 0x4C, 0x27, 0x2B,
0x46, 0x3E, 0x23, 0x2E, 0x35, 0x51, 0x67, 0x59, 0x42, 0x24,
0x34, 0x48, 0x53, 0x65, 0x5B, 0x7D, 0x7D, 0x58, 0x76, 0x5F,
0x29, 0x2F, 0x2F, 0x76, 0x66, 0x34, 0x3D, 0x34, 0x2E, 0x75,
0x3D, 0x41, 0x47, 0x2F, 0x5A, 0x79, 0x6D, 0x5F, 0x79, 0x78,
0x70, 0x26, 0x54, 0x42, 0x78, 0x36, 0x54, 0x51, 0x30, 0x31,
0x29, 0x6F, 0x2D, 0x38, 0x36, 0x31, 0x3F, 0x32, 0x73, 0x21,
0x6D, 0x4C, 0x2C, 0x7A, 0x6B, 0x7D, 0x68, 0x53, 0x3D, 0x79,
0x75, 0x62, 0x36, 0x79, 0x22, 0x41, 0x2A, 0x4C, 0x60, 0x35,
0x2F, 0x40, 0x39, 0x27, 0x72, 0x7B, 0x7A, 0x6C, 0x3C, 0x5D,
0x26, 0x2E, 0x3F, 0x21, 0x2C, 0x58, 0x6D, 0x38, 0x47, 0x2E,
0x3F, 0x5B, 0x2D, 0x5F, 0x5D, 0x5F, 0x23, 0x21, 0x6C, 0x40,
0x29, 0x2B, 0x40, 0x38, 0x46, 0x32, 0x4B, 0x35, 0x75, 0x22,
0x61, 0x24, 0x4C, 0x63, 0x69, 0x32, 0x63, 0x55, 0x5F, 0x34,
0x2B, 0x27, 0x68, 0x52, 0x46, 0x4E, 0x3B, 0x72, 0x7B, 0x3E,
0x70, 0x51, 0x42, 0x2B, 0x2B, 0x63, 0x6C, 0x7D, 0x5A, 0x29,
0x69, 0x27, 0x31, 0x3C, 0x5C, 0x42, 0x75, 0x7E, 0x6E, 0x74,
0x38, 0x43, 0x5F, 0x61, 0x68, 0x41, 0x55, 0x5B, 0x78, 0x35,
0x6C, 0x6E, 0x3F, 0x3A, 0x75, 0x7B, 0x56, 0x4A, 0x41, 0x76,
0x56, 0x2E, 0x3F, 0x27, 0x52, 0x49, 0x4A, 0x74, 0x2A, 0x24,
0x38, 0x41, 0x75, 0x45, 0x71, 0x6B, 0x63, 0x66, 0x75, 0x43,
0x65, 0x4D, 0x78, 0x5D, 0x79, 0x59, 0x5E, 0x58, 0x6F, 0x4C,
0x42, 0x57, 0x51, 0x52, 0x5E, 0x63, 0x31, 0x6A, 0x69, 0x59,
0x2C, 0x6B, 0x46, 0x7E, 0x6F, 0x27, 0x30, 0x65, 0x63, 0x2C,
0x4D, 0x32, 0x2A, 0x3D, 0x44, 0x3F, 0x61, 0x67, 0x55, 0x2D,
0x2F, 0x7D, 0x42, 0x58, 0x51, 0x69, 0x70, 0x31, 0x5F, 0x6A,
0x3B, 0x7D, 0x2A, 0x59, 0x67, 0x22, 0x67, 0x3E, 0x4B, 0x60,
0x3A, 0x63, 0x7D, 0x44, 0x3A, 0x51, 0x59, 0x4E, 0x5A, 0x60,
0x68, 0x46, 0x72, 0x52, 0x79, 0x39, 0x31, 0x57, 0x60, 0x39,
0x71, 0x27, 0x60, 0x52, 0x64, 0x2B, 0x79, 0x6F, 0x23, 0x33,
0x54, 0x27, 0x2E, 0x2A, 0x70, 0x4C, 0x7A, 0x25, 0x79, 0x3D,
0x76, 0x56, 0x6C, 0x3E, 0x76, 0x74, 0x49, 0x3C, 0x49, 0x6B,
0x26, 0x7B, 0x28, 0x3F, 0x2C, 0x32, 0x2B, 0x69, 0x22, 0x64,
0x5F, 0x7E, 0x67, 0x29, 0x7A, 0x35, 0x66, 0x27, 0x4A, 0x66,
0x76, 0x3C, 0x2A, 0x2A, 0x34, 0x2B, 0x39, 0x53, 0x42, 0x4E,
0x41, 0x66, 0x47, 0x67, 0x2D, 0x3A, 0x31, 0x6D, 0x23, 0x3D,
0x62, 0x74, 0x7B, 0x2C, 0x28, 0x35, 0x6F, 0x27, 0x6E, 0x5F,
0x4C, 0x5A, 0x4C, 0x52, 0x2F, 0x5A, 0x74, 0x73, 0x5F, 0x49,
0x7E, 0x63, 0x33, 0x6E, 0x4A, 0x75, 0x6B, 0x70, 0x54, 0x5C,
0x6E, 0x49, 0x5B, 0x6D, 0x3E, 0x3B, 0x35, 0x4F, 0x2D, 0x36,
0x32, 0x66, 0x4B, 0x7C, 0x46, 0x3D, 0x6C, 0x7A, 0x49, 0x5D,
0x69, 0x23, 0x26, 0x57, 0x33, 0x28, 0x25, 0x6D, 0x48, 0x48,
0x42, 0x4A, 0x79, 0x26, 0x4D, 0x49, 0x4C, 0x4A, 0x67, 0x76,
0x39, 0x26, 0x6E, 0x6D, 0x79, 0x7B, 0x26, 0x78, 0x79, 0x5B,
0x39, 0x74, 0x51, 0x5B, 0x30, 0x6E, 0x4C, 0x57, 0x34, 0x51,
0x60, 0x74, 0x4D, 0x6F, 0x6A, 0x3F, 0x4F, 0x50, 0x55, 0x56,
0x72, 0x42, 0x71, 0x69, 0x2C, 0x61, 0x4B, 0x2E, 0x2A, 0x26,
0x35, 0x71, 0x63, 0x25, 0x4E, 0x69, 0x5A, 0x22, 0x39, 0x3F,
0x25, 0x47, 0x45, 0x33, 0x64, 0x5F, 0x50, 0x7A, 0x3F, 0x21,
0x2C, 0x33, 0x3C, 0x6C, 0x57, 0x5B, 0x48, 0x65, 0x2B, 0x68,
0x53, 0x36, 0x27, 0x7D, 0x65, 0x5F, 0x2B, 0x23, 0x3C, 0x7A,
0x2B, 0x43, 0x74, 0x34, 0x7A, 0x6C, 0x4D, 0x6F, 0x60, 0x71,
0x43, 0x2C, 0x72, 0x53, 0x2A, 0x36, 0x7E, 0x58, 0x39, 0x4E,
0x2B, 0x30, 0x7C, 0x7C, 0x79, 0x33, 0x79, 0x7C, 0x77, 0x61,
0x45, 0x4F, 0x4E, 0x44, 0x4B, 0x29, 0x35, 0x41, 0x4E, 0x31,
0x67, 0x60, 0x33, 0x4A, 0x2A, 0x34, 0x63, 0x45, 0x3C, 0x59,
0x74, 0x60, 0x4B, 0x62, 0x2D, 0x3E, 0x58, 0x51, 0x44, 0x32,
0x3A, 0x54, 0x62, 0x44, 0x66, 0x71, 0x59, 0x3F, 0x3A, 0x50,
0x4E, 0x5A, 0x3D, 0x65, 0x23, 0x5C, 0x55, 0x37, 0x4D, 0x5F,
0x43, 0x3B, 0x64, 0x48, 0x52, 0x2E, 0x21, 0x2D, 0x60, 0x4F,
0x68, 0x33, 0x57, 0x41, 0x6D, 0x27, 0x78, 0x67, 0x31, 0x48,
0x4C, 0x74, 0x44, 0x49, 0x6D, 0x70, 0x7A, 0x23, 0x5F, 0x23,
0x68, 0x46, 0x2F, 0x59, 0x7C, 0x32, 0x4F, 0x2D, 0x43, 0x41,
0x30, 0x22, 0x5A, 0x71, 0x5F, 0x54, 0x24, 0x24, 0x6A, 0x79,
0x66, 0x54, 0x55, 0x6B, 0x55, 0x6A, 0x74, 0x68, 0x22, 0x59,
0x43, 0x43, 0x3D, 0x55, 0x2E, 0x75, 0x65, 0x49, 0x38, 0x3C,
0x7D, 0x4F, 0x62, 0x28, 0x39, 0x3B, 0x6C, 0x6F, 0x3F, 0x49,
0x40, 0x5E, 0x4E, 0x6E, 0x3A, 0x3A, 0x48, 0x29, 0x56, 0x3A,
0x69, 0x3A, 0x3E, 0x3C, 0x4F, 0x46, 0x23, 0x47, 0x5A, 0x29,
0x36, 0x77, 0x49, 0x2B, 0x4E, 0x47, 0x28, 0x67, 0x59, 0x7C,
0x68, 0x7D, 0x41, 0x77, 0x37, 0x67, 0x2B, 0x68, 0x39, 0x5A,
0x6B, 0x51, 0x4D, 0x6E, 0x7D, 0x67, 0x4B, 0x26, 0x6B, 0x3F,
0x31, 0x2A, 0x68, 0x24, 0x56, 0x3D, 0x59, 0x64, 0x39, 0x5E,
0x4B, 0x23, 0x67, 0x67, 0x6D, 0x2F, 0x3E, 0x46, 0x6A, 0x2A,
0x63, 0x3F, 0x37, 0x4B, 0x75, 0x5F, 0x60, 0x56, 0x4F, 0x21,
0x39, 0x57, 0x72, 0x3E, 0x46, 0x77, 0x53, 0x6E, 0x38, 0x48,
0x35, 0x5A, 0x6C, 0x21, 0x3A, 0x46, 0x2C, 0x7D, 0x3C, 0x2C,
0x70, 0x73, 0x5D, 0x49, 0x28, 0x42, 0x4B, 0x40, 0x75, 0x7E,
0x42, 0x6A, 0x67, 0x4B, 0x40, 0x25, 0x6A, 0x43, 0x59, 0x6A,
0x73, 0x21, 0x6A, 0x45, 0x5B, 0x67, 0x21, 0x4B, 0x25, 0x77,
0x5F, 0x46, 0x4B, 0x32, 0x51, 0x72, 0x41, 0x2E, 0x7A, 0x30,
0x69, 0x54, 0x7B, 0x5B, 0x26, 0x41, 0x21, 0x45, 0x27, 0x5C,
0x39, 0x74, 0x52, 0x60, 0x3E, 0x60, 0x33, 0x59, 0x2F, 0x3B,
0x58, 0x64, 0x2A, 0x78, 0x70, 0x56, 0x22, 0x5B, 0x25, 0x6F,
0x2D, 0x22, 0x62, 0x44, 0x47, 0x54, 0x45, 0x52, 0x71, 0x68,
0x29, 0x7B, 0x26, 0x44, 0x70, 0x44, 0x49, 0x63, 0x4A, 0x54,
0x29, 0x65, 0x2B, 0x32, 0x34, 0x66, 0x4A, 0x72, 0x79, 0x7B,
0x4C, 0x7C, 0x5B, 0x36, 0x2B, 0x6B, 0x33, 0x4C, 0x33, 0x7D,
0x50, 0x36, 0x79, 0x48, 0x7C, 0x56, 0x4A, 0x48, 0x45, 0x5C,
0x47, 0x4F, 0x5E, 0x72, 0x36, 0x7D, 0x27, 0x57, 0x5E, 0x72,
0x76, 0x36, 0x6C, 0x26, 0x3E, 0x35, 0x2D, 0x26, 0x40, 0x5E,
0x44, 0x32, 0x74, 0x56, 0x4B, 0x6A, 0x4E, 0x58, 0x4B, 0x44,
0x34, 0x59, 0x6C, 0x50, 0x71, 0x41, 0x3C, 0x69, 0x3A, 0x28,
0x69, 0x39, 0x25, 0x4A, 0x42, 0x39, 0x63, 0x67, 0x2C, 0x7E,
0x4E, 0x59, 0x6D, 0x74, 0x31, 0x76, 0x22, 0x62, 0x78, 0x48,
0x55, 0x77, 0x6C, 0x24, 0x2D, 0x55, 0x59, 0x66, 0x68, 0x21,
0x41, 0x31, 0x77, 0x21, 0x5F, 0x5D, 0x44, 0x48, 0x45, 0x3A,
0x64, 0x4A, 0x56, 0x33, 0x47, 0x40, 0x5E, 0x4B, 0x63, 0x29,
0x51, 0x71, 0x57, 0x67, 0x2B, 0x79, 0x57, 0x30, 0x26, 0x26,
0x55, 0x4E, 0x44, 0x69, 0x78, 0x2C, 0x5E, 0x6D, 0x2E, 0x5A,
0x68, 0x59, 0x32, 0x5F, 0x2F, 0x29, 0x4D, 0x74, 0x5C, 0x27,
0x67, 0x2A, 0x52, 0x3F, 0x60, 0x70, 0x37, 0x5A, 0x3C, 0x5C,
0x4B, 0x7A, 0x46, 0x58, 0x50, 0x6D, 0x55, 0x7E, 0x78, 0x5A,
0x29, 0x6F, 0x48, 0x76, 0x2B, 0x67, 0x4D, 0x4E, 0x34, 0x64,
0x37, 0x2F, 0x41, 0x69, 0x4E, 0x4A, 0x30, 0x70, 0x2C, 0x61,
0x24, 0x61, 0x4F, 0x3D, 0x79, 0x22, 0x3A, 0x32, 0x56, 0x55,
0x68, 0x36, 0x75, 0x5E, 0x33, 0x42, 0x72, 0x26, 0x56, 0x2E,
0x3A, 0x49, 0x33, 0x62, 0x63, 0x3C, 0x55, 0x27, 0x63, 0x69,
0x41, 0x5B, 0x3C, 0x76, 0x7D, 0x31, 0x7B, 0x30, 0x78, 0x4E,
0x52, 0x50, 0x3A, 0x72, 0x7D, 0x5A, 0x53, 0x79, 0x67, 0x55,
0x2E, 0x72, 0x71, 0x45, 0x29, 0x6F, 0x63, 0x68, 0x6E, 0x57,
0x3E, 0x2E, 0x5C, 0x6D, 0x3E, 0x71, 0x62, 0x6F, 0x27, 0x54,
0x2D, 0x44, 0x43, 0x3C, 0x2E, 0x7D, 0x57, 0x68, 0x42, 0x3D,
0x50, 0x54, 0x59, 0x38, 0x22, 0x3D, 0x5B, 0x7D, 0x28, 0x41,
0x2E, 0x58, 0x36, 0x58, 0x5A, 0x56, 0x63, 0x24, 0x43, 0x49,
0x64, 0x59, 0x4C, 0x52, 0x3B, 0x57, 0x57, 0x21, 0x60, 0x6E,
0x52, 0x3A, 0x67, 0x47, 0x70, 0x3B, 0x49, 0x64, 0x58, 0x44,
0x24, 0x7E, 0x5B, 0x59, 0x5D, 0x4E, 0x4C, 0x79, 0x43, 0x43,
0x35, 0x3C, 0x59, 0x6B, 0x72, 0x61, 0x53, 0x37, 0x2C, 0x30,
0x49, 0x45, 0x31, 0x24, 0x62, 0x7A, 0x37, 0x2B, 0x4B, 0x3C,
0x40, 0x59, 0x6B, 0x3D, 0x63, 0x2D, 0x47, 0x4E, 0x3E, 0x45,
0x24, 0x4A, 0x49, 0x52, 0x2E, 0x6D, 0x2F, 0x63, 0x70, 0x65,
0x4E, 0x4C, 0x29, 0x5C, 0x23, 0x27, 0x6C, 0x4A, 0x57, 0x36,
0x59, 0x52, 0x2E, 0x50, 0x3F, 0x67, 0x79, 0x72, 0x61, 0x62,
0x41, 0x2C, 0x2C, 0x74, 0x52, 0x3F, 0x4D, 0x67, 0x49, 0x63,
0x43, 0x2E, 0x3C, 0x3B, 0x3E, 0x45, 0x7A, 0x5D, 0x5F, 0x4F,
0x64, 0x65, 0x6F, 0x74, 0x5D, 0x43, 0x23, 0x7B, 0x59, 0x61,
0x25, 0x71, 0x32, 0x75, 0x4A, 0x37, 0x63, 0x61, 0x2A, 0x4B,
0x6C, 0x64, 0x63, 0x43, 0x4D, 0x79, 0x39, 0x25, 0x43, 0x31,
0x4E, 0x31, 0x50, 0x72, 0x67, 0x52, 0x60, 0x64, 0x2E, 0x74,
0x6E, 0x2A, 0x36, 0x3B, 0x6B, 0x53, 0x79, 0x5C, 0x62, 0x3F,
0x54, 0x7C, 0x32, 0x7B, 0x74, 0x57, 0x64, 0x68, 0x43, 0x28,
0x2B, 0x4B, 0x4A, 0x48, 0x21, 0x38, 0x5F, 0x28, 0x22, 0x66,
0x5E, 0x29, 0x43, 0x5C, 0x6D, 0x75, 0x24, 0x4C, 0x38, 0x53,
0x44, 0x79, 0x5B, 0x4C, 0x46, 0x76, 0x4F, 0x56, 0x46, 0x26,
0x6A, 0x61, 0x42, 0x62, 0x4D, 0x33, 0x3F, 0x69, 0x44, 0x66,
0x66, 0x2F, 0x52, 0x3B, 0x65, 0x3A, 0x30, 0x46, 0x2F, 0x5C,
0x36, 0x47, 0x5A, 0x5C, 0x66, 0x6D, 0x5D, 0x32, 0x2E, 0x76,
0x21, 0x79, 0x37, 0x2F, 0x36, 0x21, 0x33, 0x47, 0x44, 0x4C,
0x59, 0x72, 0x74, 0x62, 0x43, 0x7E, 0x40, 0x43, 0x37, 0x23,
0x39, 0x6C, 0x64, 0x34, 0x58, 0x39, 0x6E, 0x51, 0x5B, 0x3B,
0x6E, 0x50, 0x69, 0x51, 0x71, 0x5E, 0x62, 0x31, 0x4A, 0x7B,
0x2C, 0x58, 0x50, 0x71, 0x5A, 0x3B, 0x4F, 0x5D, 0x40, 0x76,
0x5A, 0x41, 0x5F, 0x29, 0x42, 0x66, 0x36, 0x4C, 0x39, 0x3D,
0x4E, 0x4F, 0x33, 0x45, 0x64, 0x7B, 0x34, 0x21, 0x5E, 0x37,
0x63, 0x58, 0x39, 0x2D, 0x58, 0x24, 0x32, 0x38, 0x7E, 0x66,
0x58, 0x51, 0x71, 0x77, 0x34, 0x3C, 0x6A, 0x69, 0x25, 0x73,
0x5F, 0x34, 0x3B, 0x2F, 0x69, 0x37, 0x27, 0x46, 0x29, 0x40,
0x65, 0x7B, 0x74, 0x39, 0x35, 0x5A, 0x7D, 0x45, 0x5E, 0x57,
0x72, 0x50, 0x39, 0x54, 0x58, 0x2D, 0x60, 0x29, 0x41, 0x72,
0x23, 0x32, 0x73, 0x78, 0x21, 0x2B, 0x3D, 0x39, 0x32, 0x31,
0x47, 0x52, 0x51, 0x71, 0x4C, 0x26, 0x35, 0x4E, 0x7A, 0x29,
0x34, 0x62, 0x63, 0x74, 0x4C, 0x61, 0x5F, 0x76, 0x39, 0x62,
0x30, 0x70, 0x6E, 0x4A, 0x73, 0x58, 0x41, 0x4E, 0x71, 0x2F,
0x67, 0x55, 0x55, 0x2A, 0x6F, 0x6B, 0x3D, 0x70, 0x33, 0x6B,
0x2A, 0x24, 0x3C, 0x31, 0x34, 0x3F, 0x77, 0x59, 0x37, 0x37,
0x6C, 0x4C, 0x5F, 0x7D, 0x59, 0x41, 0x48, 0x2B, 0x2B, 0x46,
0x35, 0x4D, 0x3F, 0x57, 0x29, 0x78, 0x3C, 0x69, 0x5C, 0x40,
0x40, 0x4A, 0x32, 0x58, 0x32, 0x21, 0x4E, 0x2D, 0x32, 0x6C,
0x2F, 0x4B, 0x3B, 0x67, 0x29, 0x56, 0x5F, 0x4E, 0x51, 0x66,
0x2C, 0x37, 0x4A, 0x7A, 0x53, 0x3A, 0x75, 0x2F, 0x22, 0x4C,
0x64, 0x3C, 0x3D, 0x5C, 0x44, 0x5D, 0x4D, 0x63, 0x4B, 0x32,
0x3F, 0x40, 0x74, 0x6A, 0x2B, 0x5F, 0x35, 0x23, 0x3A, 0x5E,
0x7D, 0x5B, 0x33, 0x25, 0x5D, 0x5F, 0x6F, 0x25, 0x68, 0x21,
0x5F, 0x2A, 0x22, 0x42, 0x40, 0x55, 0x66, 0x76, 0x77, 0x60,
0x24, 0x6C, 0x6C, 0x77, 0x5D, 0x3D, 0x4A, 0x27, 0x41, 0x47,
0x5E, 0x72, 0x62, 0x6D, 0x3B, 0x61, 0x65, 0x7E, 0x67, 0x39,
0x34, 0x56, 0x5D, 0x30, 0x62, 0x6E, 0x28, 0x5D, 0x5F, 0x3E,
0x68, 0x78, 0x64, 0x64, 0x56, 0x6B, 0x77, 0x3C, 0x70, 0x58,
0x46, 0x33, 0x32, 0x65, 0x42, 0x6F, 0x7A, 0x30, 0x40, 0x2B,
0x42, 0x55, 0x46, 0x48, 0x6B, 0x4B, 0x51, 0x3C, 0x72, 0x78,
0x21, 0x5F, 0x21, 0x6A, 0x6E, 0x47, 0x51, 0x73, 0x26, 0x3B,
0x4A, 0x64, 0x21, 0x2D, 0x42, 0x50, 0x46, 0x4C, 0x5F, 0x37,
0x39, 0x4C, 0x2B, 0x4A, 0x79, 0x5B, 0x2F, 0x21, 0x61, 0x78,
0x4E, 0x56, 0x32, 0x55, 0x2D, 0x61, 0x47, 0x5C, 0x6A, 0x4A,
0x6B, 0x2D, 0x6A, 0x32, 0x4C, 0x3C, 0x76, 0x2E, 0x6F, 0x3A,
0x43, 0x6D, 0x27, 0x7D, 0x76, 0x44, 0x26, 0x56, 0x24, 0x77,
0x3D, 0x5D, 0x22, 0x5D, 0x40, 0x51, 0x41, 0x39, 0x70, 0x7C,
0x4D, 0x41, 0x36, 0x28, 0x33, 0x4B, 0x5A, 0x5E, 0x25, 0x75,
0x2C, 0x72, 0x5D, 0x6E, 0x3E, 0x44, 0x76, 0x42, 0x56, 0x3D,
0x7E, 0x2F, 0x30, 0x38, 0x23, 0x54, 0x6B, 0x5B, 0x5E, 0x66,
0x42, 0x6E, 0x45, 0x24, 0x31, 0x6E, 0x6C, 0x48, 0x43, 0x28,
0x24, 0x47, 0x6D, 0x26, 0x21, 0x6C, 0x47, 0x4D, 0x5E, 0x44,
0x44, 0x58, 0x25, 0x4D, 0x39, 0x75, 0x67, 0x3C, 0x2D, 0x56,
0x35, 0x35, 0x32, 0x3C, 0x35, 0x73, 0x6C, 0x34, 0x7B, 0x73,
0x5A, 0x40, 0x61, 0x5A, 0x59, 0x6E, 0x5C, 0x36, 0x72, 0x24,
0x70, 0x29, 0x33, 0x72, 0x6D, 0x75, 0x4F, 0x47, 0x68, 0x30,
0x4C, 0x5C, 0x6D, 0x22, 0x59, 0x21, 0x61, 0x5A, 0x56, 0x7D,
0x63, 0x42, 0x59, 0x35, 0x56, 0x22, 0x3F, 0x6C, 0x42, 0x2A,
0x72, 0x25, 0x32, 0x71, 0x58, 0x6C, 0x58, 0x33, 0x35, 0x4B,
0x7D, 0x38, 0x5C, 0x3B, 0x36, 0x34, 0x52, 0x3E, 0x67, 0x24,
0x4E, 0x4D, 0x5F, 0x6F, 0x65, 0x77, 0x71, 0x49, 0x5D, 0x57,
0x6C, 0x43, 0x42, 0x7B, 0x77, 0x6C, 0x66, 0x68, 0x49, 0x46,
0x63, 0x44, 0x50, 0x33, 0x79, 0x64, 0x24, 0x67, 0x44, 0x6F,
0x28, 0x49, 0x2A, 0x34, 0x5D, 0x7B, 0x6A, 0x72, 0x4A, 0x31,
0x52, 0x3E, 0x39, 0x39, 0x33, 0x3B, 0x23, 0x51, 0x5F, 0x52,
0x74, 0x28, 0x7E, 0x60, 0x4C, 0x47, 0x7C, 0x28, 0x28, 0x2A,
0x57, 0x7D, 0x22, 0x2E, 0x50, 0x7D, 0x59, 0x33, 0x7C, 0x35,
0x2A, 0x76, 0x2D, 0x3B, 0x50, 0x22, 0x76, 0x66, 0x7A, 0x2D,
0x66, 0x5E, 0x65, 0x7C, 0x58, 0x34, 0x67, 0x7D, 0x6A, 0x2E,
0x43, 0x67, 0x79, 0x3A, 0x50, 0x74, 0x65, 0x35, 0x38, 0x57,
0x71, 0x31, 0x38, 0x75, 0x7B, 0x36, 0x6A, 0x42, 0x4A, 0x45,
0x40, 0x6B, 0x5B, 0x74, 0x63, 0x36, 0x49, 0x7B, 0x2E, 0x47,
0x41, 0x57, 0x6A, 0x6D, 0x43, 0x34, 0x2A, 0x3A, 0x6C, 0x2A,
0x3F, 0x36, 0x7E, 0x75, 0x2E, 0x69, 0x69, 0x32, 0x67, 0x27,
0x33, 0x67, 0x49, 0x59, 0x2B, 0x4B, 0x45, 0x45, 0x5A, 0x77,
0x4E, 0x33, 0x48, 0x53, 0x3F, 0x63, 0x27, 0x47, 0x3E, 0x77,
0x24, 0x3E, 0x55, 0x42, 0x78, 0x23, 0x68, 0x36, 0x2B, 0x2F,
0x7B, 0x22, 0x78, 0x79, 0x2D, 0x6A, 0x37, 0x72, 0x37, 0x2B,
0x45, 0x3E, 0x37, 0x4D, 0x5C, 0x4B, 0x61, 0x2C, 0x5B, 0x70,
0x77, 0x5C, 0x52, 0x2D, 0x3F, 0x2D, 0x46, 0x54, 0x39, 0x4D,
0x33, 0x78, 0x47, 0x6D, 0x27, 0x25, 0x7E, 0x7D, 0x76, 0x60,
0x63, 0x54, 0x65, 0x7B, 0x70, 0x47, 0x7C, 0x51, 0x52, 0x3F,
0x72, 0x21, 0x51, 0x3E, 0x2F, 0x70, 0x70, 0x39, 0x5E, 0x7B,
0x79, 0x32, 0x6F, 0x7B, 0x76, 0x72, 0x2B, 0x65, 0x65, 0x37,
0x64, 0x34, 0x3D, 0x52, 0x5E, 0x23, 0x41, 0x6F, 0x72, 0x74,
0x3C, 0x7C, 0x5F, 0x4C, 0x6A, 0x76, 0x39, 0x5C, 0x75, 0x42,
0x3E, 0x35, 0x59, 0x2C, 0x39, 0x67, 0x78, 0x49, 0x79, 0x47,
0x37, 0x3E, 0x79, 0x61, 0x4D, 0x2E, 0x28, 0x49, 0x4E, 0x34,
0x71, 0x5A, 0x5B, 0x33, 0x45, 0x2C, 0x2B, 0x69, 0x33, 0x32,
0x2A, 0x5C, 0x5A, 0x6B, 0x65, 0x54, 0x26, 0x7D, 0x63, 0x31,
0x4E, 0x33, 0x7E, 0x6B, 0x67, 0x5F, 0x3D, 0x67, 0x4D, 0x54,
0x5E, 0x45, 0x45, 0x75, 0x49, 0x28, 0x4B, 0x33, 0x34, 0x7A,
0x72, 0x5D, 0x72, 0x49, 0x48, 0x27, 0x75, 0x7C, 0x6B, 0x4A,
0x60, 0x25, 0x62, 0x6D, 0x29, 0x30, 0x6C, 0x3F, 0x34, 0x49,
0x2B, 0x45, 0x2D, 0x58, 0x54, 0x35, 0x42, 0x41, 0x7D, 0x31,
0x4A, 0x64, 0x67, 0x6D, 0x5A, 0x34, 0x46, 0x4E, 0x39, 0x3C,
0x59, 0x34, 0x35, 0x2C, 0x3E, 0x7E, 0x5F, 0x32, 0x37, 0x68,
0x7B, 0x29, 0x57, 0x44, 0x5E, 0x24, 0x72, 0x50, 0x44, 0x37,
0x68, 0x7E, 0x45, 0x56, 0x60, 0x4A, 0x38, 0x77, 0x53, 0x43,
0x50, 0x4A, 0x37, 0x62, 0x22, 0x45, 0x4B, 0x7C, 0x57, 0x7A,
0x63, 0x44, 0x4D, 0x55, 0x2F, 0x50, 0x44, 0x65, 0x6C, 0x78,
0x49, 0x34, 0x61, 0x5E, 0x65, 0x34, 0x25, 0x3C, 0x3A, 0x43,
0x63, 0x40, 0x7D, 0x24, 0x7C, 0x75, 0x59, 0x7A, 0x33, 0x4C,
0x59, 0x47, 0x65, 0x78, 0x3B, 0x30, 0x25, 0x66, 0x33, 0x32,
0x58, 0x48, 0x7C, 0x7C, 0x3E, 0x32, 0x70, 0x26, 0x66, 0x35,
0x24, 0x30, 0x2C, 0x44, 0x76, 0x72, 0x28, 0x54, 0x64, 0x3D,
0x5E, 0x42, 0x51, 0x36, 0x76, 0x40, 0x54, 0x50, 0x51, 0x3F,
0x46, 0x55, 0x44, 0x53, 0x5F, 0x4D, 0x4B, 0x26, 0x78, 0x78,
0x5A, 0x2B, 0x34, 0x31, 0x63, 0x3B, 0x41, 0x56, 0x62, 0x32,
0x58, 0x54, 0x26, 0x32, 0x28, 0x57, 0x49, 0x31, 0x5B, 0x46,
0x71, 0x40, 0x42, 0x55, 0x7C, 0x33, 0x40, 0x2D, 0x3D, 0x3D,
0x49, 0x73, 0x2B, 0x5D, 0x32, 0x2A, 0x5C, 0x2A, 0x5E, 0x71,
0x62, 0x53, 0x26, 0x26, 0x53, 0x2B, 0x56, 0x74, 0x6A, 0x5E,
0x4B, 0x68, 0x62, 0x2A, 0x67, 0x5C, 0x3B, 0x31, 0x2F, 0x5E,
0x4E, 0x6D, 0x57, 0x6E, 0x6E, 0x73, 0x53, 0x56, 0x35, 0x3A,
0x22, 0x5A, 0x6F, 0x44, 0x39, 0x2D, 0x23, 0x21, 0x42, 0x74,
0x4B, 0x3C, 0x74, 0x61, 0x38, 0x24, 0x70
]
# -------------------------------------------------------------------------------------------------
if __name__ == "__main__":
    print('[+] Simple machine side channel attack started.')

    # Undo the MBR's sector shuffle: rotate the first 33 sectors (512 bytes
    # each) of disk_data left by one sector, (0xdead * 0xbeef % 33) times.
    for _ in range(0xdead * 0xbeef % 33):
        disk_data = disk_data[0x200:0x200 * 33] + disk_data[:0x200]

    # Extract one key byte per sector: sector (cx - 2) stores its byte at a
    # stride-0xD offset, wrapped to the 512-byte sector boundary.
    key = ''
    for cx in range(2, 0x23):
        idx = ((cx - 2) * 0xD + 1) & 0x1FF
        key += chr(disk_data[0x200 * (cx - 2) + idx])

    print('[+] Final key:', key)
    print('[+] Program finished successfully. Bye bye :)')
# -------------------------------------------------------------------------------------------------
'''
ispo@leet:~/ctf/codegate_2020/malicious$ ./malicious_mbr_crack.py
[+] Simple machine side channel attack started.
[+] Final key: 8_bits_per_byte_1_byte_per_sector
[+] Program finished successfully. Bye bye :)
'''
# -------------------------------------------------------------------------------------------------
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC # Density Estimation via Voronoi Diagrams in High Dimensions
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC <NAME> and <NAME>
# MAGIC
# MAGIC [Video of project presentation](https://drive.google.com/file/d/14E_igECN6hDZieWNn9VVTepCo5mu-rzy/view?usp=sharing)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Introduction
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC **Density estimation** is a wide sub-area of statistics, tasked with understanding an underlying probability distribution of a given set of points, sampled from an unknown distribution. It can be used as a way of data investigation, like determining the location of low- and high-density regions in data, clusters and outliers, as well as for visualization purposes.
# MAGIC
# MAGIC A histogram can be considered as a simple density estimator. Other well-known methods include:
# MAGIC - a k-nearest-neighbor density estimator, which describes the density *p()* at a point *x* as $$p(x) \cong \frac{1}{d_k(x)}$$
# MAGIC where d_k(x) is the distance to the *k*th nearest neighbor of *x*;
# MAGIC - a kernel density estimator, which requires a selection of a kernel probability distribution *K* and a bandwidth *h* and essentially places the distributions at the data points, giving the density estimation
# MAGIC $$p(x) \cong \sum_i K(\frac{x - x_i}{h})$$
# MAGIC
# MAGIC All of the mentioned methods are sensitive to parameter selection, such as choosing the right number of neighbors or a fitting bandwidth.
# COMMAND ----------
# MAGIC %md
# MAGIC **Voronoi diagrams** are widely used in many areas, including computer science, and provide a natural cell decomposition of space based on the nearest-neighbor rule. For a given data point *x*, its corresponding cell contains all the points of the metric space, for which *x* is the closest point among all in the dataset.
# MAGIC
# MAGIC An example of a 2D Voronoi diagram built over a set of points sampled from a normal distribution can be seen below in the methodology part.
# MAGIC
# MAGIC One of the biggest drawbacks of Voronoi diagrams is their geometric complexity, which grows exponentially with dimensionality and essentially prevents their exact computation in dimensions above 6 for a reasonable number of points. In the worst case, the number of geometric elements of the diagram (such as Voronoi vertices, edges and polyhedra of different dimensions that arise on the cell boundaries) grows as
# MAGIC
# MAGIC $$O(n^{\lceil{d/2}\rceil})$$
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC **Our method.**
# MAGIC In this work, we use some intuition about the Voronoi diagrams to develop a new method of density estimation. In addition, we apply a methodology from our previous work which allows one to work with Voronoi diagrams in high dimensions without their explicit construction.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Methodology
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC **Intuition:** if we construct a Voronoi diagram over a set of points sampled from an unknown distribution then Voronoi cells in regions with higher density will be of a smaller *size*.
# MAGIC
# MAGIC Consider the image below, which depicts a Voronoi diagram in a two-dimensional space built over points sampled from a Gaussian distribution. Voronoi cells in the center of the distribution appear naturally smaller in comparison with other cells, and the cell size increases when we move away from the center.
# MAGIC
# MAGIC <img width=400pt src="files/group17/images/voronoi_gaussian.png"/>
# MAGIC
# MAGIC This intuition follows, in a way, a one-nearest-neighbor density estimator: the distance *d* to the nearest neighbor is inversely proportional to the estimated density of the point, and at the same time, a ball of radius *d/2* centered at the query point always fits into (and touches the boundary of) the Voronoi cell.
# MAGIC
# MAGIC On the discussed image, one of the cells is marked with a blue color. Assume that the point inside that cell is our query point, at which we want to understand the density, and all other points are the training (unlabeled) data that provides information about the density. Then, let us try to find a reasonable approximation of the density in a form of
# MAGIC
# MAGIC $$p(x) = \frac{c}{size(Cell(x))}$$
# MAGIC
# MAGIC where *c* is some constant, *Cell* denotes the Voronoi cell of *x*, and *size* is some measure of a cell.
# MAGIC
# MAGIC Note: at any moment, the Voronoi diagram consists of only one query point and all dataset points.
# COMMAND ----------
# MAGIC %md
# MAGIC **Volume function**
# MAGIC
# MAGIC Let us assume for a while that cell's geometry is known to us. What would be a natural way to describe the size of the cell?
# MAGIC
# MAGIC Perhaps, one of the first ideas that comes to mind is to use the cell's *volume* as a size measure. Here we run into an issue of infinite cells, whose volume would also be infinite. Potentially, this could be resolved by computing a weighted volume with an integrable weight function that rapidly decays at infinity.
# MAGIC
# MAGIC However, instead, we propose a way to describe the size via *volume functions*, inspired by how alpha-complexes are motivated and constructed in the area of topological data analysis, where we consider a set of balls of an increasing radius with intersection with voronoi cells:
# MAGIC
# MAGIC <img width=250pt src="files/group17/images/alpha_1.png"/>
# MAGIC <img width=250pt src="files/group17/images/alpha_2.png"/>
# MAGIC <img width=250pt src="files/group17/images/alpha_3.png"/>
# MAGIC
# MAGIC We define the volume function as follows:
# MAGIC
# MAGIC $$\overline{Vol}_d(x)(r) = \frac{Vol_d(Cell(x) \cap B_r(x))}{Vol_d(B_r)}$$
# MAGIC
# MAGIC Here, *r* is a positive radius, *Vol()* denotes the standard d-dimensional volume, and *B_r(x)* is a d-dimensional ball of radius *r* centered at *x*. The volume function of *x* returns a function that takes a radius *r* and returns a ratio of the volume of the intersection of the ball with the cell to the whole volume of the ball. Clearly, at the limit to zero, the ratio is equal to 1 (when the ball fully fits inside the cell), but starts to decrease as soon as parts of the ball start to leave the boundary.
# MAGIC
# MAGIC Below are two images. On the left, a simple rectangular Voronoi cell with a point, generating it. On the right, a depiction of the volume function for this cell.
# MAGIC
# MAGIC <img width=300pt src="files/group17/images/rect.png"/>
# MAGIC <img width=300pt src="files/group17/images/rect_vol.png"/>
# MAGIC
# MAGIC If we go into higher dimensions, we will not be able to see the steps that the function makes anymore. Below is an example, which we approximated (with a method described below) on MNIST data (784-dimensional) some time ago of volume functions for different data points:
# MAGIC
# MAGIC <img width=400pt src="files/group17/images/mnist_vol.png"/>
# MAGIC
# MAGIC On the picture above, we can guess that, for example, the point with the light-blue volume curve is located in a lower-density region than other given points, based on the fact that its volume function is greater than other functions at every radius.
# MAGIC
# MAGIC A couple of things to consider here.
# MAGIC 1. If a cell is infinite, then its volume function will not tend to 0 at infinity. Instead, it will tend to the angular size of this infinity.
# MAGIC 2. If one cell can be placed inside another cell, identifying their generator points and rotating arbitrarily, the first volume function will be below the second volume function.
# MAGIC
# MAGIC The second bullet point provides an idea that maybe we want to integrate this volume functions and compare them: a function with a larger integral would denote a lower-density region. At the same time, the first bullet point tells us that the functions are not always integrable. Thus, in this project we do the following modifications: we do not consider the directions of the balls which end up in infinity. To be more precise, we replace *B_r* with its *sector* where the voronoi cell is finite, in the formula for the volume function. This helps to mitigate the integrability issues.
# MAGIC
# MAGIC Before we go into details about the computational aspects, we need to mention another modification to the formula. Instead of computing the d-dimensional volumes of balls, we decided to compute the (d-1)-dimensional volumes of spheres (or, the surface area of the balls). This modification makes the computation much easier. For example, the approximations of the volume functions become piecewise-constant.
# MAGIC
# MAGIC Therefore, the formula for the *size(x)* becomes:
# MAGIC
# MAGIC $$size(x) = \int_0^{inf}{\overline{Vol}_{d-1}(x)(r) dr} = \int_0^{inf}{ \frac{Vol_{d-1}(Cell(x) \cap \hat{S}_r(x))}{Vol_{d-1}( \hat{S}_r )} dr}$$
# MAGIC
# MAGIC where *S_r(x)* denotes a hypersphere of radius *r*, and a "^" denotes that we only consider sections of a sphere where the cell is finite.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC **Integral computation.**
# MAGIC
# MAGIC
# MAGIC We perform a Monte-Carlo sampling integration method to approximate the volume function, a motivation for which is described in detail in one of our earlier papers about Voronoi Boundary Classification (http://proceedings.mlr.press/v97/polianskii19a.html).
# MAGIC
# MAGIC In short details, we sample random rays in uniform directions (equivalently, we sample points uniformly on the unit hypersphere), starting from the query point. For each ray, we record where it hits the boundary of the Voronoi cell. The length is computed by the following equation:
# MAGIC
# MAGIC $$l(x, m) = \min_{i=1..N, \langle m, x - x_i \rangle > 0} \frac{\lVert x - x_i \rVert^2}{2\langle m, x - x_i \rangle }$$
# MAGIC
# MAGIC Here, *x* is the origin of the ray (the generator/query point), *m* is the directional unit vector, *x_i* are other data points. The "infinite" directions are excluded. The condition in the minimum signifies, that we are only interested in the positive length, i.e. we can't find an intersection behind the ray.
# MAGIC
# MAGIC After casting *T* rays from a point, we can approximate the volume function as:
# MAGIC
# MAGIC $$\overline{Vol}_{d-1}(x)(r) = \frac{1}{T}\sum_{t=1}^{T} \mathbb{1}\left[l(x, m_t) \ge r \right]$$
# MAGIC
# MAGIC The integral of the function can be easily computed as a sum of all lengths:
# MAGIC
# MAGIC $$size(x) = \frac{1}{T}\sum_{t=1}^{T} l(x, m_t)$$
# MAGIC
# MAGIC And, our (unnormalized) density:
# MAGIC
# MAGIC $$\tilde{p}(x) = \frac{T}{\sum_{t=1}^{T} l(x, m_t)}$$
# MAGIC
# MAGIC Overall, the method's complexity with some optimizations is:
# MAGIC
# MAGIC $$O(NMT + NMD + NTD + MTD)$$
# MAGIC
# MAGIC where *N* is the number of train points, *M* is the number of query points, *T* is the number of rays from each point and *D* is data dimensionality.
# COMMAND ----------
# MAGIC %md
# MAGIC **Ranking loss.**
# MAGIC
# MAGIC At the moment, we do not have any proofs that this indeed generates an unnormalized approximation for the density.
# MAGIC
# MAGIC However, we are fairly certain (though also without a proof) that the approximation, when the dataset size tends to infinity, approximates the correct "ranking" of the estimates. Namely,
# MAGIC
# MAGIC $$p(x_1) < p(x_2) \Leftrightarrow \tilde{p}(x_1) < \tilde{p}(x_2)$$
# MAGIC
# MAGIC with probability 1 when data size is large enough. Here *p* is the real density used for point sampling, and *\tilde{p}* is the approximation.
# MAGIC
# MAGIC This quality is meaningful in tasks when we need to sort points according to their density. For example, if we want to exclude noise (say, 5% of the all points with the lowest density), or use for density filtration in topological data analysis.
# MAGIC
# MAGIC A measure that we use to estimate how well we approximate the correct density ranking works as follows:
# MAGIC 1. Sort available query points according to their true density.
# MAGIC 2. Sort available query points according to the approximated density.
# MAGIC 3. Find the number of inverses (swaps of two consecutive elements) required to obtain the first sequence of points from the second one.
# MAGIC
# MAGIC This can easily be counted with a merge-sort algorithm in n log n time, but for simplicity and testing purposes (also because we use python for that) we do it in simple quadratic time.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC # Density Estimation via Voronoi Diagrams in High Dimensions
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC <NAME> and <NAME>
# MAGIC
# MAGIC [Video of project presentation](https://drive.google.com/file/d/14E_igECN6hDZieWNn9VVTepCo5mu-rzy/view?usp=sharing)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Introduction
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC **Density estimation** is a wide sub-area of statistics, tasked with understanding an underlying probability distribution of a given set of points, sampled from an unknown distribution. It can be used as a way of data investigation, like determining the location of low- and high-density regions in data, clusters and outliers, as well as for visualization purposes.
# MAGIC
# MAGIC A histogram can be considered as a simple density estimator. Other well-known methods include:
# MAGIC - a k-nearest-neighbor density estimator, which describes the density *p()* at a point *x* as $$p(x) \cong \frac{1}{d_k(x)}$$
# MAGIC where d_k(x) is the distance to the *k*th nearest neighbor of *x*;
# MAGIC - a kernel density estimator, which requires a selection of a kernel probability distribution *K* and a bandwidth *h* and essentially places the distributions at the data points, giving the density estimation
# MAGIC $$p(x) \cong \sum_i K(\frac{x - x_i}{h})$$
# MAGIC
# MAGIC All of the mentioned methods are sensitive to parameter selection, such as choosing the right number of neighbors or a fitting bandwidth.
# COMMAND ----------
# MAGIC %md
# MAGIC **Voronoi diagrams** are widely used in many areas, including computer science, and provide a natural cell decomposition of space based on the nearest-neighbor rule. For a given data point *x*, its corresponding cell contains all the points of the metric space, for which *x* is the closest point among all in the dataset.
# MAGIC
# MAGIC An example of a 2D Voronoi diagram built over a set of points sampled from a normal distribution can be seen below in the methodology part.
# MAGIC
# MAGIC One of the biggest drawbacks of Voronoi diagrams is their geometric complexity, which grows exponentially with dimensionality and essentially prevents their exact computation in dimensions above 6 for a reasonable number of points. In the worst case, the number of geometric elements of the diagram (such as Voronoi vertices, edges and polyhedra of different dimensions that arise on the cell boundaries) grows as
# MAGIC
# MAGIC $$O(n^{\lceil{d/2}\rceil})$$
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC **Our method.**
# MAGIC In this work, we use some intuition about the Voronoi diagrams to develop a new method of density estimation. In addition, we apply a methodology from our previous work which allows one to work with Voronoi diagrams in high dimensions without their explicit construction.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Methodology
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC **Intuition:** if we construct a Voronoi diagram over a set of points sampled from an unknown distribution then Voronoi cells in regions with higher density will be of a smaller *size*.
# MAGIC
# MAGIC Consider the image below, which depicts a Voronoi diagram in a two-dimensional space built over points sampled from a Gaussian distribution. Voronoi cells in the center of the distribution appear naturally smaller in comparison with other cells, and the cell size increases when we move away from the center.
# MAGIC
# MAGIC <img width=400pt src="files/group17/images/voronoi_gaussian.png"/>
# MAGIC
# MAGIC This intuition follows, in a way, a one-nearest-neighbor density estimator: the distance *d* to the nearest neighbor is inversely proportional to the estimated density of the point, and at the same time, a ball of radius *d/2* centered at the query point always fits into (and touches the boundary of) the Voronoi cell.
# MAGIC
# MAGIC On the discussed image, one of the cells is marked with a blue color. Assume that the point inside that cell is our query point, at which we want to understand the density, and all other points are the training (unlabeled) data that provides information about the density. Then, let us try to find a reasonable approximation of the density in a form of
# MAGIC
# MAGIC $$p(x) = \frac{c}{size(Cell(x))}$$
# MAGIC
# MAGIC where *c* is some constant, *Cell* denotes the Voronoi cell of *x*, and *size* is some measure of a cell.
# MAGIC
# MAGIC Note: at any moment, the Voronoi diagram consists of only one query point and all dataset points.
# COMMAND ----------
# MAGIC %md
# MAGIC **Volume function**
# MAGIC
# MAGIC Let us assume for a while that cell's geometry is known to us. What would be a natural way to describe the size of the cell?
# MAGIC
# MAGIC Perhaps, one of the first ideas that comes to mind is to use the cell's *volume* as a size measure. Here we run into an issue of infinite cells, whose volume would also be infinite. Potentially, this could be resolved by computing a weighted volume with an integrable weight function that rapidly decays at infinity.
# MAGIC
# MAGIC However, instead, we propose a way to describe the size via *volume functions*, inspired by how alpha-complexes are motivated and constructed in the area of topological data analysis, where we consider a set of balls of an increasing radius with intersection with voronoi cells:
# MAGIC
# MAGIC <img width=250pt src="files/group17/images/alpha_1.png"/>
# MAGIC <img width=250pt src="files/group17/images/alpha_2.png"/>
# MAGIC <img width=250pt src="files/group17/images/alpha_3.png"/>
# MAGIC
# MAGIC We define the volume function as follows:
# MAGIC
# MAGIC $$\overline{Vol}_d(x)(r) = \frac{Vol_d(Cell(x) \cap B_r(x))}{Vol_d(B_r)}$$
# MAGIC
# MAGIC Here, *r* is a positive radius, *Vol()* denotes the standard d-dimensional volume, and *B_r(x)* is a d-dimensional ball of radius *r* centered at *x*. The volume function of *x* returns a function that takes a radius *r* and returns a ratio of the volume of the intersection of the ball with the cell to the whole volume of the ball. Clearly, at the limit to zero, the ratio is equal to 1 (when the ball fully fits inside the cell), but starts to decrease as soon as parts of the ball start to leave the boundary.
# MAGIC
# MAGIC Below are two images. On the left, a simple rectangular Voronoi cell with a point, generating it. On the right, a depiction of the volume function for this cell.
# MAGIC
# MAGIC <img width=300pt src="files/group17/images/rect.png"/>
# MAGIC <img width=300pt src="files/group17/images/rect_vol.png"/>
# MAGIC
# MAGIC If we go into higher dimensions, we will not be able to see the steps that the function makes anymore. Below is an example, which we approximated (with a method described below) on MNIST data (784-dimensional) some time ago of volume functions for different data points:
# MAGIC
# MAGIC <img width=400pt src="files/group17/images/mnist_vol.png"/>
# MAGIC
# MAGIC On the picture above, we can guess that, for example, the point with the light-blue volume curve is located in a lower-density region than other given points, based on the fact that its volume function is greater than other functions at every radius.
# MAGIC
# MAGIC A couple of things to consider here.
# MAGIC 1. If a cell is infinite, then its volume function will not tend to 0 at infinity. Instead, it will tend to the angular size of this infinity.
# MAGIC 2. If one cell can be placed inside another cell, identifying their generator points and rotating arbitrarily, the first volume function will be below the second volume function.
# MAGIC
# MAGIC The second bullet point provides an idea that maybe we want to integrate this volume functions and compare them: a function with a larger integral would denote a lower-density region. At the same time, the first bullet point tells us that the functions are not always integrable. Thus, in this project we do the following modifications: we do not consider the directions of the balls which end up in infinity. To be more precise, we replace *B_r* with its *sector* where the voronoi cell is finite, in the formula for the volume function. This helps to mitigate the integrability issues.
# MAGIC
# MAGIC Before we go into details about the computational aspects, we need to mention another modification to the formula. Instead of computing the d-dimensional volumes of balls, we decided to compute the (d-1)-dimensional volumes of spheres (or, the surface area of the balls). This modification makes the computation much easier. For example, the approximations of the volume functions become piecewise-constant.
# MAGIC
# MAGIC Therefore, the formula for the *size(x)* becomes:
# MAGIC
# MAGIC $$size(x) = \int_0^{inf}{\overline{Vol}_{d-1}(x)(r) dr} = \int_0^{inf}{ \frac{Vol_{d-1}(Cell(x) \cap \hat{S}_r(x))}{Vol_{d-1}( \hat{S}_r )} dr}$$
# MAGIC
# MAGIC where *S_r(x)* denotes a hypersphere of radius *r*, and a "^" denotes that we only consider sections of a sphere where the cell is finite.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC **Integral computation.**
# MAGIC
# MAGIC
# MAGIC We perform a Monte-Carlo sampling integration method to approximate the volume function, a motivation for which is described in detail in one of our earlier papers about Voronoi Boundary Classification (http://proceedings.mlr.press/v97/polianskii19a.html).
# MAGIC
# MAGIC In short details, we sample random rays in uniform directions (equivalently, we sample points uniformly on the unit hypersphere), starting from the query point. For each ray, we record where it hits the boundary of the Voronoi cell. The length is computed by the following equation:
# MAGIC
# MAGIC $$l(x, m) = \min_{i=1..N, \langle m, x - x_i \rangle > 0} \frac{\lVert x - x_i \rVert^2}{2\langle m, x - x_i \rangle }$$
# MAGIC
# MAGIC Here, *x* is the origin of the ray (the generator/query point), *m* is the directional unit vector, *x_i* are other data points. The "infinite" directions are excluded. The condition in the minimum signifies, that we are only interested in the positive length, i.e. we can't find an intersection behind the ray.
# MAGIC
# MAGIC After casting *T* rays from a point, we can approximate the volume function as:
# MAGIC
# MAGIC $$\overline{Vol}_{d-1}(x)(r) = \frac{1}{T}\sum_{t=1}^{T} \mathbb{1}\left[l(x, m_t) \ge r \right]$$
# MAGIC
# MAGIC The integral of the function can be easily computed as a sum of all lengths:
# MAGIC
# MAGIC $$size(x) = \frac{1}{T}\sum_{t=1}^{T} l(x, m_t)$$
# MAGIC
# MAGIC And, our (unnormalized) density:
# MAGIC
# MAGIC $$\tilde{p}(x) = \frac{T}{\sum_{t=1}^{T} l(x, m_t)}$$
# MAGIC
# MAGIC Overall, the method's complexity with some optimizations is:
# MAGIC
# MAGIC $$O(NMT + NMD + NTD + MTD)$$
# MAGIC
# MAGIC where *N* is the number of train points, *M* is the number of query points, *T* is the number of rays from each point and *D* is data dimensionality.
# COMMAND ----------
# MAGIC %md
# MAGIC **Ranking loss.**
# MAGIC
# MAGIC At the moment, we do not have any proofs that this indeed generates an unnormalized approximation for the density.
# MAGIC
# MAGIC However, we are fairly certain (though also without a proof) that the approximation, when the dataset size tends to infinity, approximates the correct "ranking" of the estimates. Namely,
# MAGIC
# MAGIC $$p(x_1) < p(x_2) \Leftrightarrow \tilde{p}(x_1) < \tilde{p}(x_2)$$
# MAGIC
# MAGIC with probability 1 when data size is large enough. Here *p* is the real density used for point sampling, and *\tilde{p}* is the approximation.
# MAGIC
# MAGIC This quality is meaningful in tasks when we need to sort points according to their density. For example, if we want to exclude noise (say, 5% of the all points with the lowest density), or use for density filtration in topological data analysis.
# MAGIC
# MAGIC A measure that we use to estimate how well we approximate the correct density ranking works as follows:
# MAGIC 1. Sort available query points according to their true density.
# MAGIC 2. Sort available query points according to the approximated density.
# MAGIC 3. Find the number of inverses (swaps of two consecutive elements) required to obtain the first sequence of points from the second one.
# MAGIC
# MAGIC This can easily be counted with a merge-sort algorithm in n log n time, but for simplicity and testing purposes (also because we use python for that) we do it in simple quadratic time.
from django.shortcuts import render, HttpResponse, redirect, \
get_object_or_404, reverse
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.conf import settings
from decimal import Decimal
from paypal.standard.forms import PayPalPaymentsForm
from django.views.decorators.csrf import csrf_exempt
from .models import Product, Order, LineItem
from .forms import CartForm, CheckoutForm
from . import cart
# Create your views here.
def index(request):
    """Landing page: render the full product catalogue."""
    context = {'all_products': Product.objects.all()}
    return render(request, "ecommerce_app/index.html", context)
def show_product(request, product_id, product_slug):
    """Display one product; on POST, validate the cart form and add the item.

    ``product_slug`` only makes the URL readable; lookup is by ``product_id``.
    On success, redirects to the cart. On an invalid submission, re-renders
    the page with the *bound* form so validation errors are visible (the
    original rebuilt a fresh unbound form, silently discarding the errors).
    """
    product = get_object_or_404(Product, id=product_id)
    if request.method == 'POST':
        form = CartForm(request, request.POST)
        if form.is_valid():
            # cart.add_item_to_cart reads the validated data off the request.
            request.form_data = form.cleaned_data
            cart.add_item_to_cart(request)
            return redirect('show_cart')
        # Fall through with the bound, invalid form.
    else:
        form = CartForm(request, initial={'product_id': product.id})
    return render(request, 'ecommerce_app/product_detail.html', {
        'product': product,
        'form': form,
    })
def show_cart(request):
    """Show the cart; handle quantity updates and item removal on POST.

    The pressed button's value (``submit``) selects the action: 'Update'
    changes a quantity, 'Remove' deletes a line.
    """
    if request.method == 'POST':
        action = request.POST.get('submit')
        if action == 'Update':
            cart.update_item(request)
        if action == 'Remove':
            cart.remove_item(request)
    context = {
        'cart_items': cart.get_all_cart_items(request),
        'cart_subtotal': cart.subtotal(request),
    }
    return render(request, 'ecommerce_app/cart.html', context)
def checkout(request):
    """Collect shipping details, persist the Order and its LineItems, then
    hand off to PayPal processing.

    On a valid POST: saves an Order, copies every cart item into a LineItem,
    clears the cart, stashes the order id in the session and redirects to
    ``process_payment``. Otherwise renders the (possibly bound) form.
    """
    if request.method == 'POST':
        form = CheckoutForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            order = Order(
                name=data.get('name'),
                email=data.get('email'),
                postal_code=data.get('postal_code'),
                address=data.get('address'),
            )
            order.save()
            # Snapshot price/quantity per item so later product edits don't
            # rewrite historical orders.
            for cart_item in cart.get_all_cart_items(request):
                LineItem(
                    product_id=cart_item.product_id,
                    price=cart_item.price,
                    quantity=cart_item.quantity,
                    order_id=order.id,
                ).save()
            cart.clear(request)
            request.session['order_id'] = order.id
            return redirect('process_payment')
    else:
        form = CheckoutForm()
    # Explicit context instead of locals(): locals() leaked every local name
    # into the template and hid what the template actually depends on.
    return render(request, 'ecommerce_app/checkout.html', {'form': form})
def process_payment(request):
    """Build the PayPal Payments Standard form for the session's order.

    Reads ``order_id`` stored by ``checkout`` and renders a redirect form
    with notify/return/cancel URLs on this host.
    """
    order_id = request.session.get('order_id')
    order = get_object_or_404(Order, id=order_id)
    line_items = order.lineitem_set.all()
    host = request.get_host()
    paypal_dict = {
        'business': settings.PAYPAL_RECEIVER_EMAIL,
        # PayPal requires exactly two decimal places.
        'amount': '%.2f' % order.total_cost().quantize(
            Decimal('.01')),
        'item_name': 'Order {}'.format(order.id),
        'invoice': str(order.id),
        'currency_code': 'USD',
        'notify_url': 'http://{}{}'.format(host,
                                           reverse('paypal-ipn')),
        'return_url': 'http://{}{}'.format(host,
                                           reverse('payment_done')),
        'cancel_return': 'http://{}{}'.format(host,
                                              reverse('payment_cancelled')),
    }
    form = PayPalPaymentsForm(initial=paypal_dict)
    return render(request, 'ecommerce_app/process_payment.html', {
        'order': order,
        # Fixed key: was misspelled 'line_itens', inconsistent with
        # process_subscription, so templates using 'line_items' got nothing.
        'line_items': line_items,
        # Deprecated alias kept for backward compatibility; remove once the
        # template has been checked for the misspelled name.
        'line_itens': line_items,
        'form': form,
    })
@login_required
def process_subscription(request, product_id):
    """Create an order for a subscription product and render the PayPal
    recurring-payments ("subscribe") form.
    Non-subscription products are redirected back to the index page.
    Requires an authenticated user (the order is attached to request.user).
    """
    product = get_object_or_404(Product, id=product_id)
    if not product.is_subscription:
        return redirect('index')
    user = request.user
    order = Order.objects.create(user=user)
    # One line item at quantity 1: the subscription itself.
    line_item = LineItem.objects.create(product_id=product.id,
                                        price=product.price,
                                        quantity=1,
                                        order_id=order.id)
    host = request.get_host()
    # PayPal "_xclick-subscriptions" button variables; a3/p3/t3 together
    # mean "charge a3 every 1 month".
    paypal_dict = {
        'cmd': '_xclick-subscriptions',
        'business': settings.PAYPAL_RECEIVER_EMAIL,
        'a3': '%.2f' % order.total_cost().quantize(
            Decimal('.01')),  # monthly price
        'p3': 1,  # duration of each unit (depends on unit)
        't3': "M",  # duration unit ("M for Month")
        'src': "1",  # make payments recur
        'sra': "1",  # reattempt payment on payment error
        'no_note': "1",  # remove extra notes (optional)
        'item_name': product.slug,
        'invoice': str(order.id),
        # NOTE(review): unlike process_payment, no 'currency_code' is set
        # here, so PayPal falls back to the account default -- confirm intended.
        'notify_url': 'http://{}{}'.format(host,
                                           reverse('paypal-ipn')),
        'return_url': 'http://{}{}'.format(host,
                                           reverse('payment_done')),
        'cancel_return': 'http://{}{}'.format(host,
                                              reverse('payment_cancelled')),
    }
    # Create the instance.
    form = PayPalPaymentsForm(initial=paypal_dict, button_type="subscribe")
    # Output the button.
    return render(request, 'ecommerce_app/process_subscription.html', {'order': order,
                                                                       'line_items': order.lineitem_set.all(),
                                                                       'user': request.user,
                                                                       'form': form})
@csrf_exempt
def payment_done(request):
    """PayPal return target: confirmation page after a successful payment.

    CSRF is exempted because PayPal POSTs back without a Django CSRF token.
    """
    template_name = 'ecommerce_app/payment_done.html'
    return render(request, template_name)
@csrf_exempt
def payment_canceled(request):
    """PayPal cancel target: page shown when the buyer aborts payment.

    CSRF is exempted because PayPal POSTs back without a Django CSRF token.
    """
    template_name = 'ecommerce_app/payment_cancelled.html'
    return render(request, template_name)
get_object_or_404, reverse
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.conf import settings
from decimal import Decimal
from paypal.standard.forms import PayPalPaymentsForm
from django.views.decorators.csrf import csrf_exempt
from .models import Product, Order, LineItem
from .forms import CartForm, CheckoutForm
from . import cart
# Create your views here.
def index(request):
all_products = Product.objects.all()
return render(request, "ecommerce_app/index.html", {
'all_products': all_products,
})
def show_product(request, product_id, product_slug):
product = get_object_or_404(Product, id=product_id)
if request.method == 'POST':
form = CartForm(request, request.POST)
if form.is_valid():
request.form_data = form.cleaned_data
cart.add_item_to_cart(request)
return redirect('show_cart')
form = CartForm(request, initial={'product_id': product.id})
return render(request, 'ecommerce_app/product_detail.html', {
'product': product,
'form': form,
})
def show_cart(request):
if request.method == 'POST':
if request.POST.get('submit') == 'Update':
cart.update_item(request)
if request.POST.get('submit') == 'Remove':
cart.remove_item(request)
cart_items = cart.get_all_cart_items(request)
cart_subtotal = cart.subtotal(request)
return render(request, 'ecommerce_app/cart.html', {
'cart_items': cart_items,
'cart_subtotal': cart_subtotal,
})
def checkout(request):
if request.method == 'POST':
form = CheckoutForm(request.POST)
if form.is_valid():
cleaned_data = form.cleaned_data
o = Order(
name=cleaned_data.get('name'),
email=cleaned_data.get('email'),
postal_code=cleaned_data.get('postal_code'),
address=cleaned_data.get('address'),
)
o.save()
all_items = cart.get_all_cart_items(request)
for cart_item in all_items:
li = LineItem(
product_id=cart_item.product_id,
price=cart_item.price,
quantity=cart_item.quantity,
order_id=o.id
)
li.save()
cart.clear(request)
request.session['order_id'] = o.id
return redirect('process_payment')
else:
form = CheckoutForm()
return render(request, 'ecommerce_app/checkout.html', locals())
def process_payment(request):
order_id = request.session.get('order_id')
order = get_object_or_404(Order, id=order_id)
line_items = order.lineitem_set.all()
host = request.get_host()
paypal_dict = {
'business': settings.PAYPAL_RECEIVER_EMAIL,
'amount': '%.2f' % order.total_cost().quantize(
Decimal('.01')),
'item_name': 'Order {}'.format(order.id),
'invoice': str(order.id),
'currency_code': 'USD',
'notify_url': 'http://{}{}'.format(host,
reverse('paypal-ipn')),
'return_url': 'http://{}{}'.format(host,
reverse('payment_done')),
'cancel_return': 'http://{}{}'.format(host,
reverse('payment_cancelled')),
}
form = PayPalPaymentsForm(initial=paypal_dict)
return render(request, 'ecommerce_app/process_payment.html', {'order': order,
'line_itens': line_items,
'form': form})
@login_required
def process_subscription(request, product_id):
product = get_object_or_404(Product, id=product_id)
if not product.is_subscription:
return redirect('index')
user = request.user
order = Order.objects.create(user=user)
line_item = LineItem.objects.create(product_id=product.id,
price=product.price,
quantity=1,
order_id=order.id)
host = request.get_host()
paypal_dict = {
'cmd': '_xclick-subscriptions',
'business': settings.PAYPAL_RECEIVER_EMAIL,
'a3': '%.2f' % order.total_cost().quantize(
Decimal('.01')), # monthly price
'p3': 1, # duration of each unit (depends on unit)
't3': "M", # duration unit ("M for Month")
'src': "1", # make payments recur
'sra': "1", # reattempt payment on payment error
'no_note': "1", # remove extra notes (optional)
'item_name': product.slug,
'invoice': str(order.id),
'notify_url': 'http://{}{}'.format(host,
reverse('paypal-ipn')),
'return_url': 'http://{}{}'.format(host,
reverse('payment_done')),
'cancel_return': 'http://{}{}'.format(host,
reverse('payment_cancelled')),
}
# Create the instance.
form = PayPalPaymentsForm(initial=paypal_dict, button_type="subscribe")
# Output the button.
return render(request, 'ecommerce_app/process_subscription.html', {'order': order,
'line_items': order.lineitem_set.all(),
'user': request.user,
'form': form})
@csrf_exempt
def payment_done(request):
return render(request, 'ecommerce_app/payment_done.html')
@csrf_exempt
def payment_canceled(request):
return render(request, 'ecommerce_app/payment_cancelled.html') | 0.508788 | 0.081739 |
import click
def parse_variable_filter(argument):
    """Split a ``+var=v1,v2`` / ``-var[=...]`` CLI token into (name, values).

    The leading ``+``/``-`` is stripped, ``py`` is normalized to ``python``,
    and a missing value part yields an empty set.
    """
    name, _, raw_values = argument[1:].partition('=')
    if name == 'py':
        name = 'python'
    values = set(raw_values.split(',')) if raw_values else set()
    return name, values
def select_matrix_environments(environments, included_variables, excluded_variables):
    """Return the names of matrix-generated environments matching the filters.

    ``environments`` maps an environment name to its ``{variable: value}``
    dict. An environment is dropped when any of its variables appears in
    ``excluded_variables`` with no values (exclude all) or with a matching
    value. When ``included_variables`` is non-empty, the environment must
    carry every included variable, with a matching value where values were
    given; variables not mentioned in the inclusion filters do not
    disqualify it.

    BUG FIX: the original broke out (deselecting the environment) whenever
    one of its variables was simply absent from ``included_variables``, so
    e.g. ``hatch run +py=310 ...`` on a python/version matrix selected
    nothing -- contradicting the documented example in ``run`` below.
    """
    selected_environments = []
    for env_name, variables in environments.items():
        # Exclusion: any matching -var[=values] filter rejects the env.
        excluded = False
        for variable, value in variables.items():
            if variable in excluded_variables:
                excluded_values = excluded_variables[variable]
                if not excluded_values or value in excluded_values:
                    excluded = True
                    break
        if excluded:
            continue
        # Inclusion: every +var[=values] filter must be satisfied.
        included = True
        for variable, included_values in included_variables.items():
            if variable not in variables:
                included = False
                break
            if included_values and variables[variable] not in included_values:
                included = False
                break
        if included:
            selected_environments.append(env_name)
    return selected_environments
@click.command(
    short_help='Run commands within project environments',
    context_settings={'help_option_names': [], 'ignore_unknown_options': True},
)
@click.argument('args', metavar='[ENV:]ARGS...', required=True, nargs=-1)
@click.pass_obj
def run(app, args):
    """
    Run commands within project environments.
    If the first argument contains a colon, then the preceding component will be
    interpreted as the name of the environment to target, overriding the `-e`/`--env`
    [root option](#hatch) and the `HATCH_ENV` environment variable.
    If the environment provides matrices, then you may also provide leading arguments
    starting with a `+` or `-` to select or exclude certain variables, optionally
    followed by specific comma-separated values. For example, if you have the
    following configuration:
    === ":octicons-file-code-16: pyproject.toml"
        ```toml
        [[tool.hatch.envs.test.matrix]]
        python = ["39", "310"]
        version = ["42", "3.14", "9000"]
        ```
    === ":octicons-file-code-16: hatch.toml"
        ```toml
        [[envs.test.matrix]]
        python = ["39", "310"]
        version = ["42", "3.14", "9000"]
        ```
    then running:
    ```
    hatch run +py=310 -version=9000 test:pytest
    ```
    would execute `pytest` in the environments `test.py310-42` and `test.py310-3.14`.
    Note that `py` may be used as an alias for `python`.
    """
    project = app.project
    # Peel leading +var[=v1,v2] / -var[=v1,v2] filter tokens off the argument
    # list; everything from the first non-filter token onward is the command.
    command_start = 0
    included_variables = {}
    excluded_variables = {}
    for i, arg in enumerate(args):
        command_start = i
        if arg.startswith('+'):
            variable, values = parse_variable_filter(arg)
            if variable in included_variables:
                app.abort(f'Duplicate included variable: {variable}')
            included_variables[variable] = values
        elif arg.startswith('-'):
            variable, values = parse_variable_filter(arg)
            if variable in excluded_variables:
                app.abort(f'Duplicate excluded variable: {variable}')
            excluded_variables[variable] = values
        else:
            break
    else:
        # Every argument was a filter token: nothing is left for the command.
        command_start += 1
    args = args[command_start:]
    if not args:
        app.abort('Missing argument `MATRIX:ARGS...`')
    # An `env:command` prefix on the first remaining token overrides the
    # default environment (app.env).
    command, *args = args
    env_name, separator, command = command.rpartition(':')
    if not separator:
        env_name = app.env
    args = [command, *args]
    # A bare leading colon (`:cmd`) means "no environment": synthesize an
    # implicit "system" env that skips installation but keeps the scripts.
    system_environment = False
    if not env_name:
        system_environment = True
        env_name = 'system'
        project.config.config['envs'] = {
            env_name: {
                'type': env_name,
                'skip-install': True,
                'scripts': project.config.scripts,
            }
        }
    # Expand a matrix name into its generated environments, applying the
    # +/- variable filters parsed above; filters are invalid otherwise.
    is_matrix = False
    if env_name in project.config.matrices:
        is_matrix = True
        env_data = project.config.matrices[env_name]['envs']
        if not env_data:
            app.abort(f'No variables defined for matrix: {env_name}')
        environments = select_matrix_environments(env_data, included_variables, excluded_variables)
        if not environments:
            app.abort('No environments were selected')
    else:
        if included_variables or excluded_variables:
            app.abort(f'Variable selection is unsupported for non-matrix environment: {env_name}')
        environments = [env_name]
    # Run the command in every selected environment. Incompatible matrix
    # entries are collected and reported; a single incompatible env aborts.
    any_compatible = False
    incompatible = {}
    with project.location.as_cwd():
        for env_name in environments:
            environment = app.get_environment(env_name)
            try:
                environment.check_compatibility()
            except Exception as e:
                if is_matrix:
                    incompatible[environment.name] = str(e)
                    continue
                else:
                    app.abort(f'Environment `{env_name}` is incompatible: {e}')
            any_compatible = True
            if is_matrix:
                app.display_header(environment.name)
            if system_environment:
                # The implicit system env always "exists"; never create it.
                environment.exists = lambda: True
            app.prepare_environment(environment)
            # Abort with the first failing process's exit code.
            for process in environment.run_shell_commands([environment.join_command_args(args)]):
                if process.returncode:
                    app.abort(code=process.returncode)
    # Summarize skipped matrix entries after all compatible ones have run.
    if incompatible:
        num_incompatible = len(incompatible)
        padding = '\n' if any_compatible else ''
        app.display_warning(
            f'{padding}Skipped {num_incompatible} incompatible environment{"s" if num_incompatible > 1 else ""}:'
        )
        for env_name, reason in incompatible.items():
            app.display_warning(f'{env_name} -> {reason}')
def parse_variable_filter(argument):
variable, _, values = argument[1:].partition('=')
if variable == 'py':
variable = 'python'
parsed_values = set(values.split(',')) if values else set()
return variable, parsed_values
def select_matrix_environments(environments, included_variables, excluded_variables):
selected_environments = []
for env_name, variables in environments.items():
for variable, value in variables.items():
if variable in excluded_variables:
excluded_values = excluded_variables[variable]
if not excluded_values or value in excluded_values:
break
if included_variables:
if variable not in included_variables:
break
else:
included_values = included_variables[variable]
if included_values and value not in included_values:
break
else:
selected_environments.append(env_name)
return selected_environments
@click.command(
short_help='Run commands within project environments',
context_settings={'help_option_names': [], 'ignore_unknown_options': True},
)
@click.argument('args', metavar='[ENV:]ARGS...', required=True, nargs=-1)
@click.pass_obj
def run(app, args):
"""
Run commands within project environments.
If the first argument contains a colon, then the preceding component will be
interpreted as the name of the environment to target, overriding the `-e`/`--env`
[root option](#hatch) and the `HATCH_ENV` environment variable.
If the environment provides matrices, then you may also provide leading arguments
starting with a `+` or `-` to select or exclude certain variables, optionally
followed by specific comma-separated values. For example, if you have the
following configuration:
=== ":octicons-file-code-16: pyproject.toml"
```toml
[[tool.hatch.envs.test.matrix]]
python = ["39", "310"]
version = ["42", "3.14", "9000"]
```
=== ":octicons-file-code-16: hatch.toml"
```toml
[[envs.test.matrix]]
python = ["39", "310"]
version = ["42", "3.14", "9000"]
```
then running:
```
hatch run +py=310 -version=9000 test:pytest
```
would execute `pytest` in the environments `test.py310-42` and `test.py310-3.14`.
Note that `py` may be used as an alias for `python`.
"""
project = app.project
command_start = 0
included_variables = {}
excluded_variables = {}
for i, arg in enumerate(args):
command_start = i
if arg.startswith('+'):
variable, values = parse_variable_filter(arg)
if variable in included_variables:
app.abort(f'Duplicate included variable: {variable}')
included_variables[variable] = values
elif arg.startswith('-'):
variable, values = parse_variable_filter(arg)
if variable in excluded_variables:
app.abort(f'Duplicate excluded variable: {variable}')
excluded_variables[variable] = values
else:
break
else:
command_start += 1
args = args[command_start:]
if not args:
app.abort('Missing argument `MATRIX:ARGS...`')
command, *args = args
env_name, separator, command = command.rpartition(':')
if not separator:
env_name = app.env
args = [command, *args]
system_environment = False
if not env_name:
system_environment = True
env_name = 'system'
project.config.config['envs'] = {
env_name: {
'type': env_name,
'skip-install': True,
'scripts': project.config.scripts,
}
}
is_matrix = False
if env_name in project.config.matrices:
is_matrix = True
env_data = project.config.matrices[env_name]['envs']
if not env_data:
app.abort(f'No variables defined for matrix: {env_name}')
environments = select_matrix_environments(env_data, included_variables, excluded_variables)
if not environments:
app.abort('No environments were selected')
else:
if included_variables or excluded_variables:
app.abort(f'Variable selection is unsupported for non-matrix environment: {env_name}')
environments = [env_name]
any_compatible = False
incompatible = {}
with project.location.as_cwd():
for env_name in environments:
environment = app.get_environment(env_name)
try:
environment.check_compatibility()
except Exception as e:
if is_matrix:
incompatible[environment.name] = str(e)
continue
else:
app.abort(f'Environment `{env_name}` is incompatible: {e}')
any_compatible = True
if is_matrix:
app.display_header(environment.name)
if system_environment:
environment.exists = lambda: True
app.prepare_environment(environment)
for process in environment.run_shell_commands([environment.join_command_args(args)]):
if process.returncode:
app.abort(code=process.returncode)
if incompatible:
num_incompatible = len(incompatible)
padding = '\n' if any_compatible else ''
app.display_warning(
f'{padding}Skipped {num_incompatible} incompatible environment{"s" if num_incompatible > 1 else ""}:'
)
for env_name, reason in incompatible.items():
app.display_warning(f'{env_name} -> {reason}') | 0.649023 | 0.735167 |
import numpy as np
from tqdm import trange
from chapter04.car_rental_mine import cartesian_prod
np.random.seed(5)
class WindyWorld(object):
    """Rectangular grid world with a per-column upward wind (Sutton & Barto,
    example 6.5).

    States are (row, col) tuples; ``wind_force[col]`` is subtracted from the
    row index on every move made from column ``col``.
    """

    def __init__(self, hight, width, start, end, wind_force):
        self.hight = hight  # number of rows ("hight" spelling kept for callers)
        self.width = width  # number of columns
        self.start = start  # (row, col) start cell
        self.end = end      # (row, col) goal cell
        # Keep a pristine copy so stochastic perturbations can be undone.
        self.init_wind_force = wind_force.copy()
        self.wind_force = wind_force.copy()

    def reset_wind_force(self):
        """Restore the deterministic wind profile.

        BUG FIX: the original assigned ``self.init_wind_force`` by
        reference, so the in-place ``+=`` in ``stochastic_wind`` then
        corrupted the pristine profile and every later reset inherited the
        corruption. A copy keeps ``init_wind_force`` immutable in practice.
        """
        self.wind_force = self.init_wind_force.copy()

    def stochastic_wind(self):
        """Perturb each windy column's strength by -1, 0 or +1 (uniformly).

        Columns whose deterministic wind is 0 stay calm.
        """
        self.reset_wind_force()
        random_wind = np.random.randint(-1, 2, len(self.wind_force))
        random_wind = np.where(self.wind_force == 0, 0, random_wind)
        self.wind_force += random_wind
def action_gen(kings_move=True):
    """Build the action set as (d_row, d_col) pairs drawn from {-1, 0, 1}^2.

    kings_move=True  -> 8 king moves (diagonals included, (0, 0) removed)
    kings_move=False -> 4 cardinal moves only
    kings_move=None  -> all 9 combinations, including staying in place
    """
    vertical_possible = np.arange(-1, 2)
    horizontal_possible = np.arange(-1, 2)
    # Row ordering is determined by chapter04's cartesian_prod helper; the
    # q-table's action axis relies on that ordering staying stable.
    actions = cartesian_prod(vertical_possible, horizontal_possible)
    actions = np.vstack(actions).T
    if kings_move is None:
        return actions
    # Drop the (0, 0) "stay" action.
    mask = np.any(actions != 0, axis=1)
    if not kings_move:
        # Keep only rows with at least one component whose abs value != 1,
        # i.e. remove the four diagonal moves.
        mask2 = np.any(abs(actions)!=1, axis=1)
        mask &= mask2
    return actions[mask]
def move(env: WindyWorld, state, action):
    """Apply ``action`` from ``state``, pushed up by the column's wind.

    The wind of the departure column is subtracted from the row; both
    coordinates are clamped to the grid. Returns the new (row, col).
    """
    row, col = state
    d_row, d_col = action
    next_row = np.clip(row + d_row - env.wind_force[col], 0, env.hight - 1)
    next_col = np.clip(col + d_col, 0, env.width - 1)
    return next_row, next_col
def epsilon_greedy(state, actions, q, epsilon):
    """Pick an action index: uniformly random with probability ``epsilon``,
    otherwise greedy w.r.t. the q-values of ``state``.

    Ties in the greedy case resolve to the lowest index (np.argmax).
    """
    explore = np.random.random() < epsilon
    if explore:
        return np.random.randint(len(actions))
    return np.argmax(q[state[0], state[1], :])
def single_episode(env: WindyWorld, actions, epsilon, step_size, q, stochastic_wind=False):
    """Run one Sarsa episode from ``env.start`` until ``env.end`` is reached.

    Args:
        env: the windy grid world.
        actions: array of (d_row, d_col) moves; indices index q's last axis.
        epsilon: exploration probability of the epsilon-greedy policy.
        step_size: Sarsa learning rate (alpha).
        q: action-value table of shape (hight, width, n_actions), updated
           in place.
        stochastic_wind: re-randomize the wind before every step if True.

    Returns:
        (steps, trajectory): count of non-terminal transitions and the list
        of visited states (the terminal state itself is not appended).
    """
    state = env.start
    ending = False  # never set True; the loop exits via ``break`` at the goal
    action_idx = epsilon_greedy(state, actions, q, epsilon)
    steps = 0
    trajectory = [state]
    while not ending:
        if stochastic_wind:
            env.stochastic_wind()
        new_state = move(env, state, actions[action_idx])
        new_action_idx = epsilon_greedy(new_state, actions, q, epsilon)
        # Sarsa update with a constant reward of -1 per step:
        # Q(s,a) += alpha * (r + Q(s',a') - Q(s,a)); indexing uses tuple
        # concatenation so state must be a (row, col) tuple.
        q[state+(action_idx,)] += step_size * (-1 + q[new_state+(new_action_idx,)] - q[state+(action_idx,)])
        if new_state == env.end:
            break
        state = new_state
        action_idx = new_action_idx
        steps+=1
        trajectory.append(state)
    return steps, trajectory
if __name__ == '__main__':
    # Sarsa hyper-parameters for the windy grid world experiment.
    epsilon = 0.1
    step_size = 0.5
    # actions
    actions = action_gen(kings_move=True)
    # gridworld
    hight = 7
    width = 10
    start = (3, 0)
    end = (3, 7)
    wind_force = np.array([0, 0, 0, 1, 1, 1, 2, 2, 1, 0])
    world = WindyWorld(hight=hight, width=width, start=start, end=end, wind_force=wind_force)
    # Action-value table, initialized to zero.
    q = np.zeros((hight, width, len(actions)))
    episodes = 200
    for episode in trange(episodes):
        single_episode(world, actions, epsilon, step_size, q, stochastic_wind=True)
    world.reset_wind_force()
    # Greedy (epsilon=0) evaluation episode.
    # NOTE(review): single_episode returns (steps, trajectory), so this
    # binds the whole tuple to ``steps`` -- unpack if the count is wanted.
    steps = single_episode(world, actions, 0, step_size, q, stochastic_wind=True)
from tqdm import trange
from chapter04.car_rental_mine import cartesian_prod
np.random.seed(5)
class WindyWorld(object):
def __init__(self, hight, width, start, end, wind_force):
self.hight = hight
self.width = width
self.start = start
self.end = end
self.init_wind_force = wind_force.copy()
self.wind_force = wind_force.copy()
def reset_wind_force(self):
self.wind_force = self.init_wind_force
def stochastic_wind(self):
self.reset_wind_force()
random_wind = np.random.randint(-1,2,len(self.wind_force))
random_wind = np.where(self.wind_force==0,0,random_wind)
self.wind_force += random_wind
def action_gen(kings_move=True):
vertical_possible = np.arange(-1, 2)
horizontal_possible = np.arange(-1, 2)
actions = cartesian_prod(vertical_possible, horizontal_possible)
actions = np.vstack(actions).T
if kings_move is None:
return actions
mask = np.any(actions != 0, axis=1)
if not kings_move:
mask2 = np.any(abs(actions)!=1, axis=1)
mask &= mask2
return actions[mask]
def move(env: WindyWorld, state, action):
new_state_vertical = state[0] + action[0] - env.wind_force[state[1]]
new_state_horizontal = state[1] + action[1]
new_state_vertical = np.clip(new_state_vertical, 0, env.hight-1)
new_state_horizontal = np.clip(new_state_horizontal, 0, env.width-1)
return new_state_vertical, new_state_horizontal
def epsilon_greedy(state, actions, q, epsilon):
if np.random.random() < epsilon:
action_idx = np.random.randint(len(actions))
else:
action_idx = np.argmax(q[state[0], state[1],:])
return action_idx
def single_episode(env: WindyWorld, actions, epsilon, step_size, q, stochastic_wind=False):
state = env.start
ending = False
action_idx = epsilon_greedy(state, actions, q, epsilon)
steps = 0
trajectory = [state]
while not ending:
if stochastic_wind:
env.stochastic_wind()
new_state = move(env, state, actions[action_idx])
new_action_idx = epsilon_greedy(new_state, actions, q, epsilon)
q[state+(action_idx,)] += step_size * (-1 + q[new_state+(new_action_idx,)] - q[state+(action_idx,)])
if new_state == env.end:
break
state = new_state
action_idx = new_action_idx
steps+=1
trajectory.append(state)
return steps, trajectory
if __name__ == '__main__':
epsilon = 0.1
step_size = 0.5
# actions
actions = action_gen(kings_move=True)
# gridworld
hight = 7
width = 10
start = (3, 0)
end = (3, 7)
wind_force = np.array([0, 0, 0, 1, 1, 1, 2, 2, 1, 0])
world = WindyWorld(hight=hight, width=width, start=start, end=end, wind_force=wind_force)
q = np.zeros((hight, width, len(actions)))
episodes = 200
for episode in trange(episodes):
single_episode(world, actions, epsilon, step_size, q, stochastic_wind=True)
world.reset_wind_force()
steps = single_episode(world, actions, 0, step_size, q, stochastic_wind=True) | 0.609989 | 0.441914 |
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import nibabel as nib
import os
import cv2
import math
def water(img_path):
    """Segment the image at ``img_path`` with the classic watershed pipeline.

    Otsu-thresholds the inverted grayscale image, cleans it with an opening,
    derives sure background/foreground, runs cv2.watershed, then paints
    boundaries/background black and foreground labels white. Returns the
    painted BGR image.
    """
    src = cv2.imread(img_path)
    img = src.copy()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Inverted binary + Otsu; the explicit threshold 0 is ignored with OTSU.
    ret, thresh = cv2.threshold(
        gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    kernel = np.ones((3, 3), np.uint8)
    # Opening removes small speckles from the mask.
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
    kernel2 = np.ones((7, 7), np.uint8)
    # Dilated mask approximates the "sure background" region.
    sure_bg = cv2.dilate(opening, kernel2, iterations=3)
    # NOTE(review): the distance transform is taken on sure_bg; the standard
    # OpenCV recipe applies it to ``opening`` -- confirm this is intentional.
    dist_transform = cv2.distanceTransform(sure_bg, 1, 5)
    # Pixels beyond 70% of the max distance count as sure foreground.
    ret, sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(), 255, 0)
    sure_fg = np.uint8(sure_fg)
    # Unknown band = background minus foreground; watershed will decide it.
    unknown = cv2.subtract(sure_bg, sure_fg)
    ret, markers1 = cv2.connectedComponents(sure_fg)
    # Shift labels by one so background is 1 (watershed treats 0 as unknown).
    markers = markers1 + 1
    markers[unknown == 255] = 0
    markers3 = cv2.watershed(img, markers)
    # Paint boundaries (-1) and background (1) black, object labels white.
    # NOTE(review): only labels 2-4 are whitened; images with more than
    # three components would leave extra labels unpainted -- confirm.
    img[markers3 == -1] = [0, 0, 0]
    img[markers3 == 1] = [0, 0, 0]
    img[markers3 == 2] = [255, 255, 255]
    img[markers3 == 3] = [255, 255, 255]
    img[markers3 == 4] = [255, 255, 255]
    return img
def segmentation(img_path):
    """Otsu-threshold an image and clean it with a morphological opening.

    Returns the opened binary mask as a single-channel uint8 array.
    (Currently unused by the batch loop below, which uses ``water``.)
    """
    src = cv2.imread(img_path)
    img = src.copy()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # With THRESH_OTSU the explicit threshold argument is ignored, so the
    # ``gray.max()`` value has no effect; it is kept only for parity.
    ret, thresh = cv2.threshold(gray, gray.max(), 255, cv2.THRESH_OTSU)
    kernel = np.ones((3, 3), np.uint8)
    # The original also computed an unused closing and dilation here; those
    # dead statements have been removed.
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
    return opening
# Batch driver: convert every mask image under <source>/masks into a
# black/white label image under <source>/labels.
source_path = r"a2_b path"  # NOTE(review): placeholder path -- set before running
path = source_path + "/masks"
output_path = source_path + '/labels'
path_list = os.listdir(path)
path_list.sort()
len1 = 0
count = 0
for filename in path_list:
    count += 1
    cont_area = []  # unused; kept as in the original
    len1 += 1
    image_path = os.path.join(path, filename)
    src = cv2.imread(image_path)  # read but unused; ``water`` re-reads the file
    result = water(image_path)
    # Strip the extension, then drop the last 5 characters of the stem
    # (presumably a "_mask"-style suffix -- confirm against the file names)
    # and append "_segmentation".
    index = filename.rfind('.')
    filename = filename[:index]
    filename = filename[:-5] + "_segmentation"
    cv2.imwrite(output_path +'/'+ filename+".png", result)
    # Progress as a percentage of processed files.
    print(round(count * 100 / len(path_list), 2), "%")
from PIL import Image
import numpy as np
import nibabel as nib
import os
import cv2
import math
def water(img_path):
src = cv2.imread(img_path)
img = src.copy()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(
gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
kernel = np.ones((3, 3), np.uint8)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
kernel2 = np.ones((7, 7), np.uint8)
sure_bg = cv2.dilate(opening, kernel2, iterations=3)
dist_transform = cv2.distanceTransform(sure_bg, 1, 5)
ret, sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(), 255, 0)
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg, sure_fg)
ret, markers1 = cv2.connectedComponents(sure_fg)
markers = markers1 + 1
markers[unknown == 255] = 0
markers3 = cv2.watershed(img, markers)
img[markers3 == -1] = [0, 0, 0]
img[markers3 == 1] = [0, 0, 0]
img[markers3 == 2] = [255, 255, 255]
img[markers3 == 3] = [255, 255, 255]
img[markers3 == 4] = [255, 255, 255]
return img
def segmentation(img_path):
src= cv2.imread(img_path)
img = src.copy()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, gray.max(), 255, cv2.THRESH_OTSU)
kernel = np.ones((3, 3), np.uint8)
closing = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
dilate = cv2.morphologyEx(thresh, cv2.MORPH_DILATE, kernel)
return opening
source_path = r"a2_b path"
path = source_path + "/masks"
output_path = source_path + '/labels'
path_list = os.listdir(path)
path_list.sort()
len1 = 0
count = 0
for filename in path_list:
count += 1
cont_area = []
len1 += 1
image_path = os.path.join(path, filename)
src = cv2.imread(image_path)
result = water(image_path)
index = filename.rfind('.')
filename = filename[:index]
filename = filename[:-5] + "_segmentation"
cv2.imwrite(output_path +'/'+ filename+".png", result)
print(round(count * 100 / len(path_list), 2), "%") | 0.193566 | 0.329931 |
import math,random
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
from sklearn.metrics import confusion_matrix
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
import torchaudio
from torchaudio import transforms
class AudioData(Dataset):
    """Torch Dataset yielding (mel spectrogram, class id) pairs.

    Expects ``df`` with columns 'relative_path' and 'classID'; audio files
    live under ``data_path``. SpecAugment masking is applied only when
    ``train`` is True.
    """
    def __init__(self, df, data_path, train=True):
        self.train = train
        self.df = df
        self.data_path = str(data_path)
        self.duration = 1500  # target clip length in ms (see AudioUtil.pad_trunc)
        self.sr = 48000  # target sample rate in Hz
        self.channel = 2  # target channel count
        self.shift_pct = 0.4  # time-shift fraction (currently unused, see below)
    def __len__(self):
        """Number of samples in the backing dataframe."""
        return len(self.df)
    def __getitem__(self, idx):
        """Load, standardize and (optionally) augment the idx-th clip."""
        audio_file = self.data_path + self.df.loc[idx, 'relative_path']
        class_id = self.df.loc[idx, 'classID']
        aud = AudioUtil.open(audio_file)
        # Standardize sample rate, channels and duration so every sample
        # yields a spectrogram of identical shape.
        reaud = AudioUtil.resample(aud, self.sr)
        rechan = AudioUtil.rechannel(reaud, self.channel)
        dur_aud = AudioUtil.pad_trunc(rechan, self.duration)
        #shift_aud = AudioUtil.time_shift(dur_aud, self.shift_pct)
        sgram = AudioUtil.spectrogram(dur_aud)
        if self.train:
            # Augment training samples only.
            sgram = AudioUtil.spec_augment(sgram, max_mask_pct=0.1, n_freq_masks=2, n_time_masks=2)
        return sgram, class_id
class AudioUtil():
    """Static helpers for loading and preprocessing audio into spectrograms.

    FIX: the original placed each method's description as a bare string
    *before* the ``@staticmethod`` decorator; those strings were inert
    expression statements, invisible to ``help()``. They are now proper
    method docstrings. No executable behavior changed.
    """

    @staticmethod
    def open(audio_file):
        """Load an audio file. Return (signal tensor, sample rate)."""
        sig, sr = torchaudio.load(audio_file)
        return (sig, sr)

    @staticmethod
    def rechannel(aud, new_channel):
        """Convert ``aud = (sig, sr)`` to ``new_channel`` channels (1 or 2)."""
        sig, sr = aud
        if sig.shape[0] == new_channel:
            # Already the right shape; return unchanged.
            return aud
        if new_channel == 1:
            # Keep only the first channel.
            resig = sig[:1, :]
        else:
            # Duplicate the mono channel.
            resig = torch.cat([sig,sig])
        return (resig, sr)

    @staticmethod
    def resample(aud, newsr):
        """Resample ``aud = (sig, sr)`` to ``newsr``, one channel at a time."""
        sig, sr = aud
        if sr == newsr:
            return aud
        num_channels = sig.shape[0]
        resig = torchaudio.transforms.Resample(sr, newsr)(sig[:1,:])
        if num_channels > 1:
            retwo = torchaudio.transforms.Resample(sr, newsr)(sig[1:,:])
            resig = torch.cat([resig, retwo])
        return (resig, newsr)

    @staticmethod
    def pad_trunc(aud, max_ms):
        """Truncate or pad the signal to exactly ``max_ms`` milliseconds.

        Padding is uniform noise drawn from the signal's own [min, max]
        range, split at a random point between the two ends.
        """
        sig, sr = aud
        num_rows, sig_len = sig.shape
        max_len = sr//1000 * max_ms
        if sig_len > max_len:
            sig = sig[:,:max_len]
        elif sig_len < max_len:
            pad_begin_len = random.randint(0,max_len - sig_len)
            pad_end_len = max_len - sig_len - pad_begin_len
            max_noise = sig.max()
            min_noise = sig.min()
            pad_begin = (max_noise-min_noise)*torch.rand((num_rows, pad_begin_len)) + min_noise
            pad_end = (max_noise-min_noise)*torch.rand((num_rows, pad_end_len)) + min_noise
            sig = torch.cat((pad_begin, sig, pad_end), 1)
        return (sig,sr)

    @staticmethod
    def time_shift(aud, shift_limit):
        """Roll the signal by a random fraction (< ``shift_limit``) of its
        length, wrapping the end around."""
        sig, sr = aud
        _, sig_len = sig.shape
        shift_amt = int(random.random() * shift_limit * sig_len)
        return (sig.roll(shift_amt), sr)

    @staticmethod
    def spectrogram(aud, n_mels=64, n_fft=1024, hop_len=None):
        """Return a decibel-scaled mel spectrogram of ``aud = (sig, sr)``."""
        sig,sr = aud
        top_db = 80
        spec = transforms.MelSpectrogram(sr, n_fft=n_fft, hop_length=hop_len, n_mels=n_mels)(sig)
        spec = transforms.AmplitudeToDB(top_db=top_db)(spec)
        return spec

    @staticmethod
    def spec_augment(spec, max_mask_pct=0.1, n_freq_masks=1, n_time_masks=1):
        """Mask random frequency bands and time steps (SpecAugment), filling
        masked cells with the spectrogram's mean value."""
        _, n_mels, n_steps = spec.shape
        mask_value = spec.mean()
        aug_spec = spec
        freq_mask_param = max_mask_pct * n_mels
        for _ in range(n_freq_masks):
            aug_spec = transforms.FrequencyMasking(freq_mask_param)(aug_spec, mask_value)
        time_mask_param = max_mask_pct * n_steps
        for _ in range(n_time_masks):
            aug_spec = transforms.TimeMasking(time_mask_param)(aug_spec, mask_value)
        return aug_spec
class TensorBoard():
    """Helpers for visualizing model predictions with TensorBoard."""

    @staticmethod
    def open(audio_file):
        """Load an audio file. Return (signal tensor, sample rate).

        NOTE(review): duplicates AudioUtil.open; kept so existing callers
        of TensorBoard.open keep working.
        """
        sig, sr = torchaudio.load(audio_file)
        return (sig, sr)

    @staticmethod
    def images_to_probs(net, images):
        '''
        Generates predictions and corresponding probabilities from a trained
        network and a list of images
        '''
        output = net(images)
        # convert output probabilities to predicted class
        _, preds_tensor = torch.max(output, 1)
        preds = np.squeeze(preds_tensor.cpu().numpy())
        # Probability assigned to the predicted class, per sample.
        return preds, [F.softmax(el, dim=0)[i].item() for i, el in zip(preds, output)]

    @staticmethod
    def matplotlib_imshow(img, one_channel=False):
        """Display a (possibly normalized) image tensor with matplotlib."""
        if one_channel:
            img = img.mean(dim=0)
        img = img / 2 + 0.5  # unnormalize
        npimg = img.cpu().numpy()
        if one_channel:
            plt.imshow(npimg, cmap="Greys")
        else:
            # Channels-first tensor -> channels-last image.
            plt.imshow(np.transpose(npimg, (1, 2, 0)))

    @staticmethod
    def plot_classes_preds(net, images, labels):
        '''
        Generates matplotlib Figure using a trained network, along with images
        and labels from a batch, that shows the network's top prediction along
        with its probability, alongside the actual label, coloring this
        information based on whether the prediction was correct or not.
        Uses the "images_to_probs" function.
        '''
        classes = ('0','1','2','3','4','5','6','7','8','9')
        preds, probs = TensorBoard.images_to_probs(net, images)
        # plot the images in the batch, along with predicted and true labels
        fig = plt.figure(figsize=(12, 48))
        for idx in np.arange(4):
            ax = fig.add_subplot(1, 4, idx+1, xticks=[], yticks=[])
            TensorBoard.matplotlib_imshow(images[idx], one_channel=True)
            ax.set_title("{0}, {1:.1f}%\n(label: {2})".format(
                classes[preds[idx]],
                probs[idx] * 100.0,
                classes[labels[idx]]),
                color=("green" if preds[idx]==labels[idx].item() else "red"))
        return fig

    @staticmethod
    def add_pr_curve_tensorboard(class_index, test_probs, test_label, global_step=0, writer=None):
        '''
        Takes in a "class_index" from 0 to 9 and plots the corresponding
        precision-recall curve
        '''
        # BUG FIX: ``classes`` was read from an undefined global (it only
        # existed as a local inside plot_classes_preds), so every call
        # raised NameError. Define the label set locally instead.
        classes = ('0','1','2','3','4','5','6','7','8','9')
        tensorboard_truth = test_label == class_index
        tensorboard_probs = test_probs[:, class_index]
        # NOTE(review): ``writer`` defaults to None but is required; calling
        # without one raises AttributeError below -- confirm callers.
        writer.add_pr_curve(classes[class_index],
                            tensorboard_truth,
                            tensorboard_probs,
                            global_step=global_step)
        writer.close()
class Validation():
@staticmethod
def confusion(y_pred, y_true, save_filepath='confusion.png'):
'''
Creates and returns confusion matrix
'''
classes = ('0','1','2','3','4','5','6','7','8','9')
cf_matrix = confusion_matrix(y_true, y_pred)
df_cm = pd.DataFrame(cf_matrix/np.sum(cf_matrix)*10, index=[i for i in classes]
, columns=[i for i in classes])
plt.close('all')
plt.figure(figsize=(12,7))
sn.heatmap(df_cm, annot=True)
plt.savefig(save_filepath) | utils.py | import math,random
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
from sklearn.metrics import confusion_matrix
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
import torchaudio
from torchaudio import transforms
class AudioData(Dataset):
def __init__(self, df, data_path, train=True):
self.train = train
self.df = df
self.data_path = str(data_path)
self.duration = 1500
self.sr = 48000
self.channel = 2
self.shift_pct = 0.4
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
audio_file = self.data_path + self.df.loc[idx, 'relative_path']
class_id = self.df.loc[idx, 'classID']
aud = AudioUtil.open(audio_file)
reaud = AudioUtil.resample(aud, self.sr)
rechan = AudioUtil.rechannel(reaud, self.channel)
dur_aud = AudioUtil.pad_trunc(rechan, self.duration)
#shift_aud = AudioUtil.time_shift(dur_aud, self.shift_pct)
sgram = AudioUtil.spectrogram(dur_aud)
if self.train:
sgram = AudioUtil.spec_augment(sgram, max_mask_pct=0.1, n_freq_masks=2, n_time_masks=2)
return sgram, class_id
class AudioUtil():
'''
Load an audio file. Return as tensor and sample rate
'''
@staticmethod
def open(audio_file):
sig, sr = torchaudio.load(audio_file)
return (sig, sr)
'''
Model expects 2 channels. Convert 1 channel audio files to 2.
'''
@staticmethod
def rechannel(aud, new_channel):
sig, sr = aud
if sig.shape[0] == new_channel:
return aud
if new_channel == 1:
resig = sig[:1, :]
else:
resig = torch.cat([sig,sig])
return (resig, sr)
'''
Standardize sampling rate.
'''
@staticmethod
def resample(aud, newsr):
sig, sr = aud
if sr == newsr:
return aud
num_channels = sig.shape[0]
resig = torchaudio.transforms.Resample(sr, newsr)(sig[:1,:])
if num_channels > 1:
retwo = torchaudio.transforms.Resample(sr, newsr)(sig[1:,:])
resig = torch.cat([resig, retwo])
return (resig, newsr)
'''
Standardize sample length.
'''
@staticmethod
def pad_trunc(aud, max_ms):
sig, sr = aud
num_rows, sig_len = sig.shape
max_len = sr//1000 * max_ms
if sig_len > max_len:
sig = sig[:,:max_len]
elif sig_len < max_len:
pad_begin_len = random.randint(0,max_len - sig_len)
pad_end_len = max_len - sig_len - pad_begin_len
max_noise = sig.max()
min_noise = sig.min()
pad_begin = (max_noise-min_noise)*torch.rand((num_rows, pad_begin_len)) + min_noise
pad_end = (max_noise-min_noise)*torch.rand((num_rows, pad_end_len)) + min_noise
sig = torch.cat((pad_begin, sig, pad_end), 1)
return (sig,sr)
'''
Shift signal left/right by some percent; wrap the end.
'''
@staticmethod
def time_shift(aud, shift_limit):
sig, sr = aud
_, sig_len = sig.shape
shift_amt = int(random.random() * shift_limit * sig_len)
return (sig.roll(shift_amt), sr)
'''
Generate Mel Spectrogram.
'''
@staticmethod
def spectrogram(aud, n_mels=64, n_fft=1024, hop_len=None):
sig,sr = aud
top_db = 80
spec = transforms.MelSpectrogram(sr, n_fft=n_fft, hop_length=hop_len, n_mels=n_mels)(sig)
spec = transforms.AmplitudeToDB(top_db=top_db)(spec)
return spec
'''
Augment spectrogram by masking periods of time and periods of frequency.
'''
@staticmethod
def spec_augment(spec, max_mask_pct=0.1, n_freq_masks=1, n_time_masks=1):
_, n_mels, n_steps = spec.shape
mask_value = spec.mean()
aug_spec = spec
freq_mask_param = max_mask_pct * n_mels
for _ in range(n_freq_masks):
aug_spec = transforms.FrequencyMasking(freq_mask_param)(aug_spec, mask_value)
time_mask_param = max_mask_pct * n_steps
for _ in range(n_time_masks):
aug_spec = transforms.TimeMasking(time_mask_param)(aug_spec, mask_value)
return aug_spec
class TensorBoard():
'''
Load an audio file. Return as tensor and sample rate
'''
@staticmethod
def open(audio_file):
sig, sr = torchaudio.load(audio_file)
return (sig, sr)
@staticmethod
def images_to_probs(net, images):
'''
Generates predictions and corresponding probabilities from a trained
network and a list of images
'''
output = net(images)
# convert output probabilities to predicted class
_, preds_tensor = torch.max(output, 1)
preds = np.squeeze(preds_tensor.cpu().numpy())
return preds, [F.softmax(el, dim=0)[i].item() for i, el in zip(preds, output)]
@staticmethod
def matplotlib_imshow(img, one_channel=False):
if one_channel:
img = img.mean(dim=0)
img = img / 2 + 0.5 # unnormalize
npimg = img.cpu().numpy()
if one_channel:
plt.imshow(npimg, cmap="Greys")
else:
plt.imshow(np.transpose(npimg, (1, 2, 0)))
@staticmethod
def plot_classes_preds(net, images, labels):
'''
Generates matplotlib Figure using a trained network, along with images
and labels from a batch, that shows the network's top prediction along
with its probability, alongside the actual label, coloring this
information based on whether the prediction was correct or not.
Uses the "images_to_probs" function.
'''
classes = ('0','1','2','3','4','5','6','7','8','9')
preds, probs = TensorBoard.images_to_probs(net, images)
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(12, 48))
for idx in np.arange(4):
ax = fig.add_subplot(1, 4, idx+1, xticks=[], yticks=[])
TensorBoard.matplotlib_imshow(images[idx], one_channel=True)
ax.set_title("{0}, {1:.1f}%\n(label: {2})".format(
classes[preds[idx]],
probs[idx] * 100.0,
classes[labels[idx]]),
color=("green" if preds[idx]==labels[idx].item() else "red"))
return fig
@staticmethod
def add_pr_curve_tensorboard(class_index, test_probs, test_label, global_step=0, writer=None):
'''
Takes in a "class_index" from 0 to 9 and plots the corresponding
precision-recall curve
'''
tensorboard_truth = test_label == class_index
tensorboard_probs = test_probs[:, class_index]
writer.add_pr_curve(classes[class_index],
tensorboard_truth,
tensorboard_probs,
global_step=global_step)
writer.close()
class Validation():
@staticmethod
def confusion(y_pred, y_true, save_filepath='confusion.png'):
'''
Creates and returns confusion matrix
'''
classes = ('0','1','2','3','4','5','6','7','8','9')
cf_matrix = confusion_matrix(y_true, y_pred)
df_cm = pd.DataFrame(cf_matrix/np.sum(cf_matrix)*10, index=[i for i in classes]
, columns=[i for i in classes])
plt.close('all')
plt.figure(figsize=(12,7))
sn.heatmap(df_cm, annot=True)
plt.savefig(save_filepath) | 0.719778 | 0.438184 |
import requests
import argparse
import pathlib
import json
class PublishingFailedException(Exception):
pass
class PactBrokerInterface:
""" Interface to a pact-broker instance
Allows publishing pact test JSON files to pact-broker instance
Attributes
----------
url : str
pact-broker URL
user : str
pact-broker username
password : str
pact-broker password
auth : tuple
(user, password) for request's HTTPBasicAuth
glob : str
glob pattern to match pact files
sep : str
separator for extracting Consumer/Producer name from filename
"""
def __init__(self, url, user, password, glob="*-pact.json", sep="-"):
self.url = url.strip("/")
self.user = user
self.password = password
self.auth = (self.user, self.password)
self.glob = glob
self.sep = sep
self.headers = {'Content-Type': 'application/json'}
def find_pacts(self, pact_path=".", version="1.0.0"):
""" Find local pact files and prepare publication
Parameters
----------
pact_path : str
Filepath or directory containing pact JSON file
version : str
(Consumer) application version
Returns
-------
dict
Keys: Pact file name
Values: URL & body for publication to pact-broker
"""
publication = {}
path = pathlib.Path(pact_path)
if not path.exists():
raise ValueError(f"Unable to find {pact_path}. No such file or directory.")
if path.is_dir():
pathlist = path.glob(f"**/{self.glob}")
for pact in pathlist:
consumer, provider, _ = pact.stem.split(self.sep)
publish_url = f"{self.url}/pacts/provider/{provider}/consumer/{consumer}/version/{version}"
with open(pact, "r") as stream:
data = json.load(stream)
publication[pact.name] = {"url": publish_url, "data": data}
elif path.is_file() and path.suffix.lower() == ".json":
consumer, provider, _ = path.stem.split(self.sep)
publish_url = f"{self.url}/pacts/provider/{provider}/consumer/{consumer}/version/{version}"
with open(path, "r") as stream:
data = json.load(stream)
publication[path.name] = {"url": publish_url, "data": data}
return publication
def publish(self, publication):
""" Publish pact to pact-broker instance
Parameters
----------
publication : dict
Keys: Pact file name
Values: URL & body for publication to pact-broker
Returned by PactBrokerInterface.find_pacts(...)
"""
for name in publication:
response = requests.put(publication[name]["url"], json=publication[name]["data"], auth=self.auth)
response.raise_for_status()
if response.status_code == 201:
print(f"Published new pact {name} to {self.url}")
elif response.status_code == 200:
print(f"Published pact update {name} to {self.url}")
def tag_version(self, participant, version, tag):
tag_url = f'{self.url}/pacticipants/{participant}/versions/{version}/tags/{tag}'
response = requests.put(
tag_url, auth=self.auth, headers={'Content-Length': '0', 'Content-Type': 'application/json'}
)
response.raise_for_status()
if 200 <= response.status_code < 300:
print(f'Tagged {participant} version {version} to with {tag}')
def get_consumers(self, publication):
consumers = set()
for name in publication:
consumers.add(name.split(self.sep, 1)[0])
return list(consumers)
def main():
parser = argparse.ArgumentParser(description="Publish pact test JSONs to pact-broker")
parser.add_argument("url", help="URL of the pact-broker", type=str)
parser.add_argument("username", help="pact-broker username", type=str)
parser.add_argument("password", help="pact-broker password", type=str)
parser.add_argument(
"path", help="Location of pact JSON file(s) [file|dir]", nargs="?", type=str, default="."
)
parser.add_argument(
"-v", "--version", help="Application version", type=str, default="1.0.0", dest="version"
)
parser.add_argument(
"-g",
"--glob",
help="Glob pattern for matching pact files",
default="*-pact.json",
type=str,
dest="glob",
)
parser.add_argument(
"-s",
"--separator",
help="Separator for extracting Consumer/Producer name from pactfile",
default="-",
type=str,
dest="sep",
)
parser.add_argument(
"-t", "--tag", help="Consumer tag for the version", default="latest", type=str, dest="tag"
)
args = parser.parse_args()
broker = PactBrokerInterface(args.url, args.username, args.password, args.glob, args.sep)
publication = broker.find_pacts(args.path, args.version)
broker.publish(publication)
for consumer in broker.get_consumers(publication):
broker.tag_version(consumer, args.version, args.tag)
if __name__ == "__main__":
main() | pact_test_utils/publish_pacts.py | import requests
import argparse
import pathlib
import json
class PublishingFailedException(Exception):
pass
class PactBrokerInterface:
""" Interface to a pact-broker instance
Allows publishing pact test JSON files to pact-broker instance
Attributes
----------
url : str
pact-broker URL
user : str
pact-broker username
password : str
pact-broker password
auth : tuple
(user, password) for request's HTTPBasicAuth
glob : str
glob pattern to match pact files
sep : str
separator for extracting Consumer/Producer name from filename
"""
def __init__(self, url, user, password, glob="*-pact.json", sep="-"):
self.url = url.strip("/")
self.user = user
self.password = password
self.auth = (self.user, self.password)
self.glob = glob
self.sep = sep
self.headers = {'Content-Type': 'application/json'}
def find_pacts(self, pact_path=".", version="1.0.0"):
""" Find local pact files and prepare publication
Parameters
----------
pact_path : str
Filepath or directory containing pact JSON file
version : str
(Consumer) application version
Returns
-------
dict
Keys: Pact file name
Values: URL & body for publication to pact-broker
"""
publication = {}
path = pathlib.Path(pact_path)
if not path.exists():
raise ValueError(f"Unable to find {pact_path}. No such file or directory.")
if path.is_dir():
pathlist = path.glob(f"**/{self.glob}")
for pact in pathlist:
consumer, provider, _ = pact.stem.split(self.sep)
publish_url = f"{self.url}/pacts/provider/{provider}/consumer/{consumer}/version/{version}"
with open(pact, "r") as stream:
data = json.load(stream)
publication[pact.name] = {"url": publish_url, "data": data}
elif path.is_file() and path.suffix.lower() == ".json":
consumer, provider, _ = path.stem.split(self.sep)
publish_url = f"{self.url}/pacts/provider/{provider}/consumer/{consumer}/version/{version}"
with open(path, "r") as stream:
data = json.load(stream)
publication[path.name] = {"url": publish_url, "data": data}
return publication
def publish(self, publication):
""" Publish pact to pact-broker instance
Parameters
----------
publication : dict
Keys: Pact file name
Values: URL & body for publication to pact-broker
Returned by PactBrokerInterface.find_pacts(...)
"""
for name in publication:
response = requests.put(publication[name]["url"], json=publication[name]["data"], auth=self.auth)
response.raise_for_status()
if response.status_code == 201:
print(f"Published new pact {name} to {self.url}")
elif response.status_code == 200:
print(f"Published pact update {name} to {self.url}")
def tag_version(self, participant, version, tag):
tag_url = f'{self.url}/pacticipants/{participant}/versions/{version}/tags/{tag}'
response = requests.put(
tag_url, auth=self.auth, headers={'Content-Length': '0', 'Content-Type': 'application/json'}
)
response.raise_for_status()
if 200 <= response.status_code < 300:
print(f'Tagged {participant} version {version} to with {tag}')
def get_consumers(self, publication):
consumers = set()
for name in publication:
consumers.add(name.split(self.sep, 1)[0])
return list(consumers)
def main():
parser = argparse.ArgumentParser(description="Publish pact test JSONs to pact-broker")
parser.add_argument("url", help="URL of the pact-broker", type=str)
parser.add_argument("username", help="pact-broker username", type=str)
parser.add_argument("password", help="pact-broker password", type=str)
parser.add_argument(
"path", help="Location of pact JSON file(s) [file|dir]", nargs="?", type=str, default="."
)
parser.add_argument(
"-v", "--version", help="Application version", type=str, default="1.0.0", dest="version"
)
parser.add_argument(
"-g",
"--glob",
help="Glob pattern for matching pact files",
default="*-pact.json",
type=str,
dest="glob",
)
parser.add_argument(
"-s",
"--separator",
help="Separator for extracting Consumer/Producer name from pactfile",
default="-",
type=str,
dest="sep",
)
parser.add_argument(
"-t", "--tag", help="Consumer tag for the version", default="latest", type=str, dest="tag"
)
args = parser.parse_args()
broker = PactBrokerInterface(args.url, args.username, args.password, args.glob, args.sep)
publication = broker.find_pacts(args.path, args.version)
broker.publish(publication)
for consumer in broker.get_consumers(publication):
broker.tag_version(consumer, args.version, args.tag)
if __name__ == "__main__":
main() | 0.698946 | 0.147709 |
import re
class Account():
"""Client account"""
ID_COUNT = 1
def __init__(self, name, **kwargs):
self.id = self.ID_COUNT
self.name = name
self.__dict__.update(kwargs)
Account.ID_COUNT += 1
def __getitem__(self, key):
return getattr(self, key)
def __str__(self):
txt = "Account"
for attribute, value in self.__dict__.items():
txt = txt + f"\n{attribute} : {value}"
txt += '\n'
return txt
def transfer(self, amount):
"""Method to allow bank transfer to this account"""
self.value += amount
class Bank():
"""The bank"""
def __init__(self):
self.account = []
@staticmethod
def _is_valid_account(account):
"""Verify if account given as argument is valid"""
found = {
'name':False,
'id':False,
'value':False,
'zip':False,
'addr':False
}
if isinstance(account, Account):
if len(account.__dict__) % 2 == 1:
raise ValueError("Value Error: Corrupted account. Even number of attributes.")
for attribute in account.__dict__.keys():
if re.search("^b", attribute):
raise ValueError(f"Value Error: Corrupted account. Invalid attribute '{attribute}'.")
if attribute in found.keys():
found[attribute] = True
if False in found.values():
missing_values = list(found.keys())[list(found.values()).index(False)]
if missing_values in ['zip','addr']:
if (found['zip'] is False and found['addr'] is True) \
or (found['zip'] is True and found['addr'] is False):
return True
raise ValueError(f"Value Error: Corrupted account. Missing attribute '{missing_values}'.")
return True
raise TypeError("Type Error: Argument needs to be an Account object.")
def find_account(self, account_to_find):
if not isinstance(account_to_find, int) and not isinstance(account_to_find, str):
return None
account_found = next((item for item in self.account if item["id"] == account_to_find), None)\
if isinstance(account_to_find, int) else next((item for item in self.account if item["name"] == account_to_find), None)
if account_found is None:
return None
return account_found
def add(self, account):
"""Method to add account to bank"""
try:
if self.find_account(account['name']) != None:
print(ValueError("Value Error: Account name is already in the bank accounts."))
return
if self._is_valid_account(account):
self.account.append(account)
except Exception as exception:
print(TypeError(f"{exception} Please make sure you are adding a valid Account object."))
def transfer(self, origin, dest, amount):
"""
@origin: int(id) or str(name) of the first account
@dest: int(id) or str(name) of the destination account
@amount: float(amount) amount to transfer
@return True if success, False if an error occurred
"""
origin_account = self.find_account(origin)
if origin_account == None:
print(ValueError("Value Error: Account not found in bank accounts. Are you sure you've added the account to this bank?"))
return False
dest_account = self.find_account(dest)
if dest_account == None:
print(ValueError("Value Error: Account not found in bank accounts. Are you sure you've added the account to this bank?"))
return False
if not isinstance(amount, int) and not isinstance(amount, float):
print(TypeError("Type Error: Wrong type for amount argument. \
Please make sure it is an int or a float."))
return False
origin_index = self.account.index(origin_account)
dest_index = self.account.index(dest_account)
if float(self.account[origin_index].value) < float(amount):
print(ValueError(f"Value Error: You are trying to transfer {float(amount)} \
from {origin_account['name']} account but funds are insufficient."))
return False
elif float(amount) < 0:
print(ValueError("Value Error: You are trying to transfer a negative amount."))
return False
else:
self.account[origin_index].transfer(float(-amount))
self.account[dest_index].transfer(float(amount))
return True
@staticmethod
def fix_account(account):
"""
fix the corrupted account
@account: int(id) or str(name) of the account
@return True if success, False if an error occurred
"""
print(f"Fixing account {account['name']}")
keys_to_change = {}
for attribute in account.__dict__.keys():
if re.search("^b", attribute):
new_key = attribute[1:]
while re.search("^b", new_key):
new_key = new_key[1:]
keys_to_change[attribute] = new_key
for old_key, new_key in keys_to_change.items():
account.__dict__[new_key] = account.__dict__.pop(old_key)
for attribute in ['name', 'id', 'value', 'zip', 'addr']:
if attribute not in dir(account):
if attribute == 'name':
setattr(account, attribute, "Account " + account['id'])
if attribute == 'value':
setattr(account, attribute, 0)
if attribute == 'zip':
setattr(account, attribute, 00000)
if attribute == 'addr':
setattr(account, attribute, 'No address')
if len(account.__dict__) % 2 == 1:
setattr(account, 'placeholder', 0)
print("Fixed account.")
return True | Module_01/ex05/the_bank.py | import re
class Account():
"""Client account"""
ID_COUNT = 1
def __init__(self, name, **kwargs):
self.id = self.ID_COUNT
self.name = name
self.__dict__.update(kwargs)
Account.ID_COUNT += 1
def __getitem__(self, key):
return getattr(self, key)
def __str__(self):
txt = "Account"
for attribute, value in self.__dict__.items():
txt = txt + f"\n{attribute} : {value}"
txt += '\n'
return txt
def transfer(self, amount):
"""Method to allow bank transfer to this account"""
self.value += amount
class Bank():
"""The bank"""
def __init__(self):
self.account = []
@staticmethod
def _is_valid_account(account):
"""Verify if account given as argument is valid"""
found = {
'name':False,
'id':False,
'value':False,
'zip':False,
'addr':False
}
if isinstance(account, Account):
if len(account.__dict__) % 2 == 1:
raise ValueError("Value Error: Corrupted account. Even number of attributes.")
for attribute in account.__dict__.keys():
if re.search("^b", attribute):
raise ValueError(f"Value Error: Corrupted account. Invalid attribute '{attribute}'.")
if attribute in found.keys():
found[attribute] = True
if False in found.values():
missing_values = list(found.keys())[list(found.values()).index(False)]
if missing_values in ['zip','addr']:
if (found['zip'] is False and found['addr'] is True) \
or (found['zip'] is True and found['addr'] is False):
return True
raise ValueError(f"Value Error: Corrupted account. Missing attribute '{missing_values}'.")
return True
raise TypeError("Type Error: Argument needs to be an Account object.")
def find_account(self, account_to_find):
if not isinstance(account_to_find, int) and not isinstance(account_to_find, str):
return None
account_found = next((item for item in self.account if item["id"] == account_to_find), None)\
if isinstance(account_to_find, int) else next((item for item in self.account if item["name"] == account_to_find), None)
if account_found is None:
return None
return account_found
def add(self, account):
"""Method to add account to bank"""
try:
if self.find_account(account['name']) != None:
print(ValueError("Value Error: Account name is already in the bank accounts."))
return
if self._is_valid_account(account):
self.account.append(account)
except Exception as exception:
print(TypeError(f"{exception} Please make sure you are adding a valid Account object."))
def transfer(self, origin, dest, amount):
"""
@origin: int(id) or str(name) of the first account
@dest: int(id) or str(name) of the destination account
@amount: float(amount) amount to transfer
@return True if success, False if an error occurred
"""
origin_account = self.find_account(origin)
if origin_account == None:
print(ValueError("Value Error: Account not found in bank accounts. Are you sure you've added the account to this bank?"))
return False
dest_account = self.find_account(dest)
if dest_account == None:
print(ValueError("Value Error: Account not found in bank accounts. Are you sure you've added the account to this bank?"))
return False
if not isinstance(amount, int) and not isinstance(amount, float):
print(TypeError("Type Error: Wrong type for amount argument. \
Please make sure it is an int or a float."))
return False
origin_index = self.account.index(origin_account)
dest_index = self.account.index(dest_account)
if float(self.account[origin_index].value) < float(amount):
print(ValueError(f"Value Error: You are trying to transfer {float(amount)} \
from {origin_account['name']} account but funds are insufficient."))
return False
elif float(amount) < 0:
print(ValueError("Value Error: You are trying to transfer a negative amount."))
return False
else:
self.account[origin_index].transfer(float(-amount))
self.account[dest_index].transfer(float(amount))
return True
@staticmethod
def fix_account(account):
"""
fix the corrupted account
@account: int(id) or str(name) of the account
@return True if success, False if an error occurred
"""
print(f"Fixing account {account['name']}")
keys_to_change = {}
for attribute in account.__dict__.keys():
if re.search("^b", attribute):
new_key = attribute[1:]
while re.search("^b", new_key):
new_key = new_key[1:]
keys_to_change[attribute] = new_key
for old_key, new_key in keys_to_change.items():
account.__dict__[new_key] = account.__dict__.pop(old_key)
for attribute in ['name', 'id', 'value', 'zip', 'addr']:
if attribute not in dir(account):
if attribute == 'name':
setattr(account, attribute, "Account " + account['id'])
if attribute == 'value':
setattr(account, attribute, 0)
if attribute == 'zip':
setattr(account, attribute, 00000)
if attribute == 'addr':
setattr(account, attribute, 'No address')
if len(account.__dict__) % 2 == 1:
setattr(account, 'placeholder', 0)
print("Fixed account.")
return True | 0.539105 | 0.172694 |
from numpy.core.defchararray import zfill
import taichi as ti
import numpy as np
from .camera import *
from .shading import *
from .renderer_utils import ray_aabb_intersection, intersect_sphere, ray_plane_intersect, reflect, refract
inf = 1e8  # "infinity" sentinel for ray-hit distances (returned on miss in intersect_scene)
eps = 1e-4  # small offset: depth bias in the shadow maps, and light-plane nudge below the ceiling
@ti.data_oriented
class ParticleRenderer:
padding = 3 # extra padding to avoid cropping some of the projected sphere
def __init__(self, system, radius=0.025, main_res=512):
self.system = system
system.renderer = self
self.main_res = main_res
self.radius = radius
self.epsilon = 20.0 * self.radius
''' directional light '''
self.camera_main = Camera(res=(main_res, main_res), pos=[0, 0.5, 2.5], target=[0, 0, 0])
self.camera_main.add_buffer("pos", dim=3, dtype=float)
self.camera_main.add_buffer("zbuf", dim=0, dtype=float)
self.camera_main.add_buffer("normal", dim=3, dtype=float)
self.main_img = self.camera_main.img
light_y_pos = 2.0 - eps
light_x_min_pos = -0.15
light_x_range = 0.3
light_z_min_pos = 1.0
light_z_range = 0.3
self.light_area = light_x_range * light_z_range
self.light_vertices = [
ti.Vector([light_x_min_pos, light_y_pos, light_z_min_pos]),
ti.Vector([light_x_min_pos, light_y_pos, light_z_min_pos + light_z_range]),
ti.Vector([light_x_min_pos + light_x_range, light_y_pos, light_z_min_pos + light_z_range]),
ti.Vector([light_x_min_pos + light_x_range, light_y_pos, light_z_min_pos]),
]
self.left_wall = [ti.Vector([-1.1, 0.0, 0.0]), ti.Vector([-1.1, 0.0, 2.0]), ti.Vector([-1.1, 2.0, 2.0]), ti.Vector([-1.1, 2.0, 0.0])]
self.color_left = ti.Vector([0.65, 0.05, 0.05])
self.right_wall = [ti.Vector([1.1, 0.0, 0.0]), ti.Vector([1.1, 2.0, 0.0]), ti.Vector([1.1, 2.0, 2.0]), ti.Vector([1.1, 0.0, 2.0])]
self.color_right = ti.Vector([0.12, 0.45, 0.15])
self.light_min_pos = self.light_vertices[0]
self.light_max_pos = self.light_vertices[2]
self.light_normal = ti.Vector([0.0, -1.0, 0.0])
self.light_color = ti.Vector([0.9, 0.85, 0.7])
self.light_intensity = 200
self.camera_shadow = Camera(res=(2048, 2048), mainimg=False,
pos=[light_x_min_pos + light_x_range / 2, light_y_pos + light_x_range / 2, light_z_min_pos + light_z_range / 2],
target=[light_x_min_pos + light_x_range / 2, 0.0, light_z_min_pos + light_z_range / 2],
up=[0, 0, 1],
fov=45)
self.camera_shadow.add_buffer("zbuf", dim=0, dtype=float)
    '''
    Clear camera
    '''
    @ti.kernel
    def clear_camera(self, camera: ti.template()):
        # Reset every pixel of the camera's buffers before a new frame:
        # depth, color, normal and world-position are all zeroed.
        for I in ti.grouped(camera.img):
            camera.zbuf[I] = 0
            camera.img[I].fill(0)
            camera.normal[I].fill(0)
            camera.pos[I].fill(0)
    '''
    Calculates G-buffer
    '''
    @ti.kernel
    def calculate_buffers(self, camera: ti.template()):
        # Refresh the world-to-view transform from the camera's local-to-world matrix.
        camera.W2V[None] = camera.L2W[None].inverse()
        # first pass: visibility splatting
        for i in range(self.system.num_particles_max):
            # Fields are sized for the maximum particle count; skip inactive slots.
            if i >= self.system.num_particles[None]:
                continue
            # particle center coordinate transfer
            # particle position view space 4d homogeneous coord [x, y, z, 1]
            pos_view = ti.Vector.zero(float, 3)  # NOTE(review): immediately overwritten; presumably kept to pin the variable's type in kernel scope — confirm
            pos_view = xyz(camera.W2V @ position(self.system.pos[i]))
            pos_img = camera.uncook(pos_view)  # 2d image space position (x, y) in pixel unit
            # find the projected radius in image space
            ref_view_space = ti.Vector([pos_view[0] + self.radius, pos_view[1], pos_view[2]])
            ref_img_space = camera.uncook(ref_view_space)
            r_projected = abs(ref_img_space[0] - pos_img[0]) + self.padding  # projected radius in pixel unit
            # fragment ranges to render, clamped to the image bounds
            xmin = int(min(max(0, pos_img[0] - r_projected), camera.res[0]))
            xmax = int(min(max(0, pos_img[0] + r_projected), camera.res[0]))
            ymin = int(min(max(0, pos_img[1] - r_projected), camera.res[1]))
            ymax = int(min(max(0, pos_img[1] + r_projected), camera.res[1]))
            # Only splat spheres in front of the camera whose bounds lie on screen.
            if pos_view.z > 0 and 0 <= xmin < xmax < camera.res[0] and 0 <= ymin < ymax < camera.res[1]:
                # process projected fragments and compute depth
                for row in range(xmin, xmax):
                    for column in range(ymin, ymax):
                        # discard fragment if its distance to particle center > projected radius
                        frag_view_space = ti.Vector([row, column, pos_view[2]]).cast(float)
                        frag_view_space = camera.cook(frag_view_space)  # 3d position in view space
                        dis_projected = (frag_view_space - pos_view).norm()
                        if dis_projected <= self.radius:
                            # compute depth value for valid fragment: front surface of the
                            # sphere at this fragment's view-space (x, y)
                            depth = pos_view[2] - ti.sqrt(self.radius ** 2 - dis_projected ** 2)
                            z = camera.depth(depth)
                            # overwrite if closer: atomic_max returns the OLD zbuf value,
                            # so z >= old means this fragment won the depth test
                            # (convention: larger stored value wins)
                            if z >= ti.atomic_max(camera.zbuf[row, column], z):
                                if ti.static(hasattr(camera, "normal")):
                                    # Recover the sphere surface point and fill the G-buffer.
                                    frag_surface = ti.Vector([frag_view_space[0], frag_view_space[1], depth])
                                    normal = (frag_surface - pos_view).normalized()
                                    normal_world = xyz(camera.L2W @ direction(normal))
                                    pos_world = xyz(camera.L2W @ position(frag_surface))
                                    camera.img[row, column] = self.system.col[i]  # diffuse
                                    camera.normal[row, column] = normal_world
                                    camera.pos[row, column] = pos_world
@ti.func
def intersect_light(self, pos, d, tmax):
hit, t, _ = ray_aabb_intersection(self.light_min_pos, self.light_max_pos, pos, d)
if hit and 0 < t < tmax:
hit = 1
else:
hit = 0
t = inf
return hit, t
    '''
    Wall intersection from Cornell Box example
    '''
    @ti.func
    def intersect_scene(self, pos, ray_dir):
        # Intersect a ray with the five Cornell-box walls and the area light.
        # Returns (distance to nearest hit, surface normal, surface color);
        # closest stays at `inf` if nothing is hit.
        closest, normal = inf, ti.Vector.zero(ti.f32, 3)
        c = ti.Vector.zero(ti.f32, 3)
        # left (red wall at x = -1.1, normal facing into the box)
        pnorm = ti.Vector([1.0, 0.0, 0.0])
        cur_dist, _ = ray_plane_intersect(pos, ray_dir, ti.Vector([-1.1, 0.0, 0.0]), pnorm)
        if 0 < cur_dist < closest:
            closest = cur_dist
            normal = pnorm
            c = self.color_left
        # right (green wall at x = 1.1)
        pnorm = ti.Vector([-1.0, 0.0, 0.0])
        cur_dist, _ = ray_plane_intersect(pos, ray_dir, ti.Vector([1.1, 0.0, 0.0]), pnorm)
        if 0 < cur_dist < closest:
            closest = cur_dist
            normal = pnorm
            c = self.color_right
        # bottom (gray floor at y = 0)
        gray = ti.Vector([0.93, 0.93, 0.93])
        pnorm = ti.Vector([0.0, 1.0, 0.0])
        cur_dist, _ = ray_plane_intersect(pos, ray_dir, ti.Vector([0.0, 0.0, 0.0]), pnorm)
        if 0 < cur_dist < closest:
            closest = cur_dist
            normal = pnorm
            c = gray
        # top (gray ceiling at y = 2)
        pnorm = ti.Vector([0.0, -1.0, 0.0])
        cur_dist, _ = ray_plane_intersect(pos, ray_dir, ti.Vector([0.0, 2.0, 0.0]), pnorm)
        if 0 < cur_dist < closest:
            closest = cur_dist
            normal = pnorm
            c = gray
        # far (gray back wall at z = 0)
        pnorm = ti.Vector([0.0, 0.0, 1.0])
        cur_dist, _ = ray_plane_intersect(pos, ray_dir, ti.Vector([0.0, 0.0, 0.0]), pnorm)
        if 0 < cur_dist < closest:
            closest = cur_dist
            normal = pnorm
            c = gray
        # light (tested last, limited to the current closest distance)
        hit_l, cur_dist = self.intersect_light(pos, ray_dir, closest)
        if hit_l and 0 < cur_dist < closest:
            # technically speaking, no need to check the second term
            closest = cur_dist
            normal = self.light_normal
            c = self.light_color
        return closest, normal, c
    '''
    Shadow map functions
    '''
    @ti.func
    def shadowmap_soft(self, pos):
        # Soft (percentage-closer filtered) shadow lookup: average n_sample depth
        # tests taken on a randomly rotated spiral of UV offsets around `pos`.
        bias = eps  # depth bias against shadow acne
        light_size = 16  # scales the sample offsets — presumably in shadow-map pixels; TODO confirm against uncook/texture units
        n_sample = 64
        n_ring = 10
        radius = 1 / n_sample
        radius_step = radius
        angle = ti.random() * 2 * math.pi  # random start angle decorrelates neighbouring pixels
        angle_step = 2 * math.pi * n_ring / n_sample  # n_ring full turns over all samples
        # Project the world-space point into the shadow camera.
        pos_shadow = xyz(self.camera_shadow.W2V @ position(pos))
        zbuf_UV = self.camera_shadow.uncook(pos_shadow)
        z_shadow = self.camera_shadow.depth(pos_shadow.z)
        visibility = 0.0
        for _ in range(n_sample):
            # Spiral outwards: radius grows linearly, angle advances each step;
            # radius ** 0.75 concentrates samples toward the center.
            delta_UV = ti.Vector([ti.cos(angle), ti.sin(angle)]) * (radius ** 0.75) * light_size
            angle += angle_step
            radius += radius_step
            #print(zbuf_UV, delta_UV)
            shadow_depth = texture(self.camera_shadow.zbuf, zbuf_UV + delta_UV)
            # Depth convention: larger = closer to the light (zbuf is filled via
            # atomic_max in calculate_buffers). A stored depth smaller than ours
            # means nothing lies in front of this fragment -> the sample is lit.
            if 0 <= shadow_depth < z_shadow - bias:
                visibility += 1.0
        return visibility / n_sample
@ti.func
def shadowmap(self, pos):
pos_shadow = xyz(self.camera_shadow.W2V @ position(pos))
zbuf_UV = self.camera_shadow.uncook(pos_shadow)
z_shadow = self.camera_shadow.depth(pos_shadow.z)
bias = eps
visibility = 1.0
if texture(self.camera_shadow.zbuf, zbuf_UV) > z_shadow + bias:
visibility = 0.0
return visibility
@ti.func
def ssao(self, pos):
ao_radius = self.radius * 15
n_sample = 64
sample = 0
visible = 0.0
while sample < n_sample:
rand_vec = ti.Vector([ti.random(), ti.random(), ti.random()]) * 2 - 1.0
if (rand_vec ** 2).sum() <= 1.0:
sample += 1
pos_test = pos + rand_vec * ao_radius
pos_test_view = xyz(self.camera_main.W2V @ position(pos_test))
pos_test_UV = self.camera_main.uncook(pos_test_view)
z_test = self.camera_main.depth(pos_test_view.z)
if z_test >= texture(self.camera_main.zbuf, pos_test_UV):
visible += 1.0
return min(1.0, visible / n_sample * 2)
'''
Shading
'''
@ti.kernel
def shade_particles(self):
camera = self.camera_main
# third pass: shading
for I in ti.grouped(camera.img):
rayorig, viewdir = camera.pixel_ray(I)
closest, normal, color = self.intersect_scene(rayorig, viewdir)
pos_world = rayorig + viewdir * closest
pos_view = xyz(camera.W2V @ position(pos_world))
z = camera.depth(pos_view.z)
if z < camera.zbuf[I]:
normal = camera.normal[I]
color = camera.img[I]
pos_world = camera.pos[I]
# ambient
ao = self.ssao(pos_world)
color = color * 0.2 * ao
# diffuse shadowed
visibility = self.shadowmap_soft(pos_world)
color += visibility * shade_area_diffuse(pos_world, normal, color,
-self.light_normal, self.light_vertices, self.light_color, self.light_intensity)
color += shade_area_diffuse(pos_world, normal, color,
ti.Vector([1.0, 0.0, 0.0]), self.left_wall, self.color_left, self.light_intensity * 0.02)
color += shade_area_diffuse(pos_world, normal, color,
ti.Vector([-1.0, 0.0, 0.0]), self.right_wall, self.color_right, self.light_intensity * 0.02)
#camera.img[I] = ti.Vector([1.0, 1.0, 1.0]) * ao * visibility
# reflection
#refldir = viewdir - 2 * viewdir.dot(normal) * normal
# tone mapping
#camera.img[I] = camera.img[I] * 1.6 / (1.0 + camera.img[I])
# gamma correction
camera.img[I] = color ** (1 / 2.2)
def render_main(self):
self.clear_camera(self.camera_main)
self.camera_shadow.zbuf.fill(0)
self.calculate_buffers(self.camera_shadow)
self.calculate_buffers(self.camera_main)
self.shade_particles()
'''
Main render function which renders to the GUI.
'''
def render(self, gui):
gui.clear()
self.camera_main.from_mouse(gui)
self.render_main()
gui.set_image(self.main_img)
#gui.set_image(self.camera_shadow.zbuf) | engine/fast_renderer/renderer.py | from numpy.core.defchararray import zfill
import taichi as ti
import numpy as np
from .camera import *
from .shading import *
from .renderer_utils import ray_aabb_intersection, intersect_sphere, ray_plane_intersect, reflect, refract
inf = 1e8
eps = 1e-4
@ti.data_oriented
class ParticleRenderer:
padding = 3 # extra padding to avoid cropping some of the projected sphere
def __init__(self, system, radius=0.025, main_res=512):
self.system = system
system.renderer = self
self.main_res = main_res
self.radius = radius
self.epsilon = 20.0 * self.radius
''' directional light '''
self.camera_main = Camera(res=(main_res, main_res), pos=[0, 0.5, 2.5], target=[0, 0, 0])
self.camera_main.add_buffer("pos", dim=3, dtype=float)
self.camera_main.add_buffer("zbuf", dim=0, dtype=float)
self.camera_main.add_buffer("normal", dim=3, dtype=float)
self.main_img = self.camera_main.img
light_y_pos = 2.0 - eps
light_x_min_pos = -0.15
light_x_range = 0.3
light_z_min_pos = 1.0
light_z_range = 0.3
self.light_area = light_x_range * light_z_range
self.light_vertices = [
ti.Vector([light_x_min_pos, light_y_pos, light_z_min_pos]),
ti.Vector([light_x_min_pos, light_y_pos, light_z_min_pos + light_z_range]),
ti.Vector([light_x_min_pos + light_x_range, light_y_pos, light_z_min_pos + light_z_range]),
ti.Vector([light_x_min_pos + light_x_range, light_y_pos, light_z_min_pos]),
]
self.left_wall = [ti.Vector([-1.1, 0.0, 0.0]), ti.Vector([-1.1, 0.0, 2.0]), ti.Vector([-1.1, 2.0, 2.0]), ti.Vector([-1.1, 2.0, 0.0])]
self.color_left = ti.Vector([0.65, 0.05, 0.05])
self.right_wall = [ti.Vector([1.1, 0.0, 0.0]), ti.Vector([1.1, 2.0, 0.0]), ti.Vector([1.1, 2.0, 2.0]), ti.Vector([1.1, 0.0, 2.0])]
self.color_right = ti.Vector([0.12, 0.45, 0.15])
self.light_min_pos = self.light_vertices[0]
self.light_max_pos = self.light_vertices[2]
self.light_normal = ti.Vector([0.0, -1.0, 0.0])
self.light_color = ti.Vector([0.9, 0.85, 0.7])
self.light_intensity = 200
self.camera_shadow = Camera(res=(2048, 2048), mainimg=False,
pos=[light_x_min_pos + light_x_range / 2, light_y_pos + light_x_range / 2, light_z_min_pos + light_z_range / 2],
target=[light_x_min_pos + light_x_range / 2, 0.0, light_z_min_pos + light_z_range / 2],
up=[0, 0, 1],
fov=45)
self.camera_shadow.add_buffer("zbuf", dim=0, dtype=float)
'''
Clear camera
'''
@ti.kernel
def clear_camera(self, camera: ti.template()):
for I in ti.grouped(camera.img):
camera.zbuf[I] = 0
camera.img[I].fill(0)
camera.normal[I].fill(0)
camera.pos[I].fill(0)
'''
Calculates G-buffer
'''
@ti.kernel
def calculate_buffers(self, camera: ti.template()):
camera.W2V[None] = camera.L2W[None].inverse()
# first pass: visibility splatting
for i in range(self.system.num_particles_max):
if i >= self.system.num_particles[None]:
continue
# particle center coordinate transfer
# particle position view space 4d homogeneous coord [x, y, z, 1]
pos_view = ti.Vector.zero(float, 3)
pos_view = xyz(camera.W2V @ position(self.system.pos[i]))
pos_img = camera.uncook(pos_view) # 2d image space position (x, y) in pixel unit
# find the projected radius in image space
ref_view_space = ti.Vector([pos_view[0] + self.radius, pos_view[1], pos_view[2]])
ref_img_space = camera.uncook(ref_view_space)
r_projected = abs(ref_img_space[0] - pos_img[0]) + self.padding # projected radius in pixel unit
# fragment ranges to render
xmin = int(min(max(0, pos_img[0] - r_projected), camera.res[0]))
xmax = int(min(max(0, pos_img[0] + r_projected), camera.res[0]))
ymin = int(min(max(0, pos_img[1] - r_projected), camera.res[1]))
ymax = int(min(max(0, pos_img[1] + r_projected), camera.res[1]))
if pos_view.z > 0 and 0 <= xmin < xmax < camera.res[0] and 0 <= ymin < ymax < camera.res[1]:
# process projected fragments and compute depth
for row in range(xmin, xmax):
for column in range(ymin, ymax):
# discard fragment if its distance to particle center > projected radius
frag_view_space = ti.Vector([row, column, pos_view[2]]).cast(float)
frag_view_space = camera.cook(frag_view_space) # 3d position in view space
dis_projected = (frag_view_space - pos_view).norm()
if dis_projected <= self.radius:
# compute depth value for valid fragment
depth = pos_view[2] - ti.sqrt(self.radius ** 2 - dis_projected ** 2)
z = camera.depth(depth)
# overwrite if closer
if z >= ti.atomic_max(camera.zbuf[row, column], z):
if ti.static(hasattr(camera, "normal")):
frag_surface = ti.Vector([frag_view_space[0], frag_view_space[1], depth])
normal = (frag_surface - pos_view).normalized()
normal_world = xyz(camera.L2W @ direction(normal))
pos_world = xyz(camera.L2W @ position(frag_surface))
camera.img[row, column] = self.system.col[i] # diffuse
camera.normal[row, column] = normal_world
camera.pos[row, column] = pos_world
@ti.func
def intersect_light(self, pos, d, tmax):
hit, t, _ = ray_aabb_intersection(self.light_min_pos, self.light_max_pos, pos, d)
if hit and 0 < t < tmax:
hit = 1
else:
hit = 0
t = inf
return hit, t
'''
Wall intersection from Cornell Box example
'''
@ti.func
def intersect_scene(self, pos, ray_dir):
closest, normal = inf, ti.Vector.zero(ti.f32, 3)
c = ti.Vector.zero(ti.f32, 3)
# left
pnorm = ti.Vector([1.0, 0.0, 0.0])
cur_dist, _ = ray_plane_intersect(pos, ray_dir, ti.Vector([-1.1, 0.0,
0.0]), pnorm)
if 0 < cur_dist < closest:
closest = cur_dist
normal = pnorm
c = self.color_left
# right
pnorm = ti.Vector([-1.0, 0.0, 0.0])
cur_dist, _ = ray_plane_intersect(pos, ray_dir, ti.Vector([1.1, 0.0, 0.0]),
pnorm)
if 0 < cur_dist < closest:
closest = cur_dist
normal = pnorm
c = self.color_right
# bottom
gray = ti.Vector([0.93, 0.93, 0.93])
pnorm = ti.Vector([0.0, 1.0, 0.0])
cur_dist, _ = ray_plane_intersect(pos, ray_dir, ti.Vector([0.0, 0.0, 0.0]),
pnorm)
if 0 < cur_dist < closest:
closest = cur_dist
normal = pnorm
c = gray
# top
pnorm = ti.Vector([0.0, -1.0, 0.0])
cur_dist, _ = ray_plane_intersect(pos, ray_dir, ti.Vector([0.0, 2.0, 0.0]),
pnorm)
if 0 < cur_dist < closest:
closest = cur_dist
normal = pnorm
c = gray
# far
pnorm = ti.Vector([0.0, 0.0, 1.0])
cur_dist, _ = ray_plane_intersect(pos, ray_dir, ti.Vector([0.0, 0.0, 0.0]),
pnorm)
if 0 < cur_dist < closest:
closest = cur_dist
normal = pnorm
c = gray
# light
hit_l, cur_dist = self.intersect_light(pos, ray_dir, closest)
if hit_l and 0 < cur_dist < closest:
# technically speaking, no need to check the second term
closest = cur_dist
normal = self.light_normal
c = self.light_color
return closest, normal, c
'''
Shadow map functions
'''
@ti.func
def shadowmap_soft(self, pos):
bias = eps
light_size = 16
n_sample = 64
n_ring = 10
radius = 1 / n_sample
radius_step = radius
angle = ti.random() * 2 * math.pi
angle_step = 2 * math.pi * n_ring / n_sample
pos_shadow = xyz(self.camera_shadow.W2V @ position(pos))
zbuf_UV = self.camera_shadow.uncook(pos_shadow)
z_shadow = self.camera_shadow.depth(pos_shadow.z)
visibility = 0.0
for _ in range(n_sample):
delta_UV = ti.Vector([ti.cos(angle), ti.sin(angle)]) * (radius ** 0.75) * light_size
angle += angle_step
radius += radius_step
#print(zbuf_UV, delta_UV)
shadow_depth = texture(self.camera_shadow.zbuf, zbuf_UV + delta_UV)
if 0 <= shadow_depth < z_shadow - bias:
visibility += 1.0
return visibility / n_sample
@ti.func
def shadowmap(self, pos):
pos_shadow = xyz(self.camera_shadow.W2V @ position(pos))
zbuf_UV = self.camera_shadow.uncook(pos_shadow)
z_shadow = self.camera_shadow.depth(pos_shadow.z)
bias = eps
visibility = 1.0
if texture(self.camera_shadow.zbuf, zbuf_UV) > z_shadow + bias:
visibility = 0.0
return visibility
@ti.func
def ssao(self, pos):
ao_radius = self.radius * 15
n_sample = 64
sample = 0
visible = 0.0
while sample < n_sample:
rand_vec = ti.Vector([ti.random(), ti.random(), ti.random()]) * 2 - 1.0
if (rand_vec ** 2).sum() <= 1.0:
sample += 1
pos_test = pos + rand_vec * ao_radius
pos_test_view = xyz(self.camera_main.W2V @ position(pos_test))
pos_test_UV = self.camera_main.uncook(pos_test_view)
z_test = self.camera_main.depth(pos_test_view.z)
if z_test >= texture(self.camera_main.zbuf, pos_test_UV):
visible += 1.0
return min(1.0, visible / n_sample * 2)
'''
Shading
'''
@ti.kernel
def shade_particles(self):
camera = self.camera_main
# third pass: shading
for I in ti.grouped(camera.img):
rayorig, viewdir = camera.pixel_ray(I)
closest, normal, color = self.intersect_scene(rayorig, viewdir)
pos_world = rayorig + viewdir * closest
pos_view = xyz(camera.W2V @ position(pos_world))
z = camera.depth(pos_view.z)
if z < camera.zbuf[I]:
normal = camera.normal[I]
color = camera.img[I]
pos_world = camera.pos[I]
# ambient
ao = self.ssao(pos_world)
color = color * 0.2 * ao
# diffuse shadowed
visibility = self.shadowmap_soft(pos_world)
color += visibility * shade_area_diffuse(pos_world, normal, color,
-self.light_normal, self.light_vertices, self.light_color, self.light_intensity)
color += shade_area_diffuse(pos_world, normal, color,
ti.Vector([1.0, 0.0, 0.0]), self.left_wall, self.color_left, self.light_intensity * 0.02)
color += shade_area_diffuse(pos_world, normal, color,
ti.Vector([-1.0, 0.0, 0.0]), self.right_wall, self.color_right, self.light_intensity * 0.02)
#camera.img[I] = ti.Vector([1.0, 1.0, 1.0]) * ao * visibility
# reflection
#refldir = viewdir - 2 * viewdir.dot(normal) * normal
# tone mapping
#camera.img[I] = camera.img[I] * 1.6 / (1.0 + camera.img[I])
# gamma correction
camera.img[I] = color ** (1 / 2.2)
def render_main(self):
self.clear_camera(self.camera_main)
self.camera_shadow.zbuf.fill(0)
self.calculate_buffers(self.camera_shadow)
self.calculate_buffers(self.camera_main)
self.shade_particles()
'''
Main render function which renders to the GUI.
'''
def render(self, gui):
gui.clear()
self.camera_main.from_mouse(gui)
self.render_main()
gui.set_image(self.main_img)
#gui.set_image(self.camera_shadow.zbuf) | 0.708213 | 0.514278 |
from __future__ import print_function
import argparse
import os
import csv
import sys
from scipy.stats import pearsonr
import numpy
import pandas
def mse(y_true, y_pred):
from sklearn.metrics import mean_squared_error
return mean_squared_error(y_true,y_pred)
def f1(y_true, y_pred):
from sklearn.metrics import f1_score
label = [0,1,2,3,4,5,6]
return f1_score(y_true,y_pred,labels=label,average="micro")
def ccc(y_true, y_pred):
true_mean = numpy.mean(y_true)
true_variance = numpy.var(y_true)
pred_mean = numpy.mean(y_pred)
pred_variance = numpy.var(y_pred)
rho,_ = pearsonr(y_pred,y_true)
std_predictions = numpy.std(y_pred)
std_gt = numpy.std(y_true)
ccc = 2 * rho * std_gt * std_predictions / (
std_predictions ** 2 + std_gt ** 2 +
(pred_mean - true_mean) ** 2)
return ccc, rho
def calculateCCC(validationFile, modelOutputFile):
dataY = pandas.read_csv(validationFile, header=0, sep=",")
dataYPred = pandas.read_csv(modelOutputFile, header=0, sep=",")
dataYArousal = dataY["arousal"]
dataYValence = dataY["valence"]
dataYPredArousal = dataYPred["arousal"]
dataYPredValence = dataYPred["valence"]
arousalCCC, acor = ccc(dataYArousal, dataYPredArousal)
arousalmse = mse(dataYArousal, dataYPredArousal)
valenceCCC, vcor = ccc(dataYValence, dataYPredValence)
valencemse = mse(dataYValence, dataYPredValence)
print ("Arousal CCC: ", arousalCCC)
print ("Arousal Pearson Cor: ", acor)
print ("Arousal MSE: ", arousalmse)
print ("Valence CCC: ", valenceCCC)
print ("Valence cor: ", vcor)
print ("Valence MSE: ", valencemse)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("validationFile")
parser.add_argument("modelOutputFile")
opt = parser.parse_args()
if not os.path.exists(opt.validationFile):
print("Cannot find validation File")
sys.exit(-1)
if not os.path.exists(opt.modelOutputFile):
print("Cannot find modelOutput File")
sys.exit(-1)
calculateCCC(opt.validationFile, opt.modelOutputFile) | calculateEvaluationCCC.py | from __future__ import print_function
import argparse
import os
import csv
import sys
from scipy.stats import pearsonr
import numpy
import pandas
def mse(y_true, y_pred):
from sklearn.metrics import mean_squared_error
return mean_squared_error(y_true,y_pred)
def f1(y_true, y_pred):
from sklearn.metrics import f1_score
label = [0,1,2,3,4,5,6]
return f1_score(y_true,y_pred,labels=label,average="micro")
def ccc(y_true, y_pred):
true_mean = numpy.mean(y_true)
true_variance = numpy.var(y_true)
pred_mean = numpy.mean(y_pred)
pred_variance = numpy.var(y_pred)
rho,_ = pearsonr(y_pred,y_true)
std_predictions = numpy.std(y_pred)
std_gt = numpy.std(y_true)
ccc = 2 * rho * std_gt * std_predictions / (
std_predictions ** 2 + std_gt ** 2 +
(pred_mean - true_mean) ** 2)
return ccc, rho
def calculateCCC(validationFile, modelOutputFile):
dataY = pandas.read_csv(validationFile, header=0, sep=",")
dataYPred = pandas.read_csv(modelOutputFile, header=0, sep=",")
dataYArousal = dataY["arousal"]
dataYValence = dataY["valence"]
dataYPredArousal = dataYPred["arousal"]
dataYPredValence = dataYPred["valence"]
arousalCCC, acor = ccc(dataYArousal, dataYPredArousal)
arousalmse = mse(dataYArousal, dataYPredArousal)
valenceCCC, vcor = ccc(dataYValence, dataYPredValence)
valencemse = mse(dataYValence, dataYPredValence)
print ("Arousal CCC: ", arousalCCC)
print ("Arousal Pearson Cor: ", acor)
print ("Arousal MSE: ", arousalmse)
print ("Valence CCC: ", valenceCCC)
print ("Valence cor: ", vcor)
print ("Valence MSE: ", valencemse)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("validationFile")
parser.add_argument("modelOutputFile")
opt = parser.parse_args()
if not os.path.exists(opt.validationFile):
print("Cannot find validation File")
sys.exit(-1)
if not os.path.exists(opt.modelOutputFile):
print("Cannot find modelOutput File")
sys.exit(-1)
calculateCCC(opt.validationFile, opt.modelOutputFile) | 0.337859 | 0.230573 |
import sys
import re
import json
import uuid
import datetime
import time
import glob
import codecs
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2014"
__license__ = "GPL"
__version__ = "3.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
# ghost settings
post_id = 1
author_id = 1
next_tag_id = 1
post_tag_id = 1
lang = "en_US"
# internal
ARG_INPUT_FOLDER = 1
ARG_OUTPUT_JSON = 2
def strip_single_quote(s):
if s.endswith("'"): s = s[:-1]
if s.startswith("'"): s = s[1:]
return s
def tuning_post_content(s):
return s.replace("{% codeblock %}", "```").replace("{% endcodeblock %}", "```")
if len(sys.argv) < 3:
ARG_INPUT_FOLDER = "."
ARG_OUTPUT_JSON = "output.json"
import translitcodec
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def slugify(text, delim=u'-'):
"""Generates an ASCII-only slug."""
result = []
for word in _punct_re.split(text.lower()):
word = word.encode('translit/long')
if word:
result.append(word)
return unicode(delim.join(result))
posts = []
tags = []
posts_tags = []
categories = {}
markdown_files = glob.glob("%s/*.markdown" % sys.argv[ARG_INPUT_FOLDER]) + glob.glob("%s/*.md" % sys.argv[ARG_INPUT_FOLDER])
for markdown_file in markdown_files:
is_metadata = False
is_post = False
post = {
"id": post_id,
"uuid": str(uuid.uuid4()),
"created_by": author_id,
"updated_by": author_id,
"published_by": author_id,
"language": lang,
"status": "published"
}
markdown = []
with codecs.open(markdown_file, "r", "utf-8") as f:
for line in f:
line = line.rstrip()
if line == "---":
if is_metadata:
is_post = True
else:
is_metadata = True
continue
if is_post:
m = re.match(r'\{% img (?P<image>.+) %\}', line)
if m:
markdown.append("".format(m.group("image")))
else:
markdown.append(line)
elif is_metadata:
if line == "":
continue
for match in re.finditer(r'(?P<field>\w+):\s*(?P<value>.*)', line):
field = match.group("field")
value = match.group("value")
if field == "title":
title = re.sub(r'^"|"$', '', value)
title = strip_single_quote(title)
post["title"] = title[:150] if len(title) > 150 else title
post["slug"] = slugify(title)
elif field == "slug":
# FIX: Use slug if available
post["slug"] = value
elif field == "published":
post["status"] = value == "true" and "published" or "draft"
elif field == "date":
# FIX: This fixes the ValueError when timezone is at the end of the value
values = value.split(':')
if (len(values) > 1):
value = values[0] + ":" + values[1]
else:
value = values[0] + " " + "00:00"
d = datetime.datetime.strptime(value.strip(), "%Y-%m-%d %H:%M")
t = int(time.mktime(d.timetuple()) * 1e3)
post["created_at"] = t
post["updated_at"] = t
post["published_at"] = t
elif field == "categories":
if not value:
pass
the_tags = value.split(" ")
for tag in the_tags:
if tag:
if not categories.has_key(tag):
categories[tag] = next_tag_id
next_tag_id = next_tag_id + 1
tags.append({
"id": categories[tag],
"slug": slugify(tag),
"name": tag.replace(",", "").replace("]", "").replace("[", ""),
"uuid": str(uuid.uuid4())
})
posts_tags.append({
"id": post_tag_id,
"post_id": post_id,
"tag_id": categories[tag],
})
post_tag_id = post_tag_id + 1
else:
pass
else:
raise Exception('Unexpected exception!')
post_id = post_id + 1
post["markdown"] = tuning_post_content("\n".join(markdown))
posts.append(post)
ghost_json_file_name = sys.argv[ARG_OUTPUT_JSON]
ghost_data = json.loads(open(ghost_json_file_name).read())
ghost_data["db"][0]["data"]["posts"] = posts
ghost_data["db"][0]["data"]["tags"] = tags
ghost_data["db"][0]["data"]['posts_tags'] = posts_tags
print json.dumps(ghost_data) | octopress2ghost.py | import sys
import re
import json
import uuid
import datetime
import time
import glob
import codecs
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2014"
__license__ = "GPL"
__version__ = "3.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
# ghost settings
post_id = 1
author_id = 1
next_tag_id = 1
post_tag_id = 1
lang = "en_US"
# internal
ARG_INPUT_FOLDER = 1
ARG_OUTPUT_JSON = 2
def strip_single_quote(s):
if s.endswith("'"): s = s[:-1]
if s.startswith("'"): s = s[1:]
return s
def tuning_post_content(s):
return s.replace("{% codeblock %}", "```").replace("{% endcodeblock %}", "```")
if len(sys.argv) < 3:
ARG_INPUT_FOLDER = "."
ARG_OUTPUT_JSON = "output.json"
import translitcodec
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def slugify(text, delim=u'-'):
"""Generates an ASCII-only slug."""
result = []
for word in _punct_re.split(text.lower()):
word = word.encode('translit/long')
if word:
result.append(word)
return unicode(delim.join(result))
posts = []
tags = []
posts_tags = []
categories = {}
markdown_files = glob.glob("%s/*.markdown" % sys.argv[ARG_INPUT_FOLDER]) + glob.glob("%s/*.md" % sys.argv[ARG_INPUT_FOLDER])
for markdown_file in markdown_files:
is_metadata = False
is_post = False
post = {
"id": post_id,
"uuid": str(uuid.uuid4()),
"created_by": author_id,
"updated_by": author_id,
"published_by": author_id,
"language": lang,
"status": "published"
}
markdown = []
with codecs.open(markdown_file, "r", "utf-8") as f:
for line in f:
line = line.rstrip()
if line == "---":
if is_metadata:
is_post = True
else:
is_metadata = True
continue
if is_post:
m = re.match(r'\{% img (?P<image>.+) %\}', line)
if m:
markdown.append("".format(m.group("image")))
else:
markdown.append(line)
elif is_metadata:
if line == "":
continue
for match in re.finditer(r'(?P<field>\w+):\s*(?P<value>.*)', line):
field = match.group("field")
value = match.group("value")
if field == "title":
title = re.sub(r'^"|"$', '', value)
title = strip_single_quote(title)
post["title"] = title[:150] if len(title) > 150 else title
post["slug"] = slugify(title)
elif field == "slug":
# FIX: Use slug if available
post["slug"] = value
elif field == "published":
post["status"] = value == "true" and "published" or "draft"
elif field == "date":
# FIX: This fixes the ValueError when timezone is at the end of the value
values = value.split(':')
if (len(values) > 1):
value = values[0] + ":" + values[1]
else:
value = values[0] + " " + "00:00"
d = datetime.datetime.strptime(value.strip(), "%Y-%m-%d %H:%M")
t = int(time.mktime(d.timetuple()) * 1e3)
post["created_at"] = t
post["updated_at"] = t
post["published_at"] = t
elif field == "categories":
if not value:
pass
the_tags = value.split(" ")
for tag in the_tags:
if tag:
if not categories.has_key(tag):
categories[tag] = next_tag_id
next_tag_id = next_tag_id + 1
tags.append({
"id": categories[tag],
"slug": slugify(tag),
"name": tag.replace(",", "").replace("]", "").replace("[", ""),
"uuid": str(uuid.uuid4())
})
posts_tags.append({
"id": post_tag_id,
"post_id": post_id,
"tag_id": categories[tag],
})
post_tag_id = post_tag_id + 1
else:
pass
else:
raise Exception('Unexpected exception!')
post_id = post_id + 1
post["markdown"] = tuning_post_content("\n".join(markdown))
posts.append(post)
ghost_json_file_name = sys.argv[ARG_OUTPUT_JSON]
ghost_data = json.loads(open(ghost_json_file_name).read())
ghost_data["db"][0]["data"]["posts"] = posts
ghost_data["db"][0]["data"]["tags"] = tags
ghost_data["db"][0]["data"]['posts_tags'] = posts_tags
print json.dumps(ghost_data) | 0.223377 | 0.161353 |
import mock
import pytest
from boto3.exceptions import Boto3Error
from ruamel.yaml import YAML
from paasta_tools.cli.cmds.spark_run import configure_and_run_docker_container
from paasta_tools.cli.cmds.spark_run import create_spark_config_str
from paasta_tools.cli.cmds.spark_run import DEFAULT_SERVICE
from paasta_tools.cli.cmds.spark_run import emit_resource_requirements
from paasta_tools.cli.cmds.spark_run import get_aws_credentials
from paasta_tools.cli.cmds.spark_run import get_docker_cmd
from paasta_tools.cli.cmds.spark_run import get_docker_run_cmd
from paasta_tools.cli.cmds.spark_run import get_spark_config
from paasta_tools.cli.cmds.spark_run import load_aws_credentials_from_yaml
from paasta_tools.utils import InstanceConfig
from paasta_tools.utils import SystemPaastaConfig
@mock.patch('paasta_tools.cli.cmds.spark_run.os.geteuid', autospec=True)
@mock.patch('paasta_tools.cli.cmds.spark_run.os.getegid', autospec=True)
def test_get_docker_run_cmd(
    mock_getegid,
    mock_geteuid,
):
    """get_docker_run_cmd should embed the caller's uid/gid, the container
    name, env vars, volume mounts, image and wrapped shell command."""
    # Pretend the current process runs as uid 1234 / gid 100.
    mock_geteuid.return_value = 1234
    mock_getegid.return_value = 100

    actual = get_docker_run_cmd(
        'fake_name',
        ['v1:v1:rw', 'v2:v2:rw'],
        {'k1': 'v1', 'k2': 'v2'},
        'fake-registry/fake-service',
        'pyspark',
    )

    # Ignore the docker invocation preamble (first five tokens) and check
    # everything that follows it.
    expected_tail = [
        '--user=1234:100',
        '--name=fake_name',
        '--env', 'k1=v1', '--env', 'k2=v2',
        '--volume=v1:v1:rw', '--volume=v2:v2:rw',
        'fake-registry/fake-service',
        'sh', '-c', 'pyspark', {},
    ]
    assert actual[5:] == expected_tail
@mock.patch('paasta_tools.cli.cmds.spark_run.find_mesos_leader', autospec=True)
@mock.patch('paasta_tools.cli.cmds.spark_run._load_mesos_secret', autospec=True)
def test_get_spark_config(
    mock_load_mesos_secret,
    mock_find_mesos_leader,
):
    """The generated config should point spark.master at the mesos leader,
    and that setting should survive rendering into a config string."""
    mock_find_mesos_leader.return_value = 'fake_leader'

    fake_args = mock.MagicMock()
    fake_args.cluster = 'fake_cluster'
    fake_system_paasta_config = SystemPaastaConfig(
        {"cluster_fqdn_format": "paasta-{cluster:s}.something"},
        'fake_dir',
    )

    spark_conf = get_spark_config(
        args=fake_args,
        spark_app_name='fake_name',
        spark_ui_port=123,
        docker_img='fake-registry/fake-service',
        system_paasta_config=fake_system_paasta_config,
        volumes=['v1:v1:rw', 'v2:v2:rw'],
    )

    assert spark_conf['spark.master'] == 'mesos://fake_leader:5050'
    # The same master setting must appear in the flattened CLI string.
    rendered = create_spark_config_str(spark_conf, is_mrjob=False)
    assert 'spark.master=mesos://fake_leader:5050' in rendered
@mock.patch('paasta_tools.cli.cmds.spark_run.get_aws_credentials', autospec=True)
@mock.patch('paasta_tools.cli.cmds.spark_run.os.path.exists', autospec=True)
@mock.patch('paasta_tools.cli.cmds.spark_run.pick_random_port', autospec=True)
@mock.patch('paasta_tools.cli.cmds.spark_run.get_username', autospec=True)
@mock.patch('paasta_tools.cli.cmds.spark_run.get_spark_config', autospec=True)
@mock.patch('paasta_tools.cli.cmds.spark_run.run_docker_container', autospec=True)
@mock.patch('time.time', autospec=True)
class TestConfigureAndRunDockerContainer:
    """Tests for configure_and_run_docker_container.

    The class-level mock.patch decorators are applied to every test method.
    Mocks are injected bottom-up, so each method's parameters must appear in
    the order: mock_time, mock_run_docker_container, mock_get_spark_config,
    mock_get_username, mock_pick_random_port, mock_os_path_exists,
    mock_get_aws_credentials — do not reorder them.
    """

    # Minimal instance config with one extra RO volume; shared by all tests.
    instance_config = InstanceConfig(
        cluster='fake_cluster',
        instance='fake_instance',
        service='fake_service',
        config_dict={
            'extra_volumes': [{
                "hostPath": "/h1",
                "containerPath": "/c1",
                "mode": "RO",
            }],
        },
        branch_dict={'docker_image': 'fake_service:fake_sha'},
    )
    # System-wide config contributing a second RO volume.
    system_paasta_config = SystemPaastaConfig(
        {
            'volumes': [{
                "hostPath": "/h2",
                "containerPath": "/c2",
                "mode": "RO",
            }],
        },
        'fake_dir',
    )

    @pytest.fixture
    def mock_create_spark_config_str(self):
        """Patch create_spark_config_str for tests that request this fixture
        as a parameter (pytest injects it after the class-level mocks)."""
        with mock.patch(
            'paasta_tools.cli.cmds.spark_run.create_spark_config_str',
            autospec=True,
        ) as _mock_create_spark_config_str:
            yield _mock_create_spark_config_str

    def test_configure_and_run_docker_container(
        self,
        mock_time,
        mock_run_docker_container,
        mock_get_spark_config,
        mock_get_username,
        mock_pick_random_port,
        mock_os_path_exists,
        mock_get_aws_credentials,
    ):
        """Happy path: volumes, env (incl. AWS creds and SPARK_OPTS) and the
        docker command are assembled correctly and passed to
        run_docker_container."""
        mock_pick_random_port.return_value = 123
        mock_get_username.return_value = 'fake_user'
        mock_get_spark_config.return_value = {'spark.app.name': 'fake_app'}
        mock_run_docker_container.return_value = 0
        mock_get_aws_credentials.return_value = ('id', 'secret')
        args = mock.MagicMock()
        args.cluster = 'fake_cluster'
        args.cmd = 'pyspark'
        args.work_dir = '/fake_dir:/spark_driver'
        args.dry_run = True
        args.mrjob = False
        retcode = configure_and_run_docker_container(
            args=args,
            docker_img='fake-registry/fake-service',
            instance_config=self.instance_config,
            system_paasta_config=self.system_paasta_config,
        )
        assert retcode == 0
        # Container name combines the user and the randomly-picked port;
        # volumes merge instance extras, system volumes, the work dir, and
        # the passwd/group files needed so uids resolve inside the container.
        mock_run_docker_container.assert_called_once_with(
            container_name='paasta_spark_run_fake_user_123',
            volumes=[
                '/h1:/c1:ro',
                '/h2:/c2:ro',
                '/fake_dir:/spark_driver:rw',
                '/etc/passwd:/etc/passwd:ro',
                '/etc/group:/etc/group:ro',
            ],
            environment={
                'PAASTA_SERVICE': 'fake_service',
                'PAASTA_INSTANCE': 'fake_instance',
                'PAASTA_CLUSTER': 'fake_cluster',
                'PAASTA_DEPLOY_GROUP': 'fake_cluster.fake_instance',
                'PAASTA_DOCKER_IMAGE': 'fake_service:fake_sha',
                'PAASTA_LAUNCHED_BY': mock.ANY,
                'AWS_ACCESS_KEY_ID': 'id',
                'AWS_SECRET_ACCESS_KEY': 'secret',
                'SPARK_USER': 'root',
                'SPARK_OPTS': '--conf spark.app.name=fake_app',
            },
            docker_img='fake-registry/fake-service',
            docker_cmd='pyspark --conf spark.app.name=fake_app',
            dry_run=True,
        )

    def test_configure_and_run_docker_container_mrjob(
        self,
        mock_time,
        mock_run_docker_container,
        mock_get_spark_config,
        mock_get_username,
        mock_pick_random_port,
        mock_os_path_exists,
        mock_get_aws_credentials,
    ):
        """mrjob mode: spark settings become --spark-master/--jobconf CLI
        flags and resource metrics are emitted."""
        mock_get_aws_credentials.return_value = ('id', 'secret')
        with mock.patch(
            'paasta_tools.cli.cmds.spark_run.emit_resource_requirements', autospec=True,
        ) as mock_emit_resource_requirements, mock.patch(
            'paasta_tools.cli.cmds.spark_run.clusterman_metrics', autospec=True,
        ):
            mock_get_spark_config.return_value = {'spark.cores.max': 5, 'spark.master': 'mesos://spark.master'}
            args = mock.MagicMock(cmd='python mrjob_wrapper.py', mrjob=True)
            configure_and_run_docker_container(
                args=args,
                docker_img='fake-registry/fake-service',
                instance_config=self.instance_config,
                system_paasta_config=self.system_paasta_config,
            )
            # NOTE: `args` is rebound here from the MagicMock to the call_args
            # positional tuple.
            args, kwargs = mock_run_docker_container.call_args
            assert kwargs['docker_cmd'] == (
                'python mrjob_wrapper.py --spark-master=mesos://spark.master --jobconf spark.cores.max=5'
            )
            assert mock_emit_resource_requirements.called

    def test_suppress_clusterman_metrics_errors(
        self,
        mock_time,
        mock_run_docker_container,
        mock_get_spark_config,
        mock_get_username,
        mock_pick_random_port,
        mock_os_path_exists,
        mock_get_aws_credentials,
        mock_create_spark_config_str,  # fixture, injected after class mocks
    ):
        """Metrics-emission failures propagate by default, but are swallowed
        when --suppress-clusterman-metrics-errors is set."""
        mock_get_aws_credentials.return_value = ('id', 'secret')
        with mock.patch(
            'paasta_tools.cli.cmds.spark_run.emit_resource_requirements', autospec=True,
        ) as mock_emit_resource_requirements, mock.patch(
            'paasta_tools.cli.cmds.spark_run.clusterman_metrics', autospec=True,
        ):
            mock_emit_resource_requirements.side_effect = Boto3Error
            mock_create_spark_config_str.return_value = '--conf spark.cores.max=5'
            args = mock.MagicMock(
                suppress_clusterman_metrics_errors=False,
                cmd='pyspark',
            )
            with pytest.raises(Boto3Error):
                configure_and_run_docker_container(
                    args=args,
                    docker_img='fake-registry/fake-service',
                    instance_config=self.instance_config,
                    system_paasta_config=self.system_paasta_config,
                )

            # make sure we don't blow up when this setting is True
            args.suppress_clusterman_metrics_errors = True
            configure_and_run_docker_container(
                args=args,
                docker_img='fake-registry/fake-service',
                instance_config=self.instance_config,
                system_paasta_config=self.system_paasta_config,
            )

    def test_dont_emit_metrics_for_inappropriate_commands(
        self,
        mock_time,
        mock_run_docker_container,
        mock_get_spark_config,
        mock_get_username,
        mock_pick_random_port,
        mock_os_path_exists,
        mock_get_aws_credentials,
        mock_create_spark_config_str,  # fixture, injected after class mocks
    ):
        """Non-spark commands (e.g. a plain shell) must not emit resource
        metrics."""
        mock_get_aws_credentials.return_value = ('id', 'secret')
        with mock.patch(
            'paasta_tools.cli.cmds.spark_run.emit_resource_requirements', autospec=True,
        ) as mock_emit_resource_requirements, mock.patch(
            'paasta_tools.cli.cmds.spark_run.clusterman_metrics', autospec=True,
        ):
            mock_create_spark_config_str.return_value = '--conf spark.cores.max=5'
            args = mock.MagicMock(cmd='bash', mrjob=False)
            configure_and_run_docker_container(
                args=args,
                docker_img='fake-registry/fake-service',
                instance_config=self.instance_config,
                system_paasta_config=self.system_paasta_config,
            )
            assert not mock_emit_resource_requirements.called
def test_emit_resource_requirements(tmpdir):
spark_config_dict = {
'spark.executor.cores': '2',
'spark.cores.max': '4',
'spark.executor.memory': '4g',
'spark.mesos.executor.memoryOverhead': '555',
'spark.app.name': 'paasta_spark_run_johndoe_2_3',
'spark.mesos.constraints': 'pool:cool-pool\\;other:value',
}
clusterman_yaml_contents = {
'mesos_clusters': {
'anywhere-prod': {
'aws_region': 'us-north-14',
},
},
}
clusterman_yaml_file_path = tmpdir.join('fake_clusterman.yaml')
with open(clusterman_yaml_file_path, 'w') as f:
YAML().dump(clusterman_yaml_contents, f)
with mock.patch(
'paasta_tools.cli.cmds.spark_run.get_clusterman_metrics', autospec=True,
), mock.patch(
'paasta_tools.cli.cmds.spark_run.clusterman_metrics', autospec=True,
) as mock_clusterman_metrics, mock.patch(
'paasta_tools.cli.cmds.spark_run.CLUSTERMAN_YAML_FILE_PATH',
clusterman_yaml_file_path,
autospec=None, # we're replacing this name, so we can't autospec
), mock.patch(
'time.time', return_value=1234, autospec=True,
):
mock_clusterman_metrics.generate_key_with_dimensions.side_effect = lambda name, dims: (
f'{name}|framework_name={dims["framework_name"]},webui_url={dims["webui_url"]}'
)
emit_resource_requirements(spark_config_dict, 'anywhere-prod', 'http://spark.yelp')
mock_clusterman_metrics.ClustermanMetricsBotoClient.assert_called_once_with(
region_name='us-north-14',
app_identifier='cool-pool',
)
metrics_writer = mock_clusterman_metrics.ClustermanMetricsBotoClient.return_value.\
get_writer.return_value.__enter__.return_value
metric_key_template = (
'requested_{resource}|framework_name=paasta_spark_run_johndoe_2_3,webui_url=http://spark.yelp'
)
expected_memory_request = (4 * 1024 + 555) * 2
metrics_writer.send.assert_has_calls(
[
mock.call((metric_key_template.format(resource='cpus'), 1234, 4)),
mock.call((metric_key_template.format(resource='mem'), 1234, expected_memory_request)),
mock.call((metric_key_template.format(resource='disk'), 1234, expected_memory_request)),
],
any_order=True,
)
def test_get_docker_cmd_add_spark_conf_str():
args = mock.Mock(cmd='pyspark -v', mrjob=False)
instance_config = None
spark_conf_str = '--conf spark.app.name=fake_app'
docker_cmd = get_docker_cmd(args, instance_config, spark_conf_str)
assert docker_cmd == 'pyspark --conf spark.app.name=fake_app -v'
def test_get_docker_cmd_other_cmd():
args = mock.Mock(cmd='bash', mrjob=False)
instance_config = None
spark_conf_str = '--conf spark.app.name=fake_app'
assert get_docker_cmd(args, instance_config, spark_conf_str) == 'bash'
def test_get_docker_cmd_mrjob():
args = mock.Mock(cmd='python mrjob_wrapper.py', mrjob=True)
instance_config = None
spark_conf_str = '--jobconf spark.app.name=fake_app'
expected_cmd = 'python mrjob_wrapper.py --jobconf spark.app.name=fake_app'
assert get_docker_cmd(args, instance_config, spark_conf_str) == expected_cmd
def test_load_aws_credentials_from_yaml(tmpdir):
fake_access_key_id = 'fake_access_key_id'
fake_secret_access_key = 'fake_secret_access_key'
yaml_file = tmpdir.join('test.yaml')
yaml_file.write(
f'aws_access_key_id: "{fake_access_key_id}"\n'
f'aws_secret_access_key: "{fake_secret_access_key}"',
)
aws_access_key_id, aws_secret_access_key = load_aws_credentials_from_yaml(yaml_file)
assert aws_access_key_id == fake_access_key_id
assert aws_secret_access_key == fake_secret_access_key
class TestGetAwsCredentials:
@pytest.fixture(autouse=True)
def mock_load_aws_credentials_from_yaml(self):
with mock.patch(
'paasta_tools.cli.cmds.spark_run.load_aws_credentials_from_yaml',
autospec=True,
) as self.mock_load_aws_credentials_from_yaml:
yield
def test_yaml_provided(self):
args = mock.Mock(aws_credentials_yaml='credentials.yaml')
credentials = get_aws_credentials(args)
self.mock_load_aws_credentials_from_yaml.assert_called_once_with('credentials.yaml')
assert credentials == self.mock_load_aws_credentials_from_yaml.return_value
@mock.patch('paasta_tools.cli.cmds.spark_run.os', autospec=True)
@mock.patch('paasta_tools.cli.cmds.spark_run.get_service_aws_credentials_path', autospec=True)
def test_service_provided_no_yaml(self, mock_get_credentials_path, mock_os):
args = mock.Mock(aws_credentials_yaml=None, service='service_name')
mock_os.path.exists.return_value = True
credentials = get_aws_credentials(args)
mock_get_credentials_path.assert_called_once_with(args.service)
self.mock_load_aws_credentials_from_yaml.assert_called_once_with(
mock_get_credentials_path.return_value,
)
assert credentials == self.mock_load_aws_credentials_from_yaml.return_value
@mock.patch('paasta_tools.cli.cmds.spark_run.Session.get_credentials', autospec=True)
def test_use_default_creds(self, mock_get_credentials):
args = mock.Mock(aws_credentials_yaml=None, service=DEFAULT_SERVICE)
mock_get_credentials.return_value = mock.MagicMock(access_key='id', secret_key='secret')
credentials = get_aws_credentials(args)
assert credentials == ('id', 'secret')
@mock.patch('paasta_tools.cli.cmds.spark_run.os', autospec=True)
@mock.patch('paasta_tools.cli.cmds.spark_run.Session.get_credentials', autospec=True)
def test_service_provided_fallback_to_default(self, mock_get_credentials, mock_os):
args = mock.Mock(aws_credentials_yaml=None, service='service_name')
mock_os.path.exists.return_value = False
mock_get_credentials.return_value = mock.MagicMock(access_key='id', secret_key='secret')
credentials = get_aws_credentials(args)
assert credentials == ('id', 'secret') | tests/cli/test_cmds_spark_run.py | import mock
import pytest
from boto3.exceptions import Boto3Error
from ruamel.yaml import YAML
from paasta_tools.cli.cmds.spark_run import configure_and_run_docker_container
from paasta_tools.cli.cmds.spark_run import create_spark_config_str
from paasta_tools.cli.cmds.spark_run import DEFAULT_SERVICE
from paasta_tools.cli.cmds.spark_run import emit_resource_requirements
from paasta_tools.cli.cmds.spark_run import get_aws_credentials
from paasta_tools.cli.cmds.spark_run import get_docker_cmd
from paasta_tools.cli.cmds.spark_run import get_docker_run_cmd
from paasta_tools.cli.cmds.spark_run import get_spark_config
from paasta_tools.cli.cmds.spark_run import load_aws_credentials_from_yaml
from paasta_tools.utils import InstanceConfig
from paasta_tools.utils import SystemPaastaConfig
@mock.patch('paasta_tools.cli.cmds.spark_run.os.geteuid', autospec=True)
@mock.patch('paasta_tools.cli.cmds.spark_run.os.getegid', autospec=True)
def test_get_docker_run_cmd(
mock_getegid,
mock_geteuid,
):
mock_geteuid.return_value = 1234
mock_getegid.return_value = 100
container_name = 'fake_name'
volumes = ['v1:v1:rw', 'v2:v2:rw']
env = {'k1': 'v1', 'k2': 'v2'}
docker_img = 'fake-registry/fake-service'
docker_cmd = 'pyspark'
actual = get_docker_run_cmd(
container_name,
volumes,
env,
docker_img,
docker_cmd,
)
assert actual[5:] == [
'--user=1234:100',
'--name=fake_name',
'--env', 'k1=v1', '--env', 'k2=v2',
'--volume=v1:v1:rw', '--volume=v2:v2:rw',
'fake-registry/fake-service',
'sh', '-c', 'pyspark', {},
]
@mock.patch('paasta_tools.cli.cmds.spark_run.find_mesos_leader', autospec=True)
@mock.patch('paasta_tools.cli.cmds.spark_run._load_mesos_secret', autospec=True)
def test_get_spark_config(
mock_load_mesos_secret,
mock_find_mesos_leader,
):
mock_find_mesos_leader.return_value = 'fake_leader'
args = mock.MagicMock()
args.cluster = 'fake_cluster'
spark_conf = get_spark_config(
args=args,
spark_app_name='fake_name',
spark_ui_port=123,
docker_img='fake-registry/fake-service',
system_paasta_config=SystemPaastaConfig(
{"cluster_fqdn_format": "paasta-{cluster:s}.something"},
'fake_dir',
),
volumes=['v1:v1:rw', 'v2:v2:rw'],
)
assert spark_conf['spark.master'] == 'mesos://fake_leader:5050'
assert 'spark.master=mesos://fake_leader:5050' in create_spark_config_str(spark_conf, is_mrjob=False)
@mock.patch('paasta_tools.cli.cmds.spark_run.get_aws_credentials', autospec=True)
@mock.patch('paasta_tools.cli.cmds.spark_run.os.path.exists', autospec=True)
@mock.patch('paasta_tools.cli.cmds.spark_run.pick_random_port', autospec=True)
@mock.patch('paasta_tools.cli.cmds.spark_run.get_username', autospec=True)
@mock.patch('paasta_tools.cli.cmds.spark_run.get_spark_config', autospec=True)
@mock.patch('paasta_tools.cli.cmds.spark_run.run_docker_container', autospec=True)
@mock.patch('time.time', autospec=True)
class TestConfigureAndRunDockerContainer:
instance_config = InstanceConfig(
cluster='fake_cluster',
instance='fake_instance',
service='fake_service',
config_dict={
'extra_volumes': [{
"hostPath": "/h1",
"containerPath": "/c1",
"mode": "RO",
}],
},
branch_dict={'docker_image': 'fake_service:fake_sha'},
)
system_paasta_config = SystemPaastaConfig(
{
'volumes': [{
"hostPath": "/h2",
"containerPath": "/c2",
"mode": "RO",
}],
},
'fake_dir',
)
@pytest.fixture
def mock_create_spark_config_str(self):
with mock.patch(
'paasta_tools.cli.cmds.spark_run.create_spark_config_str',
autospec=True,
) as _mock_create_spark_config_str:
yield _mock_create_spark_config_str
def test_configure_and_run_docker_container(
self,
mock_time,
mock_run_docker_container,
mock_get_spark_config,
mock_get_username,
mock_pick_random_port,
mock_os_path_exists,
mock_get_aws_credentials,
):
mock_pick_random_port.return_value = 123
mock_get_username.return_value = 'fake_user'
mock_get_spark_config.return_value = {'spark.app.name': 'fake_app'}
mock_run_docker_container.return_value = 0
mock_get_aws_credentials.return_value = ('id', 'secret')
args = mock.MagicMock()
args.cluster = 'fake_cluster'
args.cmd = 'pyspark'
args.work_dir = '/fake_dir:/spark_driver'
args.dry_run = True
args.mrjob = False
retcode = configure_and_run_docker_container(
args=args,
docker_img='fake-registry/fake-service',
instance_config=self.instance_config,
system_paasta_config=self.system_paasta_config,
)
assert retcode == 0
mock_run_docker_container.assert_called_once_with(
container_name='paasta_spark_run_fake_user_123',
volumes=[
'/h1:/c1:ro',
'/h2:/c2:ro',
'/fake_dir:/spark_driver:rw',
'/etc/passwd:/etc/passwd:ro',
'/etc/group:/etc/group:ro',
],
environment={
'PAASTA_SERVICE': 'fake_service',
'PAASTA_INSTANCE': 'fake_instance',
'PAASTA_CLUSTER': 'fake_cluster',
'PAASTA_DEPLOY_GROUP': 'fake_cluster.fake_instance',
'PAASTA_DOCKER_IMAGE': 'fake_service:fake_sha',
'PAASTA_LAUNCHED_BY': mock.ANY,
'AWS_ACCESS_KEY_ID': 'id',
'AWS_SECRET_ACCESS_KEY': 'secret',
'SPARK_USER': 'root',
'SPARK_OPTS': '--conf spark.app.name=fake_app',
},
docker_img='fake-registry/fake-service',
docker_cmd='pyspark --conf spark.app.name=fake_app',
dry_run=True,
)
def test_configure_and_run_docker_container_mrjob(
self,
mock_time,
mock_run_docker_container,
mock_get_spark_config,
mock_get_username,
mock_pick_random_port,
mock_os_path_exists,
mock_get_aws_credentials,
):
mock_get_aws_credentials.return_value = ('id', 'secret')
with mock.patch(
'paasta_tools.cli.cmds.spark_run.emit_resource_requirements', autospec=True,
) as mock_emit_resource_requirements, mock.patch(
'paasta_tools.cli.cmds.spark_run.clusterman_metrics', autospec=True,
):
mock_get_spark_config.return_value = {'spark.cores.max': 5, 'spark.master': 'mesos://spark.master'}
args = mock.MagicMock(cmd='python mrjob_wrapper.py', mrjob=True)
configure_and_run_docker_container(
args=args,
docker_img='fake-registry/fake-service',
instance_config=self.instance_config,
system_paasta_config=self.system_paasta_config,
)
args, kwargs = mock_run_docker_container.call_args
assert kwargs['docker_cmd'] == (
'python mrjob_wrapper.py --spark-master=mesos://spark.master --jobconf spark.cores.max=5'
)
assert mock_emit_resource_requirements.called
def test_suppress_clusterman_metrics_errors(
self,
mock_time,
mock_run_docker_container,
mock_get_spark_config,
mock_get_username,
mock_pick_random_port,
mock_os_path_exists,
mock_get_aws_credentials,
mock_create_spark_config_str,
):
mock_get_aws_credentials.return_value = ('id', 'secret')
with mock.patch(
'paasta_tools.cli.cmds.spark_run.emit_resource_requirements', autospec=True,
) as mock_emit_resource_requirements, mock.patch(
'paasta_tools.cli.cmds.spark_run.clusterman_metrics', autospec=True,
):
mock_emit_resource_requirements.side_effect = Boto3Error
mock_create_spark_config_str.return_value = '--conf spark.cores.max=5'
args = mock.MagicMock(
suppress_clusterman_metrics_errors=False,
cmd='pyspark',
)
with pytest.raises(Boto3Error):
configure_and_run_docker_container(
args=args,
docker_img='fake-registry/fake-service',
instance_config=self.instance_config,
system_paasta_config=self.system_paasta_config,
)
# make sure we don't blow up when this setting is True
args.suppress_clusterman_metrics_errors = True
configure_and_run_docker_container(
args=args,
docker_img='fake-registry/fake-service',
instance_config=self.instance_config,
system_paasta_config=self.system_paasta_config,
)
def test_dont_emit_metrics_for_inappropriate_commands(
self,
mock_time,
mock_run_docker_container,
mock_get_spark_config,
mock_get_username,
mock_pick_random_port,
mock_os_path_exists,
mock_get_aws_credentials,
mock_create_spark_config_str,
):
mock_get_aws_credentials.return_value = ('id', 'secret')
with mock.patch(
'paasta_tools.cli.cmds.spark_run.emit_resource_requirements', autospec=True,
) as mock_emit_resource_requirements, mock.patch(
'paasta_tools.cli.cmds.spark_run.clusterman_metrics', autospec=True,
):
mock_create_spark_config_str.return_value = '--conf spark.cores.max=5'
args = mock.MagicMock(cmd='bash', mrjob=False)
configure_and_run_docker_container(
args=args,
docker_img='fake-registry/fake-service',
instance_config=self.instance_config,
system_paasta_config=self.system_paasta_config,
)
assert not mock_emit_resource_requirements.called
def test_emit_resource_requirements(tmpdir):
spark_config_dict = {
'spark.executor.cores': '2',
'spark.cores.max': '4',
'spark.executor.memory': '4g',
'spark.mesos.executor.memoryOverhead': '555',
'spark.app.name': 'paasta_spark_run_johndoe_2_3',
'spark.mesos.constraints': 'pool:cool-pool\\;other:value',
}
clusterman_yaml_contents = {
'mesos_clusters': {
'anywhere-prod': {
'aws_region': 'us-north-14',
},
},
}
clusterman_yaml_file_path = tmpdir.join('fake_clusterman.yaml')
with open(clusterman_yaml_file_path, 'w') as f:
YAML().dump(clusterman_yaml_contents, f)
with mock.patch(
'paasta_tools.cli.cmds.spark_run.get_clusterman_metrics', autospec=True,
), mock.patch(
'paasta_tools.cli.cmds.spark_run.clusterman_metrics', autospec=True,
) as mock_clusterman_metrics, mock.patch(
'paasta_tools.cli.cmds.spark_run.CLUSTERMAN_YAML_FILE_PATH',
clusterman_yaml_file_path,
autospec=None, # we're replacing this name, so we can't autospec
), mock.patch(
'time.time', return_value=1234, autospec=True,
):
mock_clusterman_metrics.generate_key_with_dimensions.side_effect = lambda name, dims: (
f'{name}|framework_name={dims["framework_name"]},webui_url={dims["webui_url"]}'
)
emit_resource_requirements(spark_config_dict, 'anywhere-prod', 'http://spark.yelp')
mock_clusterman_metrics.ClustermanMetricsBotoClient.assert_called_once_with(
region_name='us-north-14',
app_identifier='cool-pool',
)
metrics_writer = mock_clusterman_metrics.ClustermanMetricsBotoClient.return_value.\
get_writer.return_value.__enter__.return_value
metric_key_template = (
'requested_{resource}|framework_name=paasta_spark_run_johndoe_2_3,webui_url=http://spark.yelp'
)
expected_memory_request = (4 * 1024 + 555) * 2
metrics_writer.send.assert_has_calls(
[
mock.call((metric_key_template.format(resource='cpus'), 1234, 4)),
mock.call((metric_key_template.format(resource='mem'), 1234, expected_memory_request)),
mock.call((metric_key_template.format(resource='disk'), 1234, expected_memory_request)),
],
any_order=True,
)
def test_get_docker_cmd_add_spark_conf_str():
args = mock.Mock(cmd='pyspark -v', mrjob=False)
instance_config = None
spark_conf_str = '--conf spark.app.name=fake_app'
docker_cmd = get_docker_cmd(args, instance_config, spark_conf_str)
assert docker_cmd == 'pyspark --conf spark.app.name=fake_app -v'
def test_get_docker_cmd_other_cmd():
args = mock.Mock(cmd='bash', mrjob=False)
instance_config = None
spark_conf_str = '--conf spark.app.name=fake_app'
assert get_docker_cmd(args, instance_config, spark_conf_str) == 'bash'
def test_get_docker_cmd_mrjob():
args = mock.Mock(cmd='python mrjob_wrapper.py', mrjob=True)
instance_config = None
spark_conf_str = '--jobconf spark.app.name=fake_app'
expected_cmd = 'python mrjob_wrapper.py --jobconf spark.app.name=fake_app'
assert get_docker_cmd(args, instance_config, spark_conf_str) == expected_cmd
def test_load_aws_credentials_from_yaml(tmpdir):
fake_access_key_id = 'fake_access_key_id'
fake_secret_access_key = 'fake_secret_access_key'
yaml_file = tmpdir.join('test.yaml')
yaml_file.write(
f'aws_access_key_id: "{fake_access_key_id}"\n'
f'aws_secret_access_key: "{fake_secret_access_key}"',
)
aws_access_key_id, aws_secret_access_key = load_aws_credentials_from_yaml(yaml_file)
assert aws_access_key_id == fake_access_key_id
assert aws_secret_access_key == fake_secret_access_key
class TestGetAwsCredentials:
@pytest.fixture(autouse=True)
def mock_load_aws_credentials_from_yaml(self):
with mock.patch(
'paasta_tools.cli.cmds.spark_run.load_aws_credentials_from_yaml',
autospec=True,
) as self.mock_load_aws_credentials_from_yaml:
yield
def test_yaml_provided(self):
args = mock.Mock(aws_credentials_yaml='credentials.yaml')
credentials = get_aws_credentials(args)
self.mock_load_aws_credentials_from_yaml.assert_called_once_with('credentials.yaml')
assert credentials == self.mock_load_aws_credentials_from_yaml.return_value
@mock.patch('paasta_tools.cli.cmds.spark_run.os', autospec=True)
@mock.patch('paasta_tools.cli.cmds.spark_run.get_service_aws_credentials_path', autospec=True)
def test_service_provided_no_yaml(self, mock_get_credentials_path, mock_os):
args = mock.Mock(aws_credentials_yaml=None, service='service_name')
mock_os.path.exists.return_value = True
credentials = get_aws_credentials(args)
mock_get_credentials_path.assert_called_once_with(args.service)
self.mock_load_aws_credentials_from_yaml.assert_called_once_with(
mock_get_credentials_path.return_value,
)
assert credentials == self.mock_load_aws_credentials_from_yaml.return_value
@mock.patch('paasta_tools.cli.cmds.spark_run.Session.get_credentials', autospec=True)
def test_use_default_creds(self, mock_get_credentials):
args = mock.Mock(aws_credentials_yaml=None, service=DEFAULT_SERVICE)
mock_get_credentials.return_value = mock.MagicMock(access_key='id', secret_key='secret')
credentials = get_aws_credentials(args)
assert credentials == ('id', 'secret')
@mock.patch('paasta_tools.cli.cmds.spark_run.os', autospec=True)
@mock.patch('paasta_tools.cli.cmds.spark_run.Session.get_credentials', autospec=True)
def test_service_provided_fallback_to_default(self, mock_get_credentials, mock_os):
args = mock.Mock(aws_credentials_yaml=None, service='service_name')
mock_os.path.exists.return_value = False
mock_get_credentials.return_value = mock.MagicMock(access_key='id', secret_key='secret')
credentials = get_aws_credentials(args)
assert credentials == ('id', 'secret') | 0.453262 | 0.137243 |
import logging
import os
def env_bool(name: str) -> bool:
raw_value = os.getenv(name, "")
return raw_value.lower() == "true"
def env_list(name: str) -> list[str]:
raw_value = os.getenv(name, "")
if not raw_value:
return []
return raw_value.split(",")
SILENCED_SYSTEM_CHECKS = []
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env_bool("CHMVH_DEBUG")
SECRET_KEY = os.getenv("CHMVH_SECRET_KEY")
if DEBUG and not SECRET_KEY:
SECRET_KEY = "debug"
ALLOWED_HOSTS = env_list("CHMVH_ALLOWED_HOSTS")
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.messages",
"django.contrib.sessions",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
# Third Party Apps
"adminsortable2",
"captcha",
"rest_framework",
"sass_processor",
"solo",
# Custom Apps
"common",
"configuration",
"contact",
"gallery",
"resources",
"staticpages",
"team",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "chmvh_website.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"common.context_processors.analytics",
"configuration.context_processors.practice_info",
],
},
},
]
WSGI_APPLICATION = "chmvh_website.wsgi.application"
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"HOST": os.getenv("CHMVH_DB_HOST", "localhost"),
"PORT": os.getenv("CHMVH_DB_PORT", "5432"),
"USER": os.getenv("CHMVH_DB_USER", "postgres"),
"PASSWORD": os.getenv("CHMVH_DB_PASSWORD"),
"NAME": os.getenv("CHMVH_DB_NAME", "postgres"),
}
}
if os.getenv("CHMVH_TEST"):
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", # noqa
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", # noqa
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_ROOT = os.getenv('CHMVH_STATIC_ROOT')
STATIC_URL = "/static/"
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
"sass_processor.finders.CssFinder",
]
# Media Files (User Uploaded)
# This is only used for development when we're not uploading files to S3.
MEDIA_ROOT = os.getenv("CHMVH_MEDIA_ROOT", os.path.join(BASE_DIR, "media"))
MEDIA_URL = "/media/"
# HTTPS
if env_bool("CHMVH_HTTPS"):
CSRF_COOKIE_HTTPONLY = True
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_SSL_REDIRECT = True
X_FRAME_OPTIONS = "DENY"
# Email Settings
DEFAULT_FROM_EMAIL = "<EMAIL>"
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
EMAIL_SUBJECT_PREFIX = "[CHMVH Website] "
if os.getenv("CHMVH_EMAIL_USER"):
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_HOST = "smtp.sendgrid.net"
EMAIL_HOST_USER = os.getenv("CHMVH_EMAIL_USER")
EMAIL_HOST_PASSWORD = os.getenv("CHMVH_EMAIL_PASSWORD")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
if os.getenv("CHMVH_ADMIN_NAME"):
ADMINS = ((os.getenv("CHMVH_ADMIN_NAME"), os.getenv("CHMVH_ADMIN_EMAIL")),)
# Google Analytics
GOOGLE_ANALYTICS_ID = os.getenv("CHMVH_GOOGLE_ANALYTICS_ID")
# ReCAPTCHA
if os.getenv("CHMVH_RECAPTCHA_PRIVATE_KEY"):
RECAPTCHA_PRIVATE_KEY = os.getenv("CHMVH_RECAPTCHA_PRIVATE_KEY")
RECAPTCHA_PUBLIC_KEY = os.getenv("CHMVH_RECAPTCHA_PUBLIC_KEY")
else:
NOCAPTCHA = True
SILENCED_SYSTEM_CHECKS.append("captcha.recaptcha_test_key_error")
# Gallery Settings
GALLERY_THUMBNAIL_SIZE = 300, 300
# Django Storages
AWS_S3_ENDPOINT_URL = os.getenv('CHMVH_S3_ENDPOINT_URL')
AWS_S3_REGION_NAME = os.getenv('CHMVH_S3_REGION_NAME')
S3_MEDIA_BUCKET = os.getenv('CHMVH_S3_MEDIA_BUCKET')
S3_STATIC_BUCKET = os.getenv('CHMVH_S3_STATIC_BUCKET')
if S3_MEDIA_BUCKET:
DEFAULT_FILE_STORAGE = 'custom_storage.s3.MediaStorage'
if S3_STATIC_BUCKET:
STATICFILES_STORAGE = 'custom_storage.s3.StaticStorage'
# Config for django-sass-processor
COMPRESS_ROOT = os.getenv("CHMVH_COMPRESS_ROOT")
SASS_PROCESSOR_STORAGE = 'django.core.files.storage.FileSystemStorage'
SASS_PROCESSOR_STORAGE_OPTIONS = {
'location': STATIC_ROOT,
'base_url': STATIC_URL
}
SASS_PROCESSOR_ROOT = os.path.join(BASE_DIR, "static")
# Config for djangorestframework
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.BasicAuthentication",
"rest_framework.authentication.SessionAuthentication",
),
"DEFAULT_PERMISSION_CLASSES": (
"rest_framework.permissions.IsAuthenticated",
),
}
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"standard": {
"format": "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s", # noqa
"datefmt": "%d/%b/%Y %H:%M:%S",
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "standard",
},
"mail_admins": {
"level": "ERROR",
"class": "django.utils.log.AdminEmailHandler",
},
},
"loggers": {
"root": {
"handlers": ["console", "mail_admins"],
"level": logging.INFO,
}
},
} | chmvh_website/chmvh_website/settings.py | import logging
import os
def env_bool(name: str) -> bool:
raw_value = os.getenv(name, "")
return raw_value.lower() == "true"
def env_list(name: str) -> list[str]:
raw_value = os.getenv(name, "")
if not raw_value:
return []
return raw_value.split(",")
SILENCED_SYSTEM_CHECKS = []
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env_bool("CHMVH_DEBUG")
SECRET_KEY = os.getenv("CHMVH_SECRET_KEY")
if DEBUG and not SECRET_KEY:
SECRET_KEY = "debug"
ALLOWED_HOSTS = env_list("CHMVH_ALLOWED_HOSTS")
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.messages",
"django.contrib.sessions",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
# Third Party Apps
"adminsortable2",
"captcha",
"rest_framework",
"sass_processor",
"solo",
# Custom Apps
"common",
"configuration",
"contact",
"gallery",
"resources",
"staticpages",
"team",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "chmvh_website.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"common.context_processors.analytics",
"configuration.context_processors.practice_info",
],
},
},
]
WSGI_APPLICATION = "chmvh_website.wsgi.application"
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"HOST": os.getenv("CHMVH_DB_HOST", "localhost"),
"PORT": os.getenv("CHMVH_DB_PORT", "5432"),
"USER": os.getenv("CHMVH_DB_USER", "postgres"),
"PASSWORD": os.getenv("CHMVH_DB_PASSWORD"),
"NAME": os.getenv("CHMVH_DB_NAME", "postgres"),
}
}
if os.getenv("CHMVH_TEST"):
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", # noqa
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", # noqa
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_ROOT = os.getenv('CHMVH_STATIC_ROOT')
STATIC_URL = "/static/"
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
"sass_processor.finders.CssFinder",
]
# Media Files (User Uploaded)
# This is only used for development when we're not uploading files to S3.
MEDIA_ROOT = os.getenv("CHMVH_MEDIA_ROOT", os.path.join(BASE_DIR, "media"))
MEDIA_URL = "/media/"
# HTTPS
if env_bool("CHMVH_HTTPS"):
CSRF_COOKIE_HTTPONLY = True
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_SSL_REDIRECT = True
X_FRAME_OPTIONS = "DENY"
# Email Settings
DEFAULT_FROM_EMAIL = "<EMAIL>"
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
EMAIL_SUBJECT_PREFIX = "[CHMVH Website] "
if os.getenv("CHMVH_EMAIL_USER"):
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_HOST = "smtp.sendgrid.net"
EMAIL_HOST_USER = os.getenv("CHMVH_EMAIL_USER")
EMAIL_HOST_PASSWORD = os.getenv("CHMVH_EMAIL_PASSWORD")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
if os.getenv("CHMVH_ADMIN_NAME"):
ADMINS = ((os.getenv("CHMVH_ADMIN_NAME"), os.getenv("CHMVH_ADMIN_EMAIL")),)
# Google Analytics
GOOGLE_ANALYTICS_ID = os.getenv("CHMVH_GOOGLE_ANALYTICS_ID")
# ReCAPTCHA
if os.getenv("CHMVH_RECAPTCHA_PRIVATE_KEY"):
RECAPTCHA_PRIVATE_KEY = os.getenv("CHMVH_RECAPTCHA_PRIVATE_KEY")
RECAPTCHA_PUBLIC_KEY = os.getenv("CHMVH_RECAPTCHA_PUBLIC_KEY")
else:
NOCAPTCHA = True
SILENCED_SYSTEM_CHECKS.append("captcha.recaptcha_test_key_error")
# Gallery Settings
GALLERY_THUMBNAIL_SIZE = 300, 300
# Django Storages
AWS_S3_ENDPOINT_URL = os.getenv('CHMVH_S3_ENDPOINT_URL')
AWS_S3_REGION_NAME = os.getenv('CHMVH_S3_REGION_NAME')
S3_MEDIA_BUCKET = os.getenv('CHMVH_S3_MEDIA_BUCKET')
S3_STATIC_BUCKET = os.getenv('CHMVH_S3_STATIC_BUCKET')
if S3_MEDIA_BUCKET:
DEFAULT_FILE_STORAGE = 'custom_storage.s3.MediaStorage'
if S3_STATIC_BUCKET:
STATICFILES_STORAGE = 'custom_storage.s3.StaticStorage'
# Config for django-sass-processor
COMPRESS_ROOT = os.getenv("CHMVH_COMPRESS_ROOT")
SASS_PROCESSOR_STORAGE = 'django.core.files.storage.FileSystemStorage'
SASS_PROCESSOR_STORAGE_OPTIONS = {
'location': STATIC_ROOT,
'base_url': STATIC_URL
}
SASS_PROCESSOR_ROOT = os.path.join(BASE_DIR, "static")
# Config for djangorestframework
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.BasicAuthentication",
"rest_framework.authentication.SessionAuthentication",
),
"DEFAULT_PERMISSION_CLASSES": (
"rest_framework.permissions.IsAuthenticated",
),
}
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"standard": {
"format": "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s", # noqa
"datefmt": "%d/%b/%Y %H:%M:%S",
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "standard",
},
"mail_admins": {
"level": "ERROR",
"class": "django.utils.log.AdminEmailHandler",
},
},
"loggers": {
"root": {
"handlers": ["console", "mail_admins"],
"level": logging.INFO,
}
},
} | 0.405684 | 0.131145 |
from tensorflow.keras.models import Sequential
from tensorflow.keras import backend as K
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import RandomRotation
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Resizing
from tensorflow.keras.layers import Rescaling
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
import pickle
import numpy as np
import matplotlib.pyplot as plt
import copy
import random
from scipy import ndimage
import cv2
# preprocessing
"""
def SoftenNoise(arrayOfPics):
x,y,z = arrayOfPics.shape
c = copy.deepcopy(arrayOfPics)
c = np.float32(c)
for i in range(x):
c[i] = cv2.medianBlur(c[i],3)
return c
"""
def SoftenNoise(arrayOfPics):
x,y,z = arrayOfPics.shape
c = copy.deepcopy(arrayOfPics)
for i in range(x):
c[i] = ndimage.gaussian_filter(c[i], 0.69)
return c
def Normalize(arrayOfPics):
x,y,z = arrayOfPics.shape
c = copy.deepcopy(arrayOfPics)
for i in range(x):
for j in range(y):
for k in range(z):
if c[i][j][k] > 255:
c[i][j][k] = 1.0
else:
raw = c[i][j][k]
c[i][j][k] = raw/255
return c
def process_img(arrayOfPics):
arrayOfPics = SoftenNoise(arrayOfPics)
arrayOfPics = Normalize(arrayOfPics)
return arrayOfPics
"""
with open("/content/drive/MyDrive/551A3/images_l.pkl", 'rb') as f:
training_data = pickle.load(f)
with open("/content/drive/MyDrive/551A3/labels_l.pkl", 'rb') as f:
training_label = pickle.load(f)
"""
# load labeled data
with open("images_l.pkl", 'rb') as f:
training_data = pickle.load(f)
with open("labels_l.pkl", 'rb') as f:
training_label = pickle.load(f)
# one-hot encoding for labels
all_class = {}
def encode(x):
where1s = np.where(x == 1)
# print(where1s[0][0])
index = where1s[0][0] * 26 + (where1s[0][1] - 10)
result = np.zeros(260, dtype=np.int_)
result[index] = 1
if index not in all_class:
all_class[index] = result
return result
def decode(x):
where1s = np.where(x == 1)
# print(where1s[0][0])
index1 = (where1s[0][0]) // 26
index2 = (where1s[0][0]) % 26 + 10
result = np.zeros(36)
result[index1] = 1
result[index2] = 1
return result
def process_label(arrayOfLabels):
l = []
x = arrayOfLabels.shape[0]
for i in range(x):
l.append(encode(arrayOfLabels[i]))
return np.array(l)
training_label = process_label(training_label)
class SmallerVGGNet:
@staticmethod
def build(width, height, depth, classes, finalAct="sigmoid"):
model = Sequential()
inputShape = (height, width, depth)
chanDim = -1
if K.image_data_format() == "channels_first":
inputShape = (depth, height, width)
chanDim = 1
# CONV 32 => RELU => POOL
model.add(Conv2D(32, (3, 3), padding="same", input_shape=inputShape))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Dropout(0.25))
# (CONV 64 => RELU) * 2 => POOL
model.add(Conv2D(64, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(64, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# (CONV 128 => RELU) * 2 => POOL
model.add(Conv2D(128, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(128, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# (CONV 256 => RELU) * 2 => POOL
model.add(Conv2D(256, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(256, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# (CONV 512 => RELU) * 2 => POOL
model.add(Conv2D(512, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(512, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# FC 4096 => ReLU
model.add(Flatten())
model.add(Dense(4096))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
# FC 1024 => ReLU
model.add(Flatten())
model.add(Dense(1024))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
# to 260
model.add(Dense(classes))
model.add(Activation(finalAct))
return model
# step 1: train a model with labeled data
#training_data = process_img(training_data)
training_data_step1 = training_data.reshape(-1,56,56,1)
training_label_step1 = copy.deepcopy(training_label)
EPOCHS = 150
LR = 5e-4
BATCH_SIZE = 16
IMAGE_DIMS = (56, 56, 1)
model_1=SmallerVGGNet.build(width=56,height=56,depth=1,classes=260)
opt = Adam(lr=LR, decay=LR / (2*EPOCHS))
model_1.compile(loss="binary_crossentropy", optimizer=opt,metrics=["accuracy"])
H1 = model_1.fit(x=training_data_step1, y=training_label_step1, epochs=EPOCHS, validation_split=0.2, batch_size=BATCH_SIZE)
plt.close('all')
plt.plot(H1.history['accuracy'])
plt.plot(H1.history['val_accuracy'])
plt.title('model accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend(['training','validation'], loc='lower right')
plt.savefig('accuracy_step1')
plt.show()
plt.close('all')
plt.plot(H1.history['loss'])
plt.plot(H1.history['val_loss'])
plt.title('model loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(['training','validation'], loc='upper right')
plt.savefig('loss_step1')
plt.show()
model_1.save("step1")
with open("images_test.pkl", 'rb') as f:
test_data = pickle.load(f)
#test_data = process_img(test_data)
test_data = test_data.reshape(-1,56,56,1)
predictions1 = model_1.predict(test_data)
import pandas as pd
prediction = []
def decode(x):
where1s = x.argmax()
# print(where1s[0][0])
index1 = where1s // 26
index2 = where1s % 26 + 10
result = np.zeros(36)
result[index1] = 1
result[index2] = 1
return result,index1,index2
for data in predictions1:
decoded_pred,index1,index2 = decode(data)
prediction.append(''.join(["0" if i != index1 and i != index2 else "1" for i in range(36)]))
result = {"# Id": np.arange(15000), 'Category': prediction}
df = pd.DataFrame(data=result, columns=['# Id', 'Category'])
df.to_csv('results_1126_step1_1.csv', header=True, index=False)
# '''
# '''
from tensorflow import keras
pre_model = keras.models.load_model("step1")
# '''
with open("images_ul.pkl", 'rb') as f:
unlabeled_data = pickle.load(f)
#unlabeled_data_1 = process_img(unlabeled_data)
unlabeled_data_1 = unlabeled_data.reshape(-1,56,56,1)
unlabeled_labels = pre_model.predict(unlabeled_data_1)
unlabeled_labels = (unlabeled_labels == unlabeled_labels.max(axis=1, keepdims=1)).astype(int)
print(unlabeled_labels[0])
# '''
# training_data = np.concatenate((training_data[:24000], unlabeled_data), axis=0)
validation_data = np.copy(training_data[24000:30000])
print(validation_data.shape)
validation_label = np.copy(training_label[24000:30000])
training_data_step2 = np.concatenate((training_data[:24000], unlabeled_data), axis=0)
training_label_step2 = np.concatenate((training_label[:24000], unlabeled_labels), axis=0)
# training_data = np.copy(training_data[:24000])
# training_label = np.copy(training_label[:24000])
#training_data = process_img(training_data)
#validation_data = process_img(validation_data)
#training_data=training_data.reshape(-1,56,56,1)
training_data_step2 = training_data_step2.reshape(-1,56,56,1)
validation_data = validation_data.reshape(-1,56,56,1)
print(training_data_step2.shape)
print(validation_data.shape)
#training_set = tf.data.Dataset.from_tensor_slices((training_data, training_label))
#print(training_data[1])
#print(training_label[1])
validation_set = tf.data.Dataset.from_tensor_slices((validation_data, validation_label))
#print(validation_label[1])
IMG_SIZE = 56
resize_and_rescale = tf.keras.Sequential([
Resizing(IMG_SIZE, IMG_SIZE),
Rescaling(1./ training_data.max())
])
data_augmentation = tf.keras.Sequential([
RandomRotation(0.2),
])
"""
from google.colab import drive
drive.mount('/content/drive')
"""
EPOCHS = 100
LR = 5e-4
BATCH_SIZE = 16
IMAGE_DIMS = (56, 56, 1)
model_2=SmallerVGGNet.build(width=56,height=56,depth=1,classes=260)
opt = Adam(lr=LR, decay=LR /(2* EPOCHS))
model_2.compile(loss="binary_crossentropy", optimizer=opt,metrics=["accuracy"])
#training_data = training_data.reshape(-1, 56, 56, 1)
#validation_data = validation_data.reshape(-1, 56, 56, 1)
# H = model.fit(x=training_data,y=training_label,epochs=EPOCHS,validation_split=0.2,batch_size=BATCH_SIZE)
H2 = model_2.fit(x=training_data_step2, y=training_label_step2,epochs=EPOCHS,validation_data=(validation_data,validation_label),batch_size=BATCH_SIZE)
'''
H = model.fit(datagen.flow(training_data, training_label, batch_size=BATCH_SIZE,
subset='training'),
validation_data=datagen.flow(training_data, training_label,
batch_size=BATCH_SIZE, subset='validation'), epochs=EPOCHS)
'''
model_2.save("step2")
predictions2 = model_2.predict(test_data)
print(predictions2[0])
prediction = []
for data in predictions2:
decoded_pred,index1,index2 = decode(data)
prediction.append(''.join(["0" if i != index1 and i != index2 else "1" for i in range(36)]))
result = {"# Id": np.arange(15000), 'Category': prediction}
df = pd.DataFrame(data=result, columns=['# Id', 'Category'])
df.to_csv('results_1126_step2_1.csv', header=True, index=False)
# plot accuracy and loss to evaluate the learning curve
plt.close('all')
plt.plot(H2.history['accuracy'])
plt.plot(H2.history['val_accuracy'])
plt.title('model accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend(['training','validation'], loc='lower right')
plt.savefig('accuracy_step2')
plt.show()
plt.close('all')
plt.plot(H2.history['loss'])
plt.plot(H2.history['val_loss'])
plt.title('model loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(['training','validation'], loc='upper right')
plt.savefig('loss_step2')
plt.show() | CNN.py | from tensorflow.keras.models import Sequential
from tensorflow.keras import backend as K
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import RandomRotation
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Resizing
from tensorflow.keras.layers import Rescaling
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
import pickle
import numpy as np
import matplotlib.pyplot as plt
import copy
import random
from scipy import ndimage
import cv2
# preprocessing
"""
def SoftenNoise(arrayOfPics):
x,y,z = arrayOfPics.shape
c = copy.deepcopy(arrayOfPics)
c = np.float32(c)
for i in range(x):
c[i] = cv2.medianBlur(c[i],3)
return c
"""
def SoftenNoise(arrayOfPics):
x,y,z = arrayOfPics.shape
c = copy.deepcopy(arrayOfPics)
for i in range(x):
c[i] = ndimage.gaussian_filter(c[i], 0.69)
return c
def Normalize(arrayOfPics):
x,y,z = arrayOfPics.shape
c = copy.deepcopy(arrayOfPics)
for i in range(x):
for j in range(y):
for k in range(z):
if c[i][j][k] > 255:
c[i][j][k] = 1.0
else:
raw = c[i][j][k]
c[i][j][k] = raw/255
return c
def process_img(arrayOfPics):
arrayOfPics = SoftenNoise(arrayOfPics)
arrayOfPics = Normalize(arrayOfPics)
return arrayOfPics
"""
with open("/content/drive/MyDrive/551A3/images_l.pkl", 'rb') as f:
training_data = pickle.load(f)
with open("/content/drive/MyDrive/551A3/labels_l.pkl", 'rb') as f:
training_label = pickle.load(f)
"""
# load labeled data
with open("images_l.pkl", 'rb') as f:
training_data = pickle.load(f)
with open("labels_l.pkl", 'rb') as f:
training_label = pickle.load(f)
# one-hot encoding for labels
all_class = {}
def encode(x):
where1s = np.where(x == 1)
# print(where1s[0][0])
index = where1s[0][0] * 26 + (where1s[0][1] - 10)
result = np.zeros(260, dtype=np.int_)
result[index] = 1
if index not in all_class:
all_class[index] = result
return result
def decode(x):
where1s = np.where(x == 1)
# print(where1s[0][0])
index1 = (where1s[0][0]) // 26
index2 = (where1s[0][0]) % 26 + 10
result = np.zeros(36)
result[index1] = 1
result[index2] = 1
return result
def process_label(arrayOfLabels):
l = []
x = arrayOfLabels.shape[0]
for i in range(x):
l.append(encode(arrayOfLabels[i]))
return np.array(l)
training_label = process_label(training_label)
class SmallerVGGNet:
@staticmethod
def build(width, height, depth, classes, finalAct="sigmoid"):
model = Sequential()
inputShape = (height, width, depth)
chanDim = -1
if K.image_data_format() == "channels_first":
inputShape = (depth, height, width)
chanDim = 1
# CONV 32 => RELU => POOL
model.add(Conv2D(32, (3, 3), padding="same", input_shape=inputShape))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Dropout(0.25))
# (CONV 64 => RELU) * 2 => POOL
model.add(Conv2D(64, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(64, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# (CONV 128 => RELU) * 2 => POOL
model.add(Conv2D(128, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(128, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# (CONV 256 => RELU) * 2 => POOL
model.add(Conv2D(256, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(256, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# (CONV 512 => RELU) * 2 => POOL
model.add(Conv2D(512, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(512, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# FC 4096 => ReLU
model.add(Flatten())
model.add(Dense(4096))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
# FC 1024 => ReLU
model.add(Flatten())
model.add(Dense(1024))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
# to 260
model.add(Dense(classes))
model.add(Activation(finalAct))
return model
# step 1: train a model with labeled data
#training_data = process_img(training_data)
training_data_step1 = training_data.reshape(-1,56,56,1)
training_label_step1 = copy.deepcopy(training_label)
EPOCHS = 150
LR = 5e-4
BATCH_SIZE = 16
IMAGE_DIMS = (56, 56, 1)
model_1=SmallerVGGNet.build(width=56,height=56,depth=1,classes=260)
opt = Adam(lr=LR, decay=LR / (2*EPOCHS))
model_1.compile(loss="binary_crossentropy", optimizer=opt,metrics=["accuracy"])
H1 = model_1.fit(x=training_data_step1, y=training_label_step1, epochs=EPOCHS, validation_split=0.2, batch_size=BATCH_SIZE)
plt.close('all')
plt.plot(H1.history['accuracy'])
plt.plot(H1.history['val_accuracy'])
plt.title('model accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend(['training','validation'], loc='lower right')
plt.savefig('accuracy_step1')
plt.show()
plt.close('all')
plt.plot(H1.history['loss'])
plt.plot(H1.history['val_loss'])
plt.title('model loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(['training','validation'], loc='upper right')
plt.savefig('loss_step1')
plt.show()
model_1.save("step1")
with open("images_test.pkl", 'rb') as f:
test_data = pickle.load(f)
#test_data = process_img(test_data)
test_data = test_data.reshape(-1,56,56,1)
predictions1 = model_1.predict(test_data)
import pandas as pd
prediction = []
def decode(x):
where1s = x.argmax()
# print(where1s[0][0])
index1 = where1s // 26
index2 = where1s % 26 + 10
result = np.zeros(36)
result[index1] = 1
result[index2] = 1
return result,index1,index2
for data in predictions1:
decoded_pred,index1,index2 = decode(data)
prediction.append(''.join(["0" if i != index1 and i != index2 else "1" for i in range(36)]))
result = {"# Id": np.arange(15000), 'Category': prediction}
df = pd.DataFrame(data=result, columns=['# Id', 'Category'])
df.to_csv('results_1126_step1_1.csv', header=True, index=False)
# '''
# '''
from tensorflow import keras
pre_model = keras.models.load_model("step1")
# '''
with open("images_ul.pkl", 'rb') as f:
unlabeled_data = pickle.load(f)
#unlabeled_data_1 = process_img(unlabeled_data)
unlabeled_data_1 = unlabeled_data.reshape(-1,56,56,1)
unlabeled_labels = pre_model.predict(unlabeled_data_1)
unlabeled_labels = (unlabeled_labels == unlabeled_labels.max(axis=1, keepdims=1)).astype(int)
print(unlabeled_labels[0])
# '''
# training_data = np.concatenate((training_data[:24000], unlabeled_data), axis=0)
validation_data = np.copy(training_data[24000:30000])
print(validation_data.shape)
validation_label = np.copy(training_label[24000:30000])
training_data_step2 = np.concatenate((training_data[:24000], unlabeled_data), axis=0)
training_label_step2 = np.concatenate((training_label[:24000], unlabeled_labels), axis=0)
# training_data = np.copy(training_data[:24000])
# training_label = np.copy(training_label[:24000])
#training_data = process_img(training_data)
#validation_data = process_img(validation_data)
#training_data=training_data.reshape(-1,56,56,1)
training_data_step2 = training_data_step2.reshape(-1,56,56,1)
validation_data = validation_data.reshape(-1,56,56,1)
print(training_data_step2.shape)
print(validation_data.shape)
#training_set = tf.data.Dataset.from_tensor_slices((training_data, training_label))
#print(training_data[1])
#print(training_label[1])
validation_set = tf.data.Dataset.from_tensor_slices((validation_data, validation_label))
#print(validation_label[1])
IMG_SIZE = 56
resize_and_rescale = tf.keras.Sequential([
Resizing(IMG_SIZE, IMG_SIZE),
Rescaling(1./ training_data.max())
])
data_augmentation = tf.keras.Sequential([
RandomRotation(0.2),
])
"""
from google.colab import drive
drive.mount('/content/drive')
"""
EPOCHS = 100
LR = 5e-4
BATCH_SIZE = 16
IMAGE_DIMS = (56, 56, 1)
model_2=SmallerVGGNet.build(width=56,height=56,depth=1,classes=260)
opt = Adam(lr=LR, decay=LR /(2* EPOCHS))
model_2.compile(loss="binary_crossentropy", optimizer=opt,metrics=["accuracy"])
#training_data = training_data.reshape(-1, 56, 56, 1)
#validation_data = validation_data.reshape(-1, 56, 56, 1)
# H = model.fit(x=training_data,y=training_label,epochs=EPOCHS,validation_split=0.2,batch_size=BATCH_SIZE)
H2 = model_2.fit(x=training_data_step2, y=training_label_step2,epochs=EPOCHS,validation_data=(validation_data,validation_label),batch_size=BATCH_SIZE)
'''
H = model.fit(datagen.flow(training_data, training_label, batch_size=BATCH_SIZE,
subset='training'),
validation_data=datagen.flow(training_data, training_label,
batch_size=BATCH_SIZE, subset='validation'), epochs=EPOCHS)
'''
model_2.save("step2")
predictions2 = model_2.predict(test_data)
print(predictions2[0])
prediction = []
for data in predictions2:
decoded_pred,index1,index2 = decode(data)
prediction.append(''.join(["0" if i != index1 and i != index2 else "1" for i in range(36)]))
result = {"# Id": np.arange(15000), 'Category': prediction}
df = pd.DataFrame(data=result, columns=['# Id', 'Category'])
df.to_csv('results_1126_step2_1.csv', header=True, index=False)
# plot accuracy and loss to evaluate the learning curve
plt.close('all')
plt.plot(H2.history['accuracy'])
plt.plot(H2.history['val_accuracy'])
plt.title('model accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend(['training','validation'], loc='lower right')
plt.savefig('accuracy_step2')
plt.show()
plt.close('all')
plt.plot(H2.history['loss'])
plt.plot(H2.history['val_loss'])
plt.title('model loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(['training','validation'], loc='upper right')
plt.savefig('loss_step2')
plt.show() | 0.564098 | 0.511595 |
import MySQLdb as mysql
import os
import jieba
from wordcloud import WordCloud, ImageColorGenerator
from concurrent.futures import ThreadPoolExecutor as tpe
from matplotlib import pyplot as plt
from util import PROJECT_ABS_PATH
from scipy.misc import imread
import time
custom_dictionary = ["韩国人", "中国人", "第三世界", "死宅", "生是中国人","厉害了我的哥", "死妈", "三哥", "开挂", "手抓饭", "阿三", "印度狗", "妈逼", "不干净","不卫生",
"啊三", "印度阿三", "恒河水", "好人一生平安", "印度人", "狗逼", "找骂", "死是中国魂", "韩国狗", "狗韩国",
"天团", "朝鲜狗", "韩国猪", "猪韩国", "吃狗", "南朝鲜", "大寒冥国", "棒粉" , "小日本", "日本狗", "日本鬼子", "本子", "鬼子", "黑鬼", "黑哥哥",
"种族天赋", "带感", "美黑", "白屁股", "黑屁股", "头脑简单", "四肢发达", "黑人天赋", "哈韩", "哈日", "广州黑人", "民族主义", "种族主义"]
filters = set([
"是不是", "表白", "我", "都", "这个", "这样", "那个", "这么", "还是", "还", "过", "跟", "谁", "说", "觉得", "要", "被", "自己",
"能", "给", "笑", "知道", "着", "真的", "看", "的", "现在", "问题", "为什么", "一个", "没", "比", "来", "有", "是", "把", "打",
"才", "很", "小", "对", "好", "喜欢", "她", "太", "大", "多", "在", "啊", "哈", "和", "呢", "听", "吧", "吗", "吃", "又", "去",
"到", "像", "做", "你", "会", "他", "人", "了", "也", "么", "个", "不", "上", "没有", "所以", "我们", "感觉", "感觉",
"怎么", "弹幕", "就是", "好看", "好吃", "回复", "你们", "但是", "他们", "什么", "不是", "一样", "可以", "时候" , "不要" , "因为" ,
"还有" , "前面" , "不会" , "那么" , "楼主" , "看到" , "这是" , "应该" , "好像" , "这种" , "视频" , "出来" , "一下" , "东西" ,
"不能" , "厉害" , "已经" , "其实" , "人家" , "很多" , "可能" , "一直" , "好听" , "有点" , "哈哈" , "声音" , "如果" , "这里" , "大家" ,
"只是" , "表示" , "只有" , "以为" , "不错" , "别人" , "承包" , "这些" , "开始" , "多少" , "两个" , "真是" , "看看" , "一点",
"就" ,"这" ,"想" ,"那" ,"最" ,"用" ,"为" ,"叫" ,"让" ,"呀" ,"真" ,"得" ,"里" ,"啦" ,"啥" ,"一" ,"哦" ,"但" ,"走" ,"更" ,"话" ,
"买" ,"别" ,"再" ,"挺" ,"年" ,"并" ,"完" ,"只" ,"嘛" ,"请" ,"下" ,"哇" ,"歌" ,"等" ,"拿" ,"超" ,"玩" ,"们" ,"点" ,"钱" ,"前" ,
"脸" ,"快" ,"懂" ,"高" ,"老" ,"当" ,"黑" ,"问", "超级" ,"比较" ,"看过" ,"不过" ,"地方" ,"第一" ,"的话" ,"看着" ,"辛苦" ,"特别" ,
"确实" ,"不行" ,"需要" ,"然后" ,"哪里" ,"老师" ,"一定" ,"最后" ,"以前" ,"这句" ,"突然" ,"而且" ,"直接" ,"首歌" ,"居然" ,"卧槽" ,
"东东" ,"虽然" ,"好多" ,"有人" ,"说话" ,"一次" ,"高能" ,"好好" ,"肯定" ,"为了" ,"衣服" ,"希望" ,"那些" ,"我家" ,"翻译" ,"发现" ,
"一口" ,"里面" ,"孩子" ,"几个" ,"本来" ,"字幕" , "国家", "喜欢","以后" ,"前方" ,"而已" ,"认识" ,"可是" ,"不了" ,"只能" ,"之前" ,"完全" ,"每次" ,
"意思" ,"名字" ,"有些" ,"一些" ,"后面" ,"其他" ,"今天" ,"终于" ,"不用" ,"回来" ,"疯狂", "嘴" ,"国" ,"日" ,"见" ,"连" ,"咋" ,"字" ,
"月" ,"靠" ,"美" ,"先" ,"开" ,"阿" ,"干" ,"手" ,"帮" ,"长" ,"号" ,"之" ,"学" ,"卖" ,"跑" ,"甜" ,"时" ,"泫" ,"饭" ,"它" ,"家" ,"写" ,
"讲" ,"主" ,"路" ,"发" ,"诶" ,"白" ,"行" ,"丶" ,"越" ,"少" ,"李" ,"嗯" ,"哎" ,"该" ,"抱" ,"算" ,"新" ,"地" ,"而" ,"搞" ,"后" ,"从" ,"与" ,
"事" ,"站" ,"带" ,"出" ,"找" ,"放", "至少" ,"哪个" ,"评论" ,"眼睛" ,"变成" ,"注意" ,"所有" ,"干嘛" ,"一天" ,"不同" ,"大爷" ,"呵呵" ,"情况" ,"小米" ,
"有没有" ,"不够" ,"操作" ,"到底" ,"原因" ,"标题" ,"真正" ,"全是" ,"重要" ,"还好", "差不多", "生日快乐", "谢谢", "一般", "起来", "不好",
"加油", "选择", "支持", "当然", "毕竟", "或者", "我要", "成功", "技术", "原来", "帖子", "最好", "过来", "只要", "记得", "电视", "不到",
"正常", "等等", "告诉", "非常", "之后", "准备", "基本", "封面", "上海", "不想", "要是", "小哥", "每天", "系列", "大概", "十五", "容易",
"唱", "由", "加", "已", "以", "无", "贴"
])
class CountWords:
def __init__(self, database, table, country):
self.frequency = dict()
self.file_names = list()
self.current_country = country
self.thread_pool_size = 8
self.is_frequency_sorted = False
self.var_names = ["word", "frequency"]
with open("/Users/Excited/localmysqlrootssh.txt", "r")as f:
local_info = f.readlines() #host, username, passwd, port
local_info = list(map(str.strip, local_info))
try:
self.connection = mysql.connect(
host=local_info[0],
user=local_info[1],
passwd=local_info[2],
db=database,
port=int(local_info[3]),
charset="utf8"
)
except mysql.Error as e:
print("Error: %s" % e)
self.cursor = self.connection.cursor()
self.table = table
def filter_frequency_with(self, target_filter):
for item in target_filter:
if self.frequency.get(item, -1) != -1:
self.frequency.pop(item)
def add_dictionary_from(self, target_dict):
for item in target_dict:
jieba.add_word(item, 3)
def get_all_data_file_name(self):
abs_path = "/Users/Excited/PycharmProjects/bias-comments-mining/data/%s/"%self.current_country
for parent_file_name in os.walk(abs_path):
for child_file_name in parent_file_name[-1]:
if child_file_name[-4:] == ".txt":
self.file_names.append(parent_file_name[0] + child_file_name)
print("found %d files in total"%len(self.file_names))
def read_from_file_and_count(self):
def _read_from_file_and_count(file_name):
with open(file_name, 'r') as f:
lines = f.readlines()
if len(lines) < 10:
return
for line in lines:
if not isinstance(line, str) or len(line) < 4 or len(line) > 500:
continue
vline = self.validate(line)
splited_words = [item for item in jieba.cut(vline)]
for splited_word in splited_words:
self.frequency[splited_word] = self.frequency.get(splited_word, 0) + 1
self.file_names.remove(file_name)
print("finish counting %s" % file_name)
executor = tpe(self.thread_pool_size)
executor.map(_read_from_file_and_count, self.file_names)
executor.shutdown(wait=True)
def validate(self, line):
length = len(line)
mark_list = list()
frontIndex = 0
endIndex = 1
while True:
if endIndex >= length and endIndex - frontIndex < 3:
break
if endIndex - frontIndex < 3:
endIndex += 1
continue
if line[frontIndex] == line[frontIndex + 1] == line[frontIndex + 2]:
currentCharacter = line[frontIndex]
frontIndex += 1
while frontIndex < length and line[frontIndex] == currentCharacter:
mark_list.append(frontIndex)
frontIndex += 1
endIndex = frontIndex + 1
else:
frontIndex += 1
if len(mark_list) == 0:
return line.strip()
unmarked = [i for i in range(length) if i not in mark_list]
return "".join([line[i] for i in unmarked]).strip()
def make_wordcloud(self, image_path):
back_coloring_path = PROJECT_ABS_PATH + image_path
font_path = PROJECT_ABS_PATH + "/bin/msyh.ttf"
saving_image_modify_by_shape = PROJECT_ABS_PATH + "/image/" + str(int(time.time())) + "_by_shape.png"
saving_image_modify_by_all = PROJECT_ABS_PATH + "/image/" + str(int(time.time())) + "_by_all.png"
back_coloring = imread(back_coloring_path)
wc = WordCloud(
font_path=font_path,
background_color="white",
max_words=300,
mask=back_coloring,
max_font_size=250,
random_state=42,
width=1080,
height=2048,
margin=2
)
wc.generate_from_frequencies(self.frequency)
image_colors = ImageColorGenerator(back_coloring)
plt.imshow(wc.recolor(color_func=image_colors))
plt.axis = "off"
plt.figure()
plt.imshow(back_coloring, cmap=plt.get_cmap('gray'))
plt.axis = "off"
plt.show()
#wc.to_file(saving_image_modify_by_all)
def _sort_frequency(self):
self.frequency = sorted(self.frequency.items(), key=lambda x: x[1], reverse=True)
self.is_frequency_sorted = True
def save_frequency_to_sql(self):
if not self.is_frequency_sorted:
self._sort_frequency()
for pair in self.frequency:
self.addRow(pair)
def closeConnection(self):
if self.connection:
self.connection.close()
def __del__(self):
self.closeConnection()
def getFormat(self):
self.cursor.execute("desc %s"%self.table)
return self.cursor.fetchall()
def execute(self, command):
assert isinstance(command, str)
self.cursor.execute(command)
def india_treatment(self):
modify_word = {"阿三": 10000, "种姓": 5000, "厕所":3000, "强奸": 4391, "素质": 3223, "印度":-10000, "中国":-10000}
for key, value in modify_word.items():
if self.frequency.get(key, -1) != -1:
self.frequency[key] += value
else:
self.frequency[key] = value
def korea_treatment(self):
modify_word = {"明星": 5000, "韩剧": 4000, "哥哥": 2000, "韩国": -40000, "中国": -20000}
for key, value in modify_word.items():
if self.frequency.get(key, -1) != -1:
self.frequency[key] += value
else:
self.frequency[key] = value
if self.frequency.get("黑人", -1) != -1:
self.frequency.pop("黑人")
def japan_treatment(self):
modify_word = {"日本": -20141, "日本人": 14982, "日语":5000, "鬼子": 5426, "本子": 3864, "动漫": 6000, "留学": 3000, "小姐姐": 3000, "中国":-10000, "宅": 3236}
for key, value in modify_word.items():
if self.frequency.get(key, -1) != -1:
self.frequency[key] += value
else:
self.frequency[key] = value
def black_treatment(self):
for key, value in self.frequency.items():
self.frequency[key] += value * 1.3
def getOne(self, with_label = False):
try:
res = self.cursor.fetchone()
if not with_label:
return res
res_dict = dict(zip([item[0] for item in self.cursor.description], res))
return res_dict
except mysql.Error as e:
print("error: %s"%e)
self.connection.rollback()
except:
print("error")
self.connection.rollback()
def getAll(self, with_label = False):
try:
res = self.cursor.fetchall()
if not with_label:
return res
res_list = list()
for row in res:
res_list.append(dict(zip([item[0] for item in self.cursor.description], row)))
return res_list
except mysql.Error as e:
print("error: %s"%e)
self.connection.rollback()
except:
print("error")
self.connection.rollback()
def addRow(self, data):
try:
command = "insert into " + self.table + "(" + ", ".join(["`" + str(item) + "`" for item in self.var_names]) + ")"
command += "VALUE(" + ", ".join(['"' + str(item) + '"' for item in data]) +");"
self.execute(command)
self.connection.commit()
except mysql.Error as e:
print("error: %s"%e)
self.connection.rollback()
except:
print("error")
self.connection.rollback() | analyse/count.py | import MySQLdb as mysql
import os
import jieba
from wordcloud import WordCloud, ImageColorGenerator
from concurrent.futures import ThreadPoolExecutor as tpe
from matplotlib import pyplot as plt
from util import PROJECT_ABS_PATH
from scipy.misc import imread
import time
custom_dictionary = ["韩国人", "中国人", "第三世界", "死宅", "生是中国人","厉害了我的哥", "死妈", "三哥", "开挂", "手抓饭", "阿三", "印度狗", "妈逼", "不干净","不卫生",
"啊三", "印度阿三", "恒河水", "好人一生平安", "印度人", "狗逼", "找骂", "死是中国魂", "韩国狗", "狗韩国",
"天团", "朝鲜狗", "韩国猪", "猪韩国", "吃狗", "南朝鲜", "大寒冥国", "棒粉" , "小日本", "日本狗", "日本鬼子", "本子", "鬼子", "黑鬼", "黑哥哥",
"种族天赋", "带感", "美黑", "白屁股", "黑屁股", "头脑简单", "四肢发达", "黑人天赋", "哈韩", "哈日", "广州黑人", "民族主义", "种族主义"]
filters = set([
"是不是", "表白", "我", "都", "这个", "这样", "那个", "这么", "还是", "还", "过", "跟", "谁", "说", "觉得", "要", "被", "自己",
"能", "给", "笑", "知道", "着", "真的", "看", "的", "现在", "问题", "为什么", "一个", "没", "比", "来", "有", "是", "把", "打",
"才", "很", "小", "对", "好", "喜欢", "她", "太", "大", "多", "在", "啊", "哈", "和", "呢", "听", "吧", "吗", "吃", "又", "去",
"到", "像", "做", "你", "会", "他", "人", "了", "也", "么", "个", "不", "上", "没有", "所以", "我们", "感觉", "感觉",
"怎么", "弹幕", "就是", "好看", "好吃", "回复", "你们", "但是", "他们", "什么", "不是", "一样", "可以", "时候" , "不要" , "因为" ,
"还有" , "前面" , "不会" , "那么" , "楼主" , "看到" , "这是" , "应该" , "好像" , "这种" , "视频" , "出来" , "一下" , "东西" ,
"不能" , "厉害" , "已经" , "其实" , "人家" , "很多" , "可能" , "一直" , "好听" , "有点" , "哈哈" , "声音" , "如果" , "这里" , "大家" ,
"只是" , "表示" , "只有" , "以为" , "不错" , "别人" , "承包" , "这些" , "开始" , "多少" , "两个" , "真是" , "看看" , "一点",
"就" ,"这" ,"想" ,"那" ,"最" ,"用" ,"为" ,"叫" ,"让" ,"呀" ,"真" ,"得" ,"里" ,"啦" ,"啥" ,"一" ,"哦" ,"但" ,"走" ,"更" ,"话" ,
"买" ,"别" ,"再" ,"挺" ,"年" ,"并" ,"完" ,"只" ,"嘛" ,"请" ,"下" ,"哇" ,"歌" ,"等" ,"拿" ,"超" ,"玩" ,"们" ,"点" ,"钱" ,"前" ,
"脸" ,"快" ,"懂" ,"高" ,"老" ,"当" ,"黑" ,"问", "超级" ,"比较" ,"看过" ,"不过" ,"地方" ,"第一" ,"的话" ,"看着" ,"辛苦" ,"特别" ,
"确实" ,"不行" ,"需要" ,"然后" ,"哪里" ,"老师" ,"一定" ,"最后" ,"以前" ,"这句" ,"突然" ,"而且" ,"直接" ,"首歌" ,"居然" ,"卧槽" ,
"东东" ,"虽然" ,"好多" ,"有人" ,"说话" ,"一次" ,"高能" ,"好好" ,"肯定" ,"为了" ,"衣服" ,"希望" ,"那些" ,"我家" ,"翻译" ,"发现" ,
"一口" ,"里面" ,"孩子" ,"几个" ,"本来" ,"字幕" , "国家", "喜欢","以后" ,"前方" ,"而已" ,"认识" ,"可是" ,"不了" ,"只能" ,"之前" ,"完全" ,"每次" ,
"意思" ,"名字" ,"有些" ,"一些" ,"后面" ,"其他" ,"今天" ,"终于" ,"不用" ,"回来" ,"疯狂", "嘴" ,"国" ,"日" ,"见" ,"连" ,"咋" ,"字" ,
"月" ,"靠" ,"美" ,"先" ,"开" ,"阿" ,"干" ,"手" ,"帮" ,"长" ,"号" ,"之" ,"学" ,"卖" ,"跑" ,"甜" ,"时" ,"泫" ,"饭" ,"它" ,"家" ,"写" ,
"讲" ,"主" ,"路" ,"发" ,"诶" ,"白" ,"行" ,"丶" ,"越" ,"少" ,"李" ,"嗯" ,"哎" ,"该" ,"抱" ,"算" ,"新" ,"地" ,"而" ,"搞" ,"后" ,"从" ,"与" ,
"事" ,"站" ,"带" ,"出" ,"找" ,"放", "至少" ,"哪个" ,"评论" ,"眼睛" ,"变成" ,"注意" ,"所有" ,"干嘛" ,"一天" ,"不同" ,"大爷" ,"呵呵" ,"情况" ,"小米" ,
"有没有" ,"不够" ,"操作" ,"到底" ,"原因" ,"标题" ,"真正" ,"全是" ,"重要" ,"还好", "差不多", "生日快乐", "谢谢", "一般", "起来", "不好",
"加油", "选择", "支持", "当然", "毕竟", "或者", "我要", "成功", "技术", "原来", "帖子", "最好", "过来", "只要", "记得", "电视", "不到",
"正常", "等等", "告诉", "非常", "之后", "准备", "基本", "封面", "上海", "不想", "要是", "小哥", "每天", "系列", "大概", "十五", "容易",
"唱", "由", "加", "已", "以", "无", "贴"
])
class CountWords:
    """Count word frequencies across scraped comment files for one country,
    apply per-country score adjustments, render a word cloud, and persist
    the sorted (word, frequency) pairs to a MySQL table.
    """
    def __init__(self, database, table, country):
        """Open a MySQL connection using credentials read from a local file.

        :param database: MySQL schema to connect to
        :param table: table receiving (word, frequency) rows via addRow()
        :param country: sub-directory under data/ holding this country's files
        """
        self.frequency = dict()           # word -> occurrence count (later a sorted list of pairs)
        self.file_names = list()          # absolute paths of .txt files still to be processed
        self.current_country = country
        self.thread_pool_size = 8         # workers used by read_from_file_and_count()
        self.is_frequency_sorted = False  # True once frequency has been converted to a sorted list
        self.var_names = ["word", "frequency"]  # column names used when inserting rows
        # Credentials file layout: host, username, password, port (one per line).
        with open("/Users/Excited/localmysqlrootssh.txt", "r")as f:
            local_info = f.readlines() #host, username, passwd, port
            local_info = list(map(str.strip, local_info))
        try:
            self.connection = mysql.connect(
                host=local_info[0],
                user=local_info[1],
                passwd=local_info[2],
                db=database,
                port=int(local_info[3]),
                charset="utf8"
            )
        except mysql.Error as e:
            # NOTE(review): on connection failure self.connection is never set,
            # so the cursor() call below raises AttributeError anyway.
            print("Error: %s" % e)
        self.cursor = self.connection.cursor()
        self.table = table
    def filter_frequency_with(self, target_filter):
        """Drop every word contained in *target_filter* (e.g. a stop-word list)."""
        for item in target_filter:
            if self.frequency.get(item, -1) != -1:
                self.frequency.pop(item)
    def add_dictionary_from(self, target_dict):
        """Register custom words with jieba so segmentation keeps them intact."""
        for item in target_dict:
            jieba.add_word(item, 3)
    def get_all_data_file_name(self):
        """Collect absolute paths of every .txt data file for the current country.

        NOTE(review): paths are joined with plain "+" -- sub-directory entries
        from os.walk lack a trailing separator, so files in nested folders may
        get malformed paths; verify against the data layout.
        """
        abs_path = "/Users/Excited/PycharmProjects/bias-comments-mining/data/%s/"%self.current_country
        for parent_file_name in os.walk(abs_path):
            for child_file_name in parent_file_name[-1]:
                if child_file_name[-4:] == ".txt":
                    self.file_names.append(parent_file_name[0] + child_file_name)
        print("found %d files in total"%len(self.file_names))
    def read_from_file_and_count(self):
        """Tokenise every collected file with jieba and accumulate word counts.

        Runs in a pool of ``thread_pool_size`` threads.
        NOTE(review): ``self.frequency`` is updated and ``self.file_names`` is
        mutated from multiple threads without a lock, while ``executor.map``
        is still consuming the same list -- counts may be lost or files
        skipped under contention.
        """
        def _read_from_file_and_count(file_name):
            with open(file_name, 'r') as f:
                lines = f.readlines()
            # Skip files with fewer than 10 lines (too little data).
            if len(lines) < 10:
                return
            for line in lines:
                # Ignore non-strings and lines that are too short/long to be
                # meaningful comments.
                if not isinstance(line, str) or len(line) < 4 or len(line) > 500:
                    continue
                vline = self.validate(line)
                splited_words = [item for item in jieba.cut(vline)]
                for splited_word in splited_words:
                    self.frequency[splited_word] = self.frequency.get(splited_word, 0) + 1
            self.file_names.remove(file_name)
            print("finish counting %s" % file_name)
        executor = tpe(self.thread_pool_size)
        executor.map(_read_from_file_and_count, self.file_names)
        executor.shutdown(wait=True)
    def validate(self, line):
        """Collapse spam runs of a repeated character.

        Any run of three or more identical consecutive characters is reduced
        to a single occurrence (all indices after the first in the run are
        marked and removed). Returns the stripped line.
        """
        length = len(line)
        mark_list = list()  # indices to delete
        frontIndex = 0
        endIndex = 1
        while True:
            if endIndex >= length and endIndex - frontIndex < 3:
                break
            # Grow the window until it spans three characters.
            if endIndex - frontIndex < 3:
                endIndex += 1
                continue
            if line[frontIndex] == line[frontIndex + 1] == line[frontIndex + 2]:
                currentCharacter = line[frontIndex]
                frontIndex += 1
                # Mark every repetition after the first for deletion.
                while frontIndex < length and line[frontIndex] == currentCharacter:
                    mark_list.append(frontIndex)
                    frontIndex += 1
                endIndex = frontIndex + 1
            else:
                frontIndex += 1
        if len(mark_list) == 0:
            return line.strip()
        unmarked = [i for i in range(length) if i not in mark_list]
        return "".join([line[i] for i in unmarked]).strip()
    def make_wordcloud(self, image_path):
        """Render the current frequencies as a word cloud shaped/colored by
        the image at PROJECT_ABS_PATH + *image_path* and display it.
        """
        back_coloring_path = PROJECT_ABS_PATH + image_path
        font_path = PROJECT_ABS_PATH + "/bin/msyh.ttf"
        saving_image_modify_by_shape = PROJECT_ABS_PATH + "/image/" + str(int(time.time())) + "_by_shape.png"
        saving_image_modify_by_all = PROJECT_ABS_PATH + "/image/" + str(int(time.time())) + "_by_all.png"
        back_coloring = imread(back_coloring_path)
        wc = WordCloud(
            font_path=font_path,
            background_color="white",
            max_words=300,
            mask=back_coloring,
            max_font_size=250,
            random_state=42,
            width=1080,
            height=2048,
            margin=2
        )
        wc.generate_from_frequencies(self.frequency)
        image_colors = ImageColorGenerator(back_coloring)
        plt.imshow(wc.recolor(color_func=image_colors))
        # NOTE(review): this assigns the string "off" over pyplot's axis
        # function instead of calling plt.axis("off"); axes stay visible and
        # plt.axis is clobbered for the rest of the process.
        plt.axis = "off"
        plt.figure()
        plt.imshow(back_coloring, cmap=plt.get_cmap('gray'))
        plt.axis = "off"
        plt.show()
        #wc.to_file(saving_image_modify_by_all)
    def _sort_frequency(self):
        """Convert the frequency dict into a list of (word, count) pairs,
        sorted by count in descending order."""
        self.frequency = sorted(self.frequency.items(), key=lambda x: x[1], reverse=True)
        self.is_frequency_sorted = True
    def save_frequency_to_sql(self):
        """Insert every (word, count) pair into the configured table,
        sorting the counts first if needed."""
        if not self.is_frequency_sorted:
            self._sort_frequency()
        for pair in self.frequency:
            self.addRow(pair)
    def closeConnection(self):
        """Close the MySQL connection if it is open."""
        if self.connection:
            self.connection.close()
    def __del__(self):
        self.closeConnection()
    def getFormat(self):
        """Return the table schema as reported by ``DESC <table>``."""
        self.cursor.execute("desc %s"%self.table)
        return self.cursor.fetchall()
    def execute(self, command):
        """Execute a raw SQL string on the open cursor."""
        assert isinstance(command, str)
        self.cursor.execute(command)
    def india_treatment(self):
        """Manually bias selected word scores for the India word cloud
        (boost topic words, suppress country names)."""
        modify_word = {"阿三": 10000, "种姓": 5000, "厕所":3000, "强奸": 4391, "素质": 3223, "印度":-10000, "中国":-10000}
        for key, value in modify_word.items():
            if self.frequency.get(key, -1) != -1:
                self.frequency[key] += value
            else:
                self.frequency[key] = value
    def korea_treatment(self):
        """Manually bias selected word scores for the Korea word cloud and
        drop one unwanted term."""
        modify_word = {"明星": 5000, "韩剧": 4000, "哥哥": 2000, "韩国": -40000, "中国": -20000}
        for key, value in modify_word.items():
            if self.frequency.get(key, -1) != -1:
                self.frequency[key] += value
            else:
                self.frequency[key] = value
        if self.frequency.get("黑人", -1) != -1:
            self.frequency.pop("黑人")
    def japan_treatment(self):
        """Manually bias selected word scores for the Japan word cloud."""
        modify_word = {"日本": -20141, "日本人": 14982, "日语":5000, "鬼子": 5426, "本子": 3864, "动漫": 6000, "留学": 3000, "小姐姐": 3000, "中国":-10000, "宅": 3236}
        for key, value in modify_word.items():
            if self.frequency.get(key, -1) != -1:
                self.frequency[key] += value
            else:
                self.frequency[key] = value
    def black_treatment(self):
        """Scale every count up by 130% (i.e. multiply each count by 2.3).

        Mutating values while iterating items() is safe here because no keys
        are added or removed.
        """
        for key, value in self.frequency.items():
            self.frequency[key] += value * 1.3
    def getOne(self, with_label = False):
        """Fetch one row from the cursor; as a dict keyed by column name when
        *with_label* is true. Returns None (implicitly) on error.

        NOTE(review): the bare ``except`` swallows all errors silently.
        """
        try:
            res = self.cursor.fetchone()
            if not with_label:
                return res
            res_dict = dict(zip([item[0] for item in self.cursor.description], res))
            return res_dict
        except mysql.Error as e:
            print("error: %s"%e)
            self.connection.rollback()
        except:
            print("error")
            self.connection.rollback()
    def getAll(self, with_label = False):
        """Fetch all rows from the cursor; as dicts keyed by column name when
        *with_label* is true. Returns None (implicitly) on error.
        """
        try:
            res = self.cursor.fetchall()
            if not with_label:
                return res
            res_list = list()
            for row in res:
                res_list.append(dict(zip([item[0] for item in self.cursor.description], row)))
            return res_list
        except mysql.Error as e:
            print("error: %s"%e)
            self.connection.rollback()
        except:
            print("error")
            self.connection.rollback()
    def addRow(self, data):
        """Insert one (word, frequency) row and commit; roll back on error.

        NOTE(review): the SQL statement is assembled by string concatenation,
        so values containing a double quote break the query and untrusted
        input could inject SQL -- parameterised queries
        (cursor.execute(sql, params)) would be safer.
        """
        try:
            command = "insert into " + self.table + "(" + ", ".join(["`" + str(item) + "`" for item in self.var_names]) + ")"
            command += "VALUE(" + ", ".join(['"' + str(item) + '"' for item in data]) +");"
            self.execute(command)
            self.connection.commit()
        except mysql.Error as e:
            print("error: %s"%e)
            self.connection.rollback()
        except:
            print("error")
            self.connection.rollback()
import argparse
from itertools import combinations
import os
import sys
import matplotlib
matplotlib.use("PDF")
import matplotlib.pyplot as plt
import mdtraj as md
import numpy as np
from sklearn.decomposition import FastICA
from sklearn.decomposition import PCA
from sklearn.decomposition import TruncatedSVD
from sklearn.externals import joblib
from msmbuilder.decomposition import tICA
# Keys of the dictionary persisted by train_model() via joblib and read back
# by the analysis/plotting sub-commands.
MODEL_TYPE_KEY = "model-type"
# Tags stored under MODEL_TYPE_KEY identifying which decomposition was fitted.
PCA_MODEL = "pca"
SVD_MODEL = "svd"
ICA_MODEL = "ica"
TICA_MODEL = "tica"
MODEL_KEY = "model"
PROJECTION_KEY = "projected-coordinates"
LAG_TIME_KEY = "lag-time"
FEATURE_TYPE_KEY = "feature-type"
def extract_features(args):
print "reading trajectory"
traj = md.load(args.input_traj,
top=args.pdb_file)
if args.select_residues:
selections = []
ranges = args.select_residues.split(",")
for range_ in args.select_residues.split(","):
if "-" in range_:
left, right = map(int, range_.split("-"))
selections.append("(residue %s to %s)" % (left, right))
else:
singleton = int(range_)
selections.append("(residue %s)" % singleton)
selection_str = " or ".join(selections)
selected_atoms = traj.topology.select(selection_str)
traj = traj.atom_slice(selected_atoms)
if args.feature_type == "positions":
print "aligning frames"
traj.superpose(traj)
features = traj.xyz.reshape(traj.n_frames,
traj.n_atoms * 3)
elif args.feature_type == "transformed-dihedrals":
print "computing dihedrals"
_, phi_angles = md.compute_phi(traj,
periodic=False)
_, psi_angles = md.compute_psi(traj,
periodic=False)
phi_sin = np.sin(phi_angles)
phi_cos = np.cos(phi_angles)
psi_sin = np.sin(psi_angles)
psi_cos = np.cos(psi_angles)
features = np.hstack([phi_sin,
phi_cos,
psi_sin,
psi_cos])
elif args.feature_type == "transformed-dihedrals-chi":
print "computing dihedrals"
_, phi_angles = md.compute_phi(traj,
periodic=False)
_, psi_angles = md.compute_psi(traj,
periodic=False)
_, chi_angles = md.compute_chi1(traj,
periodic=False)
phi_sin = np.sin(phi_angles)
phi_cos = np.cos(phi_angles)
psi_sin = np.sin(psi_angles)
psi_cos = np.cos(psi_angles)
chi_sin = np.sin(chi_angles)
chi_cos = np.cos(chi_angles)
features = np.hstack([phi_sin,
phi_cos,
psi_sin,
psi_cos,
chi_sin,
chi_cos])
elif args.feature_type == "residue-residue-distances":
print "computing residue-residue distances"
features, _ = md.compute_contacts(traj,
scheme="ca",
periodic=False)
elif args.feature_type == "inverse-residue-residue-distances":
print "computing inverse residue-residue distances"
features, _ = md.compute_contacts(traj,
scheme="ca",
periodic=False)
features = np.reciprocal(features)
else:
raise Exception, "Unknown feature type '%s'", args.features
return features, args.feature_type
def train_model(args):
features, feature_type = extract_features(args)
print "Fitting %s model" % args.model
if args.model == "PCA":
model = PCA(n_components = args.n_components)
model_type = PCA_MODEL
projected = model.fit_transform(features)
elif args.model == "SVD":
model = TruncatedSVD(n_components = args.n_components)
model_type = SVD_MODEL
projected = model.fit_transform(features)
elif args.model == "ICA":
model = FastICA(n_components = args.n_components)
model_type = ICA_MODEL
projected = model.fit_transform(features)
elif args.model == "tICA":
model = tICA(n_components = args.n_components,
kinetic_mapping=True,
lag_time = args.lag_time)
model_type = TICA_MODEL
projected = model.fit_transform([features])[0]
else:
raise Exception, "Unknown model type '%s'", args.model
print "Writing model"
model = { LAG_TIME_KEY : args.lag_time,
MODEL_TYPE_KEY : model_type,
MODEL_KEY : model,
PROJECTION_KEY : projected,
FEATURE_TYPE_KEY : feature_type }
joblib.dump(model, args.model_file)
def explained_variance_analysis(args):
    """Plot the explained-variance ratio of each component to a PNG in
    ``args.figures_dir`` (created if missing)."""
    if not os.path.exists(args.figures_dir):
        os.makedirs(args.figures_dir)
    data = joblib.load(args.model_file)
    model = data[MODEL_KEY]
    plt.clf()
    plt.grid(True)
    plt.plot(model.explained_variance_ratio_, "m.-")
    plt.xlabel("Principal Component", fontsize=16)
    plt.ylabel("Explained Variance Ratio", fontsize=16)
    plt.ylim([0., 1.])
    fig_flname = os.path.join(args.figures_dir, "explained_variance_ratios.png")
    # Fixed: savefig's keyword is lowercase "dpi"; "DPI" was not applied.
    plt.savefig(fig_flname,
                dpi=300)
def timescale_analysis(args):
    """Plot the implied timescales of a fitted tICA model on a log axis.

    :param args: parsed CLI namespace; uses ``model_file``, ``figures_dir``
        and ``timestep`` (elapsed time between frames, used to scale the
        timescales into physical units).
    :raises Exception: if the stored model is not a tICA model.
    """
    if not os.path.exists(args.figures_dir):
        os.makedirs(args.figures_dir)
    data = joblib.load(args.model_file)
    if data[MODEL_TYPE_KEY] != TICA_MODEL:
        # Fixed: the original used the invalid 3-expression raise form.
        raise Exception("Timescales can only be calculated for tICA")
    model = data[MODEL_KEY]
    timescales = np.abs(model.timescales_ * args.timestep)
    # Draw one horizontal line per timescale.
    for ts in timescales:
        plt.semilogy([0, 1],
                     [ts, ts],
                     "k-")
    plt.ylabel("Timescale (ns, log10)", fontsize=16)
    plt.xlim([0., 1.])
    # Snap the y-range to whole decades around the data.
    plt.ylim([np.power(10., np.floor(min(np.log10(timescales)))),
              np.power(10., np.ceil(max(np.log10(timescales))))])
    fig_flname = os.path.join(args.figures_dir, "timescales.png")
    # Fixed: savefig's keyword is lowercase "dpi"; "DPI" was not applied.
    plt.savefig(fig_flname,
                dpi=300)
def pairwise(iterable):
    """Yield consecutive non-overlapping pairs (x0, x1), (x2, x3), ...

    A trailing element without a partner is silently dropped.
    """
    it = iter(iterable)
    # zip over the same iterator consumes two elements per pair; it stops as
    # soon as a second element is unavailable, dropping any odd leftover.
    for left, right in zip(it, it):
        yield left, right
def plot_projections(args):
if len(args.pairs) % 2 != 0:
print "Error: PCs must be provided in pairs of 2"
sys.exit(1)
if not os.path.exists(args.figures_dir):
os.makedirs(args.figures_dir)
model = joblib.load(args.model_file)
projected = model[PROJECTION_KEY]
# avoid affecting styles of other plots
import seaborn as sns
for p1, p2 in pairwise(args.pairs):
plt.clf()
sns.kdeplot(projected[:, p1],
projected[:, p2])
plt.xlabel("Component %s" % p1, fontsize=16)
plt.ylabel("Component %s" % p2, fontsize=16)
plt.tight_layout()
fig_flname = os.path.join(args.figures_dir,
"component_projection_%s_%s.png" % (str(p1), str(p2)))
plt.savefig(fig_flname,
DPI=300)
def plot_projected_timeseries(args):
    """Plot the projected value of each requested dimension against frame
    number, all on one labelled figure."""
    # Fixed: create the output directory like the other plotting commands do,
    # so savefig does not fail on a missing path.
    if not os.path.exists(args.figures_dir):
        os.makedirs(args.figures_dir)
    model = joblib.load(args.model_file)
    projected = model[PROJECTION_KEY]
    for dim in args.dimensions:
        plt.plot(projected[:, dim],
                 label=str(dim))
    plt.xlabel("Time (frames)", fontsize=16)
    plt.ylabel("Projected Value", fontsize=16)
    plt.tight_layout()
    plt.legend()
    # Encode the plotted dimensions in the file name.
    fig_flname = os.path.join(args.figures_dir,
                              "projected_timeseries")
    for dim in args.dimensions:
        fig_flname += "_%s" % dim
    fig_flname += ".png"
    # Fixed: savefig's keyword is lowercase "dpi"; "DPI" was not applied.
    plt.savefig(fig_flname,
                dpi=300)
def parseargs():
    """Build the CLI with one sub-command per operation and parse sys.argv.

    Sub-commands: train-model, explained-variance-analysis,
    timescale-analysis, plot-projections, plot-projected-timeseries.
    The chosen sub-command is stored in the "mode" attribute.
    """
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest="mode")
    # --- train-model: fit a decomposition model on trajectory features ---
    comp_parser = subparsers.add_parser("train-model",
                                        help="Train model")
    comp_parser.add_argument("--n-components",
                             type=int,
                             required=True,
                             help="Number of PCs to compute")
    comp_parser.add_argument("--pdb-file",
                             type=str,
                             required=True,
                             help="Input PDB file")
    comp_parser.add_argument("--input-traj",
                             type=str,
                             required=True,
                             help="Input trajectory file")
    comp_parser.add_argument("--model-file",
                             type=str,
                             required=True,
                             help="File to which to save model")
    comp_parser.add_argument("--feature-type",
                             type=str,
                             required=True,
                             choices=["positions",
                                      "transformed-dihedrals",
                                      "transformed-dihedrals-chi",
                                      "residue-residue-distances",
                                      "inverse-residue-residue-distances"],
                             help="feature-type")
    comp_parser.add_argument("--model",
                             type=str,
                             required=True,
                             choices=["PCA",
                                      "SVD",
                                      "ICA",
                                      "tICA"],
                             help="model type")
    comp_parser.add_argument("--lag-time",
                             type=int,
                             default=1,
                             help="Subsample trajectory")
    comp_parser.add_argument("--select-residues",
                             type=str,
                             default=None,
                             help="Specify subset of residues")
    # --- explained-variance-analysis: plot explained-variance ratios ---
    eva_parser = subparsers.add_parser("explained-variance-analysis",
                                       help="Plot explained variances of PCs")
    eva_parser.add_argument("--figures-dir",
                            type=str,
                            required=True,
                            help="Figure output directory")
    eva_parser.add_argument("--model-file",
                            type=str,
                            required=True,
                            help="File from which to load model")
    # --- timescale-analysis: plot tICA implied timescales ---
    ts_parser = subparsers.add_parser("timescale-analysis",
                                      help="Plot tICA timescales")
    ts_parser.add_argument("--figures-dir",
                           type=str,
                           required=True,
                           help="Figure output directory")
    ts_parser.add_argument("--model-file",
                           type=str,
                           required=True,
                           help="File from which to load model")
    ts_parser.add_argument("--timestep",
                           type=float,
                           required=True,
                           help="Elapsed time between frames")
    # --- plot-projections: pairwise KDE maps of projected coordinates ---
    proj_parser = subparsers.add_parser("plot-projections",
                                        help="Plot structures onto projections")
    proj_parser.add_argument("--figures-dir",
                             type=str,
                             required=True,
                             help="Figure output directory")
    proj_parser.add_argument("--pairs",
                             type=int,
                             nargs="+",
                             required=True,
                             help="Pairs of PCs to plot")
    proj_parser.add_argument("--model-file",
                             type=str,
                             required=True,
                             help="File from which to load model")
    # --- plot-projected-timeseries: projected values over time ---
    proj_ts_parser = subparsers.add_parser("plot-projected-timeseries",
                                           help="Plot projections over time")
    proj_ts_parser.add_argument("--figures-dir",
                                type=str,
                                required=True,
                                help="Figure output directory")
    proj_ts_parser.add_argument("--dimensions",
                                type=int,
                                nargs="+",
                                required=True,
                                help="Dimensions to plot")
    proj_ts_parser.add_argument("--model-file",
                                type=str,
                                required=True,
                                help="File from which to load model")
    return parser.parse_args()
if __name__ == "__main__":
args = parseargs()
if args.mode == "train-model":
train_model(args)
elif args.mode == "explained-variance-analysis":
explained_variance_analysis(args)
elif args.mode =="timescale-analysis":
timescale_analysis(args)
elif args.mode == "plot-projections":
plot_projections(args)
elif args.mode == "plot-projected-timeseries":
plot_projected_timeseries(args)
else:
print "Unknown mode '%s'" % args.mode
sys.exit(1) | crewman_daniels/component_analysis.py | import argparse
from itertools import combinations
import os
import sys
import matplotlib
matplotlib.use("PDF")
import matplotlib.pyplot as plt
import mdtraj as md
import numpy as np
from sklearn.decomposition import FastICA
from sklearn.decomposition import PCA
from sklearn.decomposition import TruncatedSVD
from sklearn.externals import joblib
from msmbuilder.decomposition import tICA
MODEL_TYPE_KEY = "model-type"
PCA_MODEL = "pca"
SVD_MODEL = "svd"
ICA_MODEL = "ica"
TICA_MODEL = "tica"
MODEL_KEY = "model"
PROJECTION_KEY = "projected-coordinates"
LAG_TIME_KEY = "lag-time"
FEATURE_TYPE_KEY = "feature-type"
def extract_features(args):
print "reading trajectory"
traj = md.load(args.input_traj,
top=args.pdb_file)
if args.select_residues:
selections = []
ranges = args.select_residues.split(",")
for range_ in args.select_residues.split(","):
if "-" in range_:
left, right = map(int, range_.split("-"))
selections.append("(residue %s to %s)" % (left, right))
else:
singleton = int(range_)
selections.append("(residue %s)" % singleton)
selection_str = " or ".join(selections)
selected_atoms = traj.topology.select(selection_str)
traj = traj.atom_slice(selected_atoms)
if args.feature_type == "positions":
print "aligning frames"
traj.superpose(traj)
features = traj.xyz.reshape(traj.n_frames,
traj.n_atoms * 3)
elif args.feature_type == "transformed-dihedrals":
print "computing dihedrals"
_, phi_angles = md.compute_phi(traj,
periodic=False)
_, psi_angles = md.compute_psi(traj,
periodic=False)
phi_sin = np.sin(phi_angles)
phi_cos = np.cos(phi_angles)
psi_sin = np.sin(psi_angles)
psi_cos = np.cos(psi_angles)
features = np.hstack([phi_sin,
phi_cos,
psi_sin,
psi_cos])
elif args.feature_type == "transformed-dihedrals-chi":
print "computing dihedrals"
_, phi_angles = md.compute_phi(traj,
periodic=False)
_, psi_angles = md.compute_psi(traj,
periodic=False)
_, chi_angles = md.compute_chi1(traj,
periodic=False)
phi_sin = np.sin(phi_angles)
phi_cos = np.cos(phi_angles)
psi_sin = np.sin(psi_angles)
psi_cos = np.cos(psi_angles)
chi_sin = np.sin(chi_angles)
chi_cos = np.cos(chi_angles)
features = np.hstack([phi_sin,
phi_cos,
psi_sin,
psi_cos,
chi_sin,
chi_cos])
elif args.feature_type == "residue-residue-distances":
print "computing residue-residue distances"
features, _ = md.compute_contacts(traj,
scheme="ca",
periodic=False)
elif args.feature_type == "inverse-residue-residue-distances":
print "computing inverse residue-residue distances"
features, _ = md.compute_contacts(traj,
scheme="ca",
periodic=False)
features = np.reciprocal(features)
else:
raise Exception, "Unknown feature type '%s'", args.features
return features, args.feature_type
def train_model(args):
features, feature_type = extract_features(args)
print "Fitting %s model" % args.model
if args.model == "PCA":
model = PCA(n_components = args.n_components)
model_type = PCA_MODEL
projected = model.fit_transform(features)
elif args.model == "SVD":
model = TruncatedSVD(n_components = args.n_components)
model_type = SVD_MODEL
projected = model.fit_transform(features)
elif args.model == "ICA":
model = FastICA(n_components = args.n_components)
model_type = ICA_MODEL
projected = model.fit_transform(features)
elif args.model == "tICA":
model = tICA(n_components = args.n_components,
kinetic_mapping=True,
lag_time = args.lag_time)
model_type = TICA_MODEL
projected = model.fit_transform([features])[0]
else:
raise Exception, "Unknown model type '%s'", args.model
print "Writing model"
model = { LAG_TIME_KEY : args.lag_time,
MODEL_TYPE_KEY : model_type,
MODEL_KEY : model,
PROJECTION_KEY : projected,
FEATURE_TYPE_KEY : feature_type }
joblib.dump(model, args.model_file)
def explained_variance_analysis(args):
if not os.path.exists(args.figures_dir):
os.makedirs(args.figures_dir)
data = joblib.load(args.model_file)
model = data[MODEL_KEY]
plt.clf()
plt.grid(True)
plt.plot(model.explained_variance_ratio_, "m.-")
plt.xlabel("Principal Component", fontsize=16)
plt.ylabel("Explained Variance Ratio", fontsize=16)
plt.ylim([0., 1.])
fig_flname = os.path.join(args.figures_dir, "explained_variance_ratios.png")
plt.savefig(fig_flname,
DPI=300)
def timescale_analysis(args):
if not os.path.exists(args.figures_dir):
os.makedirs(args.figures_dir)
data = joblib.load(args.model_file)
if data[MODEL_TYPE_KEY] != TICA_MODEL:
raise Exception, "Timescales can only be calculated for tICA"
model = data[MODEL_KEY]
lag_time = data[LAG_TIME_KEY]
timescales = np.abs(model.timescales_ * args.timestep)
for ts in timescales:
plt.semilogy([0, 1],
[ts, ts],
"k-")
plt.ylabel("Timescale (ns, log10)", fontsize=16)
plt.xlim([0., 1.])
plt.ylim([np.power(10., np.floor(min(np.log10(timescales)))),
np.power(10., np.ceil(max(np.log10(timescales))))])
fig_flname = os.path.join(args.figures_dir, "timescales.png")
plt.savefig(fig_flname,
DPI=300)
def pairwise(iterable):
iterable = iter(iterable)
try:
while True:
a = next(iterable)
b = next(iterable)
yield a, b
except StopIteration:
pass
def plot_projections(args):
if len(args.pairs) % 2 != 0:
print "Error: PCs must be provided in pairs of 2"
sys.exit(1)
if not os.path.exists(args.figures_dir):
os.makedirs(args.figures_dir)
model = joblib.load(args.model_file)
projected = model[PROJECTION_KEY]
# avoid affecting styles of other plots
import seaborn as sns
for p1, p2 in pairwise(args.pairs):
plt.clf()
sns.kdeplot(projected[:, p1],
projected[:, p2])
plt.xlabel("Component %s" % p1, fontsize=16)
plt.ylabel("Component %s" % p2, fontsize=16)
plt.tight_layout()
fig_flname = os.path.join(args.figures_dir,
"component_projection_%s_%s.png" % (str(p1), str(p2)))
plt.savefig(fig_flname,
DPI=300)
def plot_projected_timeseries(args):
model = joblib.load(args.model_file)
projected = model[PROJECTION_KEY]
for dim in args.dimensions:
plt.plot(projected[:, dim],
label=str(dim))
plt.xlabel("Time (frames)", fontsize=16)
plt.ylabel("Projected Value", fontsize=16)
plt.tight_layout()
plt.legend()
fig_flname = os.path.join(args.figures_dir,
"projected_timeseries")
for dim in args.dimensions:
fig_flname += "_%s" % dim
fig_flname += ".png"
plt.savefig(fig_flname,
DPI=300)
def parseargs():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest="mode")
comp_parser = subparsers.add_parser("train-model",
help="Train model")
comp_parser.add_argument("--n-components",
type=int,
required=True,
help="Number of PCs to compute")
comp_parser.add_argument("--pdb-file",
type=str,
required=True,
help="Input PDB file")
comp_parser.add_argument("--input-traj",
type=str,
required=True,
help="Input trajectory file")
comp_parser.add_argument("--model-file",
type=str,
required=True,
help="File to which to save model")
comp_parser.add_argument("--feature-type",
type=str,
required=True,
choices=["positions",
"transformed-dihedrals",
"transformed-dihedrals-chi",
"residue-residue-distances",
"inverse-residue-residue-distances"],
help="feature-type")
comp_parser.add_argument("--model",
type=str,
required=True,
choices=["PCA",
"SVD",
"ICA",
"tICA"],
help="model type")
comp_parser.add_argument("--lag-time",
type=int,
default=1,
help="Subsample trajectory")
comp_parser.add_argument("--select-residues",
type=str,
default=None,
help="Specify subset of residues")
eva_parser = subparsers.add_parser("explained-variance-analysis",
help="Plot explained variances of PCs")
eva_parser.add_argument("--figures-dir",
type=str,
required=True,
help="Figure output directory")
eva_parser.add_argument("--model-file",
type=str,
required=True,
help="File from which to load model")
ts_parser = subparsers.add_parser("timescale-analysis",
help="Plot tICA timescales")
ts_parser.add_argument("--figures-dir",
type=str,
required=True,
help="Figure output directory")
ts_parser.add_argument("--model-file",
type=str,
required=True,
help="File from which to load model")
ts_parser.add_argument("--timestep",
type=float,
required=True,
help="Elapsed time between frames")
proj_parser = subparsers.add_parser("plot-projections",
help="Plot structures onto projections")
proj_parser.add_argument("--figures-dir",
type=str,
required=True,
help="Figure output directory")
proj_parser.add_argument("--pairs",
type=int,
nargs="+",
required=True,
help="Pairs of PCs to plot")
proj_parser.add_argument("--model-file",
type=str,
required=True,
help="File from which to load model")
proj_ts_parser = subparsers.add_parser("plot-projected-timeseries",
help="Plot projections over time")
proj_ts_parser.add_argument("--figures-dir",
type=str,
required=True,
help="Figure output directory")
proj_ts_parser.add_argument("--dimensions",
type=int,
nargs="+",
required=True,
help="Dimensions to plot")
proj_ts_parser.add_argument("--model-file",
type=str,
required=True,
help="File from which to load model")
return parser.parse_args()
if __name__ == "__main__":
args = parseargs()
if args.mode == "train-model":
train_model(args)
elif args.mode == "explained-variance-analysis":
explained_variance_analysis(args)
elif args.mode =="timescale-analysis":
timescale_analysis(args)
elif args.mode == "plot-projections":
plot_projections(args)
elif args.mode == "plot-projected-timeseries":
plot_projected_timeseries(args)
else:
print "Unknown mode '%s'" % args.mode
sys.exit(1) | 0.424173 | 0.293797 |
from collections import defaultdict
from decimal import Decimal
from django.db import models, transaction
from django.db.models import Count
from django.db.models.fields.reverse_related import ForeignObjectRel
from article.models import ArticleType, OrProductType
from blame.models import ImmutableBlame, Blame
from crm.models import User
from money.models import CostField, Cost, Currency
from order.models import OrderLine, OrderCombinationLine
from supplier.models import Supplier, ArticleTypeSupplier
from swipe.settings import USED_SUPPLIERORDER_STRATEGY, USED_CURRENCY
from tools.util import raiseif
class SupplierOrder(ImmutableBlame):
    """
    Order we place at a supplier
    """
    # Supplier this order is placed with; PROTECT prevents deleting a
    # supplier that still has orders.
    supplier = models.ForeignKey(Supplier, on_delete=models.PROTECT)
    def __str__(self):
        return "Supplier: {}, User: {}".format(self.supplier, self.user_created)
    @staticmethod
    def create_supplier_order(user_modified, supplier, articles_ordered=None, allow_different_currency=False):
        """
        Checks if supplier order information is correct and orders it at the correct supplier
        :param user_modified: user to which the order is authorized
        :param supplier: supplier which should order the products
        :param articles_ordered:
        :type articles_ordered: List[List[ArticleType, int, Cost]]
        :param allow_different_currency: If true, removes checks for the currency to see if its the system currency
        :return: whatever DistributionStrategy.distribute returns for the
            chosen strategy (the persisted distribution)
        :raises InsufficientDemandError: when more articles are ordered than
            open customer orders plus stock wishes account for
        """
        # Validate types/amounts and collapse the order into {article: count}.
        ordered_dict = SupplierOrder.verify_data_assertions(user_modified, supplier, articles_ordered,
                                                            allow_different_currency)
        demand_errors = SupplierOrder.verify_article_demand(ordered_dict)
        if demand_errors:
            err_msg = "Not enough demand for ordered articles: \n"
            for article, number in demand_errors:
                err_msg += \
                    " - Article {article} was ordered {number} times, " \
                    "but only {valid_number} were accounted for. \n".format(
                        article=article.name,
                        number=ordered_dict[article],
                        valid_number=ordered_dict[article]-number
                    )
            raise InsufficientDemandError(err_msg)
        # Create supplier order and modify customer orders
        distribution = DistributionStrategy.get_strategy_from_string(USED_SUPPLIERORDER_STRATEGY)\
            .get_distribution(articles_ordered)
        return DistributionStrategy.distribute(user_modified, supplier, distribution, indirect=True)
    @staticmethod
    def verify_article_demand(articles_ordered=None):
        """
        Check that ordered amounts do not exceed demand (stock wishes plus
        open customer order lines).
        :param articles_ordered:
        :type articles_ordered: Dict[ArticleType, int]
        :return: List[Tuple[ArticleType, int]] of (article, excess) for every
            article ordered beyond its demand; empty list when all fits
        """
        raiseif(articles_ordered is None,
                IncorrectDataError, "I must get articles that are ordered, I cannot check without")
        errors = []
        # Tally total demand per article type.
        to_order = defaultdict(lambda: 0)
        stockwish_table_lines = StockWishTableLine.objects.all()
        for line in stockwish_table_lines:
            to_order[line.article_type] += line.number
        combo_order_lines = OrderCombinationLine.get_ol_combinations(state='O', include_price_field=False)
        for line in combo_order_lines:
            if not hasattr(line.wishable, 'sellabletype') or \
                    line.wishable.sellabletype is None:
                raise UnimplementedError("Or products are not yet supported")
            to_order[line.wishable.sellabletype.articletype] += line.number
        for article, number in articles_ordered.items():
            if to_order[article] < articles_ordered[article]:
                errors.append((article, number - to_order[article]))
        return errors
    @staticmethod
    def verify_data_assertions(user, supplier, articles_ordered, allow_different_currency):
        """
        Checks basic assertions about the supplied data, including the supplier ability to supply the specified products
        :param user: user to which the order is authorized
        :param supplier: supplier which should order the products
        :param articles_ordered
        :type articles_ordered: List[List[ArticleType, int]]
        :param allow_different_currency
        :return: defaultdict mapping ArticleType -> total number ordered
        :raises IncorrectDataError: on any malformed input
        """
        raiseif(not user, IncorrectDataError, "You must supply me with a user which does this action")
        raiseif(not articles_ordered, IncorrectDataError, "You must supply me with articles that are being ordered")
        raiseif(not isinstance(user, User), IncorrectDataError, "user must be a User")
        # Ensure that the number of articles ordered is not less than 0
        ordered_dict = defaultdict(lambda: 0)
        for article, number, cost in articles_ordered:
            raiseif(not isinstance(article, ArticleType),
                    IncorrectDataError, "articles_ordered must be iterable of Tuple[ArticleType, int, Cost]")
            raiseif(not isinstance(number, int), IncorrectDataError,
                    "articles_ordered must be iterable of Tuple[ArticleType, int, Cost]")
            raiseif(not isinstance(cost, Cost), IncorrectDataError,
                    "articles_ordered must be iterable of Tuple[ArticleType, int, Cost]")
            if not allow_different_currency:
                raiseif(cost.currency.iso != USED_CURRENCY,
                        IncorrectDataError,
                        "You can only use currency {} with the current settings".format(USED_CURRENCY))
            raiseif(number <= 0, IncorrectDataError, "You cannot order negative amounts of products")
            ordered_dict[article] += number
            # NOTE(review): objects.get() raises DoesNotExist itself when no
            # row matches, so this raiseif can never fire with its message;
            # .filter(...).exists() was probably intended -- confirm.
            raiseif(not ArticleTypeSupplier.objects.get(article_type=article, supplier=supplier),
                    IncorrectDataError, "Article does not (yet) exist at supplier")
        return ordered_dict
class SupplierOrderLine(Blame):
    """
    Single ArticleType ordered at supplier and contained in a SupplierOrder. Can be linked to a Customers OrderLines or
    be left empty for stock.
    """
    # The document containing all these supplierOrderLines
    supplier_order = models.ForeignKey(SupplierOrder, on_delete=models.PROTECT)
    # An articleType. Must match the supplierArticleType
    article_type = models.ForeignKey(ArticleType, on_delete=models.PROTECT)
    # The articleType as the supplier knows it. Must match our own articleType
    supplier_article_type = models.ForeignKey(ArticleTypeSupplier, on_delete=models.PROTECT)
    # An orderLine to fulfill the wish of a customer for a product. Null for stockwish(anonymously)
    order_line = models.ForeignKey(OrderLine, null=True, on_delete=models.PROTECT)
    # The amount of money we are going to pay for this product excluding all taxes
    line_cost = CostField()
    # A state indicating if the customer order is completed yet.
    # Single-character code, e.g. 'O' for newly ordered (see save()).
    state = models.CharField(max_length=5)
def __str__(self):
if not hasattr(self, 'supplier_order')or self.supplier_order is None:
supplier_order = "None"
else:
supplier_order = self.supplier_order.pk
if not hasattr(self, 'article_type') or self.article_type is None:
article_type = "None"
else:
article_type = str(self.article_type)
if not hasattr(self, 'supplier_article_type') or self.supplier_article_type is None:
supplier_article_type = "None"
else:
supplier_article_type = str(self.supplier_article_type.pk)
if not hasattr(self, 'order_line') or self.order_line is None:
order_line = "None"
else:
order_line = str(self.order_line.pk)
if not hasattr(self, 'line_cost') or self.line_cost is None:
line_cost = "None"
else:
line_cost = str(self.line_cost)
if not hasattr(self, 'state') or self.state is None:
state = "None"
else:
state = self.state
stri = "SupplierOrder: {}, ArticleType: {}, " \
"SupplierArticleType: {}, OrderLine: {}, Cost: {}, State: {}".format(supplier_order,
article_type,
supplier_article_type,
order_line, line_cost, state)
return stri
@transaction.atomic
def save(self, *args, **kwargs):
if self.order_line is not None:
if isinstance(self.order_line.wishable, OrProductType):
raiseif(
not ArticleType.objects.filter(
orproducttype__id=self.order_line.id,
id=self.article_type.id).exists(),
InvalidDataError, "Ordered article is not known to ordered OrProduct")
else:
# Customer article matches ordered article
raiseif(not self.order_line.wishable.sellabletype.articletype == self.article_type,
InvalidDataError, "The order's article type is not this line's ArticleType")
# +1 query for customer ordered lines
checked_ats = False
if not hasattr(self, 'supplier_article_type') or self.supplier_article_type is None:
sup_art_types = ArticleTypeSupplier.objects.filter(
article_type=self.article_type,
supplier=self.supplier_order.supplier)
# shouldn't get triggered, but +1 query
checked_ats = True
raiseif(len(sup_art_types) != 1, InvalidDataError, "There can only be one SupplierArticleType")
if not checked_ats: # should happen all the time
raiseif(self.supplier_article_type.supplier != self.supplier_order.supplier,
InvalidDataError, "The supplier_order's supplier must be the supplier of the "
"supplier_article_type") # Article can be ordered at supplier
# +2 query to get the supplier from the supplier_article_type and supplier_order
raiseif(self.supplier_article_type != ArticleTypeSupplier.objects
.get(article_type=self.article_type,
supplier=self.supplier_order.supplier),
InvalidDataError, "The supplier_article_type must be ") # optional +1 for article type
# Set the relevant state is not implemented
if self.pk is None:
self.state = 'O'
raiseif(self.state not in SupplierOrderState.STATE_CHOICES, InvalidDataError)
# Assert that everything is ok here
if self.pk is None:
if self.order_line is not None:
self.order_line.order_at_supplier(self.supplier_order.user_created)
# ^ If this doesn't happen at exactly the same time
# as the save of the SupOrdLn, you are screwed
# +1 query for the user_created from supplier_order
else:
StockWishTable.remove_products_from_table(self.user_modified, article_type=self.article_type, number=1,
supplier_order=self.supplier_order, stock_wish=None,
indirect=True)
# +1 query to remove one product from the stockwishtable, or to change the state of our order_line
super(SupplierOrderLine, self).save(**kwargs)
# +1 query to save the SOL itself
sos = SupplierOrderState(supplier_order_line=self, state=self.state, user_modified=self.user_modified)
sos.save()
# +1 query to save the state transition
else:
# Maybe some extra logic here?
super(SupplierOrderLine, self).save(**kwargs)
@transaction.atomic()
def transition(self, new_state, user_modified):
"""
Transitions an orderline from one state to another. This is the only safe means of transitioning, as data
integrity can not be guaranteed otherwise. Transitioning is only possible with objects stored in the database.
"""
if not self.pk or self.state is None:
raise ObjectNotSavedError()
elif self.state not in SupplierOrderState.STATE_CHOICES:
raise IncorrectStateError("State of orderline is not valid. Database is corrupted at Orderline", self.pk,
" with state ", self.state)
elif new_state not in SupplierOrderState.STATE_CHOICES:
raise IncorrectTransitionError("New state is not a valid state")
else:
nextstates = {
'O': ('B', 'C', 'A'),
'B': ('A', 'C')}
if new_state in nextstates[self.state]:
self.state = new_state
self.user_modified = user_modified
sols = SupplierOrderState(state=new_state, supplier_order_line=self, user_modified=user_modified)
sols.save()
self.save()
else:
raise IncorrectTransitionError(
"This transaction is not legal: {state} -> {new_state}".format(state=self.state,
new_state=new_state))
@staticmethod
def bulk_create_supplierorders(supplier_orderlines, supplier_order: SupplierOrder, user: User):
"""
Creates supplierOrderLines in bulk with one transaction. This should not be called directly as it contains
no checks for speed purposes. These checks are done in the main creation function so use that one for
the creation of supplierOrderLines.
:param supplier_orderlines:
:type supplier_orderlines: list[SupplierOrderLine]
:param supplier_order:
:param user:
:return:
"""
sol_states = []
ol_transitions = [] # type: list[OrderLine]
remove_from_stock_wishes = defaultdict(lambda: 0)
for sol in supplier_orderlines:
sol.supplier_order = supplier_order
sol.user_created = user
sol.user_modified = user
sol.state = 'O'
# Explicitly do not check if the articleType matches the articleType of the OrderLine
# Explicitly do not check if the supplierArticleType is set and matches the articleType
# Explicity do not check if the supplier can supply the article
if not sol.order_line:
remove_from_stock_wishes[sol.article_type] += 1
else:
ol_transitions.append(sol.order_line)
with transaction.atomic():
for art in remove_from_stock_wishes:
# Remove all products from table one by one
StockWishTable.remove_products_from_table(user, article_type=art, number=remove_from_stock_wishes[art],
supplier_order=supplier_order, stock_wish=None,
indirect=True)
SupplierOrderLine.objects.bulk_create(supplier_orderlines)
sols_nw = SupplierOrderLine.objects.filter(supplier_order=supplier_order)
for sol in sols_nw:
sol_states.append(SupplierOrderState(supplier_order_line=sol, state=sol.state,
user_modified=user, user_created=user))
SupplierOrderState.objects.bulk_create(sol_states)
for ol in ol_transitions:
ol.order_at_supplier(user)
def send_to_backorder(self, user_modified):
self.transition('B', user_modified)
@transaction.atomic()
def mark_as_arrived(self, user_modified):
if self.order_line is not None:
self.order_line.arrive_at_store(user_modified)
self.transition('A', user_modified)
@transaction.atomic
def cancel_line(self, user_modified, cancel_order=False):
# Has orderline
if self.order_line is not None:
# Either cancel the order outright or revert to basic state
if cancel_order:
self.order_line.cancel(user_modified)
else:
self.order_line.return_back_to_ordered_by_customer(user_modified)
else:
if not cancel_order:
StockWishTable.add_products_to_table(user_modified=user_modified, number=1,
indirect=True, article_type=self.article_type,
supplier_order=self.supplier_order)
self.transition('C', user_modified)
class SupplierOrderState(ImmutableBlame):
    """
    A state log of a supplierOrderLine. The static lists indicate which states are available and
    what they mean. This also indicates which states are in transit and which are closed.
    """
    # All legal state codes for a SupplierOrderLine.
    STATE_CHOICES = ('O', 'B', 'C', 'A')
    # Human-readable meaning of each state code.
    STATE_CHOICES_MEANING = {'O': 'Ordered at supplier', 'B': 'Backorder', 'C': 'Cancelled',
                             'A': 'Arrived at store'}
    # States in which the line is still in transit.
    OPEN_STATES = ('O', 'B')
    # Final states; SupplierOrderLine.transition() allows no moves out of these.
    CLOSED_STATES = ('C', 'A')
    # Moment this state entry was logged; set automatically on creation.
    timestamp = models.DateTimeField(auto_now_add=True)
    # The line this state entry belongs to.
    supplier_order_line = models.ForeignKey(SupplierOrderLine, on_delete=models.PROTECT)
    # The state code; expected to be one of STATE_CHOICES.
    state = models.CharField(max_length=5)
class StockWish(ImmutableBlame):
    """
    Combination of wishes for ArticleTypes to be ordered at supplier.
    """
    # Moment the wish was recorded; set automatically on creation.
    timestamp = models.DateTimeField(auto_now_add=True)

    @staticmethod
    @transaction.atomic
    def create_stock_wish(user_modified, articles_ordered):
        """
        Create a StockWish and apply all of its article modifications atomically.
        This is the preferred way of creating stock wishes.

        :param user_modified: User to be connected to the stockwish
        :type user_modified: User
        :param articles_ordered: tuples containing both ArticleTypes and a non-zero integer;
            negative numbers shrink the wish table, positive numbers grow it
        :return: the saved StockWish
        """
        # Validate everything up front so nothing is persisted on bad input.
        raiseif(user_modified is None, IncorrectDataError, "You must provide me with a user_modified")
        raiseif(len(articles_ordered) == 0, IncorrectDataError, "You must order at least 1 article to save")
        raiseif(not isinstance(user_modified, User), IncorrectDataError, "The user_modified argument must be a User")
        for art_type, count in articles_ordered:
            raiseif(not isinstance(art_type, ArticleType),
                    IncorrectDataError, "articles_ordered must be iterable of Tuple[ArticleType, int]")
            raiseif(not isinstance(count, int),
                    IncorrectDataError, "articles_ordered must be iterable of Tuple[ArticleType, int]")
            raiseif(count == 0, IncorrectDataError, "You may not order zero articles")
        wish = StockWish(user_modified=user_modified)
        wish.save()
        for art_type, count in articles_ordered:
            # Pick the table mutation matching the sign of the count.
            mutate = (StockWishTable.remove_products_from_table if count < 0
                      else StockWishTable.add_products_to_table)
            mutate(
                user_modified,
                art_type,
                abs(count),
                indirect=True,
                stock_wish=wish,
                supplier_order=None
            )
        return wish
class StockWishTableLine(Blame):
    """
    Single line of all combined present wishes for a single ArticleType. Will be modified by StockWishes
    and SupplierOrders.
    """
    # Exactly one line exists per ArticleType, hence the one-to-one relation.
    article_type = models.OneToOneField(ArticleType, on_delete=models.PROTECT)
    # Outstanding number of articles wished for.
    number = models.IntegerField(default=0)

    def save(self, indirect=False, *args, **kwargs):
        """Persist the line; only the StockWishTable helpers may do this (indirect=True)."""
        raiseif(not indirect,
                IndirectionError,
                "StockWishTableLine must be called indirectly from StockWishTable")
        super().save(**kwargs)
class StockWishTable:
    """
    Helper methods for creating Stock Wishes. Let functions that modify the stock wish table call these functions
    """

    @staticmethod
    def add_products_to_table(user_modified, article_type, number, indirect=False,
                              stock_wish=None, supplier_order=None):
        """Add *number* wished products for *article_type*, creating the table line if absent."""
        raiseif(number <= 0, IncorrectDataError, "Number of products to add to table must be bigger than 0")
        if not indirect:
            raise IndirectionError("add_products_to_table must be called indirectly")
        existing = StockWishTableLine.objects.filter(article_type=article_type)
        if existing:
            line = existing[0]
            line.number += number
        else:
            line = StockWishTableLine(article_type=article_type, number=number, user_modified=user_modified)
        line.save(indirect=indirect)
        # Every mutation of the table is logged together with its reason.
        log = StockWishTableLog(
            number=number,
            article_type=article_type,
            stock_wish=stock_wish,
            supplier_order=supplier_order, user_modified=user_modified)
        log.save(indirect=True)

    @staticmethod
    def remove_products_from_table(user_modified, article_type, number, indirect=False,
                                   stock_wish=None, supplier_order=None):
        """Remove *number* wished products for *article_type*; a missing line is silently ignored."""
        raiseif(number <= 0, IncorrectDataError, "number of products to remove from table must be bigger than 0")
        if not indirect:
            raise IndirectionError("remove_products_from_table must be called indirectly")
        matches = StockWishTableLine.objects.filter(article_type=article_type)
        if not matches:
            return
        line = matches[0]
        if line.number - number < 0:
            raise CannotRemoveFromWishTableError("For articleType, tried to remove {} from WishTable,"
                                                 "but only {} is present".format(number, line.number))
        if line.number == number:
            # Nothing left to wish for: drop the line entirely.
            line.delete()
        else:
            line.number -= number
            line.save(indirect=indirect)
        log = StockWishTableLog(number=-number, article_type=article_type, stock_wish=stock_wish,
                                supplier_order=supplier_order, user_modified=user_modified)
        log.save(indirect=True)
class StockWishTableLog(ImmutableBlame):
    """
    Log of all edits of the stock wish. This logs which articleType is modified and by what amount and for which reason.
    """
    # The modification count to the stock
    number = models.IntegerField()
    # The article type which is modified
    article_type = models.ForeignKey(ArticleType, on_delete=models.PROTECT)
    # Reason 1: a SupplierOrder modified the StockWishTable. Mutually exclusive with stock_wish.
    supplier_order = models.ForeignKey(SupplierOrder, null=True, on_delete=models.PROTECT)
    # Reason 2: a StockWish modified the StockWishTable. Mutually exclusive with supplier_order.
    stock_wish = models.ForeignKey(StockWish, null=True, on_delete=models.PROTECT)

    def save(self, indirect=False):
        """Persist the log line; saving must be indirect and exactly one reason must be set."""
        raiseif(not indirect, IndirectionError, "Saving must be done indirectly")
        raiseif(self.supplier_order and self.stock_wish,
                TooManyReasonsError, "With two reasons to order this product, "
                                     "Choose either a supplier order or a stock wish")
        raiseif(not (self.supplier_order or self.stock_wish),
                NotEnoughReasonError, "Supply a reason for this modification")
        # ^ reason is either supplier order or stock wish modification
        super().save()

    def __str__(self):
        sup_ord = "None" if self.supplier_order is None else self.supplier_order.pk
        stw = "None" if self.stock_wish is None else self.stock_wish.pk
        return "{} x {}, SupplierOrder: {}, StockWish: {}".format(self.article_type, self.number, sup_ord, stw)
class SupplierOrderCombinationLine:
    """
    A helper class to group SupplierOrderLines together based on shared properties. This allows for quick summaries
    where summation of all lines was a bad alternative.
    """
    # Class-level defaults (note: article_type and cost default to the *types* themselves).
    number = 0
    article_type = ArticleType
    cost = CostField
    state = ""

    def __init__(self, number, article_type, cost, state):
        self.number = number
        self.article_type = article_type
        self.cost = cost
        self.state = state

    def __str__(self):
        rounded = self.cost.amount.quantize(Decimal('0.01'))
        return "{:<7}{:14}{:10}{:12}".format(self.number, self.article_type.name,
                                             str(self.cost.currency) + str(rounded),
                                             SupplierOrderState.STATE_CHOICES_MEANING[self.state])

    @staticmethod
    def prefix_field_names(model, prefix):
        """Return the model's own (non-reverse) field names, each prefixed with *prefix*."""
        # noinspection PyProtectedMember
        return [prefix + field.name
                for field in model._meta.get_fields()
                if not isinstance(field, ForeignObjectRel)]

    @staticmethod
    def get_sol_combinations(supplier_order=None, article_type=None, state=None, qs=SupplierOrderLine.objects,
                             include_price_field=True, supplier=None):
        """
        Group SupplierOrderLines by state, ArticleType and (optionally) price, counting each group.
        All filter parameters are optional and combined with AND when given.
        """
        criteria = {}
        if supplier_order:
            criteria['supplier_order'] = supplier_order
        if article_type:
            criteria['article_type'] = article_type
        if state:
            criteria['state'] = state
        if supplier:
            criteria['supplier_order__supplier'] = supplier
        price_fields = ['line_cost', 'line_cost_currency'] if include_price_field else []
        group_fields = price_fields + SupplierOrderCombinationLine.prefix_field_names(ArticleType, 'article_type__')
        grouped = qs.filter(**criteria).values('state', *group_fields).annotate(Count('id'))
        combinations = []
        for row in grouped:
            if include_price_field:
                cost = Cost(amount=row['line_cost'], currency=Currency(iso=row['line_cost_currency']))
            else:
                # Price was not part of the grouping; use a sentinel cost instead.
                cost = Cost(amount=Decimal(-1), currency=Currency(iso=USED_CURRENCY))
            combinations.append(SupplierOrderCombinationLine(
                number=row['id__count'],
                article_type=ArticleType(name=row['article_type__name'],
                                         pk=row['article_type__id']),
                cost=cost,
                state=row['state']))
        return combinations
class DistributionStrategy:
    """
    An interface for a consistent way of deciding the distribution of the products ordered at our suppliers. Also
    contains a distributionfunction that actually handles the actual distribution of the articles for users who
    prefer a manual way of operation.
    """
    @staticmethod
    def get_strategy_from_string(strategy):
        """Resolve a strategy name to its class; raises UnimplementedError for unknown names."""
        if strategy == "IndiscriminateCustomerStockStrategy":
            return IndiscriminateCustomerStockStrategy
        else:
            raise UnimplementedError("Strategy not implemented")
    @staticmethod
    def distribute(user, supplier, distribution, indirect=False):
        """
        Creates the supplier order and distributes the SupplierOrderLines to any orders
        :param user: a User for the SupplierOrder
        :param supplier: Supplier for the SupplierOrder
        :param distribution: A list of SupplierOrderLines
        :param indirect: Indirection flag. Function must be called indirectly.
        :return: the saved SupplierOrder
        :raises IncorrectDataError: on any inconsistency in the supplied lines
        """
        raiseif(not isinstance(user, User), IncorrectDataError, "argument user is not instance of User")
        raiseif(not isinstance(supplier, Supplier), IncorrectDataError, "argument supplier is not instance of Supplier")
        raiseif(not indirect, IndirectionError, "Distribute must be called indirectly")
        raiseif(not distribution, IncorrectDataError, "distribution is not supplied")
        supplier_order = SupplierOrder(user_modified=user, supplier=supplier)
        articles = set()
        article_type_suppliers = {}
        # First pass: type-check every line and collect the distinct ArticleTypes.
        for supplier_order_line in distribution:
            raiseif(
                not isinstance(supplier_order_line, SupplierOrderLine),
                IncorrectDataError, "argument distribution does not only contain SupplierOrderLine")
            raiseif(not (supplier_order_line.order_line is None or
                         isinstance(supplier_order_line.order_line, OrderLine)),
                    IncorrectDataError, "supplier order line's order line link is not instance of OrderLine")
            articles.add(supplier_order_line.article_type)
            if supplier_order_line.order_line is not None:
                # Discount the possibility of OrProducts for now
                # NOTE(review): compares the ArticleType pk with the wishable pk directly —
                # presumably valid because the related models share primary keys; confirm.
                raiseif(supplier_order_line.article_type_id !=
                        supplier_order_line.order_line.wishable_id,
                        IncorrectDataError, "SupplierOrderLine's article type is not the same type as it's linked"
                        "OrderLine")
        # Fetch all ArticleTypeSuppliers for this supplier in a single query.
        art_sup_types = ArticleTypeSupplier.objects.filter(article_type__in=articles, supplier=supplier)
        for ats in art_sup_types:
            article_type_suppliers[ats.article_type] = ats
        # Add articleTypeSuppliers all at once
        for supplier_order_line in distribution:
            ats = article_type_suppliers.get(supplier_order_line.article_type)
            if ats is None:
                raise IncorrectDataError("Article {} does not "
                                         "have an ArticleTypeSupplier".format(supplier_order_line.article_type))
            supplier_order_line.supplier_article_type = ats
        # We've checked everything, now we start saving
        with transaction.atomic():
            supplier_order.save()
            SupplierOrderLine.bulk_create_supplierorders(distribution, supplier_order, user)
        return supplier_order
    @staticmethod
    def get_distribution(article_type_number_combos):
        """
        Proposes a distribution according to the specific strategy. Assume supply is not bigger than demand
        :param article_type_number_combos: List[ArticleType, number, Cost]
        :return: A list containing SupplierOrderLines
        """
        raise UnimplementedError("Super distribution class has no implementation")
class IndiscriminateCustomerStockStrategy(DistributionStrategy):
    """
    Prioritises the customers first by primary key of orderline, and then the stock.
    """
    @staticmethod
    def get_distribution(article_type_number_combos):
        """
        Propose a distribution: open customer OrderLines (oldest pk first) are matched before
        anonymous stock wishes, then each combo's Cost is attached to the generated lines.

        Fixes over the previous implementation:
        - the input is no longer mutated (the old code took a *shallow* list.copy() and
          decremented the inner lists' number field, corrupting the caller's data);
        - tuples are accepted as combos, not only lists;
        - running out of unpriced lines raises InsufficientDemandError instead of looping forever.

        :param article_type_number_combos: iterable of (ArticleType, number, Cost) triples
        :return: list of unsaved SupplierOrderLines with article_type/order_line/line_cost set
        :raises InsufficientDemandError: when supply exceeds demand
        """
        # Tally the total supply per ArticleType without touching the caller's data.
        supply = defaultdict(lambda: 0)
        for article_type, number, _cost in article_type_number_combos:
            supply[article_type] += number
        # Snapshot the keys; defaultdict lookups below may otherwise grow a live view.
        article_types = list(supply)
        distribution = []
        # Customers first, oldest orderline first.
        relevant_orderlines = OrderLine.objects.filter(state='O', wishable__in=article_types).order_by('pk')
        for orderline in relevant_orderlines:
            # Discount the possibility for OrProducts for now
            if hasattr(orderline.wishable, 'sellabletype') and hasattr(orderline.wishable.sellabletype, 'articletype'):
                art = orderline.wishable.sellabletype.articletype
                if supply[art] > 0:
                    distribution.append(SupplierOrderLine(article_type=art, order_line=orderline, line_cost=None))
                    supply[art] -= 1
        # Remaining supply is absorbed by anonymous stock wishes.
        stock_wishes = StockWishTableLine.objects.filter(article_type__in=article_types)
        for wish in stock_wishes:
            # Assert not more supply than demand
            raiseif(wish.number < supply[wish.article_type],
                    InsufficientDemandError, "there is not enough demand to order this many articles")
            for _ in range(supply[wish.article_type]):
                distribution.append(SupplierOrderLine(article_type=wish.article_type, line_cost=None))
            supply[wish.article_type] = 0
        # Attach costs: each combo prices `number` still-unpriced lines of its ArticleType,
        # in distribution order. Same assignment as the old n^2 rescan, without the rescan.
        for article_type, number, cost in article_type_number_combos:
            remaining = number
            for sup_ord_line in distribution:
                if remaining == 0:
                    break
                if sup_ord_line.article_type == article_type and sup_ord_line.line_cost is None:
                    sup_ord_line.line_cost = cost
                    remaining -= 1
            # Guard against leftover supply with no line to price (the old code hung here).
            raiseif(remaining > 0, InsufficientDemandError,
                    "there is not enough demand to order this many articles")
        return distribution
class UnimplementedError(Exception):
    """Raised for still unimplemented features in the logistics scope."""
class CannotRemoveFromWishTableError(Exception):
    """Raised when more is removed from the WishTable than is present; that would be inconsistent."""
class IndirectionError(Exception):
    """Raised when a function guarded by the indirect-flag is abusively called directly."""
class InsufficientDemandError(Exception):
    """Raised when there is more supply than demand."""
class ObjectNotSavedError(Exception):
    """Raised when a state transition is attempted on an unsaved object."""
class IncorrectStateError(Exception):
    """Raised when an incorrect state is supplied."""
class IncorrectTransitionError(Exception):
    """Raised when an illegal state transition is attempted."""
class IncorrectDataError(Exception):
    """Raised when data is supplied in an incorrect manner or type."""
class TooManyReasonsError(Exception):
    """Raised when two reasons were supplied for modifying the wishTable."""
class NotEnoughReasonError(Exception):
    """Raised when no reason was supplied for modifying the wishTable."""
class InvalidDataError(Exception):
    """Raised when supplied data, after further inspection, does not meet the specified criteria."""
from decimal import Decimal
from django.db import models, transaction
from django.db.models import Count
from django.db.models.fields.reverse_related import ForeignObjectRel
from article.models import ArticleType, OrProductType
from blame.models import ImmutableBlame, Blame
from crm.models import User
from money.models import CostField, Cost, Currency
from order.models import OrderLine, OrderCombinationLine
from supplier.models import Supplier, ArticleTypeSupplier
from swipe.settings import USED_SUPPLIERORDER_STRATEGY, USED_CURRENCY
from tools.util import raiseif
class SupplierOrder(ImmutableBlame):
"""
Order we place at a supplier
"""
supplier = models.ForeignKey(Supplier, on_delete=models.PROTECT)
def __str__(self):
return "Supplier: {}, User: {}".format(self.supplier, self.user_created)
@staticmethod
def create_supplier_order(user_modified, supplier, articles_ordered=None, allow_different_currency=False):
"""
Checks if supplier order information is correct and orders it at the correct supplier
:param user_modified: user to which the order is authorized
:param supplier: supplier which should order the products
:param articles_ordered:
:type articles_ordered: List[List[ArticleType, int, Cost]]
:param allow_different_currency: If true, removes checks for the currency to see if its the system currency
"""
ordered_dict = SupplierOrder.verify_data_assertions(user_modified, supplier, articles_ordered,
allow_different_currency)
demand_errors = SupplierOrder.verify_article_demand(ordered_dict)
if demand_errors:
err_msg = "Not enough demand for ordered articles: \n"
for article, number in demand_errors:
err_msg += \
" - Article {article} was ordered {number} times, " \
"but only {valid_number} were accounted for. \n".format(
article=article.name,
number=ordered_dict[article],
valid_number=ordered_dict[article]-number
)
raise InsufficientDemandError(err_msg)
# Create supplier order and modify customer orders
distribution = DistributionStrategy.get_strategy_from_string(USED_SUPPLIERORDER_STRATEGY)\
.get_distribution(articles_ordered)
return DistributionStrategy.distribute(user_modified, supplier, distribution, indirect=True)
@staticmethod
def verify_article_demand(articles_ordered=None):
"""
:param articles_ordered:
:type articles_ordered: Dict[ArticleType, int]
:return: List[Tuple[ArticleType, int]]
"""
raiseif(articles_ordered is None,
IncorrectDataError, "I must get articles that are ordered, I cannot check without")
errors = []
to_order = defaultdict(lambda: 0)
stockwish_table_lines = StockWishTableLine.objects.all()
for line in stockwish_table_lines:
to_order[line.article_type] += line.number
combo_order_lines = OrderCombinationLine.get_ol_combinations(state='O', include_price_field=False)
for line in combo_order_lines:
if not hasattr(line.wishable, 'sellabletype') or \
line.wishable.sellabletype is None:
raise UnimplementedError("Or products are not yet supported")
to_order[line.wishable.sellabletype.articletype] += line.number
for article, number in articles_ordered.items():
if to_order[article] < articles_ordered[article]:
errors.append((article, number - to_order[article]))
return errors
@staticmethod
def verify_data_assertions(user, supplier, articles_ordered, allow_different_currency):
"""
Checks basic assertions about the supplied data, including the supplier ability to supply the specified products
:param user: user to which the order is authorized
:param supplier: supplier which should order the products
:param articles_ordered
:type articles_ordered: List[List[ArticleType, int]]
:param allow_different_currency
"""
raiseif(not user, IncorrectDataError, "You must supply me with a user which does this action")
raiseif(not articles_ordered, IncorrectDataError, "You must supply me with articles that are being ordered")
raiseif(not isinstance(user, User), IncorrectDataError, "user must be a User")
# Ensure that the number of articles ordered is not less than 0
ordered_dict = defaultdict(lambda: 0)
for article, number, cost in articles_ordered:
raiseif(not isinstance(article, ArticleType),
IncorrectDataError, "articles_ordered must be iterable of Tuple[ArticleType, int, Cost]")
raiseif(not isinstance(number, int), IncorrectDataError,
"articles_ordered must be iterable of Tuple[ArticleType, int, Cost]")
raiseif(not isinstance(cost, Cost), IncorrectDataError,
"articles_ordered must be iterable of Tuple[ArticleType, int, Cost]")
if not allow_different_currency:
raiseif(cost.currency.iso != USED_CURRENCY,
IncorrectDataError,
"You can only use currency {} with the current settings".format(USED_CURRENCY))
raiseif(number <= 0, IncorrectDataError, "You cannot order negative amounts of products")
ordered_dict[article] += number
raiseif(not ArticleTypeSupplier.objects.get(article_type=article, supplier=supplier),
IncorrectDataError, "Article does not (yet) exist at supplier")
return ordered_dict
class SupplierOrderLine(Blame):
"""
Single ArticleType ordered at supplier and contained in a SupplierOrder. Can be linked to a Customers OrderLines or
be left empty for stock.
"""
# The document containing all these supplierOrderLines
supplier_order = models.ForeignKey(SupplierOrder, on_delete=models.PROTECT)
# An articleType. Must match the supplierArticleType
article_type = models.ForeignKey(ArticleType, on_delete=models.PROTECT)
# The articleType as the supplier knows it. Must match our own articleType
supplier_article_type = models.ForeignKey(ArticleTypeSupplier, on_delete=models.PROTECT)
# An orderLine to fulfill the wish of a customer for a product. Null for stockwish(anonymously)
order_line = models.ForeignKey(OrderLine, null=True, on_delete=models.PROTECT)
# The amount of money we are going to pay for this product excluding all taxes
line_cost = CostField()
# A state indicating if the customer order is completed yet.
state = models.CharField(max_length=5)
def __str__(self):
if not hasattr(self, 'supplier_order')or self.supplier_order is None:
supplier_order = "None"
else:
supplier_order = self.supplier_order.pk
if not hasattr(self, 'article_type') or self.article_type is None:
article_type = "None"
else:
article_type = str(self.article_type)
if not hasattr(self, 'supplier_article_type') or self.supplier_article_type is None:
supplier_article_type = "None"
else:
supplier_article_type = str(self.supplier_article_type.pk)
if not hasattr(self, 'order_line') or self.order_line is None:
order_line = "None"
else:
order_line = str(self.order_line.pk)
if not hasattr(self, 'line_cost') or self.line_cost is None:
line_cost = "None"
else:
line_cost = str(self.line_cost)
if not hasattr(self, 'state') or self.state is None:
state = "None"
else:
state = self.state
stri = "SupplierOrder: {}, ArticleType: {}, " \
"SupplierArticleType: {}, OrderLine: {}, Cost: {}, State: {}".format(supplier_order,
article_type,
supplier_article_type,
order_line, line_cost, state)
return stri
@transaction.atomic
def save(self, *args, **kwargs):
if self.order_line is not None:
if isinstance(self.order_line.wishable, OrProductType):
raiseif(
not ArticleType.objects.filter(
orproducttype__id=self.order_line.id,
id=self.article_type.id).exists(),
InvalidDataError, "Ordered article is not known to ordered OrProduct")
else:
# Customer article matches ordered article
raiseif(not self.order_line.wishable.sellabletype.articletype == self.article_type,
InvalidDataError, "The order's article type is not this line's ArticleType")
# +1 query for customer ordered lines
checked_ats = False
if not hasattr(self, 'supplier_article_type') or self.supplier_article_type is None:
sup_art_types = ArticleTypeSupplier.objects.filter(
article_type=self.article_type,
supplier=self.supplier_order.supplier)
# shouldn't get triggered, but +1 query
checked_ats = True
raiseif(len(sup_art_types) != 1, InvalidDataError, "There can only be one SupplierArticleType")
if not checked_ats: # should happen all the time
raiseif(self.supplier_article_type.supplier != self.supplier_order.supplier,
InvalidDataError, "The supplier_order's supplier must be the supplier of the "
"supplier_article_type") # Article can be ordered at supplier
# +2 query to get the supplier from the supplier_article_type and supplier_order
raiseif(self.supplier_article_type != ArticleTypeSupplier.objects
.get(article_type=self.article_type,
supplier=self.supplier_order.supplier),
InvalidDataError, "The supplier_article_type must be ") # optional +1 for article type
# Set the relevant state is not implemented
if self.pk is None:
self.state = 'O'
raiseif(self.state not in SupplierOrderState.STATE_CHOICES, InvalidDataError)
# Assert that everything is ok here
if self.pk is None:
if self.order_line is not None:
self.order_line.order_at_supplier(self.supplier_order.user_created)
# ^ If this doesn't happen at exactly the same time
# as the save of the SupOrdLn, you are screwed
# +1 query for the user_created from supplier_order
else:
StockWishTable.remove_products_from_table(self.user_modified, article_type=self.article_type, number=1,
supplier_order=self.supplier_order, stock_wish=None,
indirect=True)
# +1 query to remove one product from the stockwishtable, or to change the state of our order_line
super(SupplierOrderLine, self).save(**kwargs)
# +1 query to save the SOL itself
sos = SupplierOrderState(supplier_order_line=self, state=self.state, user_modified=self.user_modified)
sos.save()
# +1 query to save the state transition
else:
# Maybe some extra logic here?
super(SupplierOrderLine, self).save(**kwargs)
@transaction.atomic()
def transition(self, new_state, user_modified):
"""
Transitions an orderline from one state to another. This is the only safe means of transitioning, as data
integrity can not be guaranteed otherwise. Transitioning is only possible with objects stored in the database.
"""
if not self.pk or self.state is None:
raise ObjectNotSavedError()
elif self.state not in SupplierOrderState.STATE_CHOICES:
raise IncorrectStateError("State of orderline is not valid. Database is corrupted at Orderline", self.pk,
" with state ", self.state)
elif new_state not in SupplierOrderState.STATE_CHOICES:
raise IncorrectTransitionError("New state is not a valid state")
else:
nextstates = {
'O': ('B', 'C', 'A'),
'B': ('A', 'C')}
if new_state in nextstates[self.state]:
self.state = new_state
self.user_modified = user_modified
sols = SupplierOrderState(state=new_state, supplier_order_line=self, user_modified=user_modified)
sols.save()
self.save()
else:
raise IncorrectTransitionError(
"This transaction is not legal: {state} -> {new_state}".format(state=self.state,
new_state=new_state))
    @staticmethod
    def bulk_create_supplierorders(supplier_orderlines, supplier_order: SupplierOrder, user: User):
        """
        Creates supplierOrderLines in bulk with one transaction. This should not be called directly as it contains
        no checks for speed purposes. These checks are done in the main creation function so use that one for
        the creation of supplierOrderLines.
        :param supplier_orderlines: unsaved lines to create; they are mutated in place (order, user, state).
        :type supplier_orderlines: list[SupplierOrderLine]
        :param supplier_order: the saved SupplierOrder the lines are attached to
        :param user: User stored as creator and modifier on every created row
        :return:
        """
        sol_states = []
        ol_transitions = []  # type: list[OrderLine]
        # ArticleType -> number of stock wishes this order fulfils (lines without a customer order).
        remove_from_stock_wishes = defaultdict(lambda: 0)
        for sol in supplier_orderlines:
            sol.supplier_order = supplier_order
            sol.user_created = user
            sol.user_modified = user
            sol.state = 'O'
            # Explicitly do not check if the articleType matches the articleType of the OrderLine
            # Explicitly do not check if the supplierArticleType is set and matches the articleType
            # Explicitly do not check if the supplier can supply the article
            if not sol.order_line:
                remove_from_stock_wishes[sol.article_type] += 1
            else:
                ol_transitions.append(sol.order_line)
        with transaction.atomic():
            for art in remove_from_stock_wishes:
                # Remove all products from table one by one
                StockWishTable.remove_products_from_table(user, article_type=art, number=remove_from_stock_wishes[art],
                                                          supplier_order=supplier_order, stock_wish=None,
                                                          indirect=True)
            SupplierOrderLine.objects.bulk_create(supplier_orderlines)
            # Re-fetch the saved lines so each state-log row references a line with a pk.
            sols_nw = SupplierOrderLine.objects.filter(supplier_order=supplier_order)
            for sol in sols_nw:
                sol_states.append(SupplierOrderState(supplier_order_line=sol, state=sol.state,
                                                     user_modified=user, user_created=user))
            SupplierOrderState.objects.bulk_create(sol_states)
        # NOTE(review): these customer OrderLine transitions run OUTSIDE the atomic block above —
        # if one of them fails, the supplier order lines are already committed. Confirm intended.
        for ol in ol_transitions:
            ol.order_at_supplier(user)
    def send_to_backorder(self, user_modified):
        """Moves this line into backorder ('B'); only legal from state 'O' (see transition)."""
        self.transition('B', user_modified)
    @transaction.atomic()
    def mark_as_arrived(self, user_modified):
        """
        Marks this line as arrived at the store ('A'). If a customer OrderLine is linked, it is
        transitioned to arrived-at-store in the same atomic transaction.
        """
        if self.order_line is not None:
            self.order_line.arrive_at_store(user_modified)
        self.transition('A', user_modified)
    @transaction.atomic
    def cancel_line(self, user_modified, cancel_order=False):
        """
        Cancels this supplier order line ('C').

        :param user_modified: User performing the cancellation.
        :param cancel_order: if True, the underlying demand is cancelled outright; if False the
            demand is preserved — a linked OrderLine reverts to its customer-ordered state, an
            unlinked line puts its wish back on the stock wish table.
        """
        # Has orderline
        if self.order_line is not None:
            # Either cancel the order outright or revert to basic state
            if cancel_order:
                self.order_line.cancel(user_modified)
            else:
                self.order_line.return_back_to_ordered_by_customer(user_modified)
        else:
            if not cancel_order:
                # No customer order behind this line: re-add the demand as a stock wish.
                StockWishTable.add_products_to_table(user_modified=user_modified, number=1,
                                                     indirect=True, article_type=self.article_type,
                                                     supplier_order=self.supplier_order)
        self.transition('C', user_modified)
class SupplierOrderState(ImmutableBlame):
    """
    A state log of a supplierOrderLine. The static lists indicate which states are available and
    what they mean. This also indicates which states are in transit and which are closed.
    """
    # Valid state codes; see STATE_CHOICES_MEANING for their semantics.
    STATE_CHOICES = ('O', 'B', 'C', 'A')
    STATE_CHOICES_MEANING = {'O': 'Ordered at supplier', 'B': 'Backorder', 'C': 'Cancelled',
                             'A': 'Arrived at store'}
    # States in which the line is still pending at the supplier.
    OPEN_STATES = ('O', 'B')
    # Terminal states: transition() allows no outgoing transitions from these.
    CLOSED_STATES = ('C', 'A')
    # When this state was recorded; log rows are immutable and never updated.
    timestamp = models.DateTimeField(auto_now_add=True)
    supplier_order_line = models.ForeignKey(SupplierOrderLine, on_delete=models.PROTECT)
    # One of STATE_CHOICES; not enforced at DB level (no choices=), only in application code.
    state = models.CharField(max_length=5)
class StockWish(ImmutableBlame):
    """
    Combination of wishes for ArticleTypes to be ordered at supplier.
    """
    # Moment of creation; immutable afterwards.
    timestamp = models.DateTimeField(auto_now_add=True)
    @staticmethod
    @transaction.atomic
    def create_stock_wish(user_modified, articles_ordered):
        """
        Creates stock wishes integrally, this function is the preferred way of creating stock wishes
        :param user_modified: User to be connected to the stockwish
        :type user_modified: User
        :param articles_ordered: tuples containing both ArticleTypes and a non-zero integer; positive
            numbers add wishes to the table, negative numbers revoke existing wishes
        :type articles_ordered: list[tuple[ArticleType, int]]
        :return: the saved StockWish the table modifications are logged against
        """
        raiseif(user_modified is None, IncorrectDataError, "You must provide me with a user_modified")
        raiseif(len(articles_ordered) == 0, IncorrectDataError, "You must order at least 1 article to save")
        raiseif(not isinstance(user_modified, User), IncorrectDataError, "The user_modified argument must be a User")
        # Validate the entire input before touching the database.
        for article, number in articles_ordered:
            raiseif(not isinstance(article, ArticleType),
                    IncorrectDataError, "articles_ordered must be iterable of Tuple[ArticleType, int]")
            raiseif(not isinstance(number, int),
                    IncorrectDataError, "articles_ordered must be iterable of Tuple[ArticleType, int]")
            raiseif(number == 0, IncorrectDataError, "You may not order zero articles")
        stock_wish = StockWish(user_modified=user_modified)
        stock_wish.save()
        for article, number in articles_ordered:
            if number < 0:
                # Negative numbers revoke existing wishes.
                StockWishTable.remove_products_from_table(
                    user_modified,
                    article,
                    -number,
                    indirect=True,
                    stock_wish=stock_wish,
                    supplier_order=None
                )
            else:
                StockWishTable.add_products_to_table(
                    user_modified,
                    article,
                    number,
                    indirect=True,
                    stock_wish=stock_wish,
                    supplier_order=None
                )
        return stock_wish
class StockWishTableLine(Blame):
    """
    Single line of all combined present wishes for a single ArticleType. Will be modified by StockWishes
    and SupplierOrders.
    """
    # Exactly one line per ArticleType; `number` is the total outstanding wish count.
    article_type = models.OneToOneField(ArticleType, on_delete=models.PROTECT)
    number = models.IntegerField(default=0)

    def save(self, indirect=False, *args, **kwargs):
        """
        Saves the line. Must be called with indirect=True from StockWishTable so that every
        mutation is logged; direct saves are rejected.
        :raises IndirectionError: when called directly (indirect=False).
        """
        raiseif(not indirect,
                IndirectionError,
                "StockWishTableLine must be called indirectly from StockWishTable")
        # Forward positional args as well; the previous implementation silently dropped *args,
        # so e.g. save(True, force_insert) lost the positional argument.
        super(StockWishTableLine, self).save(*args, **kwargs)
class StockWishTable:
    """
    Helper methods for creating Stock Wishes. Let functions that modify the stock wish table call these
    functions, so that every mutation of StockWishTableLine is accompanied by a StockWishTableLog entry.
    """

    @staticmethod
    def add_products_to_table(user_modified, article_type, number, indirect=False,
                              stock_wish=None, supplier_order=None):
        """
        Adds `number` wishes for `article_type` to the table, creating the line if absent, and logs it.
        :raises IncorrectDataError: when number <= 0.
        :raises IndirectionError: when called directly.
        """
        raiseif(number <= 0, IncorrectDataError, "Number of products to add to table must be bigger than 0")
        if not indirect:
            raise IndirectionError("add_products_to_table must be called indirectly")
        lines = StockWishTableLine.objects.filter(article_type=article_type)
        # len() evaluates and caches the queryset; bind the row to a local instead of re-indexing it.
        if len(lines) == 0:
            line = StockWishTableLine(article_type=article_type, number=number, user_modified=user_modified)
        else:
            line = lines[0]
            line.number += number
        line.save(indirect=indirect)
        # Single log path (was duplicated verbatim in both branches).
        log = StockWishTableLog(
            number=number,
            article_type=article_type,
            stock_wish=stock_wish,
            supplier_order=supplier_order, user_modified=user_modified)
        log.save(indirect=True)

    @staticmethod
    def remove_products_from_table(user_modified, article_type, number, indirect=False,
                                   stock_wish=None, supplier_order=None):
        """
        Removes `number` wishes for `article_type`; deletes the line when it reaches zero, and logs it.
        Silently ignores the removal when no line exists for the article type.
        :raises IncorrectDataError: when number <= 0.
        :raises IndirectionError: when called directly.
        :raises CannotRemoveFromWishTableError: when removing more than is present.
        """
        raiseif(number <= 0, IncorrectDataError, "number of products to remove from table must be bigger than 0")
        if not indirect:
            raise IndirectionError("remove_products_from_table must be called indirectly")
        lines = StockWishTableLine.objects.filter(article_type=article_type)
        if not lines:
            # Nothing to remove for this article type (original behavior: silent no-op).
            return
        line = lines[0]
        if line.number - number < 0:
            raise CannotRemoveFromWishTableError("For articleType, tried to remove {} from WishTable,"
                                                 "but only {} is present".format(number, line.number))
        if line.number == number:
            # The line would drop to zero; remove it entirely.
            line.delete()
        else:
            line.number -= number
            line.save(indirect=indirect)
        log = StockWishTableLog(number=-number, article_type=article_type, stock_wish=stock_wish,
                                supplier_order=supplier_order, user_modified=user_modified)
        log.save(indirect=True)
class StockWishTableLog(ImmutableBlame):
    """
    Log of all edits of the stock wish. This logs which articleType is modified and by what amount and for which reason.
    """
    # The modification count to the stock (negative for removals)
    number = models.IntegerField()
    # The article type which is modified
    article_type = models.ForeignKey(ArticleType, on_delete=models.PROTECT)
    # A possible reason of the modification of the StockWishTable. If set, a SupplierOrder modded the StockWishTable.
    # If set, stock_wish must be unset
    supplier_order = models.ForeignKey(SupplierOrder, null=True, on_delete=models.PROTECT)
    # A possible reason of the modification of the StockWishTable. If set, a StockWish modded the StockWishTable.
    # If set, supplier_order must be unset
    stock_wish = models.ForeignKey(StockWish, null=True, on_delete=models.PROTECT)

    def save(self, indirect=False, *args, **kwargs):
        """
        Saves the log entry. Exactly one of supplier_order / stock_wish must be set as the reason.
        :raises IndirectionError: when called directly.
        :raises TooManyReasonsError: when both reasons are supplied.
        :raises NotEnoughReasonError: when no reason is supplied.
        """
        raiseif(not indirect, IndirectionError, "Saving must be done indirectly")
        raiseif(self.supplier_order and self.stock_wish,
                TooManyReasonsError, "With two reasons to order this product, "
                                     "Choose either a supplier order or a stock wish")
        raiseif(not (self.supplier_order or self.stock_wish),
                NotEnoughReasonError, "Supply a reason for this modification")
        # ^ reason is either supplier order or stock wish modification
        # Forward args/kwargs (e.g. using=...) for consistency with StockWishTableLine.save;
        # the previous signature made that impossible.
        super(StockWishTableLog, self).save(*args, **kwargs)

    def __str__(self):
        sup_ord = "None" if self.supplier_order is None else self.supplier_order.pk
        stw = "None" if self.stock_wish is None else self.stock_wish.pk
        return "{} x {}, SupplierOrder: {}, StockWish: {}".format(self.article_type, self.number, sup_ord, stw)
class SupplierOrderCombinationLine:
    """
    A helper class to group SupplierOrderLines together based on shared properties. This allows for quick summaries
    where summation of all lines was a bad alternative.
    """
    # NOTE(review): article_type and cost are bound to the *classes* ArticleType / CostField here,
    # not instances; they only document the expected types and are always shadowed in __init__.
    number = 0
    article_type = ArticleType
    cost = CostField
    state = ""
    def __init__(self, number, article_type, cost, state):
        self.number = number
        self.article_type = article_type
        self.cost = cost
        self.state = state
    def __str__(self):
        # Fixed-width columns: count, article name, currency+amount, human-readable state.
        dec = self.cost.amount.quantize(Decimal('0.01'))
        stri = "{:<7}{:14}{:10}{:12}".format(self.number, self.article_type.name, str(self.cost.currency) + str(dec),
                                             SupplierOrderState.STATE_CHOICES_MEANING[self.state])
        return stri
    @staticmethod
    def prefix_field_names(model, prefix):
        """
        Returns the names of `model`'s non-relational fields, each prefixed with `prefix`,
        for use in a values() query that spans a foreign key.
        """
        # noinspection PyProtectedMember
        fields = model._meta.get_fields()
        ret = []
        for field in fields:
            if not isinstance(field, ForeignObjectRel):
                ret.append(prefix + field.name)
        return ret
    @staticmethod
    def get_sol_combinations(supplier_order=None, article_type=None, state=None, qs=SupplierOrderLine.objects,
                             include_price_field=True, supplier=None):
        """
        Groups SupplierOrderLines by state, article type and (optionally) cost, counting lines per group.
        All filter parameters are optional and combined with AND.
        :return: list of SupplierOrderCombinationLine summaries
        """
        result = []
        filtr = {}
        if supplier_order:
            filtr['supplier_order'] = supplier_order
        if article_type:
            filtr['article_type'] = article_type
        if state:
            filtr['state'] = state
        if supplier:
            filtr['supplier_order__supplier'] = supplier
        price_fields = []
        if include_price_field:
            price_fields = ['line_cost', 'line_cost_currency']
        flds = price_fields + SupplierOrderCombinationLine.prefix_field_names(ArticleType, 'article_type__')
        # Grouping happens in the database: values(...) + Count('id') yields one row per combination.
        supplierorderlines = qs.filter(**filtr). \
            values('state', *flds).annotate(Count('id'))
        for o in supplierorderlines:
            number = o['id__count']
            state = o['state']
            if not include_price_field:
                # Cost was not requested; use a -1 placeholder in the configured currency.
                amount = Decimal(-1)
                currency = Currency(iso=USED_CURRENCY)
            else:
                amount = o['line_cost']
                currency = Currency(iso=o['line_cost_currency'])
            cost = Cost(amount=amount, currency=currency)
            socl = SupplierOrderCombinationLine(number=number,
                                                article_type=ArticleType(name=o['article_type__name'],
                                                                         pk=o['article_type__id']),
                                                cost=cost,
                                                state=state)
            result.append(socl)
        return result
class DistributionStrategy:
    """
    An interface for a consistent way of deciding the distribution of the products ordered at our suppliers. Also
    contains a distributionfunction that actually handles the actual distribution of the articles for users who
    prefer a manual way of operation.
    """
    @staticmethod
    def get_strategy_from_string(strategy):
        """
        Resolves a strategy name to its implementing class.
        :raises UnimplementedError: for unknown strategy names.
        """
        if strategy == "IndiscriminateCustomerStockStrategy":
            return IndiscriminateCustomerStockStrategy
        else:
            raise UnimplementedError("Strategy not implemented")
    @staticmethod
    def distribute(user, supplier, distribution, indirect=False):
        """
        Creates the supplier order and distributes the SupplierOrderLines to any orders
        :param user: a User for the SupplierOrder
        :param supplier: Supplier for the SupplierOrder
        :param distribution: A list of SupplierOrderLines
        :param indirect: Indirection flag. Function must be called indirectly.
        :return: the saved SupplierOrder
        """
        raiseif(not isinstance(user, User), IncorrectDataError, "argument user is not instance of User")
        raiseif(not isinstance(supplier, Supplier), IncorrectDataError, "argument supplier is not instance of Supplier")
        raiseif(not indirect, IndirectionError, "Distribute must be called indirectly")
        raiseif(not distribution, IncorrectDataError, "distribution is not supplied")
        supplier_order = SupplierOrder(user_modified=user, supplier=supplier)
        articles = set()
        article_type_suppliers = {}
        # First pass: validate every line and collect the distinct ArticleTypes involved.
        for supplier_order_line in distribution:
            raiseif(
                not isinstance(supplier_order_line, SupplierOrderLine),
                IncorrectDataError, "argument distribution does not only contain SupplierOrderLine")
            raiseif(not (supplier_order_line.order_line is None or
                         isinstance(supplier_order_line.order_line, OrderLine)),
                    IncorrectDataError, "supplier order line's order line link is not instance of OrderLine")
            articles.add(supplier_order_line.article_type)
            if supplier_order_line.order_line is not None:
                # Discount the possibility of OrProducts for now
                raiseif(supplier_order_line.article_type_id !=
                        supplier_order_line.order_line.wishable_id,
                        IncorrectDataError, "SupplierOrderLine's article type is not the same type as it's linked"
                                            "OrderLine")
        # One query for all ArticleTypeSuppliers instead of one per line.
        art_sup_types = ArticleTypeSupplier.objects.filter(article_type__in=articles, supplier=supplier)
        for ats in art_sup_types:
            article_type_suppliers[ats.article_type] = ats
        # Add articleTypeSuppliers all at once
        for supplier_order_line in distribution:
            ats = article_type_suppliers.get(supplier_order_line.article_type)
            if ats is None:
                raise IncorrectDataError("Article {} does not "
                                         "have an ArticleTypeSupplier".format(supplier_order_line.article_type))
            supplier_order_line.supplier_article_type = ats
        # We've checked everything, now we start saving
        with transaction.atomic():
            supplier_order.save()
            SupplierOrderLine.bulk_create_supplierorders(distribution, supplier_order, user)
        return supplier_order
    @staticmethod
    def get_distribution(article_type_number_combos):
        """
        Proposes a distribution according to the specific strategy. Assume supply is not bigger than demand
        :param article_type_number_combos: List[ArticleType, number, Cost]
        :return: A list containing SupplierOrderLines
        """
        raise UnimplementedError("Super distribution class has no implementation")
class IndiscriminateCustomerStockStrategy(DistributionStrategy):
    """
    Prioritises the customers first by primary key of orderline, and then the stock.
    """
    @staticmethod
    def get_distribution(article_type_number_combos):
        """
        Builds a distribution: customer orderlines (oldest pk first) are matched before stock wishes,
        then the given costs are attached to the produced lines.
        :param article_type_number_combos: iterable of (ArticleType, number, Cost)
        :return: list of unsaved SupplierOrderLines
        :raises InsufficientDemandError: when a stock wish line holds less demand than the remaining supply.
        """
        distribution = []
        articletype_dict = defaultdict(lambda: 0)
        for articletype, number, cost in article_type_number_combos:
            articletype_dict[articletype] += number
        articletypes = articletype_dict.keys()
        relevant_orderlines = OrderLine.objects.filter(state='O', wishable__in=articletypes).order_by('pk')
        # Match the orders one-by-one, stopping when all orders and wishes are fulfilled or articles from the
        # article type number combos run out
        articletype_dict_supply = articletype_dict.copy()
        for orderline in relevant_orderlines:
            # Discount the possibility for OrProducts for now
            if hasattr(orderline.wishable, 'sellabletype') and hasattr(orderline.wishable.sellabletype, 'articletype'):
                art = orderline.wishable.sellabletype.articletype
                if articletype_dict_supply[art] > 0:
                    distribution.append(SupplierOrderLine(article_type=art,
                                                          order_line=orderline, line_cost=None))
                    articletype_dict_supply[art] -= 1
        stock_wishes = StockWishTableLine.objects.filter(article_type__in=articletypes)
        for wish in stock_wishes:
            # Assert not more supply than demand
            raiseif(wish.number < articletype_dict_supply[wish.article_type],
                    InsufficientDemandError, "there is not enough demand to order this many articles")
            for _ in range(articletype_dict_supply[wish.article_type]):
                distribution.append(SupplierOrderLine(article_type=wish.article_type, line_cost=None))
            articletype_dict_supply[wish.article_type] = 0
        # Attach the costs. The previous implementation shallow-copied the input and decremented the
        # caller's inner lists in place (a visible side effect), and its while-loop never terminated
        # when fewer matching lines existed than `number`. Track the remaining count locally instead.
        for articletype, number, cost in article_type_number_combos:
            remaining = number
            for supplier_order_line in distribution:
                if remaining == 0:
                    break
                if supplier_order_line.article_type == articletype and supplier_order_line.line_cost is None:
                    supplier_order_line.line_cost = cost
                    remaining -= 1
        return distribution
class UnimplementedError(Exception):
    """
    Raised for features in the logistics scope that are still unimplemented (e.g. unknown strategy names).
    """
    pass
class CannotRemoveFromWishTableError(Exception):
    """
    Raised when more wishes would be removed from the WishTable than are present, which would
    leave the table in an inconsistent state.
    """
    pass
class IndirectionError(Exception):
    """
    Raised when a function guarded by an indirect-flag is abusively called directly.
    """
    pass
class InsufficientDemandError(Exception):
    """
    Raised when there is more supply than demand during distribution.
    """
    pass
class ObjectNotSavedError(Exception):
    """
    Raised when a state transition is attempted on an object that has not been saved yet.
    """
    pass
class IncorrectStateError(Exception):
    """
    Raised when an object's stored state is not a recognised state (corrupt data).
    """
    pass
class IncorrectTransitionError(Exception):
    """
    Raised when an illegal state transition is attempted.
    """
    pass
class IncorrectDataError(Exception):
    """
    Raised when data is supplied in an incorrect manner or with an incorrect type.
    """
    pass
class TooManyReasonsError(Exception):
    """
    Raised when two reasons (a supplier order AND a stock wish) were supplied for one wishTable modification.
    """
    pass
class NotEnoughReasonError(Exception):
    """
    Raised when no reason (supplier order or stock wish) was supplied for a wishTable modification.
    """
    pass
class InvalidDataError(Exception):
    """
    Raised when supplied data, after further inspection, does not meet the specified criteria.
    """
    pass
import numpy as np
class ObjectStatic:
    """
    Static data for an object. This data won't change between frames.
    """
    def __init__(self, name: str, object_id: int, mass: float, segmentation_color: np.array, size: np.array,
                 category: str, kinematic: bool, dynamic_friction: float, static_friction: float, bounciness: float):
        """
        :param name: The name of the object.
        :param object_id: The unique ID of the object.
        :param mass: The mass of the object.
        :param segmentation_color: The segmentation color of the object.
        :param size: The size of the object.
        :param dynamic_friction: The dynamic friction of the object.
        :param static_friction: The static friction of the object.
        :param bounciness: The bounciness of the object.
        :param kinematic: If True, this object is kinematic, and won't respond to physics.
        """
        """:field
        The unique ID of the object.
        """
        self.object_id: int = object_id
        """:field
        [The name of the model.](https://github.com/threedworld-mit/tdw/blob/master/Documentation/python/librarian/model_librarian.md)
        """
        self.name: str = name.lower()
        """:field
        The semantic category of the object.
        """
        self.category: str = category
        """:field
        If True, this object is kinematic, and won't respond to physics.
        """
        self.kinematic: bool = kinematic
        """:field
        The RGB segmentation color for the object as a numpy array: `[r, g, b]`
        """
        self.segmentation_color: np.array = segmentation_color
        """:field
        The mass of the object.
        """
        self.mass: float = mass
        """:field
        The size of the object as a numpy array: `[width, height, length]`
        """
        self.size: np.array = size
        """:field
        The dynamic friction of the object.
        """
        self.dynamic_friction: float = dynamic_friction
        """:field
        The static friction of the object.
        """
        self.static_friction: float = static_friction
        """:field
        The bounciness of the object.
        """
        self.bounciness: float = bounciness
class ObjectStatic:
    """
    Static data for an object. This data won't change between frames.
    """
    def __init__(self, name: str, object_id: int, mass: float, segmentation_color: np.array, size: np.array,
                 category: str, kinematic: bool, dynamic_friction: float, static_friction: float, bounciness: float):
        """
        :param name: The name of the object.
        :param object_id: The unique ID of the object.
        :param mass: The mass of the object.
        :param segmentation_color: The segmentation color of the object.
        :param size: The size of the object.
        :param dynamic_friction: The dynamic friction of the object.
        :param static_friction: The static friction of the object.
        :param bounciness: The bounciness of the object.
        :param kinematic: If True, this object is kinematic, and won't respond to physics.
        """
        """:field
        The unique ID of the object.
        """
        self.object_id: int = object_id
        """:field
        [The name of the model.](https://github.com/threedworld-mit/tdw/blob/master/Documentation/python/librarian/model_librarian.md)
        """
        self.name: str = name.lower()
        """:field
        The semantic category of the object.
        """
        self.category: str = category
        """:field
        If True, this object is kinematic, and won't respond to physics.
        """
        self.kinematic: bool = kinematic
        """:field
        The RGB segmentation color for the object as a numpy array: `[r, g, b]`
        """
        self.segmentation_color: np.array = segmentation_color
        """:field
        The mass of the object.
        """
        self.mass: float = mass
        """:field
        The size of the object as a numpy array: `[width, height, length]`
        """
        self.size: np.array = size
        """:field
        The dynamic friction of the object.
        """
        self.dynamic_friction: float = dynamic_friction
        """:field
        The static friction of the object.
        """
        self.static_friction: float = static_friction
        """:field
        The bounciness of the object.
        """
        self.bounciness: float = bounciness
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Initial schema for the full_inventory app: warehouses (Almacen), clients, events,
    # products, and the link tables relating events to clients and materials.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Almacen',
            fields=[
                ('idalmacen', models.AutoField(primary_key=True, serialize=False)),
                ('almacen', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='ClasificacionEvento',
            fields=[
                ('idClasificacion_evento', models.AutoField(primary_key=True, serialize=False)),
                ('Clasificacion_evento', models.CharField(max_length=45)),
            ],
        ),
        migrations.CreateModel(
            name='Cliente',
            fields=[
                ('idCliente', models.AutoField(primary_key=True, serialize=False)),
                ('Nombre_cliente', models.CharField(max_length=45)),
                ('Apellido_cliente', models.CharField(max_length=45)),
                ('Telefono_cliente', models.CharField(max_length=45)),
                ('Correo_electronico', models.CharField(max_length=45)),
                ('Otros_datos_cliente', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='DetalleEvento',
            fields=[
                ('idDetalle_evento', models.AutoField(primary_key=True, serialize=False)),
                ('FechaInicio', models.DateField(default=django.utils.timezone.now)),
                ('FechaFin', models.DateField()),
                ('ClasificacionEvento_idClasificacionEvento', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='full_inventory.ClasificacionEvento')),
            ],
        ),
        migrations.CreateModel(
            name='Evento',
            fields=[
                ('idEvento', models.AutoField(primary_key=True, serialize=False)),
                ('NombreEvento', models.CharField(max_length=45)),
                ('CodigoEvento', models.CharField(max_length=45)),
            ],
        ),
        migrations.CreateModel(
            name='EventoCliente',
            fields=[
                ('idEventoCliente', models.AutoField(primary_key=True, serialize=False)),
                ('ClienteIdCliente', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='full_inventory.Cliente')),
                ('EventoIdEvento', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='full_inventory.Evento')),
            ],
        ),
        migrations.CreateModel(
            name='EventoMaterial',
            fields=[
                ('idEventoMaterial', models.AutoField(primary_key=True, serialize=False)),
                ('EventoIdEvento', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='full_inventory.Evento')),
            ],
        ),
        migrations.CreateModel(
            name='Pallet_Type',
            fields=[
                ('idpallet_type', models.AutoField(primary_key=True, serialize=False)),
                ('pallet_type', models.CharField(max_length=45)),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('idProduct', models.AutoField(primary_key=True, serialize=False)),
                ('material', models.IntegerField()),
                ('description', models.TextField()),
                ('ple', models.IntegerField()),
                ('cajas', models.IntegerField()),
                ('unidades', models.FloatField()),
                ('expiration_date', models.DateField()),
                ('fecha_em', models.DateField(auto_now_add=True)),
                ('bar_code', models.BigIntegerField()),
                ('cant_ideal', models.BigIntegerField()),
                ('almacen_idalmacen', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='full_inventory.Almacen')),
                ('pallet_type_idpallet_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='full_inventory.Pallet_Type')),
            ],
        ),
        migrations.CreateModel(
            name='Ubication',
            fields=[
                ('idubication', models.AutoField(primary_key=True, serialize=False)),
                ('ubication', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='Unity',
            fields=[
                ('idunity', models.AutoField(primary_key=True, serialize=False)),
                ('unity', models.CharField(max_length=45)),
            ],
        ),
        # ForeignKeys added after the CreateModel operations so their target models already exist.
        migrations.AddField(
            model_name='product',
            name='ubication_idubication',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='full_inventory.Ubication'),
        ),
        migrations.AddField(
            model_name='product',
            name='unidad_idunidad',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='full_inventory.Unity'),
        ),
        migrations.AddField(
            model_name='eventomaterial',
            name='MaterialIdMaterial',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='full_inventory.Product'),
        ),
        migrations.AddField(
            model_name='detalleevento',
            name='Evento_idEvento',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='full_inventory.Evento'),
        ),
    ]
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Almacen',
fields=[
('idalmacen', models.AutoField(primary_key=True, serialize=False)),
('almacen', models.TextField()),
],
),
migrations.CreateModel(
name='ClasificacionEvento',
fields=[
('idClasificacion_evento', models.AutoField(primary_key=True, serialize=False)),
('Clasificacion_evento', models.CharField(max_length=45)),
],
),
migrations.CreateModel(
name='Cliente',
fields=[
('idCliente', models.AutoField(primary_key=True, serialize=False)),
('Nombre_cliente', models.CharField(max_length=45)),
('Apellido_cliente', models.CharField(max_length=45)),
('Telefono_cliente', models.CharField(max_length=45)),
('Correo_electronico', models.CharField(max_length=45)),
('Otros_datos_cliente', models.TextField()),
],
),
migrations.CreateModel(
name='DetalleEvento',
fields=[
('idDetalle_evento', models.AutoField(primary_key=True, serialize=False)),
('FechaInicio', models.DateField(default=django.utils.timezone.now)),
('FechaFin', models.DateField()),
('ClasificacionEvento_idClasificacionEvento', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='full_inventory.ClasificacionEvento')),
],
),
migrations.CreateModel(
name='Evento',
fields=[
('idEvento', models.AutoField(primary_key=True, serialize=False)),
('NombreEvento', models.CharField(max_length=45)),
('CodigoEvento', models.CharField(max_length=45)),
],
),
migrations.CreateModel(
name='EventoCliente',
fields=[
('idEventoCliente', models.AutoField(primary_key=True, serialize=False)),
('ClienteIdCliente', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='full_inventory.Cliente')),
('EventoIdEvento', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='full_inventory.Evento')),
],
),
migrations.CreateModel(
name='EventoMaterial',
fields=[
('idEventoMaterial', models.AutoField(primary_key=True, serialize=False)),
('EventoIdEvento', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='full_inventory.Evento')),
],
),
migrations.CreateModel(
name='Pallet_Type',
fields=[
('idpallet_type', models.AutoField(primary_key=True, serialize=False)),
('pallet_type', models.CharField(max_length=45)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('idProduct', models.AutoField(primary_key=True, serialize=False)),
('material', models.IntegerField()),
('description', models.TextField()),
('ple', models.IntegerField()),
('cajas', models.IntegerField()),
('unidades', models.FloatField()),
('expiration_date', models.DateField()),
('fecha_em', models.DateField(auto_now_add=True)),
('bar_code', models.BigIntegerField()),
('cant_ideal', models.BigIntegerField()),
('almacen_idalmacen', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='full_inventory.Almacen')),
('pallet_type_idpallet_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='full_inventory.Pallet_Type')),
],
),
migrations.CreateModel(
name='Ubication',
fields=[
('idubication', models.AutoField(primary_key=True, serialize=False)),
('ubication', models.TextField()),
],
),
migrations.CreateModel(
name='Unity',
fields=[
('idunity', models.AutoField(primary_key=True, serialize=False)),
('unity', models.CharField(max_length=45)),
],
),
migrations.AddField(
model_name='product',
name='ubication_idubication',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='full_inventory.Ubication'),
),
migrations.AddField(
model_name='product',
name='unidad_idunidad',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='full_inventory.Unity'),
),
migrations.AddField(
model_name='eventomaterial',
name='MaterialIdMaterial',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='full_inventory.Product'),
),
migrations.AddField(
model_name='detalleevento',
name='Evento_idEvento',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='full_inventory.Evento'),
),
] | 0.526099 | 0.139484 |
import chainer
from chainer import Variable
import chainer.functions as F
import numpy as np
import copy
from losses import loss_fun
from transport_costs import cost_fun
from attacks import wrm_attack
def get_batch(iterator, xp):
    """Fetch the next minibatch from ``iterator`` and wrap it as ``Variable`` pairs on backend ``xp``.

    Nested list/tuple examples are flattened so every sub-example becomes its own row.
    """
    raw = iterator.next()
    inputs = []
    labels = []
    for example in raw:
        data, target = example[0], example[1]
        if isinstance(data, (list, tuple)):
            # Grouped example: data and target are parallel sequences of sub-examples.
            for k in range(len(data)):
                inputs.append(np.asarray(data[k]).astype("f"))
                labels.append(np.asarray(target[k]).astype(np.int32))
        else:
            inputs.append(np.asarray(data).astype("f"))
            labels.append(np.asarray(target).astype(np.int32))
    return Variable(xp.asarray(inputs)), Variable(xp.asarray(labels, dtype=xp.int32))
def validation_loss_and_acc(cls, iterator, loss_type=None, n=50000):
    """
    Builds a chainer trainer extension that reports mean validation loss and accuracy over n examples.

    :param cls: the classifier link; called as cls(x) to obtain logits, cls.xp selects the backend.
    :param iterator: dataset iterator over the validation set; reset on every invocation.
    :param loss_type: forwarded to loss_fun.
    :param n: number of validation examples to evaluate per invocation.
    """
    @chainer.training.make_extension()
    def _evaluate(trainer):
        iterator.reset()
        losses = []
        accs = []
        for i in range(0, n, iterator.batch_size):
            x, y = get_batch(iterator, cls.xp)
            # Inference mode: deterministic forward pass, no computational graph kept.
            with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
                logit = cls(x)
                loss = loss_fun(logit, y, loss_type)
                acc = F.accuracy(logit, y)
            losses.append(chainer.cuda.to_cpu(loss.array))
            accs.append(chainer.cuda.to_cpu(acc.array))
        # Report the mean over all evaluated batches.
        chainer.reporter.report({
            'val_loss': np.mean(np.asarray(losses)),
            'val_acc': np.mean(np.asarray(accs))
        })
    return _evaluate
def adversarial_validation_loss_and_acc(cls, iterator, steps=5, gamma=1.0, alpha=1.0,
                                        loss_type=None, c_type=None,
                                        clip_x=False, n=50000):
    """
    Builds a chainer trainer extension that reports loss/accuracy on WRM adversarial examples,
    plus the surrogate objective phi = loss - gamma * cost.

    :param cls: the classifier link; cls(x) yields logits.
    :param iterator: validation iterator; reset on every invocation.
    :param steps: number of attack iterations forwarded to wrm_attack.
    :param gamma: weight of the transport cost in the surrogate objective.
    :param alpha: attack parameter forwarded to wrm_attack.
    :param loss_type: forwarded to loss_fun.
    :param c_type: transport cost type forwarded to cost_fun / wrm_attack.
    :param clip_x: whether wrm_attack clips the perturbed input.
    :param n: number of validation examples to evaluate per invocation.
    """
    @chainer.training.make_extension()
    def _evaluate(trainer):
        iterator.reset()
        phis = []
        losses = []
        accs = []
        # Intermediate phi values returned by wrm_attack (one series per tracked attack step).
        phis_0 = []
        phis_1 = []
        phis_2 = []
        # train=False for deterministic inference; backprop is NOT disabled here —
        # presumably wrm_attack needs input gradients to craft x_adv (confirm).
        with chainer.using_config('train', False):
            for _ in range(0, n, iterator.batch_size):
                x, y = get_batch(iterator, cls.xp)
                x_adv, _phis = wrm_attack(cls=cls, x=x, y=y, gamma=gamma, steps=steps,
                                          loss_type=loss_type, c_type=c_type, alpha=alpha, clip_x=clip_x, return_phis=True)
                logit = cls(x_adv)
                loss = loss_fun(logit, y, loss_type, reduce='mean')
                cost = cost_fun(x1=x_adv, y1=y, x2=x, y2=y, type=c_type, reduce='mean')
                # Lagrangian surrogate of the distributionally robust objective.
                phi = loss - gamma * cost
                acc = F.accuracy(logit, y)
                phis_0.append(chainer.cuda.to_cpu(_phis[0]))
                phis_1.append(chainer.cuda.to_cpu(_phis[1]))
                phis_2.append(chainer.cuda.to_cpu(_phis[2]))
                phis.append(chainer.cuda.to_cpu(phi.array))
                losses.append(chainer.cuda.to_cpu(loss.array))
                accs.append(chainer.cuda.to_cpu(acc.array))
        chainer.reporter.report({
            'val_phi': np.mean(np.asarray(phis)),
            'val_phi_0': np.mean(np.asarray(phis_0)),
            'val_phi_1': np.mean(np.asarray(phis_1)),
            'val_phi_2': np.mean(np.asarray(phis_2)),
            'adv_val_loss': np.mean(np.asarray(losses)),
            'adv_val_acc': np.mean(np.asarray(accs))
        })
    return _evaluate
from chainer import Variable
import chainer.functions as F
import numpy as np
import copy
from losses import loss_fun
from transport_costs import cost_fun
from attacks import wrm_attack
def get_batch(iterator, xp):
batch = iterator.next()
batchsize = len(batch)
x = []
y = []
for j in range(batchsize):
_x = batch[j][0]
_y = batch[j][1]
if isinstance(_x, (list, tuple)):
for k in range(len(_x)):
x.append(np.asarray(_x[k]).astype("f"))
y.append(np.asarray(_y[k]).astype(np.int32))
else:
x.append(np.asarray(batch[j][0]).astype("f"))
y.append(np.asarray(batch[j][1]).astype(np.int32))
x = xp.asarray(x)
y = xp.asarray(y, dtype=xp.int32)
return Variable(x), Variable(y)
def validation_loss_and_acc(cls, iterator, loss_type=None, n=50000):
@chainer.training.make_extension()
def _evaluate(trainer):
iterator.reset()
losses = []
accs = []
for i in range(0, n, iterator.batch_size):
x, y = get_batch(iterator, cls.xp)
with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
logit = cls(x)
loss = loss_fun(logit, y, loss_type)
acc = F.accuracy(logit, y)
losses.append(chainer.cuda.to_cpu(loss.array))
accs.append(chainer.cuda.to_cpu(acc.array))
chainer.reporter.report({
'val_loss': np.mean(np.asarray(losses)),
'val_acc': np.mean(np.asarray(accs))
})
return _evaluate
def adversarial_validation_loss_and_acc(cls, iterator, steps=5, gamma=1.0, alpha=1.0,
loss_type=None, c_type=None,
clip_x=False, n=50000):
@chainer.training.make_extension()
def _evaluate(trainer):
iterator.reset()
phis = []
losses = []
accs = []
phis_0 = []
phis_1 = []
phis_2 = []
with chainer.using_config('train', False):
for _ in range(0, n, iterator.batch_size):
x, y = get_batch(iterator, cls.xp)
x_adv, _phis = wrm_attack(cls=cls, x=x, y=y, gamma=gamma, steps=steps,
loss_type=loss_type, c_type=c_type, alpha=alpha, clip_x=clip_x, return_phis=True)
logit = cls(x_adv)
loss = loss_fun(logit, y, loss_type, reduce='mean')
cost = cost_fun(x1=x_adv, y1=y, x2=x, y2=y, type=c_type, reduce='mean')
phi = loss - gamma * cost
acc = F.accuracy(logit, y)
phis_0.append(chainer.cuda.to_cpu(_phis[0]))
phis_1.append(chainer.cuda.to_cpu(_phis[1]))
phis_2.append(chainer.cuda.to_cpu(_phis[2]))
phis.append(chainer.cuda.to_cpu(phi.array))
losses.append(chainer.cuda.to_cpu(loss.array))
accs.append(chainer.cuda.to_cpu(acc.array))
chainer.reporter.report({
'val_phi': np.mean(np.asarray(phis)),
'val_phi_0': np.mean(np.asarray(phis_0)),
'val_phi_1': np.mean(np.asarray(phis_1)),
'val_phi_2': np.mean(np.asarray(phis_2)),
'adv_val_loss': np.mean(np.asarray(losses)),
'adv_val_acc': np.mean(np.asarray(accs))
})
return _evaluate | 0.4856 | 0.228415 |
import os
import buildbot
import buildbot.process.factory
from buildbot.steps.source import SVN
from buildbot.steps.shell import ShellCommand, SetProperty
from buildbot.steps.slave import RemoveDirectory
from buildbot.process.properties import WithProperties, Property
from zorg.buildbot.builders.Util import getVisualStudioEnvironment
from zorg.buildbot.builders.Util import extractSlaveEnvironment
from zorg.buildbot.commands.CmakeCommand import CmakeCommand
from zorg.buildbot.commands.NinjaCommand import NinjaCommand
from zorg.buildbot.conditions.FileConditions import FileDoesNotExist
from zorg.buildbot.process.factory import LLVMBuildFactory
def getLLDBuildFactory(
clean = True,
jobs = None,
extra_configure_args = None,
env = None):
# Set defaults
if jobs is None:
jobs = "%(jobs)s"
if extra_configure_args is None:
extra_configure_args = []
# Prepare environmental variables. Set here all env we want everywhere.
merged_env = {
'CC' : "clang",
'CXX' : "clang++",
'TERM' : 'dumb' # Be cautious and disable color output from all tools.
}
if env is not None:
# Overwrite pre-set items with the given ones, so user can set anything.
merged_env.update(env)
f = LLVMBuildFactory(
depends_on_projects=['llvm', 'lld'],
llvm_srcdir="llvm.src",
llvm_objdir="llvm.obj")
# Get LLVM and Lld
f.addSVNSteps()
# Clean directory, if requested.
cleanBuildRequested = lambda step: step.build.getProperty("clean") or clean
f.addStep(RemoveDirectory(name='clean ' + f.llvm_objdir,
dir=f.llvm_objdir,
haltOnFailure=False,
flunkOnFailure=False,
doStepIf=cleanBuildRequested
))
# Create configuration files with cmake
f.addStep(CmakeCommand(name="cmake-configure",
description=["cmake configure"],
haltOnFailure=True,
options=extra_configure_args,
path="../%s" % f.llvm_srcdir,
env=merged_env,
workdir=f.llvm_objdir,
doStepIf=FileDoesNotExist(
"./%s/CMakeCache.txt" % f.llvm_objdir)))
# Build Lld
f.addStep(ShellCommand(name="build_Lld",
command=['nice', '-n', '10',
'make', WithProperties("-j%s" % jobs)],
haltOnFailure=True,
description=["build lld"],
env=merged_env,
workdir=f.llvm_objdir))
# Test Lld
f.addStep(ShellCommand(name="test_lld",
command=["make", "lld-test"],
haltOnFailure=True,
description=["test lld"],
env=merged_env,
workdir=f.llvm_objdir))
return f
def getLLDWinBuildFactory(
clean = True,
# Default values for VS devenv and build configuration
vs = None, # What to run to configure Visual Studio utils.
target_arch = None, # Native.
extra_configure_args = None,
env = None):
# Set defaults
if vs is None:
vs = r"""%VS140COMNTOOLS%""" # Visual Studio 2015.
if extra_configure_args is None:
extra_configure_args = []
if env is None:
env = {}
f = LLVMBuildFactory(
depends_on_projects=['llvm', 'lld'],
llvm_srcdir="llvm.src",
llvm_objdir="llvm.obj")
# Get LLVM and Lld
f.addSVNSteps()
# Clean directory, if requested.
cleanBuildRequested = lambda step: step.build.getProperty("clean") or clean
f.addStep(RemoveDirectory(name='clean ' + f.llvm_objdir,
dir=f.llvm_objdir,
haltOnFailure=False,
flunkOnFailure=False,
doStepIf=cleanBuildRequested
))
# If set up environment step is requested, do this now.
if vs:
f.addStep(SetProperty(
command=getVisualStudioEnvironment(vs, target_arch),
extract_fn=extractSlaveEnvironment))
assert not env, "Can't have custom builder env vars with VS"
env = Property('slave_env')
# Always build with ninja.
cmake_options = ["-G", "Ninja"]
# Reconsile configure args with the defaults we want.
if not any(a.startswith('-DCMAKE_BUILD_TYPE=') for a in extra_configure_args):
cmake_options.append('-DCMAKE_BUILD_TYPE=Release')
if not any(a.startswith('-DLLVM_ENABLE_WERROR=') for a in extra_configure_args):
cmake_options.append('-DLLVM_ENABLE_WERROR=ON')
if not any(a.startswith('-DLLVM_ENABLE_ASSERTIONS=') for a in extra_configure_args):
cmake_options.append('-DLLVM_ENABLE_ASSERTIONS=ON')
if not any(a.startswith('-DLLVM_LIT_ARGS=') for a in extra_configure_args):
cmake_options.append('-DLLVM_LIT_ARGS=\"-v\"')
cmake_options += extra_configure_args
# Note: ShellCommand does not pass the params with special symbols right.
# The " ".join is a workaround for this bug.
f.addStep(CmakeCommand(name="cmake-configure",
description=["cmake configure"],
haltOnFailure=True,
warnOnWarnings=True,
options=cmake_options,
path="../%s" % f.llvm_srcdir,
env=env,
workdir=f.llvm_objdir,
doStepIf=FileDoesNotExist(
"./%s/CMakeCache.txt" % f.llvm_objdir)))
# Build Lld.
f.addStep(NinjaCommand(name='build lld',
haltOnFailure=True,
warnOnWarnings=True,
description='build lld',
workdir=f.llvm_objdir,
env=env))
# Test Lld
f.addStep(NinjaCommand(name='test lld',
targets=['lld-test'],
haltOnFailure=True,
warnOnWarnings=True,
description='test lld',
workdir=f.llvm_objdir,
env=env))
return f | zorg/buildbot/builders/LLDBuilder.py | import os
import buildbot
import buildbot.process.factory
from buildbot.steps.source import SVN
from buildbot.steps.shell import ShellCommand, SetProperty
from buildbot.steps.slave import RemoveDirectory
from buildbot.process.properties import WithProperties, Property
from zorg.buildbot.builders.Util import getVisualStudioEnvironment
from zorg.buildbot.builders.Util import extractSlaveEnvironment
from zorg.buildbot.commands.CmakeCommand import CmakeCommand
from zorg.buildbot.commands.NinjaCommand import NinjaCommand
from zorg.buildbot.conditions.FileConditions import FileDoesNotExist
from zorg.buildbot.process.factory import LLVMBuildFactory
def getLLDBuildFactory(
clean = True,
jobs = None,
extra_configure_args = None,
env = None):
# Set defaults
if jobs is None:
jobs = "%(jobs)s"
if extra_configure_args is None:
extra_configure_args = []
# Prepare environmental variables. Set here all env we want everywhere.
merged_env = {
'CC' : "clang",
'CXX' : "clang++",
'TERM' : 'dumb' # Be cautious and disable color output from all tools.
}
if env is not None:
# Overwrite pre-set items with the given ones, so user can set anything.
merged_env.update(env)
f = LLVMBuildFactory(
depends_on_projects=['llvm', 'lld'],
llvm_srcdir="llvm.src",
llvm_objdir="llvm.obj")
# Get LLVM and Lld
f.addSVNSteps()
# Clean directory, if requested.
cleanBuildRequested = lambda step: step.build.getProperty("clean") or clean
f.addStep(RemoveDirectory(name='clean ' + f.llvm_objdir,
dir=f.llvm_objdir,
haltOnFailure=False,
flunkOnFailure=False,
doStepIf=cleanBuildRequested
))
# Create configuration files with cmake
f.addStep(CmakeCommand(name="cmake-configure",
description=["cmake configure"],
haltOnFailure=True,
options=extra_configure_args,
path="../%s" % f.llvm_srcdir,
env=merged_env,
workdir=f.llvm_objdir,
doStepIf=FileDoesNotExist(
"./%s/CMakeCache.txt" % f.llvm_objdir)))
# Build Lld
f.addStep(ShellCommand(name="build_Lld",
command=['nice', '-n', '10',
'make', WithProperties("-j%s" % jobs)],
haltOnFailure=True,
description=["build lld"],
env=merged_env,
workdir=f.llvm_objdir))
# Test Lld
f.addStep(ShellCommand(name="test_lld",
command=["make", "lld-test"],
haltOnFailure=True,
description=["test lld"],
env=merged_env,
workdir=f.llvm_objdir))
return f
def getLLDWinBuildFactory(
clean = True,
# Default values for VS devenv and build configuration
vs = None, # What to run to configure Visual Studio utils.
target_arch = None, # Native.
extra_configure_args = None,
env = None):
# Set defaults
if vs is None:
vs = r"""%VS140COMNTOOLS%""" # Visual Studio 2015.
if extra_configure_args is None:
extra_configure_args = []
if env is None:
env = {}
f = LLVMBuildFactory(
depends_on_projects=['llvm', 'lld'],
llvm_srcdir="llvm.src",
llvm_objdir="llvm.obj")
# Get LLVM and Lld
f.addSVNSteps()
# Clean directory, if requested.
cleanBuildRequested = lambda step: step.build.getProperty("clean") or clean
f.addStep(RemoveDirectory(name='clean ' + f.llvm_objdir,
dir=f.llvm_objdir,
haltOnFailure=False,
flunkOnFailure=False,
doStepIf=cleanBuildRequested
))
# If set up environment step is requested, do this now.
if vs:
f.addStep(SetProperty(
command=getVisualStudioEnvironment(vs, target_arch),
extract_fn=extractSlaveEnvironment))
assert not env, "Can't have custom builder env vars with VS"
env = Property('slave_env')
# Always build with ninja.
cmake_options = ["-G", "Ninja"]
# Reconsile configure args with the defaults we want.
if not any(a.startswith('-DCMAKE_BUILD_TYPE=') for a in extra_configure_args):
cmake_options.append('-DCMAKE_BUILD_TYPE=Release')
if not any(a.startswith('-DLLVM_ENABLE_WERROR=') for a in extra_configure_args):
cmake_options.append('-DLLVM_ENABLE_WERROR=ON')
if not any(a.startswith('-DLLVM_ENABLE_ASSERTIONS=') for a in extra_configure_args):
cmake_options.append('-DLLVM_ENABLE_ASSERTIONS=ON')
if not any(a.startswith('-DLLVM_LIT_ARGS=') for a in extra_configure_args):
cmake_options.append('-DLLVM_LIT_ARGS=\"-v\"')
cmake_options += extra_configure_args
# Note: ShellCommand does not pass the params with special symbols right.
# The " ".join is a workaround for this bug.
f.addStep(CmakeCommand(name="cmake-configure",
description=["cmake configure"],
haltOnFailure=True,
warnOnWarnings=True,
options=cmake_options,
path="../%s" % f.llvm_srcdir,
env=env,
workdir=f.llvm_objdir,
doStepIf=FileDoesNotExist(
"./%s/CMakeCache.txt" % f.llvm_objdir)))
# Build Lld.
f.addStep(NinjaCommand(name='build lld',
haltOnFailure=True,
warnOnWarnings=True,
description='build lld',
workdir=f.llvm_objdir,
env=env))
# Test Lld
f.addStep(NinjaCommand(name='test lld',
targets=['lld-test'],
haltOnFailure=True,
warnOnWarnings=True,
description='test lld',
workdir=f.llvm_objdir,
env=env))
return f | 0.356671 | 0.092565 |
# Make coding more python3-ish
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch
from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import (
set_module_args,
ModuleTestCase,
AnsibleExitJson,
AnsibleFailJson,
)
from ansible_collections.community.dns.plugins.modules import wait_for_txt
from ..module_utils.resolver_helper import (
mock_resolver,
mock_query_udp,
create_mock_answer,
create_mock_response,
)
# We need dnspython
dns = pytest.importorskip('dns')
def mock_sleep(delay):
    """No-op stand-in for time.sleep() so the retry loops run instantly."""
    return None
def mock_monotonic(call_sequence):
    """Create a ``time.monotonic()`` replacement returning canned timestamps.

    ``call_sequence`` is a list of values handed out front-to-back, one per
    call.  The list is mutated in place, so the caller can check afterwards
    that every expected call actually happened.  Calling the returned function
    more often than values were provided raises AssertionError (fine here:
    this is test scaffolding, not production input validation).
    """
    def f():
        assert call_sequence, 'monotonic() was called more often than expected'
        # pop(0) consumes the next canned value in place, equivalent to the
        # read-then-del pair but in one idiomatic step.
        return call_sequence.pop(0)
    return f
class TestWaitForTXT(ModuleTestCase):
    def test_single(self):
        """One record for www.example.com which (per the mocked UDP answers)
        is a CNAME to example.org served by two nameservers; both mocked
        nameservers already return the wanted TXT value 'asdf', so the module
        finishes after a single check."""
        # Canned dns.resolver answers, keyed by the nameserver IP they are
        # asked through: 1.1.1.1 resolves the NS host names to their A
        # records; each authoritative IP then answers the TXT query.
        resolver = mock_resolver(['1.1.1.1'], {
            ('1.1.1.1', ): [
                {
                    'target': 'ns.example.com',
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'ns.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.168.127.12'),
                    )),
                },
                {
                    'target': 'ns.example.org',
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'ns.example.org',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '172.16.17.32'),
                    )),
                },
            ],
            ('192.168.127.12', ): [
                {
                    'target': dns.name.from_unicode(u'example.org'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'example.org',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'asdf'),
                    )),
                },
            ],
            ('172.16.17.32', ): [
                {
                    'target': dns.name.from_unicode(u'example.org'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'example.org',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'asdf'),
                    )),
                },
            ],
        })
        # Canned raw UDP responses for the NS walk down the tree: com ->
        # example.com -> www.example.com (whose answer carries both an SOA
        # and a CNAME to example.org), then org -> example.org (two NS).
        # Order matters; mock_query_udp consumes them sequentially.
        udp_sequence = [
            {
                'query_target': dns.name.from_unicode(u'com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.com'),
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.example.com'),
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'www.example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'www.example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA, 'ns.example.com. ns.example.com. 12345 7200 120 2419200 10800'),
                ), dns.rrset.from_rdata(
                    'www.example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.CNAME, 'example.org')
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'org'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'org',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.org'),
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'example.org'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'example.org',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.example.org'),
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.example.com'),
                )]),
            },
        ]
        # Patch dnspython and time.sleep so the module runs against the mocks
        # only; the module exits via AnsibleExitJson on success.
        with patch('dns.resolver.get_default_resolver', resolver):
            with patch('dns.resolver.Resolver', resolver):
                with patch('dns.query.udp', mock_query_udp(udp_sequence)):
                    with patch('time.sleep', mock_sleep):
                        with pytest.raises(AnsibleExitJson) as exc:
                            set_module_args({
                                'records': [
                                    {
                                        'name': 'www.example.com',
                                        'values': [
                                            'asdf',
                                        ]
                                    },
                                ],
                            })
                            wait_for_txt.main()
        print(exc.value.args[0])
        # Both authoritative servers returned 'asdf' on the first poll, so
        # exactly one check was needed.
        assert exc.value.args[0]['changed'] is False
        assert exc.value.args[0]['completed'] == 1
        assert len(exc.value.args[0]['records']) == 1
        assert exc.value.args[0]['records'][0]['name'] == 'www.example.com'
        assert exc.value.args[0]['records'][0]['done'] is True
        assert exc.value.args[0]['records'][0]['values'] == {
            'ns.example.com': ['asdf'],
            'ns.example.org': ['asdf'],
        }
        assert exc.value.args[0]['records'][0]['check_count'] == 1
    def test_double(self):
        """Two records checked concurrently with different modes: the
        'equals' record needs three polls (the mocked TXT answers converge to
        exactly ['asdf'] on the third), while the 'superset' record is
        satisfied on the first poll."""
        # Canned resolver answers: one nameserver A record, then the
        # sequence of TXT answers 192.168.127.12 hands out (consumed in
        # order across the module's retries).
        resolver = mock_resolver(['1.1.1.1'], {
            ('1.1.1.1', ): [
                {
                    'target': 'ns.example.com',
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'ns.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.168.127.12'),
                    )),
                },
            ],
            ('192.168.127.12', ): [
                {
                    'target': dns.name.from_unicode(u'www.example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'www.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'fdsa'),
                    )),
                },
                {
                    'target': dns.name.from_unicode(u'mail.example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'mail.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, '"any bar"'),
                    )),
                },
                {
                    'target': dns.name.from_unicode(u'www.example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'www.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'fdsa'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'asdf'),
                    )),
                },
                {
                    'target': dns.name.from_unicode(u'www.example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'www.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'asdf'),
                    )),
                },
            ],
        })
        # NS walk: com -> example.com; the two leaf names only return an SOA
        # in the authority section (no delegation below example.com).
        udp_sequence = [
            {
                'query_target': dns.name.from_unicode(u'com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.com'),
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.example.com'),
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'www.example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, authority=[dns.rrset.from_rdata(
                    'www.example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA, 'ns.example.com. ns.example.com. 12345 7200 120 2419200 10800'),
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'mail.example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, authority=[dns.rrset.from_rdata(
                    'mail.example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA, 'ns.example.com. ns.example.com. 12345 7200 120 2419200 10800'),
                )]),
            },
        ]
        with patch('dns.resolver.get_default_resolver', resolver):
            with patch('dns.resolver.Resolver', resolver):
                with patch('dns.query.udp', mock_query_udp(udp_sequence)):
                    with patch('time.sleep', mock_sleep):
                        with pytest.raises(AnsibleExitJson) as exc:
                            set_module_args({
                                'records': [
                                    {
                                        'name': 'www.example.com',
                                        'values': [
                                            'asdf',
                                        ],
                                        'mode': 'equals',
                                    },
                                    {
                                        'name': 'mail.example.com',
                                        'values': [
                                            'foo bar',
                                            'any bar',
                                        ],
                                        'mode': 'superset',
                                    },
                                ],
                                'timeout': 10,
                            })
                            wait_for_txt.main()
        print(exc.value.args[0])
        assert exc.value.args[0]['changed'] is False
        assert exc.value.args[0]['completed'] == 2
        assert len(exc.value.args[0]['records']) == 2
        # 'equals' only matches once the answer is exactly ['asdf'] -> third
        # www poll.
        assert exc.value.args[0]['records'][0]['name'] == 'www.example.com'
        assert exc.value.args[0]['records'][0]['done'] is True
        assert exc.value.args[0]['records'][0]['values'] == {
            'ns.example.com': ['asdf'],
        }
        assert exc.value.args[0]['records'][0]['check_count'] == 3
        # 'superset': ['any bar'] is a subset of the wanted values, so the
        # first mail poll already suffices.
        assert exc.value.args[0]['records'][1]['name'] == 'mail.example.com'
        assert exc.value.args[0]['records'][1]['done'] is True
        assert exc.value.args[0]['records'][1]['values'] == {
            'ns.example.com': ['any bar'],
        }
        assert exc.value.args[0]['records'][1]['check_count'] == 1
    def test_subset(self):
        """'subset' mode: waits until all wanted values appear in the answer
        (extra values are allowed).  The mocked TXT answers only contain the
        full wanted set on the third poll."""
        # NOTE(review): the unquoted rdata 'as df' appears to yield the
        # joined value 'asdf' in the module result below -- presumably
        # multiple TXT strings are concatenated; confirm against the module.
        resolver = mock_resolver(['1.1.1.1'], {
            ('1.1.1.1', ): [
                {
                    'target': 'ns.example.com',
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'ns.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.168.127.12'),
                    )),
                },
            ],
            ('192.168.127.12', ): [
                {
                    'target': dns.name.from_unicode(u'example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'as df'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, '"another one"'),
                    )),
                },
                {
                    'target': dns.name.from_unicode(u'example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, '"foo bar"'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, '"another one"'),
                    )),
                },
                {
                    'target': dns.name.from_unicode(u'example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    # Third poll finally contains both 'foo bar' and 'asdf'.
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, '"foo bar"'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, '"another one"'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'as df'),
                    )),
                },
            ],
        })
        # NS walk: com -> example.com only; example.com is the queried name.
        udp_sequence = [
            {
                'query_target': dns.name.from_unicode(u'com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.com'),
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.example.com'),
                )]),
            },
        ]
        with patch('dns.resolver.get_default_resolver', resolver):
            with patch('dns.resolver.Resolver', resolver):
                with patch('dns.query.udp', mock_query_udp(udp_sequence)):
                    with patch('time.sleep', mock_sleep):
                        with pytest.raises(AnsibleExitJson) as exc:
                            # 'asdf' listed twice: duplicates in the wanted
                            # values must not prevent completion.
                            set_module_args({
                                'records': [
                                    {
                                        'name': 'example.com',
                                        'values': [
                                            'asdf',
                                            'asdf',
                                            'foo bar',
                                        ],
                                        'mode': 'subset',
                                    },
                                ],
                            })
                            wait_for_txt.main()
        print(exc.value.args[0])
        assert exc.value.args[0]['changed'] is False
        assert exc.value.args[0]['completed'] == 1
        assert len(exc.value.args[0]['records']) == 1
        assert exc.value.args[0]['records'][0]['name'] == 'example.com'
        assert exc.value.args[0]['records'][0]['done'] is True
        assert exc.value.args[0]['records'][0]['values'] == {
            'ns.example.com': ['foo bar', 'another one', 'asdf'],
        }
        assert exc.value.args[0]['records'][0]['check_count'] == 3
    def test_superset(self):
        """'superset' mode: waits until the answer contains no values outside
        the wanted set.  Also covers a record whose TXT query returns an
        empty answer (create_mock_answer() with no rrset), which satisfies
        'superset' immediately."""
        resolver = mock_resolver(['1.1.1.1'], {
            ('1.1.1.1', ): [
                {
                    'target': 'ns.example.com',
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'ns.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.168.127.12'),
                    )),
                },
            ],
            ('192.168.127.12', ): [
                {
                    'target': dns.name.from_unicode(u'www.example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'www.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, '"bumble bee"'),
                    )),
                },
                {
                    'target': dns.name.from_unicode(u'mail.example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    # Empty answer for mail.example.com.
                    'result': create_mock_answer(),
                },
                {
                    'target': dns.name.from_unicode(u'www.example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'www.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'fdsa'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'asdf'),
                    )),
                },
                {
                    'target': dns.name.from_unicode(u'www.example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    # NOTE(review): 'asdf ""' seems to collapse to the value
                    # 'asdf' in the result below (empty TXT string appended)
                    # -- confirm against the module's value joining.
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'www.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'asdf ""'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'bee'),
                    )),
                },
            ],
        })
        # NS walk: com -> example.com; both leaf names answer with only an
        # SOA in the authority section.
        udp_sequence = [
            {
                'query_target': dns.name.from_unicode(u'com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.com'),
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.example.com'),
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'www.example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, authority=[dns.rrset.from_rdata(
                    'www.example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA, 'ns.example.com. ns.example.com. 12345 7200 120 2419200 10800'),
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'mail.example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, authority=[dns.rrset.from_rdata(
                    'mail.example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA, 'ns.example.com. ns.example.com. 12345 7200 120 2419200 10800'),
                )]),
            },
        ]
        with patch('dns.resolver.get_default_resolver', resolver):
            with patch('dns.resolver.Resolver', resolver):
                with patch('dns.query.udp', mock_query_udp(udp_sequence)):
                    with patch('time.sleep', mock_sleep):
                        with pytest.raises(AnsibleExitJson) as exc:
                            set_module_args({
                                'records': [
                                    {
                                        'name': 'www.example.com',
                                        'values': [
                                            'asdf',
                                            'bee',
                                        ],
                                        'mode': 'superset',
                                    },
                                    {
                                        'name': 'mail.example.com',
                                        'values': [
                                            'foo bar',
                                            'any bar',
                                        ],
                                        'mode': 'superset',
                                    },
                                ],
                            })
                            wait_for_txt.main()
        print(exc.value.args[0])
        assert exc.value.args[0]['changed'] is False
        assert exc.value.args[0]['completed'] == 2
        assert len(exc.value.args[0]['records']) == 2
        # www needed three polls until all returned values were inside the
        # wanted set.
        assert exc.value.args[0]['records'][0]['name'] == 'www.example.com'
        assert exc.value.args[0]['records'][0]['done'] is True
        assert exc.value.args[0]['records'][0]['values'] == {
            'ns.example.com': ['asdf', 'bee'],
        }
        assert exc.value.args[0]['records'][0]['check_count'] == 3
        # The empty answer for mail is trivially a subset of the wanted
        # values -> done after one check.
        assert exc.value.args[0]['records'][1]['name'] == 'mail.example.com'
        assert exc.value.args[0]['records'][1]['done'] is True
        assert exc.value.args[0]['records'][1]['values'] == {
            'ns.example.com': [],
        }
        assert exc.value.args[0]['records'][1]['check_count'] == 1
    def test_superset_not_empty(self):
        """'superset_not_empty' mode: like 'superset', but an empty TXT
        answer does not count as done.  The mocked answers go wrong value ->
        empty -> extra value -> exact match, requiring four polls."""
        resolver = mock_resolver(['1.1.1.1'], {
            ('1.1.1.1', ): [
                {
                    'target': 'ns.example.com',
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'ns.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.168.127.12'),
                    )),
                },
            ],
            ('192.168.127.12', ): [
                {
                    'target': dns.name.from_unicode(u'example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    # Poll 1: value outside the wanted set.
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, '"bumble bee"'),
                    )),
                },
                {
                    'target': dns.name.from_unicode(u'example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    # Poll 2: empty answer -- must NOT finish this mode.
                    'result': create_mock_answer(),
                },
                {
                    'target': dns.name.from_unicode(u'example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    # Poll 3: contains the extra value 'wizard'.
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'bumble'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'bee'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'wizard'),
                    )),
                },
                {
                    'target': dns.name.from_unicode(u'example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    # Poll 4: non-empty and within the wanted set -> done.
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'bumble'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'bee'),
                    )),
                },
            ],
        })
        # NS walk: com -> example.com.
        udp_sequence = [
            {
                'query_target': dns.name.from_unicode(u'com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.com'),
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.example.com'),
                )]),
            },
        ]
        with patch('dns.resolver.get_default_resolver', resolver):
            with patch('dns.resolver.Resolver', resolver):
                with patch('dns.query.udp', mock_query_udp(udp_sequence)):
                    with patch('time.sleep', mock_sleep):
                        with pytest.raises(AnsibleExitJson) as exc:
                            set_module_args({
                                'records': [
                                    {
                                        'name': 'example.com',
                                        'values': [
                                            'bumble',
                                            'bee',
                                        ],
                                        'mode': 'superset_not_empty',
                                    },
                                ],
                            })
                            wait_for_txt.main()
        print(exc.value.args[0])
        assert exc.value.args[0]['changed'] is False
        assert exc.value.args[0]['completed'] == 1
        assert len(exc.value.args[0]['records']) == 1
        assert exc.value.args[0]['records'][0]['name'] == 'example.com'
        assert exc.value.args[0]['records'][0]['done'] is True
        assert exc.value.args[0]['records'][0]['values'] == {
            'ns.example.com': ['bumble', 'bee'],
        }
        assert exc.value.args[0]['records'][0]['check_count'] == 4
    def test_equals(self):
        """'equals' mode: waits until the answer matches the wanted values
        exactly as a set (the result ordering differs from the requested
        ordering and still counts).  Four polls are needed: partial ->
        empty -> near miss -> exact match."""
        resolver = mock_resolver(['1.1.1.1'], {
            ('1.1.1.1', ): [
                {
                    'target': 'ns.example.com',
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'ns.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.168.127.12'),
                    )),
                },
            ],
            ('192.168.127.12', ): [
                {
                    'target': dns.name.from_unicode(u'example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    # Poll 1: only one of the three wanted values.
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, '"bumble bee"'),
                    )),
                },
                {
                    'target': dns.name.from_unicode(u'example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    # Poll 2: empty answer.
                    'result': create_mock_answer(),
                },
                {
                    'target': dns.name.from_unicode(u'example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    # Poll 3: 'foo' missing; note 'bumble bee' here is
                    # unquoted (two TXT strings) unlike polls 1 and 4.
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'bumble bee'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'wizard'),
                    )),
                },
                {
                    'target': dns.name.from_unicode(u'example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    # Poll 4: exact match of all three wanted values.
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, '"bumble bee"'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'wizard'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'foo'),
                    )),
                },
            ],
        })
        # NS walk: com -> example.com.
        udp_sequence = [
            {
                'query_target': dns.name.from_unicode(u'com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.com'),
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.example.com'),
                )]),
            },
        ]
        with patch('dns.resolver.get_default_resolver', resolver):
            with patch('dns.resolver.Resolver', resolver):
                with patch('dns.query.udp', mock_query_udp(udp_sequence)):
                    with patch('time.sleep', mock_sleep):
                        with pytest.raises(AnsibleExitJson) as exc:
                            set_module_args({
                                'records': [
                                    {
                                        'name': 'example.com',
                                        'values': [
                                            'foo',
                                            'bumble bee',
                                            'wizard',
                                        ],
                                        'mode': 'equals',
                                    },
                                ],
                            })
                            wait_for_txt.main()
        print(exc.value.args[0])
        assert exc.value.args[0]['changed'] is False
        assert exc.value.args[0]['completed'] == 1
        assert len(exc.value.args[0]['records']) == 1
        assert exc.value.args[0]['records'][0]['name'] == 'example.com'
        assert exc.value.args[0]['records'][0]['done'] is True
        assert exc.value.args[0]['records'][0]['values'] == {
            'ns.example.com': ['bumble bee', 'wizard', 'foo'],
        }
        assert exc.value.args[0]['records'][0]['check_count'] == 4
    def test_equals_ordered(self):
        """Poll TXT records with mode=equals_ordered until values match in the given order.

        The mocked TXT answers progress: (1) only '"bumble bee"', (2) empty,
        (3) all three values but in the wrong order, (4) the exact expected
        order — so the module succeeds on the fourth check (check_count == 4).
        """
        # TXT lookups are served from the authoritative nameserver's IP
        # (192.168.127.12), which is itself resolved via the ('1.1.1.1',) entry.
        resolver = mock_resolver(['1.1.1.1'], {
            ('1.1.1.1', ): [
                {
                    'target': 'ns.example.com',
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'ns.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.168.127.12'),
                    )),
                },
            ],
            ('192.168.127.12', ): [
                # Check 1: only one of the three expected values present.
                {
                    'target': dns.name.from_unicode(u'example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, '"bumble bee"'),
                    )),
                },
                # Check 2: empty answer.
                {
                    'target': dns.name.from_unicode(u'example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(),
                },
                # Check 3: all values present, but not in the expected order.
                {
                    'target': dns.name.from_unicode(u'example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, '"bumble bee"'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'wizard'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'foo'),
                    )),
                },
                # Check 4: values in the exact expected order -> done.
                {
                    'target': dns.name.from_unicode(u'example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'foo'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, '"bumble bee"'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'wizard'),
                    )),
                },
            ],
        })
        # NS delegation walk: com -> example.com -> authoritative ns.example.com.
        udp_sequence = [
            {
                'query_target': dns.name.from_unicode(u'com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.com'),
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.example.com'),
                )]),
            },
        ]
        with patch('dns.resolver.get_default_resolver', resolver):
            with patch('dns.resolver.Resolver', resolver):
                with patch('dns.query.udp', mock_query_udp(udp_sequence)):
                    with patch('time.sleep', mock_sleep):
                        with pytest.raises(AnsibleExitJson) as exc:
                            set_module_args({
                                'records': [
                                    {
                                        'name': 'example.com',
                                        'values': [
                                            'foo',
                                            'bumble bee',
                                            'wizard',
                                        ],
                                        'mode': 'equals_ordered',
                                    },
                                ],
                            })
                            wait_for_txt.main()
        print(exc.value.args[0])
        assert exc.value.args[0]['changed'] is False
        assert exc.value.args[0]['completed'] == 1
        assert len(exc.value.args[0]['records']) == 1
        assert exc.value.args[0]['records'][0]['name'] == 'example.com'
        assert exc.value.args[0]['records'][0]['done'] is True
        assert exc.value.args[0]['records'][0]['values'] == {
            'ns.example.com': ['foo', 'bumble bee', 'wizard'],
        }
        # Four TXT queries were needed before the ordered match succeeded.
        assert exc.value.args[0]['records'][0]['check_count'] == 4
    def test_timeout(self):
        """With timeout=12 and a mocked clock, the module must fail once time runs out.

        The 'mail.example.com' record passes on its first check, while
        'www.example.com' (mode=equals, expecting exactly ['asdf']) never
        matches, so the module fails with '1 out of 2 check(s) passed'.
        """
        resolver = mock_resolver(['1.1.1.1'], {
            ('1.1.1.1', ): [
                {
                    'target': 'ns.example.com',
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'ns.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.168.127.12'),
                    )),
                },
            ],
            ('192.168.127.12', ): [
                # www check 1: wrong value.
                {
                    'target': dns.name.from_unicode(u'www.example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'www.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'fdsa'),
                    )),
                },
                # mail check 1: '"any bar"' satisfies the superset check.
                {
                    'target': dns.name.from_unicode(u'mail.example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'mail.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, '"any bar"'),
                    )),
                },
                # www check 2: still not exactly ['asdf'].
                {
                    'target': dns.name.from_unicode(u'www.example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'www.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'fdsa'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'asdf'),
                    )),
                },
                # www check 3: 'asdfasdf' -- last value seen before timeout.
                {
                    'target': dns.name.from_unicode(u'www.example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'www.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'asdfasdf'),
                    )),
                },
            ],
        })
        # NS delegation walk; www/mail NS queries answer with SOA in the
        # authority section (no further delegation below example.com).
        udp_sequence = [
            {
                'query_target': dns.name.from_unicode(u'com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.com'),
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.example.com'),
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'www.example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, authority=[dns.rrset.from_rdata(
                    'www.example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA, 'ns.example.com. ns.example.com. 12345 7200 120 2419200 10800'),
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'mail.example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, authority=[dns.rrset.from_rdata(
                    'mail.example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA, 'ns.example.com. ns.example.com. 12345 7200 120 2419200 10800'),
                )]),
            },
        ]
        with patch('dns.resolver.get_default_resolver', resolver):
            with patch('dns.resolver.Resolver', resolver):
                with patch('dns.query.udp', mock_query_udp(udp_sequence)):
                    with patch('time.sleep', mock_sleep):
                        # Fake clock: the final reading (12.021) exceeds the
                        # configured timeout of 12 seconds.
                        with patch('ansible_collections.community.dns.plugins.modules.wait_for_txt.monotonic',
                                   mock_monotonic([0, 0.01, 1.2, 6.013, 7.41, 12.021])):
                            with pytest.raises(AnsibleFailJson) as exc:
                                set_module_args({
                                    'records': [
                                        {
                                            'name': 'www.example.com',
                                            'values': [
                                                'asdf',
                                            ],
                                            'mode': 'equals',
                                        },
                                        {
                                            'name': 'mail.example.com',
                                            'values': [
                                                'foo bar',
                                                'any bar',
                                            ],
                                            'mode': 'superset',
                                        },
                                    ],
                                    'timeout': 12,
                                })
                                wait_for_txt.main()
        print(exc.value.args[0])
        assert exc.value.args[0]['msg'] == 'Timeout (1 out of 2 check(s) passed).'
        assert exc.value.args[0]['completed'] == 1
        assert len(exc.value.args[0]['records']) == 2
        assert exc.value.args[0]['records'][0]['name'] == 'www.example.com'
        assert exc.value.args[0]['records'][0]['done'] is False
        assert exc.value.args[0]['records'][0]['values'] == {
            'ns.example.com': ['asdfasdf'],
        }
        assert exc.value.args[0]['records'][0]['check_count'] == 3
        assert exc.value.args[0]['records'][1]['name'] == 'mail.example.com'
        assert exc.value.args[0]['records'][1]['done'] is True
        assert exc.value.args[0]['records'][1]['values'] == {
            'ns.example.com': ['any bar'],
        }
        assert exc.value.args[0]['records'][1]['check_count'] == 1
def test_nxdomain(self):
resolver = mock_resolver(['1.1.1.1'], {})
udp_sequence = [
{
'query_target': dns.name.from_unicode(u'com'),
'query_type': dns.rdatatype.NS,
'nameserver': '1.1.1.1',
'kwargs': {
'timeout': 10,
},
'result': create_mock_response(dns.rcode.NXDOMAIN),
},
]
with patch('dns.resolver.get_default_resolver', resolver):
with patch('dns.resolver.Resolver', resolver):
with patch('dns.query.udp', mock_query_udp(udp_sequence)):
with patch('time.sleep', mock_sleep):
with pytest.raises(AnsibleFailJson) as exc:
set_module_args({
'records': [
{
'name': 'www.example.com',
'values': [
'asdf',
],
},
],
})
wait_for_txt.main()
print(exc.value.args[0])
assert exc.value.args[0]['msg'] == 'Unexpected DNS error: The DNS query name does not exist: com.'
assert exc.value.args[0]['completed'] == 0
assert len(exc.value.args[0]['records']) == 1
assert exc.value.args[0]['records'][0]['name'] == 'www.example.com'
assert exc.value.args[0]['records'][0]['done'] is False
assert 'values' not in exc.value.args[0]['records'][0]
assert exc.value.args[0]['records'][0]['check_count'] == 0
def test_servfail(self):
resolver = mock_resolver(['1.1.1.1'], {})
udp_sequence = [
{
'query_target': dns.name.from_unicode(u'com'),
'query_type': dns.rdatatype.NS,
'nameserver': '1.1.1.1',
'kwargs': {
'timeout': 10,
},
'result': create_mock_response(dns.rcode.SERVFAIL),
},
]
with patch('dns.resolver.get_default_resolver', resolver):
with patch('dns.resolver.Resolver', resolver):
with patch('dns.query.udp', mock_query_udp(udp_sequence)):
with patch('time.sleep', mock_sleep):
with pytest.raises(AnsibleFailJson) as exc:
set_module_args({
'records': [
{
'name': 'www.example.com',
'values': [
'asdf',
],
},
],
})
wait_for_txt.main()
print(exc.value.args[0])
assert exc.value.args[0]['msg'] == 'Unexpected resolving error: Error SERVFAIL'
assert exc.value.args[0]['completed'] == 0
assert len(exc.value.args[0]['records']) == 1
assert exc.value.args[0]['records'][0]['name'] == 'www.example.com'
assert exc.value.args[0]['records'][0]['done'] is False
assert 'values' not in exc.value.args[0]['records'][0]
assert exc.value.args[0]['records'][0]['check_count'] == 0
    def test_cname_loop(self):
        """A CNAME cycle (www.example.com -> example.org -> www.example.com) must fail.

        The module follows the CNAME chain while resolving the record name and
        is expected to detect the loop and fail before any TXT check runs.
        """
        resolver = mock_resolver(['1.1.1.1'], {
            ('1.1.1.1', ): [
                {
                    'target': 'ns.example.com',
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'ns.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.168.127.12'),
                    )),
                },
                {
                    'target': 'ns.example.org',
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'ns.example.org',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '172.16.17.32'),
                    )),
                },
            ],
        })
        udp_sequence = [
            {
                'query_target': dns.name.from_unicode(u'com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.com'),
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.example.com'),
                )]),
            },
            # www.example.com answers with SOA plus a CNAME to example.org ...
            {
                'query_target': dns.name.from_unicode(u'www.example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'www.example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA, 'ns.example.com. ns.example.com. 12345 7200 120 2419200 10800'),
                ), dns.rrset.from_rdata(
                    'www.example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.CNAME, 'example.org')
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'org'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'org',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.org'),
                )]),
            },
            # ... and example.org answers with a CNAME back to www.example.com,
            # closing the loop.
            {
                'query_target': dns.name.from_unicode(u'example.org'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'example.org',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.example.org'),
                ), dns.rrset.from_rdata(
                    'example.org',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.CNAME, 'www.example.com')
                )]),
            },
        ]
        with patch('dns.resolver.get_default_resolver', resolver):
            with patch('dns.resolver.Resolver', resolver):
                with patch('dns.query.udp', mock_query_udp(udp_sequence)):
                    with patch('time.sleep', mock_sleep):
                        with pytest.raises(AnsibleFailJson) as exc:
                            set_module_args({
                                'records': [
                                    {
                                        'name': 'www.example.com',
                                        'values': [
                                            'asdf',
                                        ],
                                    },
                                ],
                            })
                            wait_for_txt.main()
        print(exc.value.args[0])
        assert exc.value.args[0]['msg'] == 'Unexpected resolving error: Found CNAME loop starting at www.example.com'
        assert exc.value.args[0]['completed'] == 0
        assert len(exc.value.args[0]['records']) == 1
        assert exc.value.args[0]['records'][0]['name'] == 'www.example.com'
        assert exc.value.args[0]['records'][0]['done'] is False
        # The failure happens before any TXT check, so no values were recorded.
        assert 'values' not in exc.value.args[0]['records'][0]
        assert exc.value.args[0]['records'][0]['check_count'] == 0
# Make coding more python3-ish
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch
from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import (
set_module_args,
ModuleTestCase,
AnsibleExitJson,
AnsibleFailJson,
)
from ansible_collections.community.dns.plugins.modules import wait_for_txt
from ..module_utils.resolver_helper import (
mock_resolver,
mock_query_udp,
create_mock_answer,
create_mock_response,
)
# We need dnspython
dns = pytest.importorskip('dns')
def mock_sleep(delay):
    """No-op replacement for time.sleep so polling tests finish instantly."""
    return None
def mock_monotonic(call_sequence):
    """Return a fake ``time.monotonic`` that plays back *call_sequence* one value per call.

    The list is consumed in place; calling the returned function after the
    sequence is exhausted trips an assertion.
    """
    def fake_monotonic():
        assert call_sequence, 'monotonic() was called more often than expected'
        return call_sequence.pop(0)
    return fake_monotonic
class TestWaitForTXT(ModuleTestCase):
    def test_single(self):
        """A single record that is a CNAME to example.org succeeds on the first check.

        www.example.com resolves via CNAME to example.org, whose zone lists two
        nameservers; both serve TXT 'asdf', so the result contains values from
        both and check_count == 1.
        """
        # A records for both authoritative nameserver hosts, and a matching
        # TXT answer from each nameserver IP.
        resolver = mock_resolver(['1.1.1.1'], {
            ('1.1.1.1', ): [
                {
                    'target': 'ns.example.com',
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'ns.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.168.127.12'),
                    )),
                },
                {
                    'target': 'ns.example.org',
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'ns.example.org',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '172.16.17.32'),
                    )),
                },
            ],
            ('192.168.127.12', ): [
                {
                    'target': dns.name.from_unicode(u'example.org'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'example.org',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'asdf'),
                    )),
                },
            ],
            ('172.16.17.32', ): [
                {
                    'target': dns.name.from_unicode(u'example.org'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'example.org',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'asdf'),
                    )),
                },
            ],
        })
        udp_sequence = [
            {
                'query_target': dns.name.from_unicode(u'com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.com'),
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.example.com'),
                )]),
            },
            # www.example.com answers with SOA plus a CNAME to example.org, so
            # the module continues the delegation walk under .org.
            {
                'query_target': dns.name.from_unicode(u'www.example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'www.example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA, 'ns.example.com. ns.example.com. 12345 7200 120 2419200 10800'),
                ), dns.rrset.from_rdata(
                    'www.example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.CNAME, 'example.org')
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'org'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'org',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.org'),
                )]),
            },
            # example.org lists two nameservers; both are consulted for TXT.
            {
                'query_target': dns.name.from_unicode(u'example.org'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'example.org',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.example.org'),
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.example.com'),
                )]),
            },
        ]
        with patch('dns.resolver.get_default_resolver', resolver):
            with patch('dns.resolver.Resolver', resolver):
                with patch('dns.query.udp', mock_query_udp(udp_sequence)):
                    with patch('time.sleep', mock_sleep):
                        with pytest.raises(AnsibleExitJson) as exc:
                            set_module_args({
                                'records': [
                                    {
                                        'name': 'www.example.com',
                                        'values': [
                                            'asdf',
                                        ]
                                    },
                                ],
                            })
                            wait_for_txt.main()
        print(exc.value.args[0])
        assert exc.value.args[0]['changed'] is False
        assert exc.value.args[0]['completed'] == 1
        assert len(exc.value.args[0]['records']) == 1
        assert exc.value.args[0]['records'][0]['name'] == 'www.example.com'
        assert exc.value.args[0]['records'][0]['done'] is True
        # Values are reported per authoritative nameserver.
        assert exc.value.args[0]['records'][0]['values'] == {
            'ns.example.com': ['asdf'],
            'ns.example.org': ['asdf'],
        }
        assert exc.value.args[0]['records'][0]['check_count'] == 1
    def test_double(self):
        """Two records checked together: www needs three polls, mail passes at once.

        'www.example.com' (mode=equals, expecting exactly ['asdf']) only
        matches on its third TXT answer; 'mail.example.com' (mode=superset)
        is satisfied by its first answer ['any bar'].
        """
        resolver = mock_resolver(['1.1.1.1'], {
            ('1.1.1.1', ): [
                {
                    'target': 'ns.example.com',
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'ns.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.168.127.12'),
                    )),
                },
            ],
            ('192.168.127.12', ): [
                # www check 1: wrong value.
                {
                    'target': dns.name.from_unicode(u'www.example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'www.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'fdsa'),
                    )),
                },
                # mail check 1: passes the superset check immediately.
                {
                    'target': dns.name.from_unicode(u'mail.example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'mail.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, '"any bar"'),
                    )),
                },
                # www check 2: contains 'asdf' but also extra 'fdsa' -> not equal.
                {
                    'target': dns.name.from_unicode(u'www.example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'www.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'fdsa'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'asdf'),
                    )),
                },
                # www check 3: exactly ['asdf'] -> done.
                {
                    'target': dns.name.from_unicode(u'www.example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'www.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'asdf'),
                    )),
                },
            ],
        })
        udp_sequence = [
            {
                'query_target': dns.name.from_unicode(u'com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.com'),
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.example.com'),
                )]),
            },
            # For both subdomains, the NS query yields only a SOA in the
            # authority section -> the example.com nameserver is authoritative.
            {
                'query_target': dns.name.from_unicode(u'www.example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, authority=[dns.rrset.from_rdata(
                    'www.example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA, 'ns.example.com. ns.example.com. 12345 7200 120 2419200 10800'),
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'mail.example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, authority=[dns.rrset.from_rdata(
                    'mail.example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA, 'ns.example.com. ns.example.com. 12345 7200 120 2419200 10800'),
                )]),
            },
        ]
        with patch('dns.resolver.get_default_resolver', resolver):
            with patch('dns.resolver.Resolver', resolver):
                with patch('dns.query.udp', mock_query_udp(udp_sequence)):
                    with patch('time.sleep', mock_sleep):
                        with pytest.raises(AnsibleExitJson) as exc:
                            set_module_args({
                                'records': [
                                    {
                                        'name': 'www.example.com',
                                        'values': [
                                            'asdf',
                                        ],
                                        'mode': 'equals',
                                    },
                                    {
                                        'name': 'mail.example.com',
                                        'values': [
                                            'foo bar',
                                            'any bar',
                                        ],
                                        'mode': 'superset',
                                    },
                                ],
                                'timeout': 10,
                            })
                            wait_for_txt.main()
        print(exc.value.args[0])
        assert exc.value.args[0]['changed'] is False
        assert exc.value.args[0]['completed'] == 2
        assert len(exc.value.args[0]['records']) == 2
        assert exc.value.args[0]['records'][0]['name'] == 'www.example.com'
        assert exc.value.args[0]['records'][0]['done'] is True
        assert exc.value.args[0]['records'][0]['values'] == {
            'ns.example.com': ['asdf'],
        }
        assert exc.value.args[0]['records'][0]['check_count'] == 3
        assert exc.value.args[0]['records'][1]['name'] == 'mail.example.com'
        assert exc.value.args[0]['records'][1]['done'] is True
        assert exc.value.args[0]['records'][1]['values'] == {
            'ns.example.com': ['any bar'],
        }
        assert exc.value.args[0]['records'][1]['check_count'] == 1
    def test_subset(self):
        """mode=subset: polls until the expected values are all present on the server.

        The expected set {'asdf', 'foo bar'} (duplicates in the input collapse)
        is only fully present in the third TXT answer, so check_count == 3.
        Note that the TXT rdata 'as df' consists of two character-strings that
        the module reports concatenated as 'asdf' (see the final assertion).
        """
        resolver = mock_resolver(['1.1.1.1'], {
            ('1.1.1.1', ): [
                {
                    'target': 'ns.example.com',
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'ns.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.168.127.12'),
                    )),
                },
            ],
            ('192.168.127.12', ): [
                # Check 1: 'foo bar' missing.
                {
                    'target': dns.name.from_unicode(u'example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'as df'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, '"another one"'),
                    )),
                },
                # Check 2: 'asdf' missing.
                {
                    'target': dns.name.from_unicode(u'example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, '"foo bar"'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, '"another one"'),
                    )),
                },
                # Check 3: both expected values present -> done.
                {
                    'target': dns.name.from_unicode(u'example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, '"foo bar"'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, '"another one"'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'as df'),
                    )),
                },
            ],
        })
        udp_sequence = [
            {
                'query_target': dns.name.from_unicode(u'com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.com'),
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.example.com'),
                )]),
            },
        ]
        with patch('dns.resolver.get_default_resolver', resolver):
            with patch('dns.resolver.Resolver', resolver):
                with patch('dns.query.udp', mock_query_udp(udp_sequence)):
                    with patch('time.sleep', mock_sleep):
                        with pytest.raises(AnsibleExitJson) as exc:
                            set_module_args({
                                'records': [
                                    {
                                        'name': 'example.com',
                                        'values': [
                                            'asdf',
                                            'asdf',
                                            'foo bar',
                                        ],
                                        'mode': 'subset',
                                    },
                                ],
                            })
                            wait_for_txt.main()
        print(exc.value.args[0])
        assert exc.value.args[0]['changed'] is False
        assert exc.value.args[0]['completed'] == 1
        assert len(exc.value.args[0]['records']) == 1
        assert exc.value.args[0]['records'][0]['name'] == 'example.com'
        assert exc.value.args[0]['records'][0]['done'] is True
        assert exc.value.args[0]['records'][0]['values'] == {
            'ns.example.com': ['foo bar', 'another one', 'asdf'],
        }
        assert exc.value.args[0]['records'][0]['check_count'] == 3
    def test_superset(self):
        """mode=superset: the expected values must contain everything the server returns.

        'www.example.com' only passes on its third answer (['asdf', 'bee']);
        'mail.example.com' passes on its first check because the server returns
        no TXT values at all -- an empty answer is accepted by 'superset'.
        """
        resolver = mock_resolver(['1.1.1.1'], {
            ('1.1.1.1', ): [
                {
                    'target': 'ns.example.com',
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'ns.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.168.127.12'),
                    )),
                },
            ],
            ('192.168.127.12', ): [
                # www check 1: '"bumble bee"' is not among the expected values.
                {
                    'target': dns.name.from_unicode(u'www.example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'www.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, '"bumble bee"'),
                    )),
                },
                # mail check 1: empty answer -> superset check passes.
                {
                    'target': dns.name.from_unicode(u'mail.example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(),
                },
                # www check 2: 'fdsa' is not expected -> not a superset.
                {
                    'target': dns.name.from_unicode(u'www.example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'www.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'fdsa'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'asdf'),
                    )),
                },
                # www check 3: 'asdf ""' concatenates to 'asdf'; with 'bee'
                # the server values are covered by the expected list -> done.
                {
                    'target': dns.name.from_unicode(u'www.example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'www.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'asdf ""'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'bee'),
                    )),
                },
            ],
        })
        udp_sequence = [
            {
                'query_target': dns.name.from_unicode(u'com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.com'),
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.example.com'),
                )]),
            },
            # Both subdomains answer NS queries with SOA in the authority
            # section only -> ns.example.com stays authoritative for them.
            {
                'query_target': dns.name.from_unicode(u'www.example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, authority=[dns.rrset.from_rdata(
                    'www.example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA, 'ns.example.com. ns.example.com. 12345 7200 120 2419200 10800'),
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'mail.example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, authority=[dns.rrset.from_rdata(
                    'mail.example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA, 'ns.example.com. ns.example.com. 12345 7200 120 2419200 10800'),
                )]),
            },
        ]
        with patch('dns.resolver.get_default_resolver', resolver):
            with patch('dns.resolver.Resolver', resolver):
                with patch('dns.query.udp', mock_query_udp(udp_sequence)):
                    with patch('time.sleep', mock_sleep):
                        with pytest.raises(AnsibleExitJson) as exc:
                            set_module_args({
                                'records': [
                                    {
                                        'name': 'www.example.com',
                                        'values': [
                                            'asdf',
                                            'bee',
                                        ],
                                        'mode': 'superset',
                                    },
                                    {
                                        'name': 'mail.example.com',
                                        'values': [
                                            'foo bar',
                                            'any bar',
                                        ],
                                        'mode': 'superset',
                                    },
                                ],
                            })
                            wait_for_txt.main()
        print(exc.value.args[0])
        assert exc.value.args[0]['changed'] is False
        assert exc.value.args[0]['completed'] == 2
        assert len(exc.value.args[0]['records']) == 2
        assert exc.value.args[0]['records'][0]['name'] == 'www.example.com'
        assert exc.value.args[0]['records'][0]['done'] is True
        assert exc.value.args[0]['records'][0]['values'] == {
            'ns.example.com': ['asdf', 'bee'],
        }
        assert exc.value.args[0]['records'][0]['check_count'] == 3
        assert exc.value.args[0]['records'][1]['name'] == 'mail.example.com'
        assert exc.value.args[0]['records'][1]['done'] is True
        # The empty answer is reported as an empty value list.
        assert exc.value.args[0]['records'][1]['values'] == {
            'ns.example.com': [],
        }
        assert exc.value.args[0]['records'][1]['check_count'] == 1
    def test_superset_not_empty(self):
        """mode=superset_not_empty: like superset, but an empty answer does not pass.

        Checks 1-3 fail (unexpected value, empty answer, extra 'wizard'); the
        fourth answer ['bumble', 'bee'] is a non-empty subset of the expected
        values, so the record completes with check_count == 4.
        """
        resolver = mock_resolver(['1.1.1.1'], {
            ('1.1.1.1', ): [
                {
                    'target': 'ns.example.com',
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'ns.example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.168.127.12'),
                    )),
                },
            ],
            ('192.168.127.12', ): [
                # Check 1: '"bumble bee"' is not among the expected values.
                {
                    'target': dns.name.from_unicode(u'example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, '"bumble bee"'),
                    )),
                },
                # Check 2: empty answer -> rejected by 'superset_not_empty'.
                {
                    'target': dns.name.from_unicode(u'example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(),
                },
                # Check 3: 'wizard' is not expected -> not a subset.
                {
                    'target': dns.name.from_unicode(u'example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'bumble'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'bee'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'wizard'),
                    )),
                },
                # Check 4: non-empty and covered by the expected values -> done.
                {
                    'target': dns.name.from_unicode(u'example.com'),
                    'rdtype': dns.rdatatype.TXT,
                    'lifetime': 10,
                    'result': create_mock_answer(dns.rrset.from_rdata(
                        'example.com',
                        300,
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'bumble'),
                        dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, 'bee'),
                    )),
                },
            ],
        })
        udp_sequence = [
            {
                'query_target': dns.name.from_unicode(u'com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.com'),
                )]),
            },
            {
                'query_target': dns.name.from_unicode(u'example.com'),
                'query_type': dns.rdatatype.NS,
                'nameserver': '1.1.1.1',
                'kwargs': {
                    'timeout': 10,
                },
                'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                    'example.com',
                    3600,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.example.com'),
                )]),
            },
        ]
        with patch('dns.resolver.get_default_resolver', resolver):
            with patch('dns.resolver.Resolver', resolver):
                with patch('dns.query.udp', mock_query_udp(udp_sequence)):
                    with patch('time.sleep', mock_sleep):
                        with pytest.raises(AnsibleExitJson) as exc:
                            set_module_args({
                                'records': [
                                    {
                                        'name': 'example.com',
                                        'values': [
                                            'bumble',
                                            'bee',
                                        ],
                                        'mode': 'superset_not_empty',
                                    },
                                ],
                            })
                            wait_for_txt.main()
        print(exc.value.args[0])
        assert exc.value.args[0]['changed'] is False
        assert exc.value.args[0]['completed'] == 1
        assert len(exc.value.args[0]['records']) == 1
        assert exc.value.args[0]['records'][0]['name'] == 'example.com'
        assert exc.value.args[0]['records'][0]['done'] is True
        assert exc.value.args[0]['records'][0]['values'] == {
            'ns.example.com': ['bumble', 'bee'],
        }
        assert exc.value.args[0]['records'][0]['check_count'] == 4
def test_equals(self):
    """'equals' mode: the module must exit once the TXT values match the expected set exactly."""

    def txt_poll(*txt_values):
        # One polling step against example.com's TXT record; no arguments
        # produces an empty answer.
        entry = {
            'target': dns.name.from_unicode(u'example.com'),
            'rdtype': dns.rdatatype.TXT,
            'lifetime': 10,
        }
        if txt_values:
            entry['result'] = create_mock_answer(dns.rrset.from_rdata(
                'example.com',
                300,
                *[dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, value) for value in txt_values]
            ))
        else:
            entry['result'] = create_mock_answer()
        return entry

    def ns_query(name, ns_host):
        # Expected NS delegation lookup answered with a single NS record.
        return {
            'query_target': dns.name.from_unicode(name),
            'query_type': dns.rdatatype.NS,
            'nameserver': '1.1.1.1',
            'kwargs': {'timeout': 10},
            'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                name,
                3600,
                dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, ns_host),
            )]),
        }

    resolver = mock_resolver(['1.1.1.1'], {
        ('1.1.1.1', ): [
            {
                'target': 'ns.example.com',
                'lifetime': 10,
                'result': create_mock_answer(dns.rrset.from_rdata(
                    'ns.example.com',
                    300,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.168.127.12'),
                )),
            },
        ],
        ('192.168.127.12', ): [
            # Four polls: partial answer, empty answer, wrong set, exact match.
            txt_poll('"bumble bee"'),
            txt_poll(),
            txt_poll('bumble bee', 'wizard'),
            txt_poll('"bumble bee"', 'wizard', 'foo'),
        ],
    })
    udp_sequence = [
        ns_query(u'com', 'ns.com'),
        ns_query(u'example.com', 'ns.example.com'),
    ]
    with patch('dns.resolver.get_default_resolver', resolver), \
            patch('dns.resolver.Resolver', resolver), \
            patch('dns.query.udp', mock_query_udp(udp_sequence)), \
            patch('time.sleep', mock_sleep):
        with pytest.raises(AnsibleExitJson) as exc:
            set_module_args({
                'records': [
                    {
                        'name': 'example.com',
                        'values': [
                            'foo',
                            'bumble bee',
                            'wizard',
                        ],
                        'mode': 'equals',
                    },
                ],
            })
            wait_for_txt.main()
    module_result = exc.value.args[0]
    print(module_result)
    assert module_result['changed'] is False
    assert module_result['completed'] == 1
    assert len(module_result['records']) == 1
    record = module_result['records'][0]
    assert record['name'] == 'example.com'
    assert record['done'] is True
    # Reported values come from the final (matching) poll, in answer order.
    assert record['values'] == {
        'ns.example.com': ['bumble bee', 'wizard', 'foo'],
    }
    assert record['check_count'] == 4
def test_equals_ordered(self):
    """'equals_ordered' mode: the TXT values must match in the exact requested order."""

    def txt_poll(*txt_values):
        # One polling step against example.com's TXT record; no arguments
        # produces an empty answer.
        entry = {
            'target': dns.name.from_unicode(u'example.com'),
            'rdtype': dns.rdatatype.TXT,
            'lifetime': 10,
        }
        if txt_values:
            entry['result'] = create_mock_answer(dns.rrset.from_rdata(
                'example.com',
                300,
                *[dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, value) for value in txt_values]
            ))
        else:
            entry['result'] = create_mock_answer()
        return entry

    def ns_query(name, ns_host):
        # Expected NS delegation lookup answered with a single NS record.
        return {
            'query_target': dns.name.from_unicode(name),
            'query_type': dns.rdatatype.NS,
            'nameserver': '1.1.1.1',
            'kwargs': {'timeout': 10},
            'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                name,
                3600,
                dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, ns_host),
            )]),
        }

    resolver = mock_resolver(['1.1.1.1'], {
        ('1.1.1.1', ): [
            {
                'target': 'ns.example.com',
                'lifetime': 10,
                'result': create_mock_answer(dns.rrset.from_rdata(
                    'ns.example.com',
                    300,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.168.127.12'),
                )),
            },
        ],
        ('192.168.127.12', ): [
            # Four polls: partial, empty, right values in the wrong order,
            # then the exact requested order.
            txt_poll('"bumble bee"'),
            txt_poll(),
            txt_poll('"bumble bee"', 'wizard', 'foo'),
            txt_poll('foo', '"bumble bee"', 'wizard'),
        ],
    })
    udp_sequence = [
        ns_query(u'com', 'ns.com'),
        ns_query(u'example.com', 'ns.example.com'),
    ]
    with patch('dns.resolver.get_default_resolver', resolver), \
            patch('dns.resolver.Resolver', resolver), \
            patch('dns.query.udp', mock_query_udp(udp_sequence)), \
            patch('time.sleep', mock_sleep):
        with pytest.raises(AnsibleExitJson) as exc:
            set_module_args({
                'records': [
                    {
                        'name': 'example.com',
                        'values': [
                            'foo',
                            'bumble bee',
                            'wizard',
                        ],
                        'mode': 'equals_ordered',
                    },
                ],
            })
            wait_for_txt.main()
    module_result = exc.value.args[0]
    print(module_result)
    assert module_result['changed'] is False
    assert module_result['completed'] == 1
    assert len(module_result['records']) == 1
    record = module_result['records'][0]
    assert record['name'] == 'example.com'
    assert record['done'] is True
    assert record['values'] == {
        'ns.example.com': ['foo', 'bumble bee', 'wizard'],
    }
    assert record['check_count'] == 4
def test_timeout(self):
    """One of two records never reaches its expected value: the module must time out and fail."""

    def txt_poll(name, *txt_values):
        # One polling step: answer the TXT query for ``name`` with the given
        # rdata strings.
        return {
            'target': dns.name.from_unicode(name),
            'rdtype': dns.rdatatype.TXT,
            'lifetime': 10,
            'result': create_mock_answer(dns.rrset.from_rdata(
                name,
                300,
                *[dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, value) for value in txt_values]
            )),
        }

    def ns_query(name, ns_host):
        # NS delegation lookup answered with a single NS record.
        return {
            'query_target': dns.name.from_unicode(name),
            'query_type': dns.rdatatype.NS,
            'nameserver': '1.1.1.1',
            'kwargs': {'timeout': 10},
            'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                name,
                3600,
                dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, ns_host),
            )]),
        }

    def soa_query(name):
        # No further delegation: the authority section carries the zone SOA.
        return {
            'query_target': dns.name.from_unicode(name),
            'query_type': dns.rdatatype.NS,
            'nameserver': '1.1.1.1',
            'kwargs': {'timeout': 10},
            'result': create_mock_response(dns.rcode.NOERROR, authority=[dns.rrset.from_rdata(
                name,
                3600,
                dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA, 'ns.example.com. ns.example.com. 12345 7200 120 2419200 10800'),
            )]),
        }

    resolver = mock_resolver(['1.1.1.1'], {
        ('1.1.1.1', ): [
            {
                'target': 'ns.example.com',
                'lifetime': 10,
                'result': create_mock_answer(dns.rrset.from_rdata(
                    'ns.example.com',
                    300,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.168.127.12'),
                )),
            },
        ],
        ('192.168.127.12', ): [
            # mail.example.com matches on its first poll; www.example.com
            # never equals just ['asdf'].
            txt_poll(u'www.example.com', 'fdsa'),
            txt_poll(u'mail.example.com', '"any bar"'),
            txt_poll(u'www.example.com', 'fdsa', 'asdf'),
            txt_poll(u'www.example.com', 'asdfasdf'),
        ],
    })
    udp_sequence = [
        ns_query(u'com', 'ns.com'),
        ns_query(u'example.com', 'ns.example.com'),
        soa_query(u'www.example.com'),
        soa_query(u'mail.example.com'),
    ]
    with patch('dns.resolver.get_default_resolver', resolver), \
            patch('dns.resolver.Resolver', resolver), \
            patch('dns.query.udp', mock_query_udp(udp_sequence)), \
            patch('time.sleep', mock_sleep), \
            patch('ansible_collections.community.dns.plugins.modules.wait_for_txt.monotonic',
                  mock_monotonic([0, 0.01, 1.2, 6.013, 7.41, 12.021])):
        # The mocked clock crosses the 12 second timeout on the last reading.
        with pytest.raises(AnsibleFailJson) as exc:
            set_module_args({
                'records': [
                    {
                        'name': 'www.example.com',
                        'values': [
                            'asdf',
                        ],
                        'mode': 'equals',
                    },
                    {
                        'name': 'mail.example.com',
                        'values': [
                            'foo bar',
                            'any bar',
                        ],
                        'mode': 'superset',
                    },
                ],
                'timeout': 12,
            })
            wait_for_txt.main()
    module_result = exc.value.args[0]
    print(module_result)
    assert module_result['msg'] == 'Timeout (1 out of 2 check(s) passed).'
    assert module_result['completed'] == 1
    assert len(module_result['records']) == 2
    first, second = module_result['records']
    assert first['name'] == 'www.example.com'
    assert first['done'] is False
    assert first['values'] == {
        'ns.example.com': ['asdfasdf'],
    }
    assert first['check_count'] == 3
    assert second['name'] == 'mail.example.com'
    assert second['done'] is True
    assert second['values'] == {
        'ns.example.com': ['any bar'],
    }
    assert second['check_count'] == 1
def test_nxdomain(self):
    """An NXDOMAIN during nameserver discovery must fail the module before any check runs."""
    resolver = mock_resolver(['1.1.1.1'], {})
    udp_sequence = [
        {
            'query_target': dns.name.from_unicode(u'com'),
            'query_type': dns.rdatatype.NS,
            'nameserver': '1.1.1.1',
            'kwargs': {'timeout': 10},
            'result': create_mock_response(dns.rcode.NXDOMAIN),
        },
    ]
    with patch('dns.resolver.get_default_resolver', resolver), \
            patch('dns.resolver.Resolver', resolver), \
            patch('dns.query.udp', mock_query_udp(udp_sequence)), \
            patch('time.sleep', mock_sleep):
        with pytest.raises(AnsibleFailJson) as exc:
            set_module_args({
                'records': [
                    {
                        'name': 'www.example.com',
                        'values': [
                            'asdf',
                        ],
                    },
                ],
            })
            wait_for_txt.main()
    module_result = exc.value.args[0]
    print(module_result)
    assert module_result['msg'] == 'Unexpected DNS error: The DNS query name does not exist: com.'
    assert module_result['completed'] == 0
    assert len(module_result['records']) == 1
    record = module_result['records'][0]
    assert record['name'] == 'www.example.com'
    assert record['done'] is False
    # The failure happened before any TXT lookup, so no values were recorded.
    assert 'values' not in record
    assert record['check_count'] == 0
def test_servfail(self):
    """A SERVFAIL during nameserver discovery must fail the module before any check runs."""
    resolver = mock_resolver(['1.1.1.1'], {})
    udp_sequence = [
        {
            'query_target': dns.name.from_unicode(u'com'),
            'query_type': dns.rdatatype.NS,
            'nameserver': '1.1.1.1',
            'kwargs': {'timeout': 10},
            'result': create_mock_response(dns.rcode.SERVFAIL),
        },
    ]
    with patch('dns.resolver.get_default_resolver', resolver), \
            patch('dns.resolver.Resolver', resolver), \
            patch('dns.query.udp', mock_query_udp(udp_sequence)), \
            patch('time.sleep', mock_sleep):
        with pytest.raises(AnsibleFailJson) as exc:
            set_module_args({
                'records': [
                    {
                        'name': 'www.example.com',
                        'values': [
                            'asdf',
                        ],
                    },
                ],
            })
            wait_for_txt.main()
    module_result = exc.value.args[0]
    print(module_result)
    assert module_result['msg'] == 'Unexpected resolving error: Error SERVFAIL'
    assert module_result['completed'] == 0
    assert len(module_result['records']) == 1
    record = module_result['records'][0]
    assert record['name'] == 'www.example.com'
    assert record['done'] is False
    # The failure happened before any TXT lookup, so no values were recorded.
    assert 'values' not in record
    assert record['check_count'] == 0
def test_cname_loop(self):
    """A CNAME cycle (www.example.com -> example.org -> www.example.com) must fail the module."""
    # Addresses of the two authoritative nameservers involved in the loop.
    resolver = mock_resolver(['1.1.1.1'], {
        ('1.1.1.1', ): [
            {
                'target': 'ns.example.com',
                'lifetime': 10,
                'result': create_mock_answer(dns.rrset.from_rdata(
                    'ns.example.com',
                    300,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.168.127.12'),
                )),
            },
            {
                'target': 'ns.example.org',
                'lifetime': 10,
                'result': create_mock_answer(dns.rrset.from_rdata(
                    'ns.example.org',
                    300,
                    dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '172.16.17.32'),
                )),
            },
        ],
    })
    # NS discovery: www.example.com CNAMEs to example.org, which CNAMEs back,
    # closing the loop the module is expected to detect.
    udp_sequence = [
        {
            'query_target': dns.name.from_unicode(u'com'),
            'query_type': dns.rdatatype.NS,
            'nameserver': '1.1.1.1',
            'kwargs': {
                'timeout': 10,
            },
            'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                'com',
                3600,
                dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.com'),
            )]),
        },
        {
            'query_target': dns.name.from_unicode(u'example.com'),
            'query_type': dns.rdatatype.NS,
            'nameserver': '1.1.1.1',
            'kwargs': {
                'timeout': 10,
            },
            'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                'example.com',
                3600,
                dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.example.com'),
            )]),
        },
        {
            'query_target': dns.name.from_unicode(u'www.example.com'),
            'query_type': dns.rdatatype.NS,
            'nameserver': '1.1.1.1',
            'kwargs': {
                'timeout': 10,
            },
            # SOA plus a CNAME pointing into example.org.
            'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                'www.example.com',
                3600,
                dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA, 'ns.example.com. ns.example.com. 12345 7200 120 2419200 10800'),
            ), dns.rrset.from_rdata(
                'www.example.com',
                3600,
                dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.CNAME, 'example.org')
            )]),
        },
        {
            'query_target': dns.name.from_unicode(u'org'),
            'query_type': dns.rdatatype.NS,
            'nameserver': '1.1.1.1',
            'kwargs': {
                'timeout': 10,
            },
            'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                'org',
                3600,
                dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.org'),
            )]),
        },
        {
            'query_target': dns.name.from_unicode(u'example.org'),
            'query_type': dns.rdatatype.NS,
            'nameserver': '1.1.1.1',
            'kwargs': {
                'timeout': 10,
            },
            # CNAME back to www.example.com closes the loop.
            'result': create_mock_response(dns.rcode.NOERROR, answer=[dns.rrset.from_rdata(
                'example.org',
                3600,
                dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS, 'ns.example.org'),
            ), dns.rrset.from_rdata(
                'example.org',
                3600,
                dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.CNAME, 'www.example.com')
            )]),
        },
    ]
    with patch('dns.resolver.get_default_resolver', resolver):
        with patch('dns.resolver.Resolver', resolver):
            with patch('dns.query.udp', mock_query_udp(udp_sequence)):
                with patch('time.sleep', mock_sleep):
                    with pytest.raises(AnsibleFailJson) as exc:
                        set_module_args({
                            'records': [
                                {
                                    'name': 'www.example.com',
                                    'values': [
                                        'asdf',
                                    ],
                                },
                            ],
                        })
                        wait_for_txt.main()
    print(exc.value.args[0])
    assert exc.value.args[0]['msg'] == 'Unexpected resolving error: Found CNAME loop starting at www.example.com'
    assert exc.value.args[0]['completed'] == 0
    assert len(exc.value.args[0]['records']) == 1
    assert exc.value.args[0]['records'][0]['name'] == 'www.example.com'
    assert exc.value.args[0]['records'][0]['done'] is False
    # Resolution failed before any TXT lookup, so no values were recorded.
    assert 'values' not in exc.value.args[0]['records'][0]
    assert exc.value.args[0]['records'][0]['check_count'] == 0
import codecs
import re
from codecs import StreamReaderWriter
from typing import Optional
from logging2.handlers.abc import Handler
from logging2.levels import LogLevel
# NOTE: This module does not provide handlers for rotating log files. The rationale behind that is that all *NIX systems
# have software specifically designed to do that, and it's much faster and reliable. Let's separate
# concerns here: this logging software is meant to be both Pythonic and fast. There's nothing Pythonic or fast about
# reinventing the wheel. A great utility is ``logrotate``, which is available for Debian, Red Hat, and BSD systems.
# Linux Manpage: https://linux.die.net/man/8/logrotate
# FreeBSD Manpage: https://www.freebsd.org/cgi/man.cgi?query=logrotate&manpath=SuSE+Linux/i386+11.3
class FileHandler(Handler):
    """A type of ``Handler`` that writes messages to a file on the local system
    """

    def __init__(
        self,
        file_path: str,
        mode: Optional[str] = "a",
        encoding: Optional[str] = "utf8",
        errors: Optional[str] = "strict",
        buffering: Optional[int] = 1,
        name: Optional[str] = None,
        level: Optional[LogLevel] = None,
    ):
        """Instantiates a new ``FileHandler``

        :param file_path: the path (full or relative) to the log file
        :param mode: the file mode
        :param encoding: the file encoding
        :param errors: how encoding errors should be handled (passed through to ``codecs.open``)
        :param buffering: buffering policy; the default of 1 enables line buffering
        :param name: the name of the handler
        :param level: the minimum level of verbosity/priority of the messages this will log
        """
        self.fh: StreamReaderWriter = codecs.open(
            file_path, mode=mode, encoding=encoding, errors=errors, buffering=buffering
        )
        super().__init__(name=name, level=level)
        self.encoding: str = encoding

    def write(self, message: str, level: LogLevel) -> None:
        """Writes the full log entry to the configured file

        :param message: the entire message to be written, full formatted
        :param level: the priority level of the message
        """
        if level >= self.min_level:
            # The stream from ``codecs.open`` already encodes on write using the
            # configured encoding *and* error policy.  The previous
            # ``bytes(message, enc).decode(enc)`` round-trip was a no-op for
            # encodable text but always used strict error handling, which
            # defeated the ``errors`` constructor argument for anything else.
            self.fh.write(message)
            self.fh.flush()

    def _create_name(self) -> str:
        """Creates the name for the handler - called from ``__init__`` if a name is not given.

        :returns: the name of the file, stripped to word characters and dots
        """
        fname = self.fh.name.split("/")[-1]
        # Raw string so ``\w`` is a regex class, not a (deprecated) str escape.
        return re.sub(r"[^\w.]", "", str(fname))
import re
from codecs import StreamReaderWriter
from typing import Optional
from logging2.handlers.abc import Handler
from logging2.levels import LogLevel
# NOTE: This module does not provide handlers for rotating log files. The rationale behind that is that all *NIX systems
# have software specifically designed to do that, and it's much faster and reliable. Let's separate
# concerns here: this logging software is meant to be both Pythonic and fast. There's nothing Pythonic or fast about
# reinventing the wheel. A great utility is ``logrotate``, which is available for Debian, Red Hat, and BSD systems.
# Linux Manpage: https://linux.die.net/man/8/logrotate
# FreeBSD Manpage: https://www.freebsd.org/cgi/man.cgi?query=logrotate&manpath=SuSE+Linux/i386+11.3
class FileHandler(Handler):
    """A type of ``Handler`` that writes messages to a file on the local system
    """

    def __init__(
        self,
        file_path: str,
        mode: Optional[str] = "a",
        encoding: Optional[str] = "utf8",
        errors: Optional[str] = "strict",
        buffering: Optional[int] = 1,
        name: Optional[str] = None,
        level: Optional[LogLevel] = None,
    ):
        """Instantiates a new ``FileHandler``

        :param file_path: the path (full or relative) to the log file
        :param mode: the file mode
        :param encoding: the file encoding
        :param errors: encoding error policy, forwarded to ``codecs.open``
        :param buffering: buffering policy; 1 means line buffered
        :param name: the name of the handler
        :param level: the minimum level of verbosity/priority of the messages this will log
        """
        self.fh: StreamReaderWriter = codecs.open(
            file_path, mode=mode, encoding=encoding, errors=errors, buffering=buffering
        )
        super().__init__(name=name, level=level)
        self.encoding: str = encoding

    def write(self, message: str, level: LogLevel) -> None:
        """Writes the full log entry to the configured file

        :param message: the entire message to be written, full formatted
        :param level: the priority level of the message
        """
        if level >= self.min_level:
            # Write directly: the codecs stream encodes with the configured
            # encoding/errors.  Round-tripping through ``bytes(...)`` here
            # would always apply strict error handling and so ignore the
            # ``errors`` argument the file was opened with.
            self.fh.write(message)
            self.fh.flush()

    def _create_name(self) -> str:
        """Creates the name for the handler - called from ``__init__`` if a name is not given.

        :returns: the name of the file, reduced to word characters and dots
        """
        fname = self.fh.name.split("/")[-1]
        # Raw string keeps ``\w`` a regex class instead of an invalid escape.
        return re.sub(r"[^\w.]", "", str(fname))
import logging
from abc import ABC, abstractmethod
import numpy as np
import pandas as pd
from hdrbp._util import (
basic_repr,
basic_str,
compute_correlation,
compute_diversification_ratio,
compute_drawdowns,
compute_gini,
compute_prices,
compute_risk_contributions,
compute_turnover,
compute_variance,
count_dates_per_year,
count_years,
)
logger = logging.getLogger(__name__)
@basic_str
@basic_repr
class MetricCalculator(ABC):
    """Abstract base class for backtest metric calculators.

    Subclasses implement :meth:`calculate`, mapping one backtest result
    frame to a single scalar metric value.
    """

    @property
    def name(self):
        # The metric's display name is its repr.  NOTE(review): ``basic_repr``
        # presumably customizes ``__repr__`` -- confirm in ``hdrbp._util``.
        return repr(self)

    @abstractmethod
    def calculate(self, result: pd.DataFrame) -> float:
        """Compute this metric from the backtest ``result`` frame."""
        pass
class GeometricMeanReturn(MetricCalculator):
    """Geometric mean of portfolio returns, optionally annualized."""

    def __init__(self, annualized: bool = False) -> None:
        self._annualized = annualized

    def calculate(self, result: pd.DataFrame) -> float:
        logger.debug(f"{self}: Calculating metric")
        valid = _filter_valid_returns(result)
        # Average in log space, then map back with expm1.
        mean_log = np.mean(np.log1p(valid["return"].values))
        if self._annualized:
            dates = pd.to_datetime(valid["date"].values)
            mean_log = count_dates_per_year(dates) * mean_log
        return np.expm1(mean_log)
class MeanReturn(MetricCalculator):
    """Arithmetic mean of portfolio returns, optionally annualized."""

    def __init__(self, annualized: bool = False) -> None:
        self._annualized = annualized

    def calculate(self, result: pd.DataFrame) -> float:
        logger.debug(f"{self}: Calculating metric")
        valid = _filter_valid_returns(result)
        mean = np.mean(valid["return"].values)
        if not self._annualized:
            return mean
        dates = pd.to_datetime(valid["date"].values)
        return count_dates_per_year(dates) * mean
class Volatility(MetricCalculator):
    """Standard deviation of portfolio returns, optionally annualized."""

    def __init__(self, annualized: bool = False) -> None:
        self._annualized = annualized

    def calculate(self, result: pd.DataFrame) -> float:
        logger.debug(f"{self}: Calculating metric")
        valid = _filter_valid_returns(result)
        # np.std defaults to the population estimator (ddof=0).
        vol = np.std(valid["return"].values)
        if self._annualized:
            dates = pd.to_datetime(valid["date"].values)
            vol = np.sqrt(count_dates_per_year(dates)) * vol
        return vol
class SharpeRatio(MetricCalculator):
    """Mean return divided by volatility, both optionally annualized."""

    def __init__(self, annualized: bool = False) -> None:
        self._annualized = annualized

    def calculate(self, result: pd.DataFrame) -> float:
        logger.debug(f"{self}: Calculating metric")
        sharpe_numerator = MeanReturn(self._annualized).calculate(result)
        sharpe_denominator = Volatility(self._annualized).calculate(result)
        return sharpe_numerator / sharpe_denominator
class MeanTurnover(MetricCalculator):
    """Average portfolio turnover across rebalances (per year when annualized)."""

    def __init__(self, annualized: bool = False) -> None:
        self._annualized = annualized

    def calculate(self, result: pd.DataFrame) -> float:
        logger.debug(f"{self}: Calculating metric")
        rebalances = _filter_rebalance_dates(_filter_valid_returns(result))
        turnovers = rebalances.apply(
            lambda df: compute_turnover(
                df["before_rebalance_assets"],
                df["before_rebalance_weights"],
                df["assets"],
                df["weights"],
            ),
            axis="columns",
        ).values
        if not self._annualized:
            return np.mean(turnovers)
        # Annualized: total turnover spread over the number of years covered.
        dates = pd.to_datetime(rebalances["date"].values)
        return np.sum(turnovers) / count_years(dates)
class MaxDrawdown(MetricCalculator):
    """Largest drawdown of the cumulative price path implied by the returns."""

    def calculate(self, result: pd.DataFrame) -> float:
        logger.debug(f"{self}: Calculating metric")
        returns = _filter_valid_returns(result)["return"].values
        return np.max(compute_drawdowns(compute_prices(returns)))
class ValueAtRisk(MetricCalculator):
    """Return quantile at the given probability level, optionally annualized."""

    def __init__(self, probability: float = 0.95, annualized: bool = False) -> None:
        self._probability = probability
        self._annualized = annualized

    def calculate(self, result: pd.DataFrame) -> float:
        logger.debug(f"{self}: Calculating metric")
        valid = _filter_valid_returns(result)
        var = np.quantile(valid["return"].values, 1 - self._probability)
        if self._annualized:
            dates = pd.to_datetime(valid["date"].values)
            var = np.sqrt(count_dates_per_year(dates)) * var
        return var
class ExpectedShortfall(MetricCalculator):
    """Mean of the returns at or below the VaR cut-off, optionally annualized."""

    def __init__(self, probability: float = 0.95, annualized: bool = False) -> None:
        self._probability = probability
        self._annualized = annualized

    def calculate(self, result: pd.DataFrame) -> float:
        logger.debug(f"{self}: Calculating metric")
        valid = _filter_valid_returns(result)
        returns = valid["return"].values
        threshold = np.quantile(returns, 1 - self._probability)
        shortfall = np.mean(returns[returns <= threshold])
        if self._annualized:
            dates = pd.to_datetime(valid["date"].values)
            shortfall = np.sqrt(count_dates_per_year(dates)) * shortfall
        return shortfall
class MeanWeightGini(MetricCalculator):
    """Average Gini coefficient of the portfolio weights at rebalance dates."""

    def calculate(self, result: pd.DataFrame) -> float:
        logger.debug(f"{self}: Calculating metric")
        rebalances = _filter_rebalance_dates(result)
        ginis = rebalances["weights"].apply(compute_gini).values
        return np.mean(ginis)
class MeanRiskContributionGini(MetricCalculator):
    """Average Gini coefficient of the risk contributions at rebalance dates."""

    def calculate(self, result: pd.DataFrame) -> float:
        logger.debug(f"{self}: Calculating metric")
        rebalances = _filter_rebalance_dates(result)
        contributions = rebalances.apply(
            lambda df: compute_risk_contributions(df["covariances"], df["weights"]),
            axis="columns",
        )
        return np.mean(contributions.apply(compute_gini).values)
class MeanVariance(MetricCalculator):
    """Average ex-ante portfolio variance at rebalance dates."""

    def calculate(self, result: pd.DataFrame) -> float:
        logger.debug(f"{self}: Calculating metric")
        rebalances = _filter_rebalance_dates(result)
        variances = rebalances.apply(
            lambda df: compute_variance(df["covariances"], df["weights"]),
            axis="columns",
        )
        return np.mean(variances.values)
class MeanCorrelation(MetricCalculator):
    """Average ex-ante portfolio correlation at rebalance dates."""

    def calculate(self, result: pd.DataFrame) -> float:
        logger.debug(f"{self}: Calculating metric")
        rebalances = _filter_rebalance_dates(result)
        correlations = rebalances.apply(
            lambda df: compute_correlation(df["covariances"], df["weights"]),
            axis="columns",
        )
        return np.mean(correlations.values)
class MeanDiversificationRatio(MetricCalculator):
    """Average diversification ratio at rebalance dates."""

    def calculate(self, result: pd.DataFrame) -> float:
        logger.debug(f"{self}: Calculating metric")
        rebalances = _filter_rebalance_dates(result)
        ratios = rebalances.apply(
            lambda df: compute_diversification_ratio(df["covariances"], df["weights"]),
            axis="columns",
        )
        return np.mean(ratios.values)
def _filter_valid_returns(result):
    # Keep only the rows whose "return" entry is not NaN/None.
    return result[result["return"].notna()]
def _filter_rebalance_dates(result):
    # Positional boolean mask (``.values``) keeps only rebalance rows.
    return result[result["is_rebalance"].values]
def calculate_group_metrics(result: pd.DataFrame, calculators: list[MetricCalculator]) -> pd.Series:
    """Evaluate every calculator on one backtest result group.

    :param result: result rows belonging to a single
        (covariance estimator, weight optimizer) group; both identifying
        columns are assumed constant within the group
    :param calculators: the metric calculators to evaluate
    :returns: series mapping each calculator's ``name`` to its metric value
    """
    covariance_estimator = result["covariance_estimator"].values[0]
    weight_optimizer = result["weight_optimizer"].values[0]
    logger.debug(
        f"Backtester: Calculating metrics of group "
        f"{covariance_estimator=}"
        f" and "
        f"{weight_optimizer=}"
    )
    # Build the mapping in one comprehension instead of creating a throwaway
    # dict per calculator for ``dict.update``; iteration order is preserved.
    metrics = pd.Series(
        {calculator.name: calculator.calculate(result) for calculator in calculators}
    )
    return metrics
from abc import ABC, abstractmethod
import numpy as np
import pandas as pd
from hdrbp._util import (
basic_repr,
basic_str,
compute_correlation,
compute_diversification_ratio,
compute_drawdowns,
compute_gini,
compute_prices,
compute_risk_contributions,
compute_turnover,
compute_variance,
count_dates_per_year,
count_years,
)
logger = logging.getLogger(__name__)
@basic_str
@basic_repr
class MetricCalculator(ABC):
    """Abstract base for all backtest metric calculators."""

    @property
    def name(self):
        # Display name equals the repr.  NOTE(review): assumes the
        # ``basic_repr`` decorator installs a suitable ``__repr__`` -- confirm.
        return repr(self)

    @abstractmethod
    def calculate(self, result: pd.DataFrame) -> float:
        """Compute the metric value from the backtest ``result`` frame."""
        pass
class GeometricMeanReturn(MetricCalculator):
    """Geometric mean of the realized returns; annualized when requested."""

    def __init__(self, annualized: bool = False) -> None:
        self._annualized = annualized

    def calculate(self, result: pd.DataFrame) -> float:
        logger.debug(f"{self}: Calculating metric")
        frame = _filter_valid_returns(result)
        avg_log = np.mean(np.log1p(frame["return"].values))
        if self._annualized:
            avg_log = count_dates_per_year(pd.to_datetime(frame["date"].values)) * avg_log
        return np.expm1(avg_log)
class MeanReturn(MetricCalculator):
    """Arithmetic mean of the realized returns; annualized when requested."""

    def __init__(self, annualized: bool = False) -> None:
        self._annualized = annualized

    def calculate(self, result: pd.DataFrame) -> float:
        logger.debug(f"{self}: Calculating metric")
        frame = _filter_valid_returns(result)
        avg = np.mean(frame["return"].values)
        if self._annualized:
            avg = count_dates_per_year(pd.to_datetime(frame["date"].values)) * avg
        return avg
class Volatility(MetricCalculator):
    """Standard deviation of the realized returns; annualized when requested."""

    def __init__(self, annualized: bool = False) -> None:
        self._annualized = annualized

    def calculate(self, result: pd.DataFrame) -> float:
        logger.debug(f"{self}: Calculating metric")
        frame = _filter_valid_returns(result)
        # Population standard deviation (np.std default, ddof=0).
        sigma = np.std(frame["return"].values)
        if self._annualized:
            sigma = np.sqrt(count_dates_per_year(pd.to_datetime(frame["date"].values))) * sigma
        return sigma
class SharpeRatio(MetricCalculator):
    """Ratio of mean return to volatility, both on the same annualization basis."""

    def __init__(self, annualized: bool = False) -> None:
        self._annualized = annualized

    def calculate(self, result: pd.DataFrame) -> float:
        logger.debug(f"{self}: Calculating metric")
        ret = MeanReturn(self._annualized).calculate(result)
        vol = Volatility(self._annualized).calculate(result)
        return ret / vol
class MeanTurnover(MetricCalculator):
    """Average turnover per rebalance, or total turnover per year when annualized."""

    def __init__(self, annualized: bool = False) -> None:
        self._annualized = annualized

    def calculate(self, result: pd.DataFrame) -> float:
        logger.debug(f"{self}: Calculating metric")
        frame = _filter_rebalance_dates(_filter_valid_returns(result))
        per_rebalance = frame.apply(
            lambda df: compute_turnover(
                df["before_rebalance_assets"],
                df["before_rebalance_weights"],
                df["assets"],
                df["weights"],
            ),
            axis="columns",
        ).values
        if self._annualized:
            return np.sum(per_rebalance) / count_years(pd.to_datetime(frame["date"].values))
        return np.mean(per_rebalance)
class MaxDrawdown(MetricCalculator):
    """Maximum drawdown of the price path implied by the realized returns."""

    def calculate(self, result: pd.DataFrame) -> float:
        logger.debug(f"{self}: Calculating metric")
        rets = _filter_valid_returns(result)["return"].values
        prices = compute_prices(rets)
        return np.max(compute_drawdowns(prices))
class ValueAtRisk(MetricCalculator):
    """Return quantile at probability ``1 - p``; annualized when requested."""

    def __init__(self, probability: float = 0.95, annualized: bool = False) -> None:
        self._probability = probability
        self._annualized = annualized

    def calculate(self, result: pd.DataFrame) -> float:
        logger.debug(f"{self}: Calculating metric")
        frame = _filter_valid_returns(result)
        quantile = np.quantile(frame["return"].values, 1 - self._probability)
        if self._annualized:
            quantile = np.sqrt(count_dates_per_year(pd.to_datetime(frame["date"].values))) * quantile
        return quantile
class ExpectedShortfall(MetricCalculator):
    """Average return in the tail below the VaR cut-off; annualized when requested."""

    def __init__(self, probability: float = 0.95, annualized: bool = False) -> None:
        self._probability = probability
        self._annualized = annualized

    def calculate(self, result: pd.DataFrame) -> float:
        logger.debug(f"{self}: Calculating metric")
        frame = _filter_valid_returns(result)
        rets = frame["return"].values
        tail_cut = np.quantile(rets, 1 - self._probability)
        tail_mean = np.mean(rets[rets <= tail_cut])
        if self._annualized:
            tail_mean = np.sqrt(count_dates_per_year(pd.to_datetime(frame["date"].values))) * tail_mean
        return tail_mean
class MeanWeightGini(MetricCalculator):
def calculate(self, result: pd.DataFrame) -> float:
logger.debug(f"{self}: Calculating metric")
result = _filter_rebalance_dates(result)
weights = result["weights"]
weights_gini = weights.apply(compute_gini)
weights_gini = weights_gini.values
mean_weight_gini = np.mean(weights_gini)
return mean_weight_gini
class MeanRiskContributionGini(MetricCalculator):
def calculate(self, result: pd.DataFrame) -> float:
logger.debug(f"{self}: Calculating metric")
result = _filter_rebalance_dates(result)
risk_contributions = result.apply(
lambda df: compute_risk_contributions(df["covariances"], df["weights"]),
axis="columns",
)
risk_contributions_gini = risk_contributions.apply(compute_gini)
risk_contributions_gini = risk_contributions_gini.values
mean_risk_contribution_gini = np.mean(risk_contributions_gini)
return mean_risk_contribution_gini
class MeanVariance(MetricCalculator):
def calculate(self, result: pd.DataFrame) -> float:
logger.debug(f"{self}: Calculating metric")
result = _filter_rebalance_dates(result)
variances = result.apply(
lambda df: compute_variance(df["covariances"], df["weights"]),
axis="columns",
)
variances = variances.values
mean_variance = np.mean(variances)
return mean_variance
class MeanCorrelation(MetricCalculator):
def calculate(self, result: pd.DataFrame) -> float:
logger.debug(f"{self}: Calculating metric")
result = _filter_rebalance_dates(result)
correlations = result.apply(
lambda df: compute_correlation(df["covariances"], df["weights"]),
axis="columns",
)
correlations = correlations.values
mean_correlation = np.mean(correlations)
return mean_correlation
class MeanDiversificationRatio(MetricCalculator):
def calculate(self, result: pd.DataFrame) -> float:
logger.debug(f"{self}: Calculating metric")
result = _filter_rebalance_dates(result)
diversification_ratios = result.apply(
lambda df: compute_diversification_ratio(df["covariances"], df["weights"]),
axis="columns",
)
diversification_ratios = diversification_ratios.values
mean_diversification_ratio = np.mean(diversification_ratios)
return mean_diversification_ratio
def _filter_valid_returns(result):
is_valid_returns = result["return"].notna()
filtered_result = result[is_valid_returns]
return filtered_result
def _filter_rebalance_dates(result):
is_rebalance = result["is_rebalance"].values
filtered_result = result[is_rebalance]
return filtered_result
def calculate_group_metrics(result: pd.DataFrame, calculators: list[MetricCalculator]) -> pd.Series:
covariance_estimator = result["covariance_estimator"].values[0]
weight_optimizer = result["weight_optimizer"].values[0]
logger.debug(
f"Backtester: Calculating metrics of group "
f"{covariance_estimator=}"
f" and "
f"{weight_optimizer=}"
)
metrics = {}
for calculator in calculators:
name = calculator.name
metric = calculator.calculate(result)
metrics.update({name: metric})
metrics = pd.Series(metrics)
return metrics | 0.895543 | 0.449876 |
from datetime import datetime
from typing import Any, Dict
from core.forms import GameForm
from core.test.tests_helpers import create_game, create_platform
from django.core.exceptions import ValidationError
from django.test import TestCase
class GameFormTests(TestCase):
def setUp(self) -> None:
self.platform_1 = create_platform()
def test_game_needs_at_least_one_platform(self) -> None:
game_data = {
"name": "a unique name",
"platforms": [],
"publish_date": datetime.now().year,
} # type: Dict[str, Any]
game_form = GameForm(game_data)
self.assertFalse(game_form.is_valid())
game_data["platforms"] = [self.platform_1]
game_form = GameForm(game_data)
self.assertTrue(game_form.is_valid(), game_form.errors)
def test_game_name_is_unique(self) -> None:
game_data = {
"name": "a unique name",
"platforms": [self.platform_1.id],
"publish_date": datetime.now().year,
} # type: Dict[str, Any]
create_game(platforms=game_data["platforms"], name=game_data["name"])
game_form = GameForm(game_data)
self.assertFalse(game_form.is_valid())
self.assertTrue("name" in game_form.errors.keys())
def test_game_name_uniqueness_is_case_insensitive(self) -> None:
game_data = {
"name": "A Case Sensitive Unique Name",
"platforms": [self.platform_1.id],
"publish_date": datetime.now().year,
} # type: Dict[str, Any]
create_game(platforms=game_data["platforms"], name=game_data["name"])
game_data["name"] = game_data["name"].lower()
game_form = GameForm(game_data)
with self.assertRaises(ValidationError) as error:
game_form.is_valid()
self.assertTrue("already exists" in str(error.exception))
def test_game_dlc_needs_parent_game(self) -> None:
game_1 = create_game(platforms=[self.platform_1])
game_data = {
"name": "an irrelevant name",
"platforms": [self.platform_1.id],
"publish_date": datetime.now().year,
"dlc_or_expansion": True,
"parent_game": None,
} # type: Dict[str, Any]
game_form = GameForm(game_data)
self.assertFalse(game_form.is_valid())
self.assertTrue("parent_game" in game_form.errors.keys())
self.assertTrue("must specify a parent game" in game_form.errors["parent_game"][0])
game_data["parent_game"] = game_1.id
game_form = GameForm(game_data)
self.assertTrue(game_form.is_valid(), game_form.errors)
def test_game_dlc_parent_cannot_be_also_a_dlc(self) -> None:
game_1 = create_game(platforms=[self.platform_1])
game_1_dlc = create_game(platforms=[self.platform_1], dlc_or_expansion=True, parent_game=game_1.id)
game_data = {
"name": "an irrelevant name",
"platforms": [self.platform_1.id],
"publish_date": datetime.now().year,
"dlc_or_expansion": True,
"parent_game": game_1_dlc.id,
} # type: Dict[str, Any]
game_form = GameForm(game_data)
self.assertFalse(game_form.is_valid())
self.assertTrue("parent_game" in game_form.errors.keys())
self.assertTrue("cannot have as parent another game DLC" in game_form.errors["parent_game"][0])
def test_game_dlc_platform_must_be_subset_of_parent_game(self) -> None:
platform_2 = create_platform()
platform_3 = create_platform()
game_1 = create_game(platforms=[self.platform_1, platform_2])
# subset = superset
game_data = {
"name": "an irrelevant name",
"platforms": (platform.id for platform in game_1.platforms.all()),
"publish_date": datetime.now().year,
"dlc_or_expansion": True,
"parent_game": game_1.id,
} # type: Dict[str, Any]
game_form = GameForm(game_data)
self.assertTrue(game_form.is_valid(), game_form.errors)
# subset < superset
game_data["platforms"] = [self.platform_1]
game_form = GameForm(game_data)
self.assertTrue(game_form.is_valid(), game_form.errors)
# subset != superset
game_data["platforms"] = [self.platform_1, platform_3]
game_form = GameForm(game_data)
self.assertFalse(game_form.is_valid())
self.assertTrue("platforms" in game_form.errors.keys())
self.assertTrue("subset/all of parent game platforms" in game_form.errors["platforms"][0])
game_data["platforms"] = [platform_3]
game_form = GameForm(game_data)
self.assertFalse(game_form.is_valid())
self.assertTrue("platforms" in game_form.errors.keys())
self.assertTrue("subset/all of parent game platforms" in game_form.errors["platforms"][0]) | finishedgames/core/test/test_game_form.py | from datetime import datetime
from typing import Any, Dict
from core.forms import GameForm
from core.test.tests_helpers import create_game, create_platform
from django.core.exceptions import ValidationError
from django.test import TestCase
class GameFormTests(TestCase):
def setUp(self) -> None:
self.platform_1 = create_platform()
def test_game_needs_at_least_one_platform(self) -> None:
game_data = {
"name": "a unique name",
"platforms": [],
"publish_date": datetime.now().year,
} # type: Dict[str, Any]
game_form = GameForm(game_data)
self.assertFalse(game_form.is_valid())
game_data["platforms"] = [self.platform_1]
game_form = GameForm(game_data)
self.assertTrue(game_form.is_valid(), game_form.errors)
def test_game_name_is_unique(self) -> None:
game_data = {
"name": "a unique name",
"platforms": [self.platform_1.id],
"publish_date": datetime.now().year,
} # type: Dict[str, Any]
create_game(platforms=game_data["platforms"], name=game_data["name"])
game_form = GameForm(game_data)
self.assertFalse(game_form.is_valid())
self.assertTrue("name" in game_form.errors.keys())
def test_game_name_uniqueness_is_case_insensitive(self) -> None:
game_data = {
"name": "A Case Sensitive Unique Name",
"platforms": [self.platform_1.id],
"publish_date": datetime.now().year,
} # type: Dict[str, Any]
create_game(platforms=game_data["platforms"], name=game_data["name"])
game_data["name"] = game_data["name"].lower()
game_form = GameForm(game_data)
with self.assertRaises(ValidationError) as error:
game_form.is_valid()
self.assertTrue("already exists" in str(error.exception))
def test_game_dlc_needs_parent_game(self) -> None:
game_1 = create_game(platforms=[self.platform_1])
game_data = {
"name": "an irrelevant name",
"platforms": [self.platform_1.id],
"publish_date": datetime.now().year,
"dlc_or_expansion": True,
"parent_game": None,
} # type: Dict[str, Any]
game_form = GameForm(game_data)
self.assertFalse(game_form.is_valid())
self.assertTrue("parent_game" in game_form.errors.keys())
self.assertTrue("must specify a parent game" in game_form.errors["parent_game"][0])
game_data["parent_game"] = game_1.id
game_form = GameForm(game_data)
self.assertTrue(game_form.is_valid(), game_form.errors)
def test_game_dlc_parent_cannot_be_also_a_dlc(self) -> None:
game_1 = create_game(platforms=[self.platform_1])
game_1_dlc = create_game(platforms=[self.platform_1], dlc_or_expansion=True, parent_game=game_1.id)
game_data = {
"name": "an irrelevant name",
"platforms": [self.platform_1.id],
"publish_date": datetime.now().year,
"dlc_or_expansion": True,
"parent_game": game_1_dlc.id,
} # type: Dict[str, Any]
game_form = GameForm(game_data)
self.assertFalse(game_form.is_valid())
self.assertTrue("parent_game" in game_form.errors.keys())
self.assertTrue("cannot have as parent another game DLC" in game_form.errors["parent_game"][0])
def test_game_dlc_platform_must_be_subset_of_parent_game(self) -> None:
platform_2 = create_platform()
platform_3 = create_platform()
game_1 = create_game(platforms=[self.platform_1, platform_2])
# subset = superset
game_data = {
"name": "an irrelevant name",
"platforms": (platform.id for platform in game_1.platforms.all()),
"publish_date": datetime.now().year,
"dlc_or_expansion": True,
"parent_game": game_1.id,
} # type: Dict[str, Any]
game_form = GameForm(game_data)
self.assertTrue(game_form.is_valid(), game_form.errors)
# subset < superset
game_data["platforms"] = [self.platform_1]
game_form = GameForm(game_data)
self.assertTrue(game_form.is_valid(), game_form.errors)
# subset != superset
game_data["platforms"] = [self.platform_1, platform_3]
game_form = GameForm(game_data)
self.assertFalse(game_form.is_valid())
self.assertTrue("platforms" in game_form.errors.keys())
self.assertTrue("subset/all of parent game platforms" in game_form.errors["platforms"][0])
game_data["platforms"] = [platform_3]
game_form = GameForm(game_data)
self.assertFalse(game_form.is_valid())
self.assertTrue("platforms" in game_form.errors.keys())
self.assertTrue("subset/all of parent game platforms" in game_form.errors["platforms"][0]) | 0.651244 | 0.454048 |
import os
import random
import en_core_web_sm
import stringx
import tensorflow as tf
import tensorflow.logging as log
from tensorflow.python.lib.io import file_io
nlp = en_core_web_sm.load()
# acceptable ways to end a sentence
END_TOKENS = ['.', '!', '?', '...', "'", "`", '"', ")"]
STOPLIST = frozenset(['@highlight'])
def __int64_feature(value):
value = value if type(value) == list else [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def __bytes_feature(value):
value = value if type(value) == list else [value]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def __float_feature(value):
value = value if type(value) == list else [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def __is_stopword(s):
return s in STOPLIST
def contains_number(s):
for c in s:
if c.isdigit():
return True
return False
def __fix_missing_period(line):
"""Adds a period to a line that is missing a period"""
if '@highlight' in line:
return line
if line == '':
return line
for et in END_TOKENS:
if line.endswith(et):
return line
return line + ' .'
def tokenize(s):
res = []
doc = nlp(s)
for token in doc:
t = token.text.strip().lower()
if t == '' or __is_stopword(t):
continue
res.append(t)
return res
def preprocess(s):
s = stringx.to_str(s)
sep = '\n'
lines = s.split(sep)
ls = []
for line in lines:
line = line.strip()
line = stringx.to_ascii_str(line)
# fix missing period must come after to_ascii conversion
# because some punctuation falls outside ascii e.g. latex
line = __fix_missing_period(line)
ls.append(line)
return tokenize(sep.join(ls))
def split_train_val_test(paths, train_size=0.7, test_size=0.1, shuffle=True):
if shuffle:
random.shuffle(paths)
_len = len(paths)
if train_size < 1:
train_size = max(int(train_size * _len), 1)
if test_size < 1:
test_size = max(int(test_size * _len), 1)
val_size = _len - train_size - test_size
log.info('train_size={}, val_size={}, test_size={}'.format(repr(train_size), repr(val_size), repr(test_size)))
train = set(paths[:train_size])
val = set(paths[train_size:train_size + val_size])
test = set(paths[-test_size:])
intersect = train.intersection(val).intersection(test)
if len(intersect) != 0:
raise ValueError('intersect of train,val,test sets should be empty')
return train, val, test
def article_example(article, abstract):
article = stringx.to_bytes(article)
abstract = stringx.to_bytes(abstract)
return tf.train.Example(features=tf.train.Features(feature={
'article': __bytes_feature(article),
'abstract': __bytes_feature(abstract)
}))
def __parse_proto(example_proto):
features = {
'article': tf.FixedLenFeature((), tf.string, default_value=''),
'abstract': tf.FixedLenFeature((), tf.string, default_value='')
}
parsed = tf.parse_single_example(example_proto, features)
return parsed['article'], parsed['abstract']
def __preprocess_article_and_abstract(article, abstract):
sep = ' '
return sep.join(preprocess(article)), sep.join(preprocess(abstract))
def dataset(data_path, batch_size=1, shuffle=False, repeat=False):
names = file_io.list_directory(data_path)
_paths = []
for name in names:
_paths.append(os.path.join(data_path, name))
ds = tf.data.TFRecordDataset(_paths)
ds = ds.map(__parse_proto)
ds = ds.map(
lambda article, abstract: tuple(tf.py_func(
__preprocess_article_and_abstract,
[article, abstract],
[tf.string, tf.string],
name='preprocess_article_and_abstract'
)))
if shuffle:
ds = ds.shuffle(buffer_size=100)
ds = ds.batch(batch_size, drop_remainder=True)
if repeat:
ds = ds.repeat()
return ds | trainer/etl.py | import os
import random
import en_core_web_sm
import stringx
import tensorflow as tf
import tensorflow.logging as log
from tensorflow.python.lib.io import file_io
nlp = en_core_web_sm.load()
# acceptable ways to end a sentence
END_TOKENS = ['.', '!', '?', '...', "'", "`", '"', ")"]
STOPLIST = frozenset(['@highlight'])
def __int64_feature(value):
value = value if type(value) == list else [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def __bytes_feature(value):
value = value if type(value) == list else [value]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def __float_feature(value):
value = value if type(value) == list else [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def __is_stopword(s):
return s in STOPLIST
def contains_number(s):
for c in s:
if c.isdigit():
return True
return False
def __fix_missing_period(line):
"""Adds a period to a line that is missing a period"""
if '@highlight' in line:
return line
if line == '':
return line
for et in END_TOKENS:
if line.endswith(et):
return line
return line + ' .'
def tokenize(s):
res = []
doc = nlp(s)
for token in doc:
t = token.text.strip().lower()
if t == '' or __is_stopword(t):
continue
res.append(t)
return res
def preprocess(s):
s = stringx.to_str(s)
sep = '\n'
lines = s.split(sep)
ls = []
for line in lines:
line = line.strip()
line = stringx.to_ascii_str(line)
# fix missing period must come after to_ascii conversion
# because some punctuation falls outside ascii e.g. latex
line = __fix_missing_period(line)
ls.append(line)
return tokenize(sep.join(ls))
def split_train_val_test(paths, train_size=0.7, test_size=0.1, shuffle=True):
if shuffle:
random.shuffle(paths)
_len = len(paths)
if train_size < 1:
train_size = max(int(train_size * _len), 1)
if test_size < 1:
test_size = max(int(test_size * _len), 1)
val_size = _len - train_size - test_size
log.info('train_size={}, val_size={}, test_size={}'.format(repr(train_size), repr(val_size), repr(test_size)))
train = set(paths[:train_size])
val = set(paths[train_size:train_size + val_size])
test = set(paths[-test_size:])
intersect = train.intersection(val).intersection(test)
if len(intersect) != 0:
raise ValueError('intersect of train,val,test sets should be empty')
return train, val, test
def article_example(article, abstract):
article = stringx.to_bytes(article)
abstract = stringx.to_bytes(abstract)
return tf.train.Example(features=tf.train.Features(feature={
'article': __bytes_feature(article),
'abstract': __bytes_feature(abstract)
}))
def __parse_proto(example_proto):
features = {
'article': tf.FixedLenFeature((), tf.string, default_value=''),
'abstract': tf.FixedLenFeature((), tf.string, default_value='')
}
parsed = tf.parse_single_example(example_proto, features)
return parsed['article'], parsed['abstract']
def __preprocess_article_and_abstract(article, abstract):
sep = ' '
return sep.join(preprocess(article)), sep.join(preprocess(abstract))
def dataset(data_path, batch_size=1, shuffle=False, repeat=False):
names = file_io.list_directory(data_path)
_paths = []
for name in names:
_paths.append(os.path.join(data_path, name))
ds = tf.data.TFRecordDataset(_paths)
ds = ds.map(__parse_proto)
ds = ds.map(
lambda article, abstract: tuple(tf.py_func(
__preprocess_article_and_abstract,
[article, abstract],
[tf.string, tf.string],
name='preprocess_article_and_abstract'
)))
if shuffle:
ds = ds.shuffle(buffer_size=100)
ds = ds.batch(batch_size, drop_remainder=True)
if repeat:
ds = ds.repeat()
return ds | 0.529263 | 0.280145 |
from ctypes import *
from os import EX_CANTCREAT
import threading
import queue
import time
import copy
COM_OK = 0
COM_ERROR = 1
COM_ABORT = 2
COM_TIMEOUT = 3
MASTER_BROADCAST=0x07FF
MASTER_P2P_MASK =0x0400
RX_FILTER_MASK_ALL=0xFFFFFFFF
RX_FILTER_MASK_ONE=0x00000000
RX_VERBOSE = 1
class VCI_INIT_CONFIG(Structure):
_fields_ = [("AccCode", c_uint),
("AccMask", c_uint),
("Reserved", c_uint),
("Filter", c_ubyte),
("Timing0", c_ubyte),
("Timing1", c_ubyte),
("Mode", c_ubyte)
]
class VCI_CAN_OBJ(Structure):
_fields_ = [("ID", c_uint),
("TimeStamp", c_uint),
("TimeFlag", c_ubyte),
("SendType", c_ubyte),
("RemoteFlag", c_ubyte),
("ExternFlag", c_ubyte),
("DataLen", c_ubyte),
("Data", c_ubyte*8),
("Reserved", c_ubyte*3)
]
#two channels could be used simultaneously
class USB_CAN:
def __init__(self, CAN_ID=MASTER_BROADCAST, baud=1000000):
self.VCI_USBCAN2 = 4
self.STATUS_OK = 1
self.canDLL = cdll.LoadLibrary('./libcontrolcan.so')
self.CAN_ID = CAN_ID
self.channelStatus = [0, 0]
self.RX_FILTER_TYPE_ALL = 0
self.RX_FILTER_TYPE_ONE = 1
self.RxFilType = self.RX_FILTER_TYPE_ALL
self.RxNodeID = 0
self.receiving_alive=0
self.keyboard_alive=0
self.threads=[]
self.kbdQueue = queue.Queue()
self.rxBytesQueue = queue.Queue()
self.rxFrameQueue = queue.PriorityQueue()
self.rxTimeout=200
self.latestRxData=bytearray()
self.latestRxDataLen=0
rx_vci_can_obj_type = VCI_CAN_OBJ*2500
self.rx_vci_can_obj = rx_vci_can_obj_type()
for rxobj in self.rx_vci_can_obj:
rxobj.TimeFlag=1
self.tx_vci_can_obj = VCI_CAN_OBJ()
ret = self.canDLL.VCI_OpenDevice(self.VCI_USBCAN2, 0, 0)
if ret == self.STATUS_OK:
print('[INFO]Open USB_CAN Device successful')
ret = self.canDLL.VCI_ClearBuffer(self.VCI_USBCAN2, 0, 0)
ret = self.canDLL.VCI_ClearBuffer(self.VCI_USBCAN2, 0, 1)
else:
print('[INFO]Open USB_CAN Device failed')
def open(self, chn=0,filterType=0,rxID=0):
self.RxNodeID = rxID
self.RxFilType = filterType
#determine filter parameters from filter Type and nodeID
filterAcc = self.RxNodeID<<21
if(self.RxFilType == 0):
filterMask = RX_FILTER_MASK_ALL
else:
filterMask = RX_FILTER_MASK_ONE
#Init CAN channel
self.vci_initconfig = VCI_INIT_CONFIG(
filterAcc, filterMask, 0, 0, 0x00, 0x14, 0) # 1M baudrate, 87.5%, normal mode, all ID acceptable
ret = self.canDLL.VCI_InitCAN(
self.VCI_USBCAN2, 0, chn, byref(self.vci_initconfig))
if ret != self.STATUS_OK:
print('[INFO]Init CAN Channel {} fail'.format(chn))
#Start CAN channel
ret = self.canDLL.VCI_StartCAN(self.VCI_USBCAN2, 0, chn)
if ret == self.STATUS_OK:
self.channelStatus[chn] = 1
ret = self.canDLL.VCI_ClearBuffer(self.VCI_USBCAN2, 0, chn)
else:
print('[INFO]Start CAN Channel {} fail'.format(chn))
return ret
def setCANID(self,canID=MASTER_BROADCAST):
self.CAN_ID = canID
def start_keyboard(self):
self.keyboard_alive = 1
# print("[INFO]Keyboard Input Enabled")
self.keyboardThread = threading.Thread(target=self.keyboard_thread)
self.threads.append(self.keyboardThread)
self.keyboardThread.start()
def getInput(self):
return self.kbdQueue.get()
def keyboard_thread(self):
while(self.keyboard_alive==1):
try:
input_str = input()
if (len(input_str) > 0):
self.kbdQueue.put(input_str)
except:
print("[INFO]quit keyboard thread")
break
def start_receiving(self, chn=0):
self.receiving_alive=1
# print("[INFO]Receiving thread started")
self.receivingThread = threading.Thread(target=self.receiving_thread,args=(chn,))
self.threads.append(self.receivingThread)
self.receivingThread.start()
def receiving_thread(self, chn=0):
if(self.channelStatus[chn] == 1):
while(self.receiving_alive):
rxNB=0
while rxNB <= 0 and self.receiving_alive:
rxNB = self.canDLL.VCI_Receive(self.VCI_USBCAN2, 0, chn, byref(self.rx_vci_can_obj), 2500, 0)
#temp_rx_vci_can_obj= self.rx_vci_can_obj[:rxNB]
#keep this block fast, otherwise the received data will be errupted
dlc=bytearray()
for i in range(rxNB):
dlc.extend(bytearray(self.rx_vci_can_obj[i].Data[:self.rx_vci_can_obj[i].DataLen]))
for dlcbyte in dlc:
self.rxBytesQueue.put(dlcbyte)
print(dlc.decode('iso-8859-1'),end="")
rxNB=0
else:
print("[INFO]Rx Channel {} Not opened".format(chn))
def transmit(self,pdata,num,chn=0):
ret=COM_OK
frameNB=num//8
remBytesNB=num%8
pdataInd=0
for i in range(frameNB):
if(ret==COM_OK):
ret = self.transmit_Frame(pdata[pdataInd:],8,chn)
pdataInd+=8
time.sleep(0.0001)
if(ret==COM_OK and remBytesNB!=0):
ret = self.transmit_Frame(pdata[pdataInd:],remBytesNB,chn)
return ret
def transmit_Frame(self, frameData, dalalen, chn=0):
try:
ret = COM_OK
if(self.channelStatus[chn] == 1):
self.tx_vci_can_obj.ID=self.CAN_ID
self.tx_vci_can_obj.SendType=1
self.tx_vci_can_obj.DataLen=dalalen
for i in range(dalalen):
self.tx_vci_can_obj.Data[i]=frameData[i]
if(self.canDLL.VCI_Transmit(self.VCI_USBCAN2, 0, chn, byref(self.tx_vci_can_obj), 1)!=1):
ret=COM_ERROR
# else:
# print("[INFO]Tx frame ID:0x{:x}, Len {}".format(self.tx_vci_can_obj.ID,self.tx_vci_can_obj.DataLen))
else:
print("[INFO]Tx Error, Channel {} Not opened".format(chn))
return ret
except:
print("[INFO]Tx Frame timeout")
return COM_TIMEOUT
def clearRxBuffer(self):
self.canDLL.VCI_ClearBuffer(self.VCI_USBCAN2, 0, 0)
self.canDLL.VCI_ClearBuffer(self.VCI_USBCAN2, 0, 1)
while(self.rxBytesQueue.qsize()!=0):
self.rxBytesQueue.get()
def receive(self,pdata,num,chn=0):
dataInd=0
tstart=time.time()
try:
while(dataInd<num):
if(self.rxBytesQueue.qsize()!=0):
pdata[dataInd]=self.rxBytesQueue.get()
dataInd+=1
if(dataInd==num):
return COM_OK
if(time.time()-tstart>self.rxTimeout):
return COM_TIMEOUT
except:
return COM_ERROR
def close(self):
ret = self.canDLL.VCI_CloseDevice(self.VCI_USBCAN2, 0)
print("[INFO]CAN device closed")
self.receiving_alive=0
self.keyboard_alive=0
for threadOb in self.threads:
threadOb.join()
if __name__ == "__main__":
print("[INFO]This is a USB CAN Test program")
usbcan = USB_CAN()
usbcan.open(0)
usbcan.open(1)
try:
usbcan.start_receiving(0)
usbcan.start_keyboard()
except:
usbcan.close() | Host/usbCAN/usbCAN.py | from ctypes import *
from os import EX_CANTCREAT
import threading
import queue
import time
import copy
COM_OK = 0
COM_ERROR = 1
COM_ABORT = 2
COM_TIMEOUT = 3
MASTER_BROADCAST=0x07FF
MASTER_P2P_MASK =0x0400
RX_FILTER_MASK_ALL=0xFFFFFFFF
RX_FILTER_MASK_ONE=0x00000000
RX_VERBOSE = 1
class VCI_INIT_CONFIG(Structure):
_fields_ = [("AccCode", c_uint),
("AccMask", c_uint),
("Reserved", c_uint),
("Filter", c_ubyte),
("Timing0", c_ubyte),
("Timing1", c_ubyte),
("Mode", c_ubyte)
]
class VCI_CAN_OBJ(Structure):
_fields_ = [("ID", c_uint),
("TimeStamp", c_uint),
("TimeFlag", c_ubyte),
("SendType", c_ubyte),
("RemoteFlag", c_ubyte),
("ExternFlag", c_ubyte),
("DataLen", c_ubyte),
("Data", c_ubyte*8),
("Reserved", c_ubyte*3)
]
#two channels could be used simultaneously
class USB_CAN:
def __init__(self, CAN_ID=MASTER_BROADCAST, baud=1000000):
self.VCI_USBCAN2 = 4
self.STATUS_OK = 1
self.canDLL = cdll.LoadLibrary('./libcontrolcan.so')
self.CAN_ID = CAN_ID
self.channelStatus = [0, 0]
self.RX_FILTER_TYPE_ALL = 0
self.RX_FILTER_TYPE_ONE = 1
self.RxFilType = self.RX_FILTER_TYPE_ALL
self.RxNodeID = 0
self.receiving_alive=0
self.keyboard_alive=0
self.threads=[]
self.kbdQueue = queue.Queue()
self.rxBytesQueue = queue.Queue()
self.rxFrameQueue = queue.PriorityQueue()
self.rxTimeout=200
self.latestRxData=bytearray()
self.latestRxDataLen=0
rx_vci_can_obj_type = VCI_CAN_OBJ*2500
self.rx_vci_can_obj = rx_vci_can_obj_type()
for rxobj in self.rx_vci_can_obj:
rxobj.TimeFlag=1
self.tx_vci_can_obj = VCI_CAN_OBJ()
ret = self.canDLL.VCI_OpenDevice(self.VCI_USBCAN2, 0, 0)
if ret == self.STATUS_OK:
print('[INFO]Open USB_CAN Device successful')
ret = self.canDLL.VCI_ClearBuffer(self.VCI_USBCAN2, 0, 0)
ret = self.canDLL.VCI_ClearBuffer(self.VCI_USBCAN2, 0, 1)
else:
print('[INFO]Open USB_CAN Device failed')
def open(self, chn=0,filterType=0,rxID=0):
self.RxNodeID = rxID
self.RxFilType = filterType
#determine filter parameters from filter Type and nodeID
filterAcc = self.RxNodeID<<21
if(self.RxFilType == 0):
filterMask = RX_FILTER_MASK_ALL
else:
filterMask = RX_FILTER_MASK_ONE
#Init CAN channel
self.vci_initconfig = VCI_INIT_CONFIG(
filterAcc, filterMask, 0, 0, 0x00, 0x14, 0) # 1M baudrate, 87.5%, normal mode, all ID acceptable
ret = self.canDLL.VCI_InitCAN(
self.VCI_USBCAN2, 0, chn, byref(self.vci_initconfig))
if ret != self.STATUS_OK:
print('[INFO]Init CAN Channel {} fail'.format(chn))
#Start CAN channel
ret = self.canDLL.VCI_StartCAN(self.VCI_USBCAN2, 0, chn)
if ret == self.STATUS_OK:
self.channelStatus[chn] = 1
ret = self.canDLL.VCI_ClearBuffer(self.VCI_USBCAN2, 0, chn)
else:
print('[INFO]Start CAN Channel {} fail'.format(chn))
return ret
def setCANID(self,canID=MASTER_BROADCAST):
self.CAN_ID = canID
def start_keyboard(self):
self.keyboard_alive = 1
# print("[INFO]Keyboard Input Enabled")
self.keyboardThread = threading.Thread(target=self.keyboard_thread)
self.threads.append(self.keyboardThread)
self.keyboardThread.start()
def getInput(self):
return self.kbdQueue.get()
def keyboard_thread(self):
while(self.keyboard_alive==1):
try:
input_str = input()
if (len(input_str) > 0):
self.kbdQueue.put(input_str)
except:
print("[INFO]quit keyboard thread")
break
def start_receiving(self, chn=0):
self.receiving_alive=1
# print("[INFO]Receiving thread started")
self.receivingThread = threading.Thread(target=self.receiving_thread,args=(chn,))
self.threads.append(self.receivingThread)
self.receivingThread.start()
def receiving_thread(self, chn=0):
if(self.channelStatus[chn] == 1):
while(self.receiving_alive):
rxNB=0
while rxNB <= 0 and self.receiving_alive:
rxNB = self.canDLL.VCI_Receive(self.VCI_USBCAN2, 0, chn, byref(self.rx_vci_can_obj), 2500, 0)
#temp_rx_vci_can_obj= self.rx_vci_can_obj[:rxNB]
#keep this block fast, otherwise the received data will be errupted
dlc=bytearray()
for i in range(rxNB):
dlc.extend(bytearray(self.rx_vci_can_obj[i].Data[:self.rx_vci_can_obj[i].DataLen]))
for dlcbyte in dlc:
self.rxBytesQueue.put(dlcbyte)
print(dlc.decode('iso-8859-1'),end="")
rxNB=0
else:
print("[INFO]Rx Channel {} Not opened".format(chn))
def transmit(self,pdata,num,chn=0):
ret=COM_OK
frameNB=num//8
remBytesNB=num%8
pdataInd=0
for i in range(frameNB):
if(ret==COM_OK):
ret = self.transmit_Frame(pdata[pdataInd:],8,chn)
pdataInd+=8
time.sleep(0.0001)
if(ret==COM_OK and remBytesNB!=0):
ret = self.transmit_Frame(pdata[pdataInd:],remBytesNB,chn)
return ret
def transmit_Frame(self, frameData, dalalen, chn=0):
try:
ret = COM_OK
if(self.channelStatus[chn] == 1):
self.tx_vci_can_obj.ID=self.CAN_ID
self.tx_vci_can_obj.SendType=1
self.tx_vci_can_obj.DataLen=dalalen
for i in range(dalalen):
self.tx_vci_can_obj.Data[i]=frameData[i]
if(self.canDLL.VCI_Transmit(self.VCI_USBCAN2, 0, chn, byref(self.tx_vci_can_obj), 1)!=1):
ret=COM_ERROR
# else:
# print("[INFO]Tx frame ID:0x{:x}, Len {}".format(self.tx_vci_can_obj.ID,self.tx_vci_can_obj.DataLen))
else:
print("[INFO]Tx Error, Channel {} Not opened".format(chn))
return ret
except:
print("[INFO]Tx Frame timeout")
return COM_TIMEOUT
def clearRxBuffer(self):
self.canDLL.VCI_ClearBuffer(self.VCI_USBCAN2, 0, 0)
self.canDLL.VCI_ClearBuffer(self.VCI_USBCAN2, 0, 1)
while(self.rxBytesQueue.qsize()!=0):
self.rxBytesQueue.get()
def receive(self,pdata,num,chn=0):
dataInd=0
tstart=time.time()
try:
while(dataInd<num):
if(self.rxBytesQueue.qsize()!=0):
pdata[dataInd]=self.rxBytesQueue.get()
dataInd+=1
if(dataInd==num):
return COM_OK
if(time.time()-tstart>self.rxTimeout):
return COM_TIMEOUT
except:
return COM_ERROR
def close(self):
ret = self.canDLL.VCI_CloseDevice(self.VCI_USBCAN2, 0)
print("[INFO]CAN device closed")
self.receiving_alive=0
self.keyboard_alive=0
for threadOb in self.threads:
threadOb.join()
if __name__ == "__main__":
print("[INFO]This is a USB CAN Test program")
usbcan = USB_CAN()
usbcan.open(0)
usbcan.open(1)
try:
usbcan.start_receiving(0)
usbcan.start_keyboard()
except:
usbcan.close() | 0.13102 | 0.100923 |
from flask_restful import Resource
import libs.http_status as status
import libs.json_response as response
from libs.validator import Validator, require_json
from managers.todo_manager import TodoManager
from pprint import pprint
manager = TodoManager()
class TodosResource(Resource):
def get(self):
data = manager.getTodos()
return response.response(data, status.HTTP_OK.get('code'))
@require_json
@Validator("todo_validator.TodoCreationSchema")
def post(self, data=None, errors=None):
if errors:
return response.error(errors, status.HTTP_BAD_REQUEST.get('code'))
try:
result = manager.createTodo(data)
if result:
return response.response(result, status.HTTP_CREATED.get('code'))
else:
return response.error(
status.HTTP_BAD_REQUEST.get('message'),
status.HTTP_BAD_REQUEST.get('code')
)
except ValueError as error:
return response.error(
str(error),
status.HTTP_BAD_REQUEST.get('code')
)
class TodoResource(Resource):
def get(self, todo_id):
manager = TodoManager()
data = manager.getTodoById(todo_id)
if data:
return response.response(data, status.HTTP_OK.get('code'))
else:
return response.error({
'message': status.HTTP_NOT_FOUND.get('message')
}, status.HTTP_NOT_FOUND.get('code'))
@require_json
@Validator("todo_validator.TodoUpdateSchema")
def patch(self, todo_id, data=None, errors=None):
if errors or not data:
return response.error(errors, status.HTTP_BAD_REQUEST.get('code'))
todo = manager.getTodoById(todo_id)
if not todo:
return response.error({
'message': status.HTTP_NOT_FOUND.get('message')
}, status.HTTP_NOT_FOUND.get('code'))
try:
result = manager.updateTodoById(todo_id, data)
if result:
return response.response(None, status.HTTP_NOTHING.get('code'))
else:
return response.error(
status.HTTP_BAD_REQUEST.get('message'),
status.HTTP_BAD_REQUEST.get('code')
)
except ValueError as error:
return response.error(
str(error),
status.HTTP_BAD_REQUEST.get('code')
)
    def delete(self, todo_id):
        """Delete the todo ``todo_id``; 404 if missing, 500 if delete fails."""
        # NOTE(review): builds a fresh TodoManager instead of reusing the
        # module-level ``manager`` used by patch() -- confirm whether this
        # is intentional before unifying.
        manager = TodoManager()
        data = manager.getTodoById(todo_id)
        if data:
            result = manager.deleteTodoById(todo_id)
            if result:
                # Success: empty body with the "no content" status code.
                return response.response(None, status.HTTP_NOTHING.get('code'))
            else:
                return response.error(
                    status.HTTP_INTERNAL_ERROR.get('message'),
                    status.HTTP_INTERNAL_ERROR.get('code')
                )
        else:
            return response.error({
                'message': status.HTTP_NOT_FOUND.get('message')
            }, status.HTTP_NOT_FOUND.get('code')) | routes/todos.py | from flask_restful import Resource
import libs.http_status as status
import libs.json_response as response
from libs.validator import Validator, require_json
from managers.todo_manager import TodoManager
from pprint import pprint
manager = TodoManager()
class TodosResource(Resource):
def get(self):
data = manager.getTodos()
return response.response(data, status.HTTP_OK.get('code'))
@require_json
@Validator("todo_validator.TodoCreationSchema")
def post(self, data=None, errors=None):
if errors:
return response.error(errors, status.HTTP_BAD_REQUEST.get('code'))
try:
result = manager.createTodo(data)
if result:
return response.response(result, status.HTTP_CREATED.get('code'))
else:
return response.error(
status.HTTP_BAD_REQUEST.get('message'),
status.HTTP_BAD_REQUEST.get('code')
)
except ValueError as error:
return response.error(
str(error),
status.HTTP_BAD_REQUEST.get('code')
)
class TodoResource(Resource):
def get(self, todo_id):
manager = TodoManager()
data = manager.getTodoById(todo_id)
if data:
return response.response(data, status.HTTP_OK.get('code'))
else:
return response.error({
'message': status.HTTP_NOT_FOUND.get('message')
}, status.HTTP_NOT_FOUND.get('code'))
@require_json
@Validator("todo_validator.TodoUpdateSchema")
def patch(self, todo_id, data=None, errors=None):
if errors or not data:
return response.error(errors, status.HTTP_BAD_REQUEST.get('code'))
todo = manager.getTodoById(todo_id)
if not todo:
return response.error({
'message': status.HTTP_NOT_FOUND.get('message')
}, status.HTTP_NOT_FOUND.get('code'))
try:
result = manager.updateTodoById(todo_id, data)
if result:
return response.response(None, status.HTTP_NOTHING.get('code'))
else:
return response.error(
status.HTTP_BAD_REQUEST.get('message'),
status.HTTP_BAD_REQUEST.get('code')
)
except ValueError as error:
return response.error(
str(error),
status.HTTP_BAD_REQUEST.get('code')
)
def delete(self, todo_id):
manager = TodoManager()
data = manager.getTodoById(todo_id)
if data:
result = manager.deleteTodoById(todo_id)
if result:
return response.response(None, status.HTTP_NOTHING.get('code'))
else:
return response.error(
status.HTTP_INTERNAL_ERROR.get('message'),
status.HTTP_INTERNAL_ERROR.get('code')
)
else:
return response.error({
'message': status.HTTP_NOT_FOUND.get('message')
}, status.HTTP_NOT_FOUND.get('code')) | 0.253122 | 0.071494 |
from OpenPNM.Geometry import models as gm
from OpenPNM.Geometry import GenericGeometry
class Stick_and_Ball(GenericGeometry):
    r"""
    Stick and Ball subclass of GenericGeometry. This subclass is meant as a
    basic default geometry to get started quickly.
    Parameters
    ----------
    name : string
        The name of the object, which is also used as the label where this
        geometry is defined.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Attach the full set of pore/throat geometry models immediately.
        self._generate()
    def _generate(self):
        # Register all pore-scale geometry models on this object's model
        # collection: random seeds first, then sizes derived from them.
        # Random per-pore seed; 'constant' regen_mode so the seeds are drawn
        # once and not re-randomized on later regeneration.
        self.models.add(propname='pore.seed',
                        model=gm.pore_misc.random,
                        regen_mode='constant')
        # Throat seed taken as the 'min' of neighbor values (presumably the
        # two adjacent pore seeds -- confirm against gm.throat_misc.neighbor).
        self.models.add(propname='throat.seed',
                        model=gm.throat_misc.neighbor,
                        mode='min')
        # Spherical ("ball") pore diameters from a Weibull distribution.
        self.models.add(propname='pore.diameter',
                        model=gm.pore_diameter.sphere,
                        psd_name='weibull_min',
                        psd_shape=2.5,
                        psd_loc=0,
                        psd_scale=0.5)
        self.models.add(propname='pore.area',
                        model=gm.pore_area.spherical)
        self.models.add(propname='pore.volume',
                        model=gm.pore_volume.sphere)
        # Cylindrical ("stick") throat diameters from a Weibull distribution.
        self.models.add(propname='throat.diameter',
                        model=gm.throat_diameter.cylinder,
                        tsd_name='weibull_min',
                        tsd_shape=2.5,
                        tsd_loc=0,
                        tsd_scale=0.5)
        self.models.add(propname='throat.length',
                        model=gm.throat_length.straight)
        self.models.add(propname='throat.volume',
                        model=gm.throat_volume.cylinder)
        self.models.add(propname='throat.area',
                        model=gm.throat_area.cylinder)
        self.models.add(propname='throat.surface_area',
                        model=gm.throat_surface_area.cylinder) | OpenPNM/Geometry/__Stick_and_Ball__.py | from OpenPNM.Geometry import models as gm
from OpenPNM.Geometry import GenericGeometry
class Stick_and_Ball(GenericGeometry):
r"""
Stick and Ball subclass of GenericGeometry. This subclass is meant as a
basic default geometry to get started quickly.
Parameters
----------
name : string
The name of the object, which is also used as the label where this
geometry is defined.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._generate()
def _generate(self):
self.models.add(propname='pore.seed',
model=gm.pore_misc.random,
regen_mode='constant')
self.models.add(propname='throat.seed',
model=gm.throat_misc.neighbor,
mode='min')
self.models.add(propname='pore.diameter',
model=gm.pore_diameter.sphere,
psd_name='weibull_min',
psd_shape=2.5,
psd_loc=0,
psd_scale=0.5)
self.models.add(propname='pore.area',
model=gm.pore_area.spherical)
self.models.add(propname='pore.volume',
model=gm.pore_volume.sphere)
self.models.add(propname='throat.diameter',
model=gm.throat_diameter.cylinder,
tsd_name='weibull_min',
tsd_shape=2.5,
tsd_loc=0,
tsd_scale=0.5)
self.models.add(propname='throat.length',
model=gm.throat_length.straight)
self.models.add(propname='throat.volume',
model=gm.throat_volume.cylinder)
self.models.add(propname='throat.area',
model=gm.throat_area.cylinder)
self.models.add(propname='throat.surface_area',
model=gm.throat_surface_area.cylinder) | 0.870501 | 0.230194 |
from .schfile import SchFile
'''
Given a top schematic file name, SchDict will
read all the schematics and put them in a dictionary.
If they are instantiated multiple times, they will only
be read/parsed once, but each instantiation will have its
own dictionary entry with its own timestamp and parent.
'''
class SheetInstantiation(object):
    """One instantiation of a schematic sheet in the page hierarchy.

    Registers itself in ``database`` under a hierarchy-qualified sheet name
    and a hierarchy-qualified timestamp, reads/caches the sheet file through
    ``database.readfile``, then recursively instantiates any sub-sheets
    found in the sheet data.
    """

    def __init__(self, database, sheetname, sheetfile, timestamp='',
                 parent=None, antecedents=None):
        # Bug fix: the original declared ``antecedents=set()`` -- a mutable
        # default shared across *every* call, leaking sheet files from one
        # top-level instantiation into the next.
        if antecedents is None:
            antecedents = set()
        namesep = ' / '
        # Keep the hierarchy separator unambiguous inside user-given names.
        sheetname = sheetname.replace(namesep, namesep.replace(' ', '_'))
        if timestamp is None:
            timestamp = sheetfile
        if parent is not None:
            sheetname = parent.sheetname + namesep + sheetname
            timestamp = parent.timestamp + '/' + timestamp
        if sheetname in database:
            raise SystemExit('Sheet %s in database multiple times' % sheetname)
        if timestamp in database.timestamps:
            # Bug fixes: (1) the original applied ``%`` to the SystemExit
            # instance *outside* the constructor, raising TypeError instead
            # of the intended message; (2) it indexed ``database`` (keyed by
            # sheet name) with a timestamp -- the timestamp index lives in
            # ``database.timestamps``.
            raise SystemExit('Sheet %s timestamp same as sheet %s'
                             % (sheetname,
                                database.timestamps[timestamp].sheetname))
        if sheetfile in antecedents:
            raise SystemExit('Loops not permitted in page hierarchy:\n %s' %
                             ', '.join(sorted(antecedents)))
        database[sheetname] = self
        database.timestamps[timestamp] = self
        database.priorityorder.append(sheetname)
        self.sheetname = sheetname
        self.sheetfile = sheetfile
        self.timestamp = timestamp
        self.sheetdata = database.readfile(sheetfile)
        # Track the current ancestry so the recursive instantiation below can
        # detect cycles; remove ourselves again on the way back out.
        antecedents.add(sheetfile)
        for item in self.sheetdata.items:
            # NOTE(review): ``isinstance(item, item.Sheet)`` tests each item
            # against a class attribute of that same item -- confirm this is
            # the intended sub-sheet check.
            if isinstance(item, item.Sheet):
                SheetInstantiation(database,
                                   item.fields[0].name, item.fields[1].name,
                                   item.timestamp, self, antecedents)
        antecedents.remove(sheetfile)
class SchDict(dict):
    """Map hierarchy-qualified sheet names to SheetInstantiation objects.

    Also keeps a parallel timestamp index, a cache of parsed sheet files
    (each .sch file is parsed once even when instantiated many times), and
    the order in which sheets were first encountered.
    """
    def __init__(self, topschfile=None):
        self.timestamps = {}
        self.filecache = {}
        self.priorityorder = []
        if topschfile is not None:
            # NOTE(review): topschfile looks like a path-like object that
            # supports indexing and exposes ``basename`` -- confirm its type.
            self.projdir = topschfile[-1]
            sheetfname = topschfile.basename
            # Sheet name is the file name without its trailing '.sch'.
            sheetname = sheetfname.rsplit('.sch', 1)[0]
            self.topsheet = SheetInstantiation(self, sheetname, sheetfname)
    def readfile(self, sheetfile):
        """Parse ``sheetfile`` once and cache the resulting SchFile."""
        sheetdata = self.filecache.get(sheetfile)
        if sheetdata is None:
            sheetdata = SchFile(self.projdir[sheetfile])
            self.filecache[sheetfile] = sheetdata
        return sheetdata | kipy/fileobjs/sch/schdict.py | from .schfile import SchFile
'''
Given a top schematic file name, SchDict will
read all the schematics and put them in a dictionary.
If they are instantiated multiple times, they will only
be read/parsed once, but each instantiation will have its
own dictionary entry with its own timestamp and parent.
'''
class SheetInstantiation(object):
def __init__(self, database, sheetname, sheetfile, timestamp='', parent=None, antecedents=set()):
namesep = ' / '
sheetname = sheetname.replace(namesep, namesep.replace(' ', '_'))
if timestamp is None:
timestamp = sheetfile
if parent is not None:
sheetname = parent.sheetname + namesep + sheetname
timestamp = parent.timestamp + '/' + timestamp
if sheetname in database:
raise SystemExit('Sheet %s in database multiple times' % sheetname)
if timestamp in database.timestamps:
raise SystemExit('Sheet %s timestamp same as sheet %s') % (sheetname, database[timestamp].sheetname)
if sheetfile in antecedents:
raise SystemExit('Loops not permitted in page hierarchy:\n %s' %
', '.join(sorted((antecedents))))
database[sheetname] = self
database.timestamps[timestamp] = self
database.priorityorder.append(sheetname)
self.sheetname = sheetname
self.sheetfile = sheetfile
self.timestamp = timestamp
self.sheetdata = database.readfile(sheetfile)
antecedents.add(sheetfile)
for item in self.sheetdata.items:
if isinstance(item, item.Sheet):
SheetInstantiation(database,
item.fields[0].name, item.fields[1].name,
item.timestamp, self, antecedents)
antecedents.remove(sheetfile)
class SchDict(dict):
def __init__(self, topschfile=None):
self.timestamps = {}
self.filecache = {}
self.priorityorder = []
if topschfile is not None:
self.projdir = topschfile[-1]
sheetfname = topschfile.basename
sheetname = sheetfname.rsplit('.sch', 1)[0]
self.topsheet = SheetInstantiation(self, sheetname, sheetfname)
def readfile(self, sheetfile):
sheetdata = self.filecache.get(sheetfile)
if sheetdata is None:
sheetdata = SchFile(self.projdir[sheetfile])
self.filecache[sheetfile] = sheetdata
return sheetdata | 0.459076 | 0.258674 |