text stringlengths 957 885k |
|---|
# -*- coding: utf-8 -*-
import argparse
import sys
import numpy as np
from gensim.models import word2vec
from gensim.models.doc2vec import Word2Vec
from keras.layers import Activation, Embedding, Merge, Reshape
from keras.models import Sequential
from keras.preprocessing.sequence import skipgrams, make_sampling_table
from keras.preprocessing.text import Tokenizer, base_filter
from utils import maybe_download, unzip, read_analogies
# ---- command-line arguments -------------------------------------------------
parser = argparse.ArgumentParser(description='Keras skip-gram with negative sampling')
parser.add_argument('--save_path', type=str, default='vectors.txt',
                    help='Directory to write the model.')
parser.add_argument('--eval_data', type=str, default=None,
                    help='Analogy questions. '
                    'See README.md for how to get questions-words.txt.')
parser.add_argument('--embedding_size', type=int, default=200,
                    help='The embedding dimension size.')
parser.add_argument('--epochs_to_train', type=int, default=5,
                    help='Number of epochs to train.'
                    'Each epoch processes the training data once completely.')
parser.add_argument('--num_neg_samples', type=int, default=5,
                    help='Negative samples per training example.')
parser.add_argument('--window_size', type=int, default=4,
                    help='The number of words to predict to the left and right '
                    'of the target word.')
parser.add_argument('--min_count', type=int, default=5,
                    help='The minimum number of word occurrences for it to be '
                    'included in the vocabulary.')
parser.add_argument('--sampling_factor', type=float, default=1e-3,
                    help='Subsample threshold for word occurrence. Words that appear '
                    'with higher frequency will be randomly down-sampled. Set '
                    'to 0 to disable.')
args = parser.parse_args()

# ---- corpus preparation -----------------------------------------------------
# Download and unpack the text8 corpus, then re-join each sentence into a
# whitespace-separated string so the Keras Tokenizer can fit on it.
zip_filename = maybe_download('http://mattmahoney.net/dc/text8.zip')
text_file = unzip(zip_filename)
sentences = word2vec.Text8Corpus(text_file)
sentences = [' '.join(sent) for sent in sentences]
# Filter out apostrophes in addition to the default punctuation set.
tokenizer = Tokenizer(filters=base_filter() + "'")
tokenizer.fit_on_texts(sentences)
sentences = tokenizer.texts_to_sequences(sentences)
# Vocabulary size; +1 because Tokenizer word indices start at 1 (0 is padding).
V = len(tokenizer.word_index) + 1
def build_model():
    """Assemble the skip-gram model: two 1-word embedding branches whose
    vectors are dotted together and squashed through a sigmoid, trained
    with binary cross-entropy (positive pair vs. negative sample)."""
    def embedding_branch():
        # One branch: a single-token input mapped to its embedding vector.
        branch = Sequential()
        branch.add(Embedding(V, args.embedding_size, input_length=1))
        return branch

    word_branch = embedding_branch()
    context_branch = embedding_branch()

    net = Sequential()
    # Dot product of the two embeddings -> scalar similarity per pair.
    net.add(Merge([word_branch, context_branch], mode='dot', dot_axes=2))
    net.add(Reshape((1,), input_shape=(1, 1)))
    net.add(Activation('sigmoid'))
    net.compile(loss='binary_crossentropy', optimizer='rmsprop')
    return net
def train_model(model):
    """Train the skip-gram model with negative sampling.

    Iterates args.epochs_to_train times over the tokenized sentences,
    generating (target, context) couples with skipgrams() and feeding
    them batch-per-sentence to model.train_on_batch.
    """
    sampling_table = make_sampling_table(V, sampling_factor=args.sampling_factor)
    n_sentences = len(sentences)
    for epoch in range(args.epochs_to_train):
        epoch_loss = 0.
        for idx, sentence in enumerate(sentences):
            print('{}/{}'.format(idx, n_sentences))
            pairs, labels = skipgrams(sequence=sentence, vocabulary_size=V,
                                      window_size=args.window_size,
                                      negative_samples=args.num_neg_samples,
                                      sampling_table=sampling_table)
            if pairs:
                targets, contexts = zip(*pairs)
                batch = [np.array(targets, dtype=np.int32),
                         np.array(contexts, dtype=np.int32)]
                epoch_loss += model.train_on_batch(
                    batch, np.array(labels, dtype=np.int32))
        print('num epoch: {} loss: {}'.format(epoch, epoch_loss))
    return model
def save_model(model):
    """Dump the learned target-word embeddings to args.save_path in the
    word2vec text format: a "<count> <dim>" header line followed by one
    "word v1 v2 ..." line per vocabulary entry."""
    # The first weight matrix is the target-word Embedding layer.
    vectors = model.get_weights()[0]
    with open(args.save_path, 'w') as out:
        out.write(' '.join([str(V - 1), str(args.embedding_size)]))
        out.write('\n')
        for word, idx in tokenizer.word_index.items():
            row = ' '.join(map(str, list(vectors[idx, :])))
            out.write(word)
            out.write(' ')
            out.write(row)
            out.write('\n')
def eval_model():
    """Evaluate the saved vectors on the analogy question set.

    Loads the text-format vectors written by save_model(), reads the
    analogy questions from args.eval_data, and counts a question as
    correct when the expected word appears in the top-4 most-similar
    results of b + c - a.
    """
    w2v = Word2Vec.load_word2vec_format(args.save_path, binary=False)
    word2id = dict([(w, i) for i, w in enumerate(w2v.index2word)])
    analogy_questions = read_analogies(args.eval_data, word2id)
    correct = 0
    total = len(analogy_questions)
    if total == 0:
        # Fix: avoid ZeroDivisionError when no questions could be read
        # (e.g. --eval_data missing or every question filtered out).
        print('Eval: no analogy questions loaded; skipping evaluation.')
        return
    for question in analogy_questions:
        a, b, c, d = question  # E.g. [Athens, Greece, Baghdad, Iraq]
        analogies = w2v.most_similar(positive=[b, c], negative=[a], topn=4)
        for word, _ in analogies:
            if d == word:
                # Predicted Correctly!
                correct += 1
                break
    print('Eval %4d/%d accuracy = %4.1f%%' % (correct, total, correct * 100.0 / total))
def main():
    """
    Train a word2vec model.
    """
    # --save_path has a default, but bail out explicitly if it is emptied.
    if not args.save_path:
        print('--train_data --eval_data and --save_path must be specified.')
        sys.exit(1)
    trained = train_model(build_model())
    save_model(trained)
    eval_model()


if __name__ == '__main__':
    main()
|
import json
import requests
import time
from discord_webhook import DiscordWebhook, DiscordEmbed
# SECURITY NOTE(review): this is a live Discord webhook token committed in
# source — anyone with it can post to the channel. It should be revoked and
# loaded from an environment variable or config file instead.
webhook_url = 'https://discordapp.com/api/webhooks/672159508675690497/4UtaClAc7rKMJsEvbR4iYf-Razv4M3ZWtkYDOxBzLfiDzJhI7RSFpoLn6iijBiRcaNOR'
# Module-level webhook object is never used (obtainBasicInfo builds its own).
webhook = DiscordWebhook(webhook_url)
# Product / style code searched for on GOAT.
pid = '508214-660'
# Browser-like headers so the GOAT/Algolia endpoints accept the requests.
headers = {
    'Connection': 'keep-alive',
    'accept': 'application/json',
    'Origin': 'https://www.goat.com',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
    'content-type': 'application/x-www-form-urlencoded',
    'Sec-Fetch-Site': 'cross-site',
    'Sec-Fetch-Mode': 'cors',
    'Referer': 'https://www.goat.com/search?query=' + pid,
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en-US,en;q=0.9',
}
# Algolia client identification; presumably GOAT's public search-only API key
# (not a secret) — TODO confirm.
params = {
    'x-algolia-agent': 'Algolia for vanilla JavaScript 3.25.1',
    'x-algolia-application-id': '2FWOTDVM2O',
    'x-algolia-api-key': 'ac96de6fef0e02bb95d433d8d5c7038a',
}
# Algolia search payload: shoes matching the pid, first page, 48 hits.
data = {
    "distinct": 'true',
    'facetFilters': 'product_category: shoes',
    'facets': 'size',
    'hitsPerPage': '48',
    'numericFilters': '[]',
    'page': '0',
    'query': pid,
    'clickAnalytics': "true"
}
# Query GOAT's Algolia index and take the first hit; raises IndexError if the
# pid matches nothing.
response = requests.post('https://2fwotdvm2o-dsn.algolia.net/1/indexes/product_variants_v2/query', headers=headers, params=params, json=data)
response_json = response.json()
response_json_dict = response_json['hits'][0]
product_id = response_json_dict['product_template_id']
print(product_id)
def obtainBasicInfo():
    """Fetch every listed variant (size/condition/price) for product_id from
    GOAT's web API, print them, and post them as one embed to the Discord
    webhook. Uses the module-level headers / response_json_dict globals."""
    hook = DiscordWebhook(url=webhook_url)
    variants = requests.get(
        'https://www.goat.com/web-api/v1/product_variants?productTemplateId=' + str(product_id),
        headers=headers).json()
    embed = DiscordEmbed(title=response_json_dict['name'],
                         url=headers['Referer'], color=242424)
    embed.set_thumbnail(url=response_json_dict['main_picture_url'])
    sizes, shoe_conditions, box_conditions, prices = [], [], [], []
    for variant in variants:
        size = str(variant['size'])
        shoe = variant['shoeCondition']
        box = variant['boxCondition']
        # Prices arrive in US cents; convert to dollars.
        price = str(int(variant['lowestPriceCents']['amountUsdCents']) / 100)
        sizes.append(size)
        shoe_conditions.append(shoe)
        box_conditions.append(box)
        prices.append(price)
        print(' Size: ' + size + '\n' + ' Shoe condition: ' + shoe + '\n' + ' Box condition: ' + box + '\n' + ' $' + price + '\n' + '-----------------')
        embed.add_embed_field(name='Size', value=size)
        embed.add_embed_field(name='Shoe Condition', value=str(shoe))
        embed.add_embed_field(name='Box Condition', value=str(box))
        embed.add_embed_field(name='Price', value='$' + price)
    hook.add_embed(embed)
    hook.execute()
    time.sleep(2)
    embed.fields = []
    print(sizes)
    print(shoe_conditions)
    print(box_conditions)
    print(prices)


obtainBasicInfo()
|
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
MLM datasets
"""
import random
import torch
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
from .data import (mlm_DetectFeatTxtTokDataset, TxtTokLmdb,
pad_tensors, get_gather_index)
from pytorch_pretrained_bert import BertTokenizer
from cytoolz import curry
import spacy
from sprl import *
from spacy.tokenizer import Tokenizer
# import nltk
# nltk.download('punkt')
# from nltk.tokenize import word_tokenize
# spaCy pipeline bundled with the SpRL (spatial role labeling) model.
nlp = spacy.load('/content/UNITER/models/en_core_web_lg-sprl')
# Every tensor built in this module is moved onto the GPU.
device = torch.device("cuda")
@curry
def bert_tokenize(tokenizer, text):
    """WordPiece-tokenize *text* word by word and return the flat id list.

    Curried: bert_tokenize(tokenizer) yields a text -> ids function.
    """
    ids = []
    for word in text.strip().split():
        pieces = tokenizer.tokenize(word)
        # Some special characters tokenize to nothing; skip them.
        if pieces:
            ids.extend(tokenizer.convert_tokens_to_ids(pieces))
    return ids
# Cased BERT tokenizer shared by every masking helper in this module.
tokenizer1 = BertTokenizer.from_pretrained("bert-base-cased", do_lower_case=False)
# Curried convenience wrapper: tokenizer2(text) -> list of wordpiece ids.
tokenizer2 = bert_tokenize(tokenizer1)
def mask_spatial(example, vocab_range, mask):
    """Mask the spatial-relation tokens of example['sentence'] for MLM.

    :param example: dict with at least a 'sentence' key (raw text).
    :param vocab_range: unused here (kept for signature parity with
        random_word, which shares the caller).
    :param mask: the [MASK] token id to substitute at masked positions.
    :return: (input_ids, output_label) — wordpiece ids with spatial spans
        replaced by *mask*, and labels holding the original id at masked
        positions and -1 everywhere else.
    """
    input_ids = []
    output_label = []
    # 1. spacy tokenize the sentence and sprl-spacy find the spatial words
    old_tokens = nlp(example['sentence'])
    old_tokens = [t.text for t in old_tokens]
    relations = sprl(example['sentence'], nlp, model_relext_filename='models/model_svm_relations.pkl')
    # 2. replace the spatial tokens with mask only if bert tokenize it as one word
    # maps token position -> its single wordpiece id (the label to predict)
    mask_to_old_bert_token = {}
    for rel in relations:
        # rel[1] is assumed to be a span with .start/.end token offsets —
        # TODO confirm against the sprl return type.
        start, end = rel[1].start, rel[1].end
        all_single = True
        for i in range(start, end):
            bert_token = tokenizer1.tokenize(old_tokens[i])
            tid = tokenizer1.convert_tokens_to_ids(bert_token)
            if len(tid) == 1:
                mask_to_old_bert_token[i] = tid[0]
                #old_tokens[i] = '[MASK]'
            else:
                # a multi-wordpiece token disqualifies the whole span
                all_single = False
                break
        if all_single:
            for i in range(start, end):
                old_tokens[i] = '[MASK]'
    # 3. use bert to tokenize and generate input_ids and output_label
    for i, token in enumerate(old_tokens):
        if token != '[MASK]':
            wd = tokenizer1.tokenize(token)
            ids = tokenizer1.convert_tokens_to_ids(wd)
            output_label.extend([-1]*len(ids))
            input_ids.extend(ids)
        else:
            # NOTE(review): if the sentence literally contains '[MASK]' this
            # raises KeyError, since only step-2 positions are in the dict.
            input_ids.append(mask)
            output_label.append(mask_to_old_bert_token[i])
    if all(o == -1 for o in output_label):
        # at least mask 1
        #output_label[0] = example['input_ids'][0]
        output_label[0] = tokenizer1.convert_tokens_to_ids(tokenizer1.tokenize('.'))[0]
        input_ids[0] = mask
    assert len(input_ids) == len(output_label)
    return input_ids, output_label
def random_word(example, vocab_range, mask):
    """
    Masking some random prepositional tokens for Language Model task with probabilities as in
    the original BERT paper.
    :param tokens: list of int, tokenized sentence.
    :param vocab_range: for choosing a random word
    :return: (list of int, list of int), masked tokens and related labels for
        LM prediction
    """
    # NOTE(review): word_tokenize comes from nltk, but the nltk imports at the
    # top of this file are commented out — calling this function as-is raises
    # NameError. Re-enable the nltk imports (or substitute another tokenizer).
    output_label = []
    old_tokens = word_tokenize(example['sentence'])
    input_ids = []
    is_subset = False  # NOTE(review): assigned but never used
    i = 0
    # Scan for 1-, 2- and 3-word prepositions. The 1-word check runs first,
    # so a single-word hit shadows a longer phrase starting at the same token.
    while i < len(old_tokens):
        word = old_tokens[i].lower()
        two, three = None, None
        if i + 1 < len(old_tokens):
            two = ' '.join([word, old_tokens[i+1].lower()])
        if i + 2 < len(old_tokens):
            three = ' '.join([word, old_tokens[i+1].lower(), old_tokens[i+2].lower()])
        if word in prepositions:
            output_label, input_ids = random_replace(1, old_tokens, i, mask, vocab_range, output_label, input_ids)
            i += 1
        elif two in prepositions:
            output_label, input_ids = random_replace(2, old_tokens, i, mask, vocab_range, output_label, input_ids)
            i += 2
        elif three in prepositions:
            output_label, input_ids = random_replace(3, old_tokens, i, mask, vocab_range, output_label, input_ids)
            i += 3
        else:
            # Not a preposition: keep its (lower-cased) wordpiece ids and
            # label every position -1 (not predicted).
            wd = tokenizer1.tokenize(word)
            ids = tokenizer1.convert_tokens_to_ids(wd)
            output_label.extend([-1]*len(ids))
            input_ids.extend(ids)
            i += 1
    example['input_ids'] = input_ids
    # print("Mask example['sent']:", example['sentence'])
    # print("Mask example['input_ids']:", example['input_ids'])
    if all(o == -1 for o in output_label):
        # at least mask 1
        output_label[0] = example['input_ids'][0]
        input_ids[0] = mask
    # print(f'len(input_ids) is {len(input_ids)}')
    # print(f'len(output_label) is {len(output_label)}')
    # input_ids, txt_labels
    return input_ids, output_label
def random_replace(num_token, token_list, i, mask, vocab_range, output_label, input_ids):
    """Mask the span token_list[i : i+num_token] in place.

    A token is replaced by '[MASK]' (with its true id recorded as the
    label) only when BERT tokenizes it to a single wordpiece; otherwise
    its wordpiece ids are kept unchanged in both lists. Returns the
    (mutated) output_label and input_ids lists.
    """
    for pos in range(i, i + num_token):
        pieces = tokenizer1.tokenize(token_list[pos])
        piece_ids = tokenizer1.convert_tokens_to_ids(pieces)
        if len(piece_ids) == 1:
            # Single-wordpiece token: mask it and remember the truth id.
            token_list[pos] = '[MASK]'
            input_ids.append(mask)
            output_label.append(piece_ids[0])
        else:
            # Multi-piece token: leave it unmasked.
            output_label.extend(piece_ids)
            input_ids.extend(piece_ids)
    return output_label, input_ids
class SpatialMlmDataset(mlm_DetectFeatTxtTokDataset):
    """MLM dataset that masks *spatial* tokens (located via SpRL through
    mask_spatial) rather than random tokens, pairing each caption with the
    detected-region features of its image(s)."""

    def __init__(self, txt_db, img_db):
        assert isinstance(txt_db, TxtTokLmdb)
        super().__init__(txt_db, img_db)

    def __getitem__(self, i):
        """
        Return:
        - input_ids : (L, ), i.e., [cls, wd, wd, ..., sep, 0, 0], 0s padded
        - img_feat : (num_bb, d)
        - img_pos_feat : (num_bb, 7)
        - attn_masks : (L + num_bb, ), ie., [1, 1, ..., 0, 0, 1, 1]
        - txt_labels : (L, ), [-1, -1, wid, -1, -1, -1]
        0's padded so that (L + num_bb) % 8 == 0
        """
        example = super().__getitem__(i)
        # text input
        input_ids, txt_labels = self.create_mlm_io(example)
        # img input
        img_feat, img_pos_feat, num_bb = self.mlm_get_img_feat(
            example['img_fname'])
        # NOTE(review): the active mask covers only the text tokens; the
        # commented line would also cover the image boxes (matching the
        # docstring above) — confirm which is intended.
        #attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
        attn_masks = torch.ones(len(input_ids), dtype=torch.long)
        return input_ids.to(device), img_feat.to(device), img_pos_feat.to(device), attn_masks.to(device), txt_labels.to(device)

    def create_mlm_io(self, example):
        # Mask spatial tokens, then wrap with [CLS]/[SEP]; label -1 marks
        # positions that are not predicted.
        input_ids, txt_labels = mask_spatial(example,
                                             self.txt_db.v_range,
                                             self.txt_db.mask)
        input_ids = torch.tensor([self.txt_db.cls_]
                                 + input_ids
                                 + [self.txt_db.sep])
        txt_labels = torch.tensor([-1] + txt_labels + [-1])
        return input_ids, txt_labels

    def mlm_get_img_feat(self, fname_list):
        # Concatenate features of every image in the example along dim 0 and
        # total up the number of bounding boxes.
        img_feats = []
        img_pos_feats = []
        num_bb = 0
        for i, img in enumerate(fname_list):
            feat, pos, nbb = self._get_img_feat(img)
            img_feats.append(feat)
            img_pos_feats.append(pos)
            num_bb += nbb
        img_feat = torch.cat(img_feats, dim=0)
        img_pos_feat = torch.cat(img_pos_feats, dim=0)
        return img_feat.to(device), img_pos_feat.to(device), num_bb
def spatial_mlm_collate(inputs):
    """
    Return:
    :input_ids (n, max_L) padded with 0
    :position_ids (n, max_L) padded with 0
    :txt_lens list of [txt_len]
    :img_feat (n, max_num_bb, feat_dim)
    :img_pos_feat (n, max_num_bb, 7)
    :num_bbs list of [num_bb]
    :attn_masks (n, max_{L + num_bb}) padded with 0
    :txt_labels (n, max_L) padded with -1
    """
    input_ids, img_feats, img_pos_feats, attn_masks, txt_labels = map(
        list, unzip(inputs))

    # --- text batches ---
    txt_lens = [seq.size(0) for seq in input_ids]
    input_ids = pad_sequence(input_ids, batch_first=True,
                             padding_value=0).to(device)
    txt_labels = pad_sequence(txt_labels, batch_first=True,
                              padding_value=-1).to(device)
    position_ids = torch.arange(
        0, input_ids.size(1), dtype=torch.long).unsqueeze(0).to(device)

    # --- image batches ---
    num_bbs = [feat.size(0) for feat in img_feats]
    img_feat = pad_tensors(img_feats, num_bbs).to(device)
    img_pos_feat = pad_tensors(img_pos_feats, num_bbs).to(device)
    attn_masks = pad_sequence(attn_masks, batch_first=True,
                              padding_value=0).to(device)

    # gather_index is intentionally disabled for this dataset.
    gather_index = None
    return {'input_ids': input_ids,
            'position_ids': position_ids,
            'img_feat': img_feat,
            'img_pos_feat': img_pos_feat,
            'attn_masks': attn_masks,
            'gather_index': gather_index,
            'txt_labels': txt_labels}
# Spatial prepositions (single- and multi-word) whose tokens are masked by
# random_word()/random_replace(). Commented-out entries are prepositions that
# were deliberately excluded as non-spatial.
prepositions = [
    "aboard",
    "about",
    "above",
    "absent",
    "across",
    "after",
    "against",
    "along",
    "alongside",
    "amid",
    "amidst",
    "among",
    "amongst",
    "around",
    "as",
    "astride",
    "at",
    "atop",
    "before",
    "afore",
    "behind",
    "below",
    "beneath",
    "beside",
    "besides",
    "between",
    #"beyond",
    "by",
    "circa",
    #"despite",
    "down",
    #"during",
    #"except",
    "for",
    "from",
    "in",
    "inside",
    "into",
    #"less",
    #"like",
    #"minus",
    "near",
    "nearer",
    "nearest",
    #"notwithstanding",
    #"of",
    "off",
    "on",
    "onto",
    "opposite",
    "outside",
    "over",
    "past",
    "per",
    "save",
    "since",
    "through",
    #"throughout",
    #"to",
    "toward",
    "towards",
    "under",
    "underneath",
    #"until",
    "up",
    "upon",
    "upside",
    #"versus",
    #"via",
    "with",
    "within",
    #"without",
    #"worth",
    #"according to",
    "adjacent to",
    "ahead of",
    "apart from",
    #"as of",
    #"as per",
    "as regards",
    "aside from",
    "astern of",
    "back to",
    #"because of",
    "close to",
    #"due to",
    #"except for",
    "far from",
    "inside of",
    #"instead of",
    "left of",
    "near to",
    "next to",
    "opposite of",
    "opposite to",
    "out from",
    "out of",
    "outside of",
    #"owing to",
    #"prior to",
    #"pursuant to",
    #"rather than",
    #"regardless of",
    "right of",
    #"subsequent to",
    #"such as",
    #"thanks to",
    #"up to",
    #"as far as",
    #"as opposed to",
    #"as soon as",
    #"as well as",
    #"at the behest of",
    #"by means of",
    #"by virtue of",
    #"for the sake of",
    #"in accordance with",
    #"in addition to",
    #"in case of",
    "in front of",
    "in lieu of",
    #"in place of",
    "in point of",
    #"in spite of",
    #"on account of",
    #"on behalf of",
    "on top of",
    #"with regard to",
    #"with respect to",
    "with a view to",
]
<reponame>mightyang/yangTools<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File : yangTools.py
# Author : yang <<EMAIL>>
# Date : 31.12.2018
# Last Modified Date: 14.03.2019
# Last Modified By : yang <<EMAIL>>
import nuke
import ytEnvInit
from PySide2 import QtWidgets, QtCore
import ytNode, ytVariables, ytCallbacks, ytVersion
from ytWidgets import ytOutlineWidget
from ytLoggingSettings import yl, logging
import ytPlugins
from plugin import *
import threading
class yangTools(object):
    """Outliner tool for Nuke (Python 2): mirrors the Nuke node graph into a
    ytNode tree displayed in a Qt tree view, and keeps node selection in sync
    in both directions via Nuke callbacks and Qt signals."""

    def __init__(self):
        yl.debug('initialize yangTools')
        self.version = ytVersion.ytVersion()
        yl.debug('version of yangTools: %s' % self.version.getVersion())
        # True while the outline window is open and Nuke callbacks are live.
        self.isShow = False
        yl.debug('initialize root node')
        # Root of the mirrored tree, wrapping nuke.root().
        self.rootNode = ytNode.ytNode('root', nuke.root())
        self.initGui()
        self.initPlugin()
        self.connectGuiSignal()
        self.addYtCallback()

    def initGui(self):
        """Build the outline widget, parented to Nuke's main window."""
        yl.debug('initialize gui of yangTools')
        self.outlineGui = ytOutlineWidget(self.getNukeMainWindow())
        self.outlineGui.outlineTreeView.model().setHeader(['name', 'type'])
        self.outlineGui.outlineTreeView.model().setRoot(self.rootNode)
        self.outlineGui.logHandle.setLevel(logging.DEBUG)

    def initPlugin(self):
        """Register every discovered plugin with the GUI and hook its
        started/stopped callbacks to the icon refresh."""
        yl.debug('initialize plugins')
        for p in ytPlugins.plugins:
            self.outlineGui.addPlugin(p)
            p.addStartedCallback((self.outlineGui.updateIcon, ()))
            p.addStoppedCallback((self.outlineGui.updateIcon, ()))

    def getNodePath(self, node):
        """Return the dot-separated path of a ytNode up to the tree root."""
        yl.debug('get node path')
        path = node.getName()
        while True:
            parent = node.getParent()
            if parent:
                path = parent.getName() + '.' + path
                node = parent
            else:
                break
        return path

    # def getNodeTree(self, space=None):
    #     yl.debug('get node tree')
    #     if space is None:
    #         space = ytNode.ytNode('root', nuke.root())
    #     ns = nuke.toNode(space.getPath()).nodes()
    #     if len(ns) > 0:
    #         for n in ns:
    #             pn = ytNode.ytNode(n.name(), n, space)
    #             pn.setSelection(n['selected'].value(), ytVariables.ytCaller.yt_caller_nuke)
    #             if n.Class() == 'Group':
    #                 self.getNodeTree(pn)

    def printNodeTree(self, space=None, level=0):
        """Pretty-print the ytNode tree to stdout (debugging aid)."""
        yl.debug('print node tree')
        if space is None:
            space = self.rootNode
            print '\nroot'
        children = space.getChildren()
        if len(children) > 0:
            prefix = '│' * (level != 0) + ' ' * level
            for index, child in enumerate(children):
                if index == len(children) - 1:
                    print '%-60s%s' % (prefix + '└─' + child.getName(), str(child.getSelection()))
                # NOTE(review): this condition is always true for enumerate
                # indices, so it acts as a plain "else".
                elif index != len(children):
                    print '%-60s%s' % (prefix + '├─' + child.getName(), str(child.getSelection()))
                if child.getNode().Class() == 'Group':
                    self.printNodeTree(child, level + 1)

    def nukeCreateNodeCallback(self):
        '''the callback that called while creating node in nuke'''
        yl.debug('nukeCreateNodeCallback')
        node = nuke.thisNode()
        # Top-level nodes attach to the root; nested ones to their group.
        if '.' not in node.fullName():
            parent = self.getPkgNodeByPath()
        else:
            parent = self.getPkgNodeByPath('.'.join(node.fullName().split('.')[:-1]))
        if parent is not None:
            yn = ytNode.ytNode(node.name(), node, parent)
            yn.setSelection(node['selected'].value(), ytVariables.ytCaller.yt_caller_nuke)

    def nukeDestroyNodeCallback(self):
        '''the callback that called while deleting node in nuke'''
        yl.debug('nukeDestroyNodeCallback begin')
        node = nuke.thisNode()
        yn = self.getPkgNodeByPath(node.fullName())
        parent = yn.getParent()
        if parent:
            yn.getParent().removeChild(yn)
        yl.debug('nukeDestroyNodeCallback end')

    def nukeSelectionCallback(self):
        '''the callback that called while selecting node in nuke'''
        k = nuke.thisKnob()
        # Only react to changes of the 'selected' knob.
        if k.name() == 'selected':
            yl.debug('nukeSelectNodeCallback')
            n = nuke.thisNode()
            # Break the gui -> nuke -> gui echo loop.
            if ytVariables.ytCaller.yt_caller_isGuiCallback:
                ytVariables.ytCaller.yt_caller_isGuiCallback = False
                return
            yt = self.getPkgNodeByPath(n.fullName())
            if yt is not None:
                yt.setSelection(k.value(), ytVariables.ytCaller.yt_caller_nuke)

    def addNukeCallback(self):
        '''add method to Nuke callback list'''
        yl.debug('nukeAddNodeCallback')
        # Guard against double registration.
        if '*' not in nuke.onCreates or (self.nukeCreateNodeCallback, (), {}, None) not in nuke.onCreates['*']:
            nuke.addOnCreate(self.nukeCreateNodeCallback)
        if '*' not in nuke.knobChangeds or (self.nukeSelectionCallback, (), {}, None) not in nuke.knobChangeds['*']:
            nuke.addKnobChanged(self.nukeSelectionCallback)
        if '*' not in nuke.onDestroys or (self.nukeDestroyNodeCallback, (), {}, None) not in nuke.onDestroys['*']:
            nuke.addOnDestroy(self.nukeDestroyNodeCallback)

    def removeNukeCallback(self):
        '''remove method from Nuke callback list'''
        yl.debug('nukeDestroyNodeCallback')
        if '*' in nuke.onCreates and (self.nukeCreateNodeCallback, (), {}, None) in nuke.onCreates['*']:
            nuke.removeOnCreate(self.nukeCreateNodeCallback)
        # NOTE(review): this condition uses 'or' while its siblings use 'and' —
        # it may call removeKnobChanged when the callback is not registered.
        if '*' in nuke.knobChangeds or (self.nukeSelectionCallback, (), {}, None) in nuke.knobChangeds['*']:
            nuke.removeKnobChanged(self.nukeSelectionCallback)
        if '*' in nuke.onDestroys and (self.nukeDestroyNodeCallback, (), {}, None) in nuke.onDestroys['*']:
            nuke.removeOnDestroy(self.nukeDestroyNodeCallback)

    def ytNodeSelectionCallback(self, pNode, caller):
        '''the callback that called while selecting node in nuke or in treeView'''
        if caller == ytVariables.ytCaller.yt_caller_gui:
            # GUI-originated change: push the selection state into Nuke.
            yl.debug('call ytNodeSelectionCallback to select node %s in nuke' % pNode.getName())
            ytVariables.ytCaller.yt_caller_isGuiCallback = True
            pNode.getNode().setSelected(pNode.getSelection())
        elif caller == ytVariables.ytCaller.yt_caller_nuke:
            # Nuke-originated change: mirror it into the tree view selection.
            yl.debug('call ytNodeSelectionCallback to select node %s in treeView' % pNode.getName())
            modelIndex = self.outlineGui.outlineTreeView.model().getIndexFromNode(pNode)
            selected = self.outlineGui.outlineTreeView.selectionModel().isSelected(modelIndex)
            if not pNode.getSelection() is selected:
                ytVariables.ytCaller.yt_caller_isNukeCallback = True
                if pNode.getSelection():
                    self.outlineGui.outlineTreeView.selectionModel().select(modelIndex, QtCore.QItemSelectionModel.Select)
                else:
                    self.outlineGui.outlineTreeView.selectionModel().select(modelIndex, QtCore.QItemSelectionModel.Deselect)

    def ytTreeViewSelectionCallback(self, selected, deselected):
        # signal loop break: gui -> ytNode -> nuke -> (break here) -> ytNode -> gui -> ...
        yl.debug('ytTreeViewSelectionCallback')
        if ytVariables.ytCaller.yt_caller_isNukeCallback:
            ytVariables.ytCaller.yt_caller_isNukeCallback = False
            return
        # deselect deselected node in nuke
        [i.internalPointer().setSelection(False) for i in deselected.indexes()]
        # select selected node in nuke
        [i.internalPointer().setSelection(True) for i in selected.indexes()]

    def addYtCallback(self):
        '''add methods to corresponding callback lists'''
        yl.debug('add method to ytNode\'s callback lists and plugin\'s callback list')
        ytCallbacks.ytNode_selectionChanged_callback.append((self.ytNodeSelectionCallback, ()))
        ytCallbacks.ytNode_childCreated_callback.append((self.outlineGui.outlineTreeView.model().createNodeSignal.emit, ()))
        ytCallbacks.ytNode_childDestroyed_callback.append((self.outlineGui.outlineTreeView.model().deleteNodeSignal.emit, ()))

    def connectGuiSignal(self):
        yl.debug('connect gui\'s signal')
        self.outlineGui.closedSignal.connect(self.stop)
        self.outlineGui.outlineTreeView.selectionModel().selectionChanged.connect(self.ytTreeViewSelectionCallback)
        # self.app is set by getNukeMainWindow() during initGui().
        self.app.focusChanged.connect(self.setCurrentWidget)

    def getPkgNodeByPath(self, nodePkgPath=''):
        '''
        used by root ytNode
        nodePath is node fullName in nuke, getted by node.fullName()
        '''
        yl.debug('get ytNode by path: %s' % nodePkgPath)
        if not isinstance(nodePkgPath, str):
            yl.error('TypeError: parameter need string, getted by node.fullName() in nuke')
            return None
        # Empty path means the root node itself.
        if nodePkgPath == '':
            return self.rootNode
        pathNames = nodePkgPath.split('.')
        nodePkg = self.rootNode
        if pathNames != []:
            # Walk down the tree one name component at a time.
            for p in pathNames:
                cn = nodePkg.getChildrenName()
                if p in cn:
                    nodePkg = nodePkg[cn.index(p)]
                else:
                    yl.error('can not find node: %s' % nodePkgPath)
                    return None
            return nodePkg
        return None

    def show(self):
        """Open the outliner: register callbacks and scan the node graph in a
        background thread."""
        yl.debug('show yangTools')
        if not self.isShow:
            self.addNukeCallback()
            self.outlineGui.show()
            t = getNodeTreeThread(self.rootNode)
            t.start()
            self.isShow = True

    def stop(self):
        """Close the outliner: unregister callbacks and clear the tree."""
        yl.debug('stop yangTools')
        if self.isShow:
            self.removeNukeCallback()
            self.rootNode.clearChildren()
            self.outlineGui.outlineTreeView.model().resetModel()
            [p.stop() for p in ytPlugins.plugins]
            self.isShow = False

    def addPluginSearchPath(self, path):
        yl.debug('add plugin search path')
        ytEnvInit.appendEnv('YT_PLUGIN_PATH', path)
        ytEnvInit.appendEnv('PATH', path)

    def getPlugins(self):
        yl.debug('get plugins')
        return ytPlugins.plugins

    def getNukeMainWindow(self):
        """Find and return Nuke's DockMainWindow; also caches the
        QApplication instance on self.app (used by connectGuiSignal)."""
        yl.debug('get main window instance of nuke')
        self.app = QtWidgets.QApplication.instance()
        for w in self.app.topLevelWidgets():
            if w.inherits('QMainWindow') and w.metaObject().className() == 'Foundry::UI::DockMainWindow':
                return w
        else:
            yl.error('RuntimeError: Could not find DockMainWindow instance')

    def setCurrentWidget(self, old, new):
        """Focus-change slot: walk up from the newly focused widget to find a
        titled Nuke panel and remember it as the current widget."""
        if new:
            w = new
            while True:
                n = w.windowTitle()
                pw = w.parent()
                if n:
                    if n in ytVariables.ytNukeWidgets.yt_widgets:
                        ytVariables.ytNukeWidgets.yt_current_widget = n
                        yl.debug('go into {}'.format(n))
                    elif 'Viewer' in n:
                        ytVariables.ytNukeWidgets.yt_current_widget = 'Viewer'
                        yl.debug('go into {}'.format(n))
                    return None
                elif pw:
                    w = pw
                else:
                    return None
class getNodeTreeThread(threading.Thread):
    """Background thread that mirrors the current Nuke node graph into the
    given ytNode space, recursing into Group nodes."""

    def __init__(self, space=None):
        super(getNodeTreeThread, self).__init__()
        # Parent ytNode to populate; defaults to a fresh root wrapper.
        self.space = space

    def run(self):
        yl.debug('start get node tree thread')
        self.getNodeTree(self.space)

    def getNodeTree(self, space=None):
        """Wrap every Nuke node under *space* as a child ytNode, copying its
        selection flag, and recurse into Groups."""
        if space is None:
            space = ytNode.ytNode('root', nuke.root())
        yl.debug('get node tree in space: %s' % space.getName())
        ns = nuke.toNode(space.getPath()).nodes()
        if len(ns) > 0:
            for n in ns:
                pn = ytNode.ytNode(n.name(), n, space)
                pn.setSelection(n['selected'].value(), ytVariables.ytCaller.yt_caller_nuke)
                if n.Class() == 'Group':
                    self.getNodeTree(pn)
|
import curses
from curses import wrapper
#from collections import OrderedDict
from curses.textpad import Textbox, rectangle
import vault
from vault import * # because of pickle loading??? otherwise it doesn't see the modules?
import random
def makeWin(x, y, w, h):
    """Create a bordered curses window of size w x h with its top-left
    corner at (x, y), using the blue-on-white background color pair."""
    window = curses.newwin(h, w, y, x)
    window.border()
    window.bkgd(curses.color_pair(ColorBlW))
    return window
class MenuList():
    """A selectable menu.

    Each item is a tuple (hotkey, text, handler, args, highlight_handler):
    the hotkey is auto-assigned from '1' upward unless given explicitly,
    select() invokes the handler of the current item, highlight() invokes
    its highlight handler, and next()/pred() move the selection with
    wrap-around.
    """

    def __init__(self):
        # Fix: items/lastKey/selected used to also exist as mutable class
        # attributes shared by all instances; keep all state per-instance.
        self.items = []
        self.lastKey = '1'
        self.selected = 0

    def add(self, item, fun, args, highlightfun, key=None):
        """Append a menu item; when key is None the next hotkey character
        is assigned automatically."""
        if key is None:
            key = self.lastKey
            self.lastKey = chr(ord(self.lastKey) + 1)
        self.items.append((key, item, fun, args, highlightfun))

    def select(self):
        """Call the handler of the currently selected item and return its
        result."""
        (key, text, fun, args, *_) = self.items[self.selected]
        return fun(args)

    def highlight(self):
        """Call the highlight handler of the currently selected item and
        return its result."""
        (key, text, fun, args, hf, *_) = self.items[self.selected]
        return hf(args)

    def next(self):
        """Move the selection down one item, wrapping to the top."""
        self.selected += 1
        if self.selected >= len(self.items):
            self.selected = 0

    def pred(self):
        """Move the selection up one item, wrapping to the bottom."""
        self.selected -= 1
        if self.selected < 0:
            self.selected = len(self.items) - 1
# Fixed column widths (in characters) of the menu pane and the text pane.
MenuWidth = 20
TextWidth = 60
class MenuListCurses(MenuList):
    """MenuList rendered in a bordered curses window along the left edge
    of the screen; the selected row is drawn with inverted colors."""

    def __init__(self):
        super().__init__()
        self.win = makeWin(1, 1, MenuWidth, curses.LINES-3)

    def display(self):
        """Redraw every item as "(hotkey) text", one per row."""
        pane = self.win
        pane.refresh()
        pane.clear()
        pane.border()
        for row, (hotkey, text, _, *_) in enumerate(self.items):
            label = '(' + hotkey + ') ' + text
            colour = ColorWBl if row == self.selected else ColorBW
            pane.addstr(row + 1, 1, label, curses.color_pair(colour))
class TextView():
    """Read-only text pane to the right of the menu; shows self.text,
    one list entry per row."""

    def __init__(self):
        self.win = makeWin(1+MenuWidth, 1, TextWidth, curses.LINES-3)
        self.text = list()

    def display(self):
        pane = self.win
        pane.refresh()
        pane.clear()
        pane.border()
        for row, line in enumerate(self.text):
            pane.addstr(row + 1, 2, line, curses.color_pair(ColorBW))
# curses color-pair ids; the pairs themselves are registered in Wincon.__init__.
ColorBW = 1   # black on white (normal text)
ColorBlW = 2  # blue on white (window background)
ColorRB = 3   # red on black
ColorWBl = 4  # white on blue (selected menu row)
ColorRW = 5   # red on white (vi editor background, INSERT mode)
ColorMW = 6   # magenta on white (vi editor background, COMMAND mode)
class Wincon():
    """Top-level controller: owns the main screen, the menu pane, the text
    pane, the input box and the vi-style editor, and dispatches keystrokes."""
    mainwin = None
    wins = []
    menu = None
    debugwin = None
    menuContent = None  # TextView previewing the selected entry's value

    def mainRefresh(self):
        self.mainwin.refresh()
        #self.mainwin.clear()
        self.mainwin.border()
        self.mainwin.addstr(curses.LINES-1, 2, "[k:up j:down q:exit ]")

    def buildMenu(self):
        """Rebuild the menu pane from the vault storage contents."""
        def MenuSelect(a):
            #m = self.inp.run()
            #self.buildMenu()
            return True
        def MenuChange(a):
            # Highlight handler: remember the key and preview its value.
            k, a = a
            self.path[0] = k
            self.menuContent.text = [str(a.value())]
            #self.menuContent.text = [str(a.value()), str(a), k]
            # TODO: need to store the path to the current element somehow so it can be edited
            return True
        self.menu = MenuListCurses()
        self.win = self.menu.win
        # Directories are shown in brackets, plain values as-is.
        for k, v in self.store.items():
            if v.isDir():
                self.menu.add('['+k+']', MenuSelect, (k, v), MenuChange)
            else:
                self.menu.add(k, MenuSelect, (k, v), MenuChange)

    def __init__(self, scr):
        self.mainwin = scr
        scr.clear()
        self.wins = list()
        curses.curs_set(False)
        # Register the color pairs; ids must match the Color* constants above.
        curses.init_pair(ColorBW, curses.COLOR_BLACK, curses.COLOR_WHITE)
        curses.init_pair(2, curses.COLOR_BLUE, curses.COLOR_WHITE)
        curses.init_pair(3, curses.COLOR_RED, curses.COLOR_BLACK )
        curses.init_pair(ColorWBl, curses.COLOR_WHITE, curses.COLOR_BLUE )
        curses.init_pair(ColorRW, curses.COLOR_RED, curses.COLOR_WHITE)
        curses.init_pair(ColorMW, curses.COLOR_MAGENTA, curses.COLOR_WHITE)
        # path[0] holds the key of the currently highlighted entry;
        # 'cc' is presumably just an initial placeholder — TODO confirm.
        self.path = ['cc']
        scr.bkgd(curses.color_pair(ColorBW))
        self.mainRefresh()
        self.inp = inp = Inputer()
        self.menuContent = TextView()
        self.vit = ViTextEdit()
        self.store = vault.Storage(True)
        self.buildMenu()
        self.menu.highlight()

    def handler(self):
        """Process one keystroke; return False when the app should exit."""
        notEnd = True
        key = self.win.getch()
        if key == ord('q') or key == 27:  # q or Esc quits
            notEnd = False
        if key == ord('j'):
            self.menu.next()
            self.menu.highlight()
        if key == ord('k'):
            self.menu.pred()
            self.menu.highlight()
        if key == ord(' '):
            return self.menu.select()
        if key == ord('a'): # add an element
            name = self.inp.run()
            self.store[name] = name
            self.buildMenu()
        if key == ord('e'): # edit a value ??? TODO: directory editing needs special handling.
            newtext = self.vit.run(self.menuContent.text)
            # save new text
            self.store[self.path[0]] = ''.join(newtext)
            self.buildMenu()
        #TODO: add dir, save all. move to dir, path save.
        # build the menu from the current path; change the path when entering a dir.
        self.win.addstr(curses.LINES-5,1,'вы нажали: '+str(key))
        return notEnd

    def addWin(self):
        pass

    def refresh(self):
        """Redraw all panes."""
        self.mainRefresh()
        for w in self.wins:
            w.refresh()
        self.menuContent.display()
        self.menu.display()

    def work(self):
        """Main event loop: redraw, show the current path, handle one key."""
        notEnd = True
        while notEnd:
            self.refresh()
            self.win.addstr(curses.LINES-4,1,': '+self.path[0])
            notEnd = self.handler()
        self.refresh()
class Inputer():
    """Single-line text input box in the top-right corner; collects raw
    keystrokes until Enter or Escape and returns the entered string."""

    def __init__(self):
        self.win = makeWin(1+MenuWidth+TextWidth, 1, 20, 3)

    def run(self):
        """Run the input loop and return the collected text."""
        self.msg = ''
        self.display()
        active = True
        while active:
            active = self.handler()
            self.display()
        self.win.clear()
        curses.curs_set(False)
        return self.msg

    def handler(self):
        """Consume one key; Enter (10) or Escape (27) ends the loop,
        anything else is appended to the message."""
        key = self.win.getch()
        if key == 10 or key == 27:
            return False
        self.msg += chr(key)
        return True

    def display(self):
        pane = self.win
        pane.refresh()
        pane.border()
        curses.curs_set(True)
        pane.addstr(1, 2, self.msg, curses.color_pair(ColorBW))
# ViTextEdit modes: INSERT accepts typed characters, COMMAND handles
# vi-style cursor motions (hjkl).
INSERT = 0
COMMAND = 1
class ViTextEdit():
    """Minimal vi-like multi-line editor in a curses window: INSERT mode
    types characters, COMMAND mode (entered with Esc) moves the cursor with
    h/j/k/l; Ctrl-X leaves the editor and returns the edited lines."""
    # Current mode; class-level default, overwritten via mode2ins()/mode2com().
    mode = INSERT

    def __init__(self):
        self.width = TextWidth
        self.height = curses.LINES-3
        self.win = makeWin(1+MenuWidth, 1, self.width, self.height)
        self.win.bkgd(curses.color_pair(ColorRW))

    def run(self, text=''):
        """Edit *text* and return it.

        NOTE(review): despite the str default, *text* is treated as a list
        of lines throughout (and the '' default would crash on text[-1]
        below) — callers pass a non-empty list; confirm and fix the default.
        """
        #get text from storage selected
        self.msg = ''
        #self.text = text.split('\n')
        self.text = text
        self.currentLine = 0
        # Start the cursor just past the end of the last line.
        self.y = len(text)
        self.x = len(text[-1])
        self.key = ''
        self.display()
        self.mode2ins()
        nend = True
        while nend:
            if self.mode == INSERT:
                nend = self.handler()
            else:
                nend = self.handlerCom()
            self.display()
        self.win.clear()
        curses.curs_set(False)
        return self.text

    def addChar(self, c):
        # Wrap to a new line when the current one fills the window width.
        if len(self.text[self.currentLine]) >= self.width-2:
            self.newLine()
        # Insert before the cursor column (x is 1-based).
        self.text[self.currentLine] = self.text[self.currentLine][:self.x-1] + c + self.text[self.currentLine][self.x-1:]
        self.cursorMove(1,0)

    def newLine(self):
        self.text.append('')
        self.currentLine += 1
        # The large negative dx clamps the cursor to column 1 of the new line.
        self.cursorMove(-1000,1)

    def bs(self):
        # Backspace: drop the last character of the current line.
        self.text[self.currentLine] = self.text[self.currentLine][:-1]
        self.cursorMove(-1,0)

    def mode2ins(self):
        self.mode = INSERT
        self.win.bkgd(curses.color_pair(ColorRW))

    def mode2com(self):
        self.mode = COMMAND
        self.win.bkgd(curses.color_pair(ColorMW))

    def cursorMove(self, dx, dy):
        """Move the cursor by (dx, dy), clamping to the text extents and
        keeping currentLine in sync with y."""
        self.y += dy
        self.x += dx
        if self.y < 1:
            self.y = 1
        if self.y > len(self.text): #self.height-2:
            self.y = len(self.text)
        self.currentLine = self.y-1
        if self.x < 1:
            self.x = 1
        if self.x > len(self.text[self.currentLine]):
            self.x = len(self.text[self.currentLine])+1

    def handlerCom(self):
        """COMMAND-mode keys: i -> insert mode, Ctrl-X -> quit,
        h/j/k/l -> cursor movement."""
        notEnd = True
        key = self.win.getkey()
        if ord(key) == ord('i'):
            self.mode2ins()
        elif ord(key) == 24:
            notEnd = False
        elif ord(key) == ord('j'):
            self.cursorMove(0, 1)
        elif ord(key) == ord('k'):
            self.cursorMove(0, -1)
        elif ord(key) == ord('h'):
            self.cursorMove(-1, 0)
        elif ord(key) == ord('l'):
            self.cursorMove(1, 0)
        return notEnd

    def handler(self):
        """INSERT-mode keys: Esc -> command mode, Enter -> new line,
        Ctrl-X -> quit, backspace -> delete, anything else is inserted."""
        notEnd = True
        #key = self.win.getch()
        key = self.win.getkey()
        if ord(key) == 27: # escape
            self.mode2com()
        elif ord(key) == 10: # newline
            self.newLine()
        elif ord(key) == 24: # Ctrl-X ^X
            notEnd = False
        elif ord(key) == 127: # backspace
            self.bs()
        else:
            self.addChar(key)
        # Remember the last key code for the debug readout in display().
        self.key = str(ord(key))
        return notEnd

    def display(self):
        win = self.win
        curses.curs_set(False)
        win.clear()
        win.refresh()
        win.border()
        # Last key code shown at the bottom edge (debug aid).
        win.addstr(self.height-1, 2, ' ', curses.color_pair(ColorBW))
        win.addstr(self.height-1, 2, self.key, curses.color_pair(ColorBW))
        y = 1
        for line in self.text:
            win.addstr(y, 1, line, curses.color_pair(ColorBW))
            y += 1
        curses.curs_set(True)
        #if self.mode == COMMAND:
        # Park the hardware cursor at the logical cursor position.
        win.addstr(self.y, self.x, '', curses.color_pair(ColorBW))
def main(scr):
    """curses.wrapper entry point: build the Wincon UI and run its event loop."""
    w = Wincon(scr)
    w.refresh()
    w.work()
    curses.curs_set(True)

wrapper(main)
|
# -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2015,
# David Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
__version__ = '3.8'
__tabversion__ = '3.8'  # version stamp written into / expected from lextab files

import re
import sys
import types
import copy
import os
import inspect

# This tuple contains known string types
try:
    # Python 2.6
    StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
    # Python 3.0
    StringTypes = (str, bytes)

# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
    """Raised when an invalid token is encountered and no error handler exists.

    Attributes:
        text: the remaining (unconsumed) input at the point of failure.
    """

    def __init__(self, message, s):
        # args carries only the message; the remaining input is kept separately.
        self.args = (message,)
        self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
    """One token produced by the lexer.

    The lexing engine assigns the attributes: type, value, lineno, lexpos.
    """

    def __str__(self):
        return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos)

    def __repr__(self):
        # The repr is intentionally identical to the str form.
        return self.__str__()
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
    """Stand-in for a logging-module logger that writes to a file-like object.

    Messages are %-formatted with *args; keyword arguments are accepted
    but ignored. info and debug are aliases for critical.
    """

    def __init__(self, f):
        self.f = f

    def _emit(self, prefix, msg, args):
        # One formatted line per call, newline-terminated.
        self.f.write(prefix + (msg % args) + '\n')

    def critical(self, msg, *args, **kwargs):
        self._emit('', msg, args)

    def warning(self, msg, *args, **kwargs):
        self._emit('WARNING: ', msg, args)

    def error(self, msg, *args, **kwargs):
        self._emit('ERROR: ', msg, args)

    info = critical
    debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    """Logger that swallows everything.

    Any attribute access and any call returns the instance itself, so
    arbitrary chained logging calls are silently accepted.
    """

    def __getattribute__(self, name):
        return self

    def __call__(self, *args, **kwargs):
        return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
class Lexer:
    """Lexer runtime engine.

    Public interface:
        input()  - Store a new string in the lexer
        token()  - Get the next token
        clone()  - Clone the lexer
        lineno   - Current line number
        lexpos   - Current position in the input string
    """

    def __init__(self):
        self.lexre = None             # Master regular expression. This is a list of
                                      # tuples (re, findex) where re is a compiled
                                      # regular expression and findex is a list
                                      # mapping regex group numbers to rules
        self.lexretext = None         # Current regular expression strings
        self.lexstatere = {}          # Dictionary mapping lexer states to master regexs
        self.lexstateretext = {}      # Dictionary mapping lexer states to regex strings
        self.lexstaterenames = {}     # Dictionary mapping lexer states to symbol names
        self.lexstate = 'INITIAL'     # Current lexer state
        self.lexstatestack = []       # Stack of lexer states
        self.lexstateinfo = None      # State information
        self.lexstateignore = {}      # Dictionary of ignored characters for each state
        self.lexstateerrorf = {}      # Dictionary of error functions for each state
        self.lexstateeoff = {}        # Dictionary of eof functions for each state
        self.lexreflags = 0           # Optional re compile flags
        self.lexdata = None           # Actual input data (as a string)
        self.lexpos = 0               # Current position in input text
        self.lexlen = 0               # Length of the input text
        self.lexerrorf = None         # Error rule (if any)
        self.lexeoff = None           # EOF rule (if any)
        self.lextokens = None         # List of valid tokens
        self.lexignore = ''           # Ignored characters
        self.lexliterals = ''         # Literal characters that can be passed through
        self.lexmodule = None         # Module
        self.lineno = 1               # Current line number
        self.lexoptimize = False      # Optimized mode

    def clone(self, object=None):
        """Return a copy of the lexer, rebinding rule methods onto *object* if given."""
        c = copy.copy(self)

        # If the object parameter has been supplied, it means we are attaching the
        # lexer to a new object. In this case, we have to rebind all methods in
        # the lexstatere and lexstateerrorf tables.
        if object:
            newtab = {}
            for key, ritem in self.lexstatere.items():
                newre = []
                for cre, findex in ritem:
                    newfindex = []
                    for f in findex:
                        if not f or not f[0]:
                            newfindex.append(f)
                            continue
                        # Rebind the rule function to the new object by name.
                        newfindex.append((getattr(object, f[0].__name__), f[1]))
                    newre.append((cre, newfindex))
                newtab[key] = newre
            c.lexstatere = newtab
            c.lexstateerrorf = {}
            for key, ef in self.lexstateerrorf.items():
                c.lexstateerrorf[key] = getattr(object, ef.__name__)
            c.lexmodule = object
        return c

    # ------------------------------------------------------------
    # writetab() - Write lexer information to a table file
    # ------------------------------------------------------------
    def writetab(self, lextab, outputdir=''):
        """Write the lexer tables to <outputdir>/<lextab>.py for optimized reuse."""
        if isinstance(lextab, types.ModuleType):
            raise IOError("Won't overwrite existing lextab module")
        basetabmodule = lextab.split('.')[-1]
        filename = os.path.join(outputdir, basetabmodule) + '.py'
        with open(filename, 'w') as tf:
            tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__))
            tf.write('_tabversion = %s\n' % repr(__tabversion__))
            tf.write('_lextokens = %s\n' % repr(self.lextokens))
            tf.write('_lexreflags = %s\n' % repr(self.lexreflags))
            tf.write('_lexliterals = %s\n' % repr(self.lexliterals))
            tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))

            # Rewrite the lexstatere table, replacing function objects with function names
            tabre = {}
            for statename, lre in self.lexstatere.items():
                titem = []
                for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]):
                    titem.append((retext, _funcs_to_names(func, renames)))
                tabre[statename] = titem

            tf.write('_lexstatere = %s\n' % repr(tabre))
            tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore))

            taberr = {}
            for statename, ef in self.lexstateerrorf.items():
                taberr[statename] = ef.__name__ if ef else None
            tf.write('_lexstateerrorf = %s\n' % repr(taberr))

            tabeof = {}
            for statename, ef in self.lexstateeoff.items():
                tabeof[statename] = ef.__name__ if ef else None
            tf.write('_lexstateeoff = %s\n' % repr(tabeof))

    # ------------------------------------------------------------
    # readtab() - Read lexer information from a tab file
    # ------------------------------------------------------------
    def readtab(self, tabfile, fdict):
        """Load lexer tables from *tabfile*, resolving rule names via *fdict*."""
        if isinstance(tabfile, types.ModuleType):
            lextab = tabfile
        else:
            exec('import %s' % tabfile)
            lextab = sys.modules[tabfile]

        if getattr(lextab, '_tabversion', '0.0') != __tabversion__:
            raise ImportError('Inconsistent PLY version')

        self.lextokens = lextab._lextokens
        self.lexreflags = lextab._lexreflags
        self.lexliterals = lextab._lexliterals
        self.lextokens_all = self.lextokens | set(self.lexliterals)
        self.lexstateinfo = lextab._lexstateinfo
        self.lexstateignore = lextab._lexstateignore
        self.lexstatere = {}
        self.lexstateretext = {}
        for statename, lre in lextab._lexstatere.items():
            titem = []
            # NOTE(review): txtitem is stored but never populated here, so
            # lexstateretext ends up with empty lists after readtab — confirm.
            txtitem = []
            for pat, func_name in lre:
                titem.append((re.compile(pat, lextab._lexreflags | re.VERBOSE), _names_to_funcs(func_name, fdict)))

            self.lexstatere[statename] = titem
            self.lexstateretext[statename] = txtitem

        self.lexstateerrorf = {}
        for statename, ef in lextab._lexstateerrorf.items():
            self.lexstateerrorf[statename] = fdict[ef]

        self.lexstateeoff = {}
        for statename, ef in lextab._lexstateeoff.items():
            self.lexstateeoff[statename] = fdict[ef]

        self.begin('INITIAL')

    # ------------------------------------------------------------
    # input() - Push a new string into the lexer
    # ------------------------------------------------------------
    def input(self, s):
        """Store input string *s* and reset the scanning position."""
        # Pull off the first character to see if s looks like a string
        c = s[:1]
        if not isinstance(c, StringTypes):
            raise ValueError('Expected a string')
        self.lexdata = s
        self.lexpos = 0
        self.lexlen = len(s)

    # ------------------------------------------------------------
    # begin() - Changes the lexing state
    # ------------------------------------------------------------
    def begin(self, state):
        """Switch the lexer to *state*, updating the active regex/error/eof tables."""
        if state not in self.lexstatere:
            raise ValueError('Undefined state')
        self.lexre = self.lexstatere[state]
        self.lexretext = self.lexstateretext[state]
        self.lexignore = self.lexstateignore.get(state, '')
        self.lexerrorf = self.lexstateerrorf.get(state, None)
        self.lexeoff = self.lexstateeoff.get(state, None)
        self.lexstate = state

    # ------------------------------------------------------------
    # push_state() - Changes the lexing state and saves old on stack
    # ------------------------------------------------------------
    def push_state(self, state):
        """Enter *state*, remembering the current state on a stack."""
        self.lexstatestack.append(self.lexstate)
        self.begin(state)

    # ------------------------------------------------------------
    # pop_state() - Restores the previous state
    # ------------------------------------------------------------
    def pop_state(self):
        """Return to the most recently pushed state."""
        self.begin(self.lexstatestack.pop())

    # ------------------------------------------------------------
    # current_state() - Returns the current lexing state
    # ------------------------------------------------------------
    def current_state(self):
        """Return the name of the current lexer state."""
        return self.lexstate

    # ------------------------------------------------------------
    # skip() - Skip ahead n characters
    # ------------------------------------------------------------
    def skip(self, n):
        """Advance the scanning position by *n* characters without tokenizing."""
        self.lexpos += n

    # ------------------------------------------------------------
    # opttoken() - Return the next token from the Lexer
    #
    # Note: This function has been carefully implemented to be as fast
    # as possible. Don't make changes unless you really know what
    # you are doing
    # ------------------------------------------------------------
    def token(self):
        """Return the next LexToken, or None at end of input.

        Raises LexError on illegal input when no t_error rule is defined,
        and RuntimeError if token() is called before input().
        """
        # Make local copies of frequently referenced attributes
        lexpos = self.lexpos
        lexlen = self.lexlen
        lexignore = self.lexignore
        lexdata = self.lexdata

        while lexpos < lexlen:
            # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
            if lexdata[lexpos] in lexignore:
                lexpos += 1
                continue

            # Look for a regular expression match
            for lexre, lexindexfunc in self.lexre:
                m = lexre.match(lexdata, lexpos)
                if not m:
                    continue

                # Create a token for return
                tok = LexToken()
                tok.value = m.group()
                tok.lineno = self.lineno
                tok.lexpos = lexpos

                # The last-matched group identifies which rule fired.
                i = m.lastindex
                func, tok.type = lexindexfunc[i]

                if not func:
                    # If no token type was set, it's an ignored token
                    if tok.type:
                        self.lexpos = m.end()
                        return tok
                    else:
                        lexpos = m.end()
                        break

                lexpos = m.end()

                # If token is processed by a function, call it
                tok.lexer = self      # Set additional attributes useful in token rules
                self.lexmatch = m
                self.lexpos = lexpos

                newtok = func(tok)

                # Every function must return a token, if nothing, we just move to next token
                if not newtok:
                    lexpos = self.lexpos         # This is here in case user has updated lexpos.
                    lexignore = self.lexignore   # This is here in case there was a state change
                    break

                # Verify type of the token. If not in the token map, raise an error
                if not self.lexoptimize:
                    if newtok.type not in self.lextokens_all:
                        raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
                            func.__code__.co_filename, func.__code__.co_firstlineno,
                            func.__name__, newtok.type), lexdata[lexpos:])

                return newtok
            else:
                # No match, see if in literals
                if lexdata[lexpos] in self.lexliterals:
                    tok = LexToken()
                    tok.value = lexdata[lexpos]
                    tok.lineno = self.lineno
                    tok.type = tok.value
                    tok.lexpos = lexpos
                    self.lexpos = lexpos + 1
                    return tok

                # No match. Call t_error() if defined.
                if self.lexerrorf:
                    tok = LexToken()
                    tok.value = self.lexdata[lexpos:]
                    tok.lineno = self.lineno
                    tok.type = 'error'
                    tok.lexer = self
                    tok.lexpos = lexpos
                    self.lexpos = lexpos
                    newtok = self.lexerrorf(tok)
                    if lexpos == self.lexpos:
                        # Error method didn't change text position at all. This is an error.
                        raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                    lexpos = self.lexpos
                    if not newtok:
                        continue
                    return newtok

                self.lexpos = lexpos
                raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])

        if self.lexeoff:
            tok = LexToken()
            tok.type = 'eof'
            tok.value = ''
            tok.lineno = self.lineno
            tok.lexpos = lexpos
            tok.lexer = self
            self.lexpos = lexpos
            newtok = self.lexeoff(tok)
            return newtok

        self.lexpos = lexpos + 1
        if self.lexdata is None:
            raise RuntimeError('No input string given with input()')
        return None

    # Iterator interface
    def __iter__(self):
        return self

    def next(self):
        t = self.token()
        if t is None:
            raise StopIteration
        return t

    __next__ = next
# -----------------------------------------------------------------------------
# ==== Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# _get_regex(func)
#
# Returns the regular expression assigned to a function either as a doc string
# or as a .regex attribute attached by the @TOKEN decorator.
# -----------------------------------------------------------------------------
def _get_regex(func):
return getattr(func, 'regex', func.__doc__)
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    """Return the symbols visible *levels* frames up the call stack.

    Globals are copied first; when the frame is not at module level
    (locals differ from globals), locals are overlaid on top.
    """
    frame = sys._getframe(levels)
    symbols = frame.f_globals.copy()
    if frame.f_globals != frame.f_locals:
        symbols.update(frame.f_locals)
    return symbols
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist, namelist):
result = []
for f, name in zip(funclist, namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist, fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]], n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist, reflags, ldict, toknames):
    """Combine the rule regexes in *relist* into master regular expressions.

    Returns a tuple (compiled, retext, renames) where each element is a
    list: [(compiled_re, group->rule map)], [regex source text], and
    [group->symbol-name map]. Python's re module limits named groups, so
    when compilation fails the list is split in half and each half is
    compiled recursively.
    """
    if not relist:
        return []
    regex = '|'.join(relist)
    try:
        lexre = re.compile(regex, re.VERBOSE | reflags)

        # Build the index to function map for the matching engine
        lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1)
        lexindexnames = lexindexfunc[:]

        for f, i in lexre.groupindex.items():
            handle = ldict.get(f, None)
            if type(handle) in (types.FunctionType, types.MethodType):
                lexindexfunc[i] = (handle, toknames[f])
                lexindexnames[i] = f
            elif handle is not None:
                lexindexnames[i] = f
                # String rules named t_*ignore_* carry no token at all.
                if f.find('ignore_') > 0:
                    lexindexfunc[i] = (None, None)
                else:
                    lexindexfunc[i] = (None, toknames[f])
        return [(lexre, lexindexfunc)], [regex], [lexindexnames]
    except Exception:
        # Too many groups (or other compile failure): bisect and recurse.
        m = int(len(relist)/2)
        if m == 0:
            m = 1
        llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames)
        rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames)
        return (llist+rlist), (lre+rre), (lnames+rnames)
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s, names):
nonstate = 1
parts = s.split('_')
for i, part in enumerate(parts[1:], 1):
if part not in names and part != 'ANY':
break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = '_'.join(parts[i:])
return (states, tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
    """Collects and validates the lexing rules found in a module dictionary.

    get_all() harvests tokens, literals, states, and t_* rules from
    *ldict*; validate_all() checks them and returns True if any error
    was logged.
    """

    def __init__(self, ldict, log=None, reflags=0):
        self.ldict = ldict
        self.error_func = None
        self.tokens = []
        self.reflags = reflags
        self.stateinfo = {'INITIAL': 'inclusive'}   # statename -> 'inclusive'|'exclusive'
        self.modules = set()                        # modules to scan for duplicate rules
        self.error = False
        self.log = PlyLogger(sys.stderr) if log is None else log

    # Get all of the basic information
    def get_all(self):
        self.get_tokens()
        self.get_literals()
        self.get_states()
        self.get_rules()

    # Validate all of the information
    def validate_all(self):
        self.validate_tokens()
        self.validate_literals()
        self.validate_rules()
        return self.error

    # Get the tokens map
    def get_tokens(self):
        tokens = self.ldict.get('tokens', None)
        if not tokens:
            self.log.error('No token list is defined')
            self.error = True
            return

        if not isinstance(tokens, (list, tuple)):
            self.log.error('tokens must be a list or tuple')
            self.error = True
            return

        if not tokens:
            # NOTE(review): unreachable — an empty token list already
            # returned at the first check above.
            self.log.error('tokens is empty')
            self.error = True
            return

        self.tokens = tokens

    # Validate the tokens
    def validate_tokens(self):
        """Check token names are identifiers and warn on duplicates."""
        terminals = {}
        for n in self.tokens:
            if not _is_identifier.match(n):
                self.log.error("Bad token name '%s'", n)
                self.error = True
            if n in terminals:
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1

    # Get the literals specifier
    def get_literals(self):
        self.literals = self.ldict.get('literals', '')
        if not self.literals:
            self.literals = ''

    # Validate literals
    def validate_literals(self):
        """Each literal must be a single-character string."""
        try:
            for c in self.literals:
                if not isinstance(c, StringTypes) or len(c) > 1:
                    self.log.error('Invalid literal %s. Must be a single character', repr(c))
                    self.error = True

        except TypeError:
            self.log.error('Invalid literals specification. literals must be a sequence of characters')
            self.error = True

    def get_states(self):
        """Read and validate the optional 'states' declaration into stateinfo."""
        self.states = self.ldict.get('states', None)
        # Build statemap
        if self.states:
            if not isinstance(self.states, (tuple, list)):
                self.log.error('states must be defined as a tuple or list')
                self.error = True
            else:
                for s in self.states:
                    if not isinstance(s, tuple) or len(s) != 2:
                        self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')", repr(s))
                        self.error = True
                        continue
                    name, statetype = s
                    if not isinstance(name, StringTypes):
                        self.log.error('State name %s must be a string', repr(name))
                        self.error = True
                        continue
                    if not (statetype == 'inclusive' or statetype == 'exclusive'):
                        self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name)
                        self.error = True
                        continue
                    if name in self.stateinfo:
                        self.log.error("State '%s' already defined", name)
                        self.error = True
                        continue
                    self.stateinfo[name] = statetype

    # Get all of the symbols with a t_ prefix and sort them into various
    # categories (functions, strings, error functions, and ignore characters)
    def get_rules(self):
        tsymbols = [f for f in self.ldict if f[:2] == 't_']

        # Now build up a list of functions and a list of strings
        self.toknames = {}        # Mapping of symbols to token names
        self.funcsym = {}         # Symbols defined as functions
        self.strsym = {}          # Symbols defined as strings
        self.ignore = {}          # Ignore strings by state
        self.errorf = {}          # Error functions by state
        self.eoff = {}            # EOF functions by state

        for s in self.stateinfo:
            self.funcsym[s] = []
            self.strsym[s] = []

        if len(tsymbols) == 0:
            self.log.error('No rules of the form t_rulename are defined')
            self.error = True
            return

        for f in tsymbols:
            t = self.ldict[f]
            states, tokname = _statetoken(f, self.stateinfo)
            self.toknames[f] = tokname

            if hasattr(t, '__call__'):
                # Function rules: error/eof are special; ignore must be a string.
                if tokname == 'error':
                    for s in states:
                        self.errorf[s] = t
                elif tokname == 'eof':
                    for s in states:
                        self.eoff[s] = t
                elif tokname == 'ignore':
                    line = t.__code__.co_firstlineno
                    file = t.__code__.co_filename
                    self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__)
                    self.error = True
                else:
                    for s in states:
                        self.funcsym[s].append((f, t))
            elif isinstance(t, StringTypes):
                # String rules: ignore is special; error must be a function.
                if tokname == 'ignore':
                    for s in states:
                        self.ignore[s] = t
                    if '\\' in t:
                        self.log.warning("%s contains a literal backslash '\\'", f)
                elif tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", f)
                    self.error = True
                else:
                    for s in states:
                        self.strsym[s].append((f, t))
            else:
                self.log.error('%s not defined as a function or string', f)
                self.error = True

        # Sort the functions by line number
        for f in self.funcsym.values():
            f.sort(key=lambda x: x[1].__code__.co_firstlineno)

        # Sort the strings by regular expression length
        for s in self.strsym.values():
            s.sort(key=lambda x: len(x[1]), reverse=True)

    # Validate all of the t_rules collected
    def validate_rules(self):
        for state in self.stateinfo:
            # Validate all rules defined by functions
            for fname, f in self.funcsym[state]:
                line = f.__code__.co_firstlineno
                file = f.__code__.co_filename
                module = inspect.getmodule(f)
                self.modules.add(module)

                tokname = self.toknames[fname]
                if isinstance(f, types.MethodType):
                    reqargs = 2   # bound methods take (self, t)
                else:
                    reqargs = 1
                nargs = f.__code__.co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
                    self.error = True
                    continue

                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
                    self.error = True
                    continue

                if not _get_regex(f):
                    self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__)
                    self.error = True
                    continue

                try:
                    c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), re.VERBOSE | self.reflags)
                    if c.match(''):
                        self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__)
                        self.error = True
                except re.error as e:
                    self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e)
                    if '#' in _get_regex(f):
                        self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__)
                    self.error = True

            # Validate all rules defined by strings
            for name, r in self.strsym[state]:
                tokname = self.toknames[name]
                if tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", name)
                    self.error = True
                    continue

                if tokname not in self.tokens and tokname.find('ignore_') < 0:
                    self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname)
                    self.error = True
                    continue

                try:
                    c = re.compile('(?P<%s>%s)' % (name, r), re.VERBOSE | self.reflags)
                    if (c.match('')):
                        self.log.error("Regular expression for rule '%s' matches empty string", name)
                        self.error = True
                except re.error as e:
                    self.log.error("Invalid regular expression for rule '%s'. %s", name, e)
                    if '#' in r:
                        self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name)
                    self.error = True

            if not self.funcsym[state] and not self.strsym[state]:
                self.log.error("No rules defined for state '%s'", state)
                self.error = True

            # Validate the error function
            efunc = self.errorf.get(state, None)
            if efunc:
                f = efunc
                line = f.__code__.co_firstlineno
                file = f.__code__.co_filename
                module = inspect.getmodule(f)
                self.modules.add(module)

                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = f.__code__.co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
                    self.error = True

                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
                    self.error = True

        for module in self.modules:
            self.validate_module(module)

    # -----------------------------------------------------------------------------
    # validate_module()
    #
    # This checks to see if there are duplicated t_rulename() functions or strings
    # in the parser input file. This is done using a simple regular expression
    # match on each line in the source code of the given module.
    # -----------------------------------------------------------------------------
    def validate_module(self, module):
        lines, linen = inspect.getsourcelines(module)

        fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
        sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')

        counthash = {}
        linen += 1
        for line in lines:
            m = fre.match(line)
            if not m:
                m = sre.match(line)
            if m:
                name = m.group(1)
                prev = counthash.get(name)
                if not prev:
                    counthash[name] = linen
                else:
                    filename = inspect.getsourcefile(module)
                    self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev)
                    self.error = True
            linen += 1
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab',
        reflags=0, nowarn=False, outputdir=None, debuglog=None, errorlog=None):
    """Build a Lexer from rule definitions in *module* (or the caller's scope).

    Also installs module-level ``token``, ``input``, and ``lexer`` globals
    pointing at the new lexer. In optimize mode, tables are read from /
    written to the *lextab* module instead of being revalidated.
    Raises SyntaxError if validation fails.
    """
    if lextab is None:
        lextab = 'lextab'

    global lexer

    ldict = None
    stateinfo = {'INITIAL': 'inclusive'}
    lexobj = Lexer()
    lexobj.lexoptimize = optimize
    global token, input

    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)

    if debug:
        if debuglog is None:
            debuglog = PlyLogger(sys.stderr)

    # Get the module dictionary used for the lexer
    if object:
        module = object

    # Get the module dictionary used for the parser
    if module:
        _items = [(k, getattr(module, k)) for k in dir(module)]
        ldict = dict(_items)
        # If no __file__ attribute is available, try to obtain it from the __module__ instead
        if '__file__' not in ldict:
            ldict['__file__'] = sys.modules[ldict['__module__']].__file__
    else:
        ldict = get_caller_module_dict(2)

    # Determine if the module is package of a package or not.
    # If so, fix the tabmodule setting so that tables load correctly
    pkg = ldict.get('__package__')
    if pkg and isinstance(lextab, str):
        if '.' not in lextab:
            lextab = pkg + '.' + lextab

    # Collect parser information from the dictionary
    linfo = LexerReflect(ldict, log=errorlog, reflags=reflags)
    linfo.get_all()
    if not optimize:
        if linfo.validate_all():
            raise SyntaxError("Can't build lexer")

    if optimize and lextab:
        try:
            lexobj.readtab(lextab, ldict)
            token = lexobj.token
            input = lexobj.input
            lexer = lexobj
            return lexobj

        except ImportError:
            # Fall through and build the tables from scratch.
            pass

    # Dump some basic debugging information
    if debug:
        debuglog.info('lex: tokens = %r', linfo.tokens)
        debuglog.info('lex: literals = %r', linfo.literals)
        debuglog.info('lex: states = %r', linfo.stateinfo)

    # Build a dictionary of valid token names
    lexobj.lextokens = set()
    for n in linfo.tokens:
        lexobj.lextokens.add(n)

    # Get literals specification
    if isinstance(linfo.literals, (list, tuple)):
        lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
    else:
        lexobj.lexliterals = linfo.literals

    lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals)

    # Get the stateinfo dictionary
    stateinfo = linfo.stateinfo

    regexs = {}
    # Build the master regular expressions
    for state in stateinfo:
        regex_list = []

        # Add rules defined by functions first
        for fname, f in linfo.funcsym[state]:
            line = f.__code__.co_firstlineno
            file = f.__code__.co_filename
            regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)

        # Now add all of the simple rules
        for name, r in linfo.strsym[state]:
            regex_list.append('(?P<%s>%s)' % (name, r))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state)

        regexs[state] = regex_list

    # Build the master regular expressions
    if debug:
        debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====')

    for state in regexs:
        lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames)
        lexobj.lexstatere[state] = lexre
        lexobj.lexstateretext[state] = re_text
        lexobj.lexstaterenames[state] = re_names
        if debug:
            for i, text in enumerate(re_text):
                debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text)

    # For inclusive states, we need to add the regular expressions from the INITIAL state
    for state, stype in stateinfo.items():
        if state != 'INITIAL' and stype == 'inclusive':
            lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
            lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
            lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])

    lexobj.lexstateinfo = stateinfo
    lexobj.lexre = lexobj.lexstatere['INITIAL']
    lexobj.lexretext = lexobj.lexstateretext['INITIAL']
    lexobj.lexreflags = reflags

    # Set up ignore variables
    lexobj.lexstateignore = linfo.ignore
    lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '')

    # Set up error functions
    lexobj.lexstateerrorf = linfo.errorf
    lexobj.lexerrorf = linfo.errorf.get('INITIAL', None)
    if not lexobj.lexerrorf:
        errorlog.warning('No t_error rule is defined')

    # Set up eof functions
    lexobj.lexstateeoff = linfo.eoff
    lexobj.lexeoff = linfo.eoff.get('INITIAL', None)

    # Check state information for ignore and error rules
    for s, stype in stateinfo.items():
        if stype == 'exclusive':
            if s not in linfo.errorf:
                errorlog.warning("No error rule is defined for exclusive state '%s'", s)
            if s not in linfo.ignore and lexobj.lexignore:
                errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
        elif stype == 'inclusive':
            # Inclusive states inherit INITIAL's error and ignore rules.
            if s not in linfo.errorf:
                linfo.errorf[s] = linfo.errorf.get('INITIAL', None)
            if s not in linfo.ignore:
                linfo.ignore[s] = linfo.ignore.get('INITIAL', '')

    # Create global versions of the token() and input() functions
    token = lexobj.token
    input = lexobj.input
    lexer = lexobj

    # If in optimize mode, we write the lextab
    if lextab and optimize:
        if outputdir is None:
            # If no output directory is set, the location of the output files
            # is determined according to the following rules:
            #     - If lextab specifies a package, files go into that package directory
            #     - Otherwise, files go in the same directory as the specifying module
            if isinstance(lextab, types.ModuleType):
                srcfile = lextab.__file__
            else:
                if '.' not in lextab:
                    srcfile = ldict['__file__']
                else:
                    parts = lextab.split('.')
                    pkgname = '.'.join(parts[:-1])
                    exec('import %s' % pkgname)
                    srcfile = getattr(sys.modules[pkgname], '__file__', '')
            outputdir = os.path.dirname(srcfile)

        try:
            lexobj.writetab(lextab, outputdir)
        except IOError as e:
            errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e))

    return lexobj
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None, data=None):
    """Run the lexer as a main program.

    Reads input from the file named in sys.argv[1] (or stdin when no
    argument is given), feeds it to *lexer* (or the module-level lexer),
    and prints every token as ``(type,value,lineno,lexpos)``.
    """
    if not data:
        try:
            filename = sys.argv[1]
            # Context manager guarantees the handle is closed even if
            # read() raises (the original leaked it in that case).
            with open(filename) as f:
                data = f.read()
        except IndexError:
            sys.stdout.write('Reading from standard input (type EOF to end):\n')
            data = sys.stdin.read()
    # Fall back to the module-level token()/input() when no lexer is given.
    if lexer:
        _input = lexer.input
    else:
        _input = input
    _input(data)
    if lexer:
        _token = lexer.token
    else:
        _token = token
    while True:
        tok = _token()
        if not tok:
            break
        sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------
def TOKEN(r):
    """Decorator that attaches the regex *r* to a token function.

    *r* may be a plain pattern string or a callable whose regex is
    extracted via ``_get_regex``; either way it lands on ``f.regex``.
    """
    def set_regex(f):
        f.regex = _get_regex(r) if hasattr(r, '__call__') else r
        return f
    return set_regex
# Alternative spelling of the TOKEN decorator
Token = TOKEN
|
<gh_stars>10-100
import math
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import data
import model
import matplotlib.pyplot as plt
class ArgsClass:
    """Empty attribute container standing in for an argparse namespace."""
# Hyper-parameter configuration (same values the original assigned one by one).
_SETTINGS = {
    'data': './data/',              # path of the corpus
    'checkpoint': '',               # checkpoint to use
    'model': 'LSTM',                # type of net (RNN_TANH, RNN_RELU, LSTM, GRU)
    'emsize': 215,                  # word embeddings size
    'nhid': 215,                    # num. of hidden units per layer
    'nlayers': 2,                   # num. of layers
    'lr': 20,                       # initial learning rate
    'clip': 0.25,                   # gradient clipper
    'epochs': 20,                   # number of epochs
    'batch_size': 20,               # batch size
    'bptt': 20,                     # length of sequence
    'dropout': 0.2,                 # dropout size on layers (0 = no dropout)
    'tied': True,                   # tie word embedding and softmax weights
    'seed': 1000,                   # random seed number
    'cuda': False,                  # usage of CUDA
    'log_interval': 200,            # interval for printing
    'save': './output/model.pt',    # final model saving path
}
args = ArgsClass()
for _name, _value in _SETTINGS.items():
    setattr(args, _name, _value)
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    if not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
    else:
        torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
# Corpus tokenizes the dataset found under args.data; see the project `data`
# module for the exact file layout it expects.
corpus = data.Corpus(args.data)
ntokens = len(corpus.dictionary)  # vocabulary size
def batchify(data, bsz, cuda=None):
    """Reshape a 1-D token tensor into a (nbatch, bsz) matrix.

    Trailing tokens that do not fill a whole column are discarded.
    Column b holds the b-th contiguous slice of the corpus, so rows are
    consecutive time steps for each of the ``bsz`` parallel streams.

    Args:
        data: 1-D LongTensor of token ids.
        bsz: number of parallel batch streams (columns).
        cuda: move the result to GPU; defaults to the global ``args.cuda``
            (kept for backward compatibility with existing two-arg calls).
    """
    if cuda is None:
        cuda = args.cuda
    # Work out how cleanly we can divide the dataset into bsz parts.
    nbatch = data.size(0) // bsz
    # Trim off any extra elements that wouldn't cleanly fit (remainders).
    data = data.narrow(0, 0, nbatch * bsz)
    # Evenly divide the data across the bsz batches.
    data = data.view(bsz, -1).t().contiguous()
    if cuda:
        data = data.cuda()
    return data
eval_batch_size = 10  # batch size used only for evaluation
test_data = batchify(corpus.test, eval_batch_size)  # (nbatch, eval_batch_size) token matrix
criterion = nn.CrossEntropyLoss()  # per-token classification loss over the vocabulary
if args.cuda:
    criterion.cuda()
# model = model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied)
# NOTE(review): this overrides the empty default set above, so the
# checkpoint branch below is always taken.
args.checkpoint = './output/model.pt'
# Load checkpoint
if args.checkpoint != '':
    if args.cuda:
        model = torch.load(args.checkpoint)
    else:
        # Load GPU model on CPU
        model = torch.load(args.checkpoint, map_location=lambda storage, loc: storage)
# Move the loaded model to the requested device.
if args.cuda:
    model.cuda()
else:
    model.cpu()
print(model)
print('------------------------------------------------------')
print('\t\t Total parameters in model : ', sum(param.numel() for param in model.parameters()))
print('------------------------------------------------------\n')
def repackage_hidden(h):
    """Detach every hidden state in *h* from its computation history.

    Returns a list of detached tensors so backprop through time stops at
    the current chunk boundary.
    """
    detached = []
    for state in h:
        detached.append(state.detach())
    return detached
def get_batch(source, i, evaluation=False):
    """Slice one BPTT chunk starting at row *i* plus its shifted targets.

    Targets are the inputs shifted by one time step, flattened to match
    the (seq_len * batch,) shape CrossEntropyLoss expects.
    """
    seq_len = min(args.bptt, len(source) - 1 - i)
    chunk = source[i:i + seq_len]
    shifted = source[i + 1:i + 1 + seq_len]
    # NOTE(review): Variable/volatile is the pre-0.4 torch API; kept as-is.
    data = Variable(chunk, volatile=evaluation)
    target = Variable(shifted.view(-1))
    return data, target
def evaluate(data_source):
    """Return the average per-token loss of the global `model` on *data_source*.

    Iterates the data in BPTT-sized chunks, carrying the hidden state
    forward (detached between chunks) and weighting each chunk's loss by
    its length.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    vocab_size = len(corpus.dictionary)
    hidden = model.init_hidden(eval_batch_size)
    accumulated = 0
    for start in range(0, data_source.size(0) - 1, args.bptt):
        inputs, targets = get_batch(data_source, start, evaluation=True)
        output, hidden = model(inputs, hidden)
        flat = output.view(-1, vocab_size)
        accumulated += len(inputs) * criterion(flat, targets).data
        hidden = repackage_hidden(hidden)
    return accumulated / len(data_source)
# Run on test data.
test_loss = evaluate(test_data)
print('=' * 89)
# Perplexity is exp of the average cross-entropy loss.
print('| End of testing | test loss {:5.2f} | test ppl {:8.2f}'.format(
    test_loss, math.exp(test_loss)))
|
<gh_stars>0
import numpy as np
import tensorflow as tf
import gym
import ray
from collections import deque
import random
from attempt.utilities.utils import env_extract_dims, flatten_goal_observation
from attempt.models.models import Critic_gen, Actor_gen
from attempt.utilities.her import HERGoalEnvWrapper, HER
from attempt.utilities.replay_buffer import PlainReplayBuffer, HindsightExperienceReplayWrapper, ReplayBuffer, GoalSelectionStrategy
class DDPG:
    """Deep Deterministic Policy Gradient agent for continuous control.

    Holds actor ("policy") and critic ("value") networks plus slowly
    tracking target copies, a replay buffer, and the interaction /
    training loop for a gym environment.
    """
    def __init__(self, env: gym.Env, buffer_size: int=int(1e6), gamma: int = 0.99, tau: int = 1e-2, start_steps: int=100,
                 noise_scale: float=0.1, batch_size=32, actor_lr=1e-3, value_lr=1e-3, seed: int=5):
        # env
        self.obs_dim, self.act_dim = env_extract_dims(env)
        self.env = env
        self.act_low, self.act_high = self.env.action_space.low, self.env.action_space.high
        # replay buffer
        self.buffer_size = buffer_size
        self.replay_buffer = PlainReplayBuffer(self.obs_dim, self.act_dim, buffer_size)
        # networks: online actor/critic and their target twins, initialised
        # to identical weights so the targets start in sync.
        self.policy = Actor_gen(self.obs_dim, self.act_dim, hidden_layers=(64, 64, 64), action_mult=self.act_high)
        self.value = Critic_gen(self.obs_dim, self.act_dim, hidden_layers=(64, 64, 64, 1))
        self.policy_target = Actor_gen(self.obs_dim, self.act_dim, hidden_layers=(64, 64, 64), action_mult=self.act_high)
        self.value_target = Critic_gen(self.obs_dim, self.act_dim, hidden_layers=(64, 64, 64, 1))
        self.policy_target.set_weights(self.policy.get_weights())
        self.value_target.set_weights(self.value.get_weights())
        self.policy_optimizer = tf.keras.optimizers.Adam(learning_rate=actor_lr)
        self.value_optimizer = tf.keras.optimizers.Adam(learning_rate=value_lr)
        # ddpg hyperparameters
        self.gamma = gamma                  # discount factor
        self.tau = tau                      # Polyak averaging coefficient
        self.noise_scale = noise_scale      # stddev of exploration noise
        self.seed = np.random.seed(seed)    # NOTE(review): np.random.seed returns None
        self.start_steps = start_steps      # random-action warm-up steps
        self.batch_size = batch_size
        # monitoring
        self.rewards = []
        self.value_losses = []
        self.policy_losses = []
    def get_action(self, s):
        """Return the policy action for observation *s* with Gaussian exploration noise, clipped to the action bounds."""
        a = self.policy(s.reshape(1, -1))[0]
        a += self.noise_scale * np.random.randn(self.act_dim)
        return np.clip(a, self.act_low, self.act_high)
    def _policy_loss(self, states):
        """Actor loss: negative mean critic value of the actor's own actions."""
        next_policy_actions = self.policy(states)
        return - tf.reduce_mean(self.value([states, next_policy_actions]))
    def _value_loss(self, states, actions, next_states, rewards, done):
        """Critic loss: MSE between Q(s,a) and the clipped TD target."""
        Qvals = self.value([states, actions])
        next_actions = self.policy_target(next_states)
        next_Q = self.value_target([next_states, next_actions])
        # Clip the bootstrap value to the feasible return range for
        # rewards in [-1, 0] (as with sparse goal-based rewards).
        next_Q = tf.clip_by_value(next_Q, -(1/(1 - self.gamma)), 0)
        Qprime = rewards + self.gamma * (1 - done) * next_Q
        return tf.reduce_mean(tf.square(Qvals - Qprime))
    def _learn_on_batch(self, batch):
        """One gradient step on critic then actor from a transition batch."""
        states, actions, rewards, next_states, done = batch
        # value optimization
        with tf.GradientTape() as tape:
            value_loss = self._value_loss(states, actions, next_states, rewards, done)
            self.value_losses.append(value_loss)
        value_gradients = tape.gradient(value_loss, self.value.trainable_variables)
        self.value_optimizer.apply_gradients(zip(value_gradients, self.value.trainable_variables))
        # policy optimization
        with tf.GradientTape() as tape2:
            policy_loss = self._policy_loss(states)
            self.policy_losses.append(policy_loss)
        policy_gradients = tape2.gradient(policy_loss, self.policy.trainable_variables)
        self.policy_optimizer.apply_gradients(zip(policy_gradients, self.policy.trainable_variables))
    def update_target_networks(self):
        """Polyak-average online weights into the target networks.

        NOTE(review): the original wrapped the per-layer weight lists in
        ``np.array(...)``, which implicitly creates ragged object arrays;
        NumPy deprecated that (error from 1.24 on). Averaging layer by
        layer with zip is equivalent and version-safe.
        """
        self.value_target.set_weights([
            self.tau * w + (1.0 - self.tau) * w_target
            for w, w_target in zip(self.value.get_weights(),
                                   self.value_target.get_weights())
        ])
        self.policy_target.set_weights([
            self.tau * w + (1.0 - self.tau) * w_target
            for w, w_target in zip(self.policy.get_weights(),
                                   self.policy_target.get_weights())
        ])
    def drill(self, num_episodes=50):
        """Main training loop: interact, store, and learn every step."""
        num_steps = 0
        for episode in range(num_episodes):
            is_done = False
            observation, episode_reward = self.env.reset(), 0
            while not is_done:
                num_steps += 1
                # Warm-up: sample random actions for the first start_steps steps.
                if num_steps > self.start_steps:
                    action = self.get_action(observation)
                else:
                    action = self.env.action_space.sample()
                next_observation, reward, is_done, info = self.env.step(action)
                episode_reward += reward
                # update buffer
                self.replay_buffer.store(observation, action, reward, next_observation, is_done)
                observation = next_observation
                batch = self.replay_buffer.sample_batch(self.batch_size)
                self._learn_on_batch(batch)
                self.update_target_networks()
                if is_done:
                    self.rewards.append(episode_reward)
                    print("Episode " + str(episode) + ": " + str(episode_reward))
                    self.test_env(5)
    def test_env(self, num_episodes=1):
        """Roll out the deterministic policy (no noise) and print returns."""
        n_steps = 0
        for j in range(num_episodes):
            s, episode_return, episode_length, d = self.env.reset(), 0, 0, False
            while not d:
                # Take deterministic actions at test time (noise_scale=0)
                s, r, d, _ = self.env.step(self.policy(tf.convert_to_tensor([s]))[0])
                episode_return += r
                episode_length += 1
                n_steps += 1
            print('test return:', episode_return, 'episode_length:', episode_length)
class HERDDPG(DDPG):
    """DDPG trained with Hindsight Experience Replay on a goal-based env.

    The environment is wrapped so dict observations become flat vectors,
    and the replay buffer relabels transitions with hindsight goals.
    """
    def __init__(self, env: gym.GoalEnv, goal_selection_strategy=GoalSelectionStrategy.FUTURE, buffer_size: int=int(1e6),
                 gamma: int = 0.98, tau: int = 0.95, start_steps: int = 1000, noise_scale: float = 0.1,
                 batch_size=int(128), actor_lr=1e-4, value_lr=1e-3, seed: int=5, k=2):
        super().__init__(env, buffer_size, gamma, tau, start_steps, noise_scale,
                         batch_size, actor_lr, value_lr, seed)
        # Flattens obs/achieved_goal/desired_goal dicts into single vectors.
        self.env = HERGoalEnvWrapper(env)
        self.goal_selection_strategy = goal_selection_strategy
        # k: number of hindsight goals sampled per stored transition.
        self.k = k
        self.replay_buffer = HindsightExperienceReplayWrapper(ReplayBuffer(self.buffer_size), k,
                                                              goal_selection_strategy, self.env)
        # Fixed episode length used by gather_cycle (env is not polled for done).
        self.ep_len = 50
    def train(self, num_batches=40):
        """Run *num_batches* offline learning steps from the replay buffer."""
        for _ in range(num_batches):
            if self.replay_buffer.can_sample(self.batch_size):
                batch = self.replay_buffer.sample(self.batch_size)
                self._learn_on_batch(batch)
                self.update_target_networks()
    def gather_cycle(self, num_episodes=16):
        """Collect fixed-length episodes, storing transitions and learning online.

        Actions are 80% noisy-policy / 20% uniform-random exploration.
        """
        for episode in range(num_episodes):
            observation, episode_reward = self.env.reset(), 0
            for _ in range(self.ep_len):
                if np.random.uniform() >= 0.2:
                    action = self.get_action(observation)
                else:
                    action = self.env.action_space.sample()
                next_observation, reward, is_done, info = self.env.step(action)
                episode_reward += reward
                # update buffer
                self.replay_buffer.add(observation, action, reward, next_observation, is_done, info)
                observation = next_observation
                if self.replay_buffer.can_sample(self.batch_size):
                    batch = self.replay_buffer.sample(self.batch_size)
                    self._learn_on_batch(batch)
                    self.update_target_networks()
            self.rewards.append(episode_reward)
            print(episode_reward, self.policy_losses[-1], self.value_losses[-1])
    def test_env(self, num_episodes=1, render=False):
        """Roll out the deterministic policy; optionally render each step."""
        n_steps = 0
        for j in range(num_episodes):
            s, episode_return, episode_length, d = self.env.reset(), 0, 0, False
            if render:
                self.env.render()
            while not d:
                # Take deterministic actions at test time (noise_scale=0)
                s, r, d, _ = self.env.step(self.policy(s.reshape(1, -1))[0])
                if render:
                    self.env.render()
                episode_return += r
                episode_length += 1
                n_steps += 1
            print('test return:', episode_return, 'episode_length:', episode_length)
    def unload(self):
        """Run one gather cycle, then return a 750-transition sample (e.g. for a central learner)."""
        self.gather_cycle()
        return self.replay_buffer.sample(750)
    def get_updated_policy(self, policy, value, policy_target, value_target):
        """Overwrite all four networks' weights with externally supplied ones."""
        self.policy.set_weights(policy)
        self.value.set_weights(value)
        self.policy_target.set_weights(policy_target)
        self.value_target.set_weights(value_target)
@ray.remote
class RemoteHERDDPG(HERDDPG):
    """Ray actor version of HERDDPG: same behavior, runs in a remote worker."""
    pass
<reponame>mlubin/SCIP.jl
#!/usr/bin/env python2.7
from collections import OrderedDict
from jinja2 import Template
from lxml import etree
from itertools import chain
import os
import sys
import time
# TODO: add xml dir to source
def log(msg):
    """Print *msg* to stdout prefixed with the current timestamp."""
    # Parenthesized single-argument print is valid under both Python 2
    # (print statement with a parenthesized expression) and Python 3.
    print('[%s] %s' % (time.asctime(), msg))
class SCIPXMLParser(object):
    """Collects type, enum, define, and function info from Doxygen XML
    describing the SCIP C headers, for later rendering into Julia wrappers.

    The parser is stateful: repeated ``parse()`` calls accumulate into the
    same dictionaries, which the Jinja templates then consume.
    """
    # Mapping of C types to Julia types
    TYPE_MAP = {
        'SCIP_Longint': 'Int64',
        'char': 'Char',
        'const char *': 'String',
        'double': 'Float64',
        'int': 'Int',
        'unsigned int': 'Uint',
        'void': 'Void',
    }
    # Known constructors and destructors that we are going to wrap with
    # convenience functions. Format:
    # {SCIP struct: ({constructors}, destructor)}
    WRAPPED_TYPES = {
        'SCIP': (set(['SCIPcreate']), 'SCIPfree'),
        'SCIP_CONS': (set(['SCIPcreateConsBasicLinear']), 'SCIPreleaseCons'),
        'SCIP_VAR': (set(['SCIPcreateVarBasic']), 'SCIPreleaseVar'),
    }
    # Set of variable names on the C side that will cause errors if they
    # are used in Julia. These will get replaced with something else.
    JULIA_BUILTINS = set(['global', 'local', 'type'])
    def __init__(self):
        # Ordered so generated output is deterministic across runs.
        self.typealiases = OrderedDict() # {SCIP_Bool: Uint, ...}
        self.defines = OrderedDict() # {TRUE: 1, ...}
        self.enums = OrderedDict() # {SCIP_Retcode: {SCIP_OKAY: 1, ...}, ...}
        self.typedefs = OrderedDict() # {SCIP_Retcode: SCIP_RETCODE}
        self.checked_functions = OrderedDict() # {SCIPversion: (SCIP_Real, ...)}
        self.unchecked_functions = OrderedDict() # {SCIPcreate: (SCIP_Retcode, ...}}
        self.checked_functions_orig = {} # original arg names
        self.structs = set () # {SCIP, SCIP_VAR, ...}
    def parse(self, filepath):
        """Parse one Doxygen XML file, dispatching each section to the
        specialized ``_parse_*`` handlers."""
        log('parsing %s' % filepath)
        # Main parser loop. This is responsible for identifying major sections
        # of the XML and passing those off to more specialized methods.
        # NOTE(review): the file handle opened here is never explicitly closed.
        doxygen = etree.parse(open(filepath)).getroot()
        for compounddef in doxygen:
            if compounddef.tag != 'compounddef':
                continue
            for sectiondef in compounddef:
                if sectiondef.tag != 'sectiondef':
                    continue
                # Section type: enum, define, etc.
                kind = sectiondef.attrib['kind']
                if kind == 'define':
                    self._parse_defines(sectiondef)
                elif kind == 'enum':
                    self._parse_enums(sectiondef)
                elif kind == 'typedef':
                    self._parse_typedefs(sectiondef)
                elif kind in ('user-defined', 'func'):
                    self._parse_functions(sectiondef)
        # Sanity check: typedefs are for enums and should not reappear
        # in the typealiases section. This might happend depending on
        # on the order types are found.
        for tn in chain(self.typedefs, self.enums):
            try:
                del self.typealiases[tn]
            except KeyError:
                pass
    def _convert_type(self, type_name):
        """Translate a C type name into its Julia equivalent.

        Pointers become ``Ptr{...}`` recursively; unknown SCIP structs are
        registered as Void typealiases. Raises KeyError for anything else.
        """
        type_name = type_name.strip()
        if type_name in SCIPXMLParser.TYPE_MAP:
            return SCIPXMLParser.TYPE_MAP[type_name]
        elif type_name in self.typedefs:
            return type_name
        elif type_name in self.typealiases:
            return type_name
        elif type_name.startswith('SCIP') :
            if type_name.endswith('*'):
                return 'Ptr{%s}' % self._convert_type(type_name[:-1])
            elif type_name.replace('_', '').isalnum():
                if type_name not in self.typealiases:
                    self.typealiases[type_name] = 'Void'
                return type_name
        raise KeyError('type unknown: %r' % type_name)
    def _parse_enums(self, node):
        """Record every enum and its named constants from a sectiondef."""
        # <memberdef kind="enum">
        #   <name>SCIP_Retcode</name>
        #   <enumvalue>
        #     <name>SCIP_OKAY</name>
        #     <initializer>= +1</initializer>
        #   </enumvalue>
        #   ...
        # </memberdef>
        for memberdef in node:
            if memberdef.tag != 'memberdef':
                continue
            enum_name = None # e.g. SCIP_Retcode
            enum_vals = OrderedDict() # {'SCIP_OKAY': 1, ...}
            for child in memberdef:
                if child.tag == 'name':
                    # Name of enum
                    enum_name = child.text
                elif child.tag == 'enumvalue':
                    # Names and values of constants in enum
                    name = initializer = None
                    for valnode in child:
                        if valnode.tag == 'name':
                            name = valnode.text
                        elif valnode.tag == 'initializer':
                            initializer = valnode.text.replace('=', '').strip()
                    enum_vals[name] = initializer
            self.enums[enum_name] = enum_vals
    def _parse_defines(self, node):
        """Record #defines: known C types become typealiases, simple
        numeric/string values become constants."""
        # <memberdef kind="define">
        #   <name>SCIP_Bool</name>
        #   <initializer>unsigned int</initializer>
        # </memberdef>
        for memberdef in node:
            if memberdef.tag != 'memberdef':
                continue
            define_name = None # e.g. SCIP_Bool
            for child in memberdef:
                if child.tag == 'name':
                    # Name of define
                    define_name = child.text
                elif child.tag == 'initializer':
                    # Known C types are treated as typealiases, while defines
                    # with numbers or strings are treated as constants.
                    t = child.text
                    if t is None:
                        continue
                    if t in SCIPXMLParser.TYPE_MAP:
                        self.typealiases[define_name] = SCIPXMLParser.TYPE_MAP[t]
                    elif t.isdigit() or (t.startswith('"') and t.endswith('"')) \
                            or (t.startswith("'") and t.endswith("'")):
                        if define_name not in self.defines:
                            self.defines[define_name] = t
    def _parse_typedefs(self, node):
        """Record ``typedef enum X Y`` pairs as {Y: X}."""
        # <memberdef kind="typedef">
        #   <type>enum <ref kindref="member">SCIP_Retcode</ref></type>
        #   <name>SCIP_RETCODE</name>
        # </memberdef>
        for memberdef in node:
            if memberdef.tag != 'memberdef':
                continue
            type_ref = type_name = None
            for child in memberdef:
                if child.tag == 'name':
                    type_name = child.text
                elif child.tag == 'type':
                    # enum SCIP_Retcode
                    if child.text.startswith('enum'):
                        for ref in child:
                            if ref.tag != 'ref':
                                continue
                            type_ref = ref.text
            if type_name is not None and type_ref is not None:
                self.typedefs[type_name] = type_ref
    def _parse_functions(self, node):
        """Record SCIP* function signatures, converting C types to Julia.

        Functions returning SCIP_RETCODE go into ``checked_functions``
        (wrapped with an error-checking macro); the rest go into
        ``unchecked_functions``. Functions with unconvertible types or
        function-pointer arguments are skipped.
        """
        # <memberdef kind="function">
        #   <type><ref>SCIP_Bool</ref></type>
        #   <name>SCIPisTransformed</name>
        #   <param>
        #     <type><ref>SCIP</ref> **</type>
        #     <declname>scip</declname>
        #   </param>
        # </memberdef>
        for memberdef in node:
            if memberdef.tag != 'memberdef':
                continue
            ret_type = None
            func_name = None
            arg_types = [] # such as Int or Ptr{SCIP}
            arg_names = [] # such as scip
            arg_vals = [] # such as scip[1]
            for child in memberdef:
                if child.tag == 'type':
                    ret_type = ' '.join(s.strip() for s in child.itertext())
                elif child.tag == 'name':
                    func_name = child.text
                elif child.tag == 'param':
                    for param_child in child:
                        if param_child.tag == 'type':
                            # Construct type name from hierarchical XML.
                            arg_types.append(' '.join(s.strip() for s in param_child.itertext()))
                        elif param_child.tag == 'declname':
                            # Pull out var name and convert to forms like scip[1].
                            t = param_child.text
                            if t in SCIPXMLParser.JULIA_BUILTINS:
                                t += 'Var'
                            arg_names.append(t)
                            arg_vals.append(t)
            # We're only interested in functions that start with 'SCIP'.
            if None in (ret_type, func_name) or not func_name.startswith('SCIP'):
                continue
            # And in functions with easily understood signatures.
            if len([x for x in arg_types if '(' in x]):
                continue
            orig_arg_names = list(arg_names)
            # Convert function signature components to Julia types & names.
            try:
                orig_arg_types = list(arg_types)
                ret_type = self._convert_type(ret_type)
                arg_types = [self._convert_type(tn) for tn in arg_types]
            except KeyError:
                continue
            # Convert function name and values in signature to use type.
            for i, (at, an, av) in enumerate(zip(arg_types, arg_names, arg_vals)):
                if at.rstrip('*').strip() in SCIPXMLParser.TYPE_MAP:
                    continue
                # Convert from scip to pointer(scip) or array(scip)
                if at.endswith('}}'):
                    av = 'array(%s)' % av
                elif at.endswith('}'):
                    av = 'pointer(%s)' % av
                arg_vals[i] = av
                # Convert from scip to scip::SCIP_t if the type is a SCIP struct
                is_pointer = at.startswith('Ptr{')
                at = at.replace('Ptr{','').lstrip('{').rstrip('}')
                if at.startswith('SCIP'):
                    if is_pointer:
                        self.structs.add(at)
                    at = '%s_t' % at
                arg_names[i] = '%s::%s' % (an, at)
            if len(arg_types) == 1 and arg_types[0] == 'Void':
                arg_types = []
            if ret_type == 'SCIP_RETCODE':
                # Separate out functions based on whether they return SCIP
                # return codes or not. These are handled by different macros.
                if func_name not in self.checked_functions:
                    self.checked_functions[func_name] = (arg_types, arg_names, arg_vals)
                    self.checked_functions_orig[func_name] = orig_arg_names
            else:
                if func_name not in self.unchecked_functions:
                    self.unchecked_functions[func_name] = (ret_type, arg_types, arg_names, arg_vals)
if __name__ == '__main__':
    # Expect exactly three positional arguments.
    try:
        xmldir, tmpldir, srcdir = sys.argv[1:]
    except ValueError:
        # Wrong argument count: the tuple unpacking above raises ValueError.
        # (Parenthesized print is valid under both Python 2 and 3; the
        # original py2-only print statement was a syntax error on py3.)
        print('usage: %s xml-dir template-dir src-dir' % sys.argv[0])
        sys.exit()
    # Parse each public header file into data structures for generating
    # the interface, then use those to convert templates into source.
    parser = SCIPXMLParser()
    for filename in os.listdir(xmldir):
        if not filename.endswith('_8h.xml'):
            continue
        if not (filename in ('def_8h.xml', 'scip_8h.xml', 'scipdefplugins_8h.xml') or \
                filename.startswith('pub__') or filename.startswith('type__') or filename.startswith('cons__')):
            continue
        parser.parse(os.path.join(xmldir, filename))
    # Template -> src conversion.
    for filename in os.listdir(tmpldir):
        with open(os.path.join(srcdir, filename), 'w') as outfile:
            with open(os.path.join(tmpldir, filename)) as infile:
                template = Template(infile.read())
                outfile.write(template.render(parser=parser))
|
# Copyright 2019 Elasticsearch BV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from io import StringIO
import numpy as np
class ArithmeticObject(ABC):
    """Abstract operand of a painless arithmetic expression.

    Concrete subclasses expose a string ``value``, a numpy ``dtype``,
    resolve to a painless snippet, and repr as their value.
    """
    @property
    @abstractmethod
    def value(self):
        """Painless source-fragment representation of this operand."""
        pass
    @property
    @abstractmethod
    def dtype(self):
        # NOTE: declared as a property to match every subclass, which all
        # implement dtype as @property (the original left it a bare method).
        pass
    @abstractmethod
    def resolve(self):
        """Return the painless expression string for this operand."""
        pass
    @abstractmethod
    def __repr__(self):
        pass
class ArithmeticString(ArithmeticObject):
    """A string literal operand; renders single-quoted for painless."""

    def __init__(self, value):
        self._value = value

    @property
    def value(self):
        # Painless string literals are single-quoted.
        return "'{}'".format(self._value)

    @property
    def dtype(self):
        return np.dtype(object)

    def resolve(self):
        return self.value

    def __repr__(self):
        return self.value
class ArithmeticNumber(ArithmeticObject):
    """A numeric literal operand with an explicit numpy dtype."""

    def __init__(self, value, dtype):
        self._value = value
        self._dtype = dtype

    @property
    def value(self):
        # Stringify the number for embedding in the painless script.
        return "{}".format(self._value)

    @property
    def dtype(self):
        return self._dtype

    def resolve(self):
        return self.value

    def __repr__(self):
        return self.value
class ArithmeticSeries(ArithmeticObject):
    """Arithmetic expression over an aggregatable field of a series.

    Holds a base painless ``value`` (a ``doc[...]`` access or an inherited
    expression) plus an ordered list of ArithmeticTask operations that
    ``resolve()`` folds into a single painless expression string.
    """
    def __init__(self, query_compiler, display_name, dtype):
        # Inherit an in-flight arithmetic expression if one exists on the
        # query compiler; otherwise start from the raw field access.
        task = query_compiler.get_arithmetic_op_fields()
        if task is not None:
            self._value = task._arithmetic_series.value
            self._tasks = task._arithmetic_series._tasks.copy()
            self._dtype = dtype
        else:
            aggregatable_field_name = query_compiler.display_name_to_aggregatable_name(
                display_name
            )
            if aggregatable_field_name is None:
                # BUGFIX: the original concatenated the two f-strings with no
                # separator, producing "...fields<name> is not aggregatable."
                raise ValueError(
                    f"Can not perform arithmetic operations on non aggregatable fields. "
                    f"{display_name} is not aggregatable."
                )
            self._value = f"doc['{aggregatable_field_name}'].value"
            self._tasks = []
            self._dtype = dtype
    @property
    def value(self):
        return self._value
    @property
    def dtype(self):
        return self._dtype
    def __repr__(self):
        buf = StringIO()
        buf.write(f"Series: {self.value} ")
        buf.write("Tasks: ")
        for task in self._tasks:
            buf.write(f"{task!r} ")
        return buf.getvalue()
    def resolve(self):
        """Fold the queued tasks left-to-right into one painless expression.

        Forward ops place this series on the left; reflected (``__r*__``)
        ops place it on the right. floordiv/pow map to Math.floor/Math.pow.
        """
        value = self._value
        for task in self._tasks:
            if task.op_name == "__add__":
                value = f"({value} + {task.object.resolve()})"
            elif task.op_name == "__truediv__":
                value = f"({value} / {task.object.resolve()})"
            elif task.op_name == "__floordiv__":
                value = f"Math.floor({value} / {task.object.resolve()})"
            elif task.op_name == "__mod__":
                value = f"({value} % {task.object.resolve()})"
            elif task.op_name == "__mul__":
                value = f"({value} * {task.object.resolve()})"
            elif task.op_name == "__pow__":
                value = f"Math.pow({value}, {task.object.resolve()})"
            elif task.op_name == "__sub__":
                value = f"({value} - {task.object.resolve()})"
            elif task.op_name == "__radd__":
                value = f"({task.object.resolve()} + {value})"
            elif task.op_name == "__rtruediv__":
                value = f"({task.object.resolve()} / {value})"
            elif task.op_name == "__rfloordiv__":
                value = f"Math.floor({task.object.resolve()} / {value})"
            elif task.op_name == "__rmod__":
                value = f"({task.object.resolve()} % {value})"
            elif task.op_name == "__rmul__":
                value = f"({task.object.resolve()} * {value})"
            elif task.op_name == "__rpow__":
                value = f"Math.pow({task.object.resolve()}, {value})"
            elif task.op_name == "__rsub__":
                value = f"({task.object.resolve()} - {value})"
        return value
    def arithmetic_operation(self, op_name, right):
        """Queue *op_name* against operand *right* and return self (fluent)."""
        # check if operation is supported (raises on unsupported)
        self.check_is_supported(op_name, right)
        task = ArithmeticTask(op_name, right)
        self._tasks.append(task)
        return self
    def check_is_supported(self, op_name, right):
        """Validate dtype compatibility; raise TypeError when unsupported.

        Supported combinations:
          number op number   -> all ops
          string op string   -> only add
          string op int      -> only mul
          anything else      -> TypeError
        """
        # see end of https://pandas.pydata.org/pandas-docs/stable/getting_started/basics.html?highlight=dtype for
        # dtype heirarchy
        if np.issubdtype(self.dtype, np.number) and np.issubdtype(
            right.dtype, np.number
        ):
            # series.number op_name number (all ops)
            return True
        elif np.issubdtype(self.dtype, np.object_) and np.issubdtype(
            right.dtype, np.object_
        ):
            # series.string op_name string (only add)
            if op_name == "__add__" or op_name == "__radd__":
                return True
        elif np.issubdtype(self.dtype, np.object_) and np.issubdtype(
            right.dtype, np.integer
        ):
            # series.string op_name int (only mul)
            if op_name == "__mul__":
                return True
        raise TypeError(
            f"Arithmetic operation on incompatible types {self.dtype} {op_name} {right.dtype}"
        )
class ArithmeticTask:
    """Pairs an operator name with its ArithmeticObject operand."""

    def __init__(self, op_name, object):
        self._op_name = op_name
        if not isinstance(object, ArithmeticObject):
            raise TypeError(f"Task requires ArithmeticObject not {type(object)}")
        self._object = object

    def __repr__(self):
        # Same output as the original StringIO-based version, including
        # the trailing space.
        return f"op_name: {self.op_name} object: {self.object!r} "

    @property
    def op_name(self):
        return self._op_name

    @property
    def object(self):
        return self._object
|
import os
class Vertice:
    """A graph vertex: a payload value plus its list of adjacent vertices."""

    def __init__(self, valor):
        self.valor = valor            # payload of this vertex
        self.lista_adyacentes = []    # outgoing edges (Vertice instances)

    def get_valor(self):
        """Return the vertex payload."""
        return self.valor

    def set_numero(self, valor):
        """Replace the vertex payload (method name kept for compatibility)."""
        self.valor = valor

    def get_lista_adyacentes(self):
        """Return the (mutable) adjacency list."""
        return self.lista_adyacentes

    def set_lista_adyacentes(self, lista):
        """Replace the adjacency list wholesale."""
        self.lista_adyacentes = lista
class Grafo:
    """Directed graph over Vertice nodes with graphviz rendering and BFS."""

    def __init__(self):
        self.lista_vertices = []

    def existe_vertice(self, valor):
        """Return the vertex whose payload equals *valor*, or None."""
        for vertice in self.lista_vertices:
            if vertice.get_valor() == valor:
                return vertice
        return None

    def agregar_vertice(self, valor):
        """Add a new vertex unless one with the same value already exists."""
        if self.existe_vertice(valor) is None:
            self.lista_vertices.append(Vertice(valor))
        else:
            print('Ya existe')

    def enlazar(self, origen_, destino_):
        """Add a directed edge origen_ -> destino_; both endpoints must exist."""
        origen = self.existe_vertice(origen_)
        destino = self.existe_vertice(destino_)
        if (origen is None) or (destino is None):
            print('No es posible enlazar')
        else:
            origen.get_lista_adyacentes().append(destino)

    def graficar(self):
        """Write the graph as graphviz source and render it to Grafo.png."""
        cadena = 'digraph G{\n'
        cadena += "node[shape = \"record\"]\n"
        aux = []
        for temp in self.lista_vertices:
            # `not in` replaces the original `aux.__contains__(temp) is False`.
            if temp not in aux:
                aux.append(temp)
                cadena += f'node{hash(temp)} [label="{temp.get_valor()}" ]\n'
                for ady in temp.get_lista_adyacentes():
                    cadena += f'node{hash(temp)} -> node{hash(ady)}\n'
        cadena += '}'
        # Context manager ensures the file is flushed/closed before graphviz runs.
        with open("Grafo.circo", "w") as file:
            file.write(cadena)
        os.system('circo -Tpng Grafo.circo -o Grafo.png')

    def buscar(self, valor):
        """Locate a vertex by value (same contract as existe_vertice)."""
        return self.existe_vertice(valor)

    def anchura(self, inicio):
        """Breadth-first traversal from *inicio*, printing each visited value."""
        visitados = []
        cola = [self.buscar(inicio)]
        while cola:
            actual = cola.pop(0)
            if actual not in visitados:
                print(actual.get_valor(), end=' -')
                visitados.append(actual)
                # Si los vertices adyacentes no han sido visitados, agregar a la cola
                for vecino in actual.get_lista_adyacentes():
                    if vecino not in visitados:
                        cola.append(vecino)
# Build a small demo graph, run BFS from 'D', then render it with graphviz.
g = Grafo()
g.agregar_vertice('A')
g.agregar_vertice('B')
g.agregar_vertice('C')
g.agregar_vertice('D')
g.agregar_vertice('H')
g.agregar_vertice('T')
g.agregar_vertice('R')
g.enlazar('B', 'H')
g.enlazar('C', 'R')
g.enlazar('D', 'B')
g.enlazar('D', 'C')
g.enlazar('H', 'A')
g.enlazar('H', 'T')
g.enlazar('H', 'D')
g.enlazar('R', 'H')
print('RECORRIDO POR ANCHURA')
g.anchura('D')
print()
# Writes Grafo.circo and shells out to graphviz 'circo' to produce Grafo.png.
g.graficar()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
from collections import OrderedDict
from pytorch_transformers.modeling_bert import (
BertPreTrainedModel,
BertConfig,
BertModel,
)
from pytorch_transformers.tokenization_bert import BertTokenizer
from elq.common.ranker_base import BertEncoder, get_model_obj
from blink.common.optimizer import get_bert_optimizer
from elq.biencoder.allennlp_span_utils import batched_span_select, batched_index_select
from elq.biencoder.utils import batch_reshape_mask_left
def load_biencoder(params):
    """Construct and return a BiEncoderRanker configured by *params*."""
    return BiEncoderRanker(params)
def get_submodel_from_state_dict(state_dict, prefix):
    """Extract the sub-state-dict belonging to the submodule named *prefix*.

    Keeps only keys starting with *prefix* and strips ``prefix + '.'``
    from each, preserving insertion order.
    """
    trimmed = OrderedDict()
    skip = len(prefix) + 1  # +1 drops the separating '.'
    for full_key, tensor in state_dict.items():
        if full_key.startswith(prefix):
            trimmed[full_key[skip:]] = tensor
    return trimmed
class MentionScoresHead(nn.Module):
def __init__(
self, bert_output_dim, scoring_method="qa_linear", max_mention_length=10,
):
super(MentionScoresHead, self).__init__()
self.scoring_method = scoring_method
self.max_mention_length = max_mention_length
if self.scoring_method == "qa_linear":
self.bound_classifier = nn.Linear(bert_output_dim, 3)
elif self.scoring_method == "qa_mlp" or self.scoring_method == "qa": # for back-compatibility
self.bound_classifier = nn.Sequential(
nn.Linear(bert_output_dim, bert_output_dim),
nn.ReLU(),
nn.Dropout(0.1),
nn.Linear(bert_output_dim, 3),
)
else:
raise NotImplementedError()
def forward(self, bert_output, mask_ctxt):
'''
Retuns scores for *inclusive* mention boundaries
'''
# (bs, seqlen, 3)
logits = self.bound_classifier(bert_output)
if self.scoring_method[:2] == "qa":
# (bs, seqlen, 1); (bs, seqlen, 1); (bs, seqlen, 1)
start_logprobs, end_logprobs, mention_logprobs = logits.split(1, dim=-1)
# (bs, seqlen)
start_logprobs = start_logprobs.squeeze(-1)
end_logprobs = end_logprobs.squeeze(-1)
mention_logprobs = mention_logprobs.squeeze(-1)
# impossible to choose masked tokens as starts/ends of spans
start_logprobs[~mask_ctxt] = -float("Inf")
end_logprobs[~mask_ctxt] = -float("Inf")
mention_logprobs[~mask_ctxt] = -float("Inf")
# take sum of log softmaxes:
# log p(mention) = log p(start_pos && end_pos) = log p(start_pos) + log p(end_pos)
# DIM: (bs, starts, ends)
mention_scores = start_logprobs.unsqueeze(2) + end_logprobs.unsqueeze(1)
# (bs, starts, ends)
mention_cum_scores = torch.zeros(mention_scores.size(), dtype=mention_scores.dtype).to(mention_scores.device)
# add ends
mention_logprobs_end_cumsum = torch.zeros(mask_ctxt.size(0), dtype=mention_scores.dtype).to(mention_scores.device)
for i in range(mask_ctxt.size(1)):
mention_logprobs_end_cumsum += mention_logprobs[:,i]
mention_cum_scores[:,:,i] += mention_logprobs_end_cumsum.unsqueeze(-1)
# subtract starts
mention_logprobs_start_cumsum = torch.zeros(mask_ctxt.size(0), dtype=mention_scores.dtype).to(mention_scores.device)
for i in range(mask_ctxt.size(1)-1):
mention_logprobs_start_cumsum += mention_logprobs[:,i]
mention_cum_scores[:,(i+1),:] -= mention_logprobs_start_cumsum.unsqueeze(-1)
# DIM: (bs, starts, ends)
mention_scores += mention_cum_scores
# DIM: (starts, ends, 2) -- tuples of [start_idx, end_idx]
mention_bounds = torch.stack([
torch.arange(mention_scores.size(1)).unsqueeze(-1).expand(mention_scores.size(1), mention_scores.size(2)), # start idxs
torch.arange(mention_scores.size(1)).unsqueeze(0).expand(mention_scores.size(1), mention_scores.size(2)), # end idxs
], dim=-1).to(mask_ctxt.device)
# DIM: (starts, ends)
mention_sizes = mention_bounds[:,:,1] - mention_bounds[:,:,0] + 1 # (+1 as ends are inclusive)
# Remove invalids (startpos > endpos, endpos > seqlen) and renormalize
# DIM: (bs, starts, ends)
valid_mask = (mention_sizes.unsqueeze(0) > 0) & mask_ctxt.unsqueeze(1)
# DIM: (bs, starts, ends)
mention_scores[~valid_mask] = -float("inf") # invalids have logprob=-inf (p=0)
# DIM: (bs, starts * ends)
mention_scores = mention_scores.view(mention_scores.size(0), -1)
# DIM: (bs, starts * ends, 2)
mention_bounds = mention_bounds.view(-1, 2)
mention_bounds = mention_bounds.unsqueeze(0).expand(mention_scores.size(0), mention_scores.size(1), 2)
if self.max_mention_length is not None:
mention_scores, mention_bounds = self.filter_by_mention_size(
mention_scores, mention_bounds, self.max_mention_length,
)
return mention_scores, mention_bounds
def filter_by_mention_size(self, mention_scores, mention_bounds, max_mention_length):
    '''
    Drop every candidate span longer than `max_mention_length` tokens.
    mention_scores: torch.FloatTensor (bsz, num_mentions)
    mention_bounds: torch.LongTensor (bsz, num_mentions, 2)
    Returns the filtered (scores, bounds) pair, reshaped back per batch row.
    '''
    bsz = mention_scores.size(0)
    # (bsz, num_mentions) -- True where the span is short enough to keep
    span_lengths = mention_bounds[:, :, 1] - mention_bounds[:, :, 0]
    keep = span_lengths <= max_mention_length
    # NOTE: assumes every batch row keeps the same number of spans, so the
    # flat masked selection can be viewed back to (bsz, -1[, 2])
    filtered_scores = mention_scores[keep].view(bsz, -1)
    filtered_bounds = mention_bounds[keep].view(bsz, -1, 2)
    return filtered_scores, filtered_bounds
class GetContextEmbedsHead(nn.Module):
    """
    Aggregates the context encoder's token outputs over each mention span
    into a single vector per mention (matching the candidate embedding dim).
    """
    def __init__(self, mention_aggregation_type, ctxt_output_dim, cand_output_dim, dropout=0.1):
        """
        mention_aggregation_type
            `all_avg`: average across tokens in mention
            `fl_avg`: to average across first/last tokens in mention
            `{all/fl}_linear`: for linear layer over mention
            `{all/fl}_mlp` to MLP over mention
        ctxt_output_dim: hidden size of the context encoder output
        cand_output_dim: hidden size the mention representation must match
        dropout: dropout probability (module created but only the mlp path uses one)
        """
        super(GetContextEmbedsHead, self).__init__()
        # for aggregating mention outputs of context encoder
        self.mention_aggregation_type = mention_aggregation_type.split('_')
        self.tokens_to_aggregate = self.mention_aggregation_type[0]
        self.aggregate_method = "_".join(self.mention_aggregation_type[1:])
        self.dropout = nn.Dropout(dropout)
        # BUGFIX: compare the raw string -- the original compared the *split
        # list* to 'all_avg'/'none', which is always False, so this dimension
        # check never ran.
        if mention_aggregation_type == 'all_avg' or mention_aggregation_type == 'none':
            assert ctxt_output_dim == cand_output_dim
        if self.aggregate_method == 'linear':
            # first/last token embeddings are concatenated -> 2x input dim
            self.mention_agg_linear = nn.Linear(ctxt_output_dim * 2, cand_output_dim)
        elif self.aggregate_method == 'avg_linear':
            self.mention_agg_linear = nn.Linear(ctxt_output_dim, cand_output_dim)
        elif self.aggregate_method == 'mlp':
            # BUGFIX: original referenced undefined names `bert_output_dim` /
            # `output_dim`, raising NameError whenever the mlp method was
            # selected. NOTE(review): mention_agg_mlp is never applied in
            # forward() -- confirm intended usage.
            self.mention_agg_mlp = nn.Sequential(
                nn.Linear(ctxt_output_dim, ctxt_output_dim),
                nn.ReLU(),
                nn.Dropout(0.1),
                nn.Linear(ctxt_output_dim, cand_output_dim),
            )
        else:
            self.mention_agg_mlp = None
    def forward(self, bert_output, mention_bounds):
        '''
        bert_output
            (bs, seqlen, embed_dim)
        mention_bounds: both bounds are inclusive [start, end]
            (bs, num_spans, 2)
        Returns (bs, num_spans, output_dim) aggregated mention embeddings,
        or the empty `mention_bounds` tensor when there are no mentions.
        '''
        if mention_bounds.size(0) == 0:
            # no mentions in the batch: nothing to aggregate
            return mention_bounds
        if self.tokens_to_aggregate == 'all':
            (
                embedding_ctxt,  # (batch_size, num_spans, max_batch_span_width, embedding_size)
                mask,  # (batch_size, num_spans, max_batch_span_width)
            ) = batched_span_select(
                bert_output,  # (batch_size, sequence_length, embedding_size)
                mention_bounds,  # (batch_size, num_spans, 2)
            )
            embedding_ctxt[~mask] = 0  # 0 out masked elements
            # embedding_ctxt = (batch_size, num_spans, max_batch_span_width, embedding_size)
            if self.aggregate_method.startswith('avg'):
                # mean over the (unmasked) span tokens
                embedding_ctxt = embedding_ctxt.sum(2) / mask.sum(2).float().unsqueeze(-1)
                # embedding_ctxt = (batch_size, num_spans, embedding_size)
            if self.aggregate_method == 'avg_linear':
                embedding_ctxt = self.mention_agg_linear(embedding_ctxt)
                # embedding_ctxt = (batch_size, num_spans, output_dim)
        elif self.tokens_to_aggregate == 'fl':
            # first/last token embeddings of each span
            start_embeddings = batched_index_select(bert_output, mention_bounds[:,:,0])
            end_embeddings = batched_index_select(bert_output, mention_bounds[:,:,1])
            embedding_ctxt = torch.cat([start_embeddings.unsqueeze(2), end_embeddings.unsqueeze(2)], dim=2)
            # embedding_ctxt = (batch_size, num_spans, 2, embedding_size)
            if self.aggregate_method == 'avg':
                embedding_ctxt = embedding_ctxt.mean(2)
                # embedding_ctxt = (batch_size, num_spans, embedding_size)
            elif self.aggregate_method == 'linear':
                embedding_ctxt = embedding_ctxt.view(embedding_ctxt.size(0), embedding_ctxt.size(1), -1)
                # embedding_ctxt = (batch_size, num_spans, 2 * embedding_size)
                embedding_ctxt = self.mention_agg_linear(embedding_ctxt)
                # embedding_ctxt = (batch_size, num_spans, output_dim)
        else:
            raise NotImplementedError()
        return embedding_ctxt
class BiEncoderModule(torch.nn.Module):
    """
    Bi-encoder with two separate BERT encoders: one for contexts (mentions)
    and one for candidate entities. When `mention_aggregation_type` is set,
    it additionally carries two classification heads: `mention_scores`
    (scores all candidate spans for mention detection) and
    `get_context_embeds` (aggregates mention-span tokens into one vector).
    """
    def __init__(self, params):
        """
        params: dict with at least `bert_model`, `out_dim`, `pull_from_layer`,
        `add_linear`, `load_cand_enc_only`; optional `freeze_cand_enc`,
        `mention_aggregation_type`, `mention_scoring_method`,
        `max_mention_length`.
        """
        super(BiEncoderModule, self).__init__()
        ctxt_bert = BertModel.from_pretrained(params["bert_model"], output_hidden_states=True)
        if params["load_cand_enc_only"]:
            # candidate-encoder weights are loaded separately later; start
            # from a fixed base model instead of params["bert_model"]
            bert_model = "bert-large-uncased"
        else:
            bert_model = params['bert_model']
        cand_bert = BertModel.from_pretrained(
            bert_model,
            output_hidden_states=True,
        )
        self.context_encoder = BertEncoder(
            ctxt_bert,
            params["out_dim"],
            layer_pulled=params["pull_from_layer"],
            add_linear=params["add_linear"],
        )
        self.cand_encoder = BertEncoder(
            cand_bert,
            params["out_dim"],
            layer_pulled=params["pull_from_layer"],
            add_linear=params["add_linear"],
        )
        if params.get("freeze_cand_enc", False):
            # keep the candidate encoder fixed during training
            for param in self.cand_encoder.parameters():
                param.requires_grad = False
        self.config = ctxt_bert.config
        # hidden size of the context BERT, read off its embedding matrix
        ctxt_bert_output_dim = ctxt_bert.embeddings.word_embeddings.weight.size(1)
        self.mention_aggregation_type = params.get('mention_aggregation_type', None)
        self.classification_heads = nn.ModuleDict({})
        self.linear_compression = None
        if self.mention_aggregation_type is not None:
            classification_heads_dict = {'get_context_embeds': GetContextEmbedsHead(
                self.mention_aggregation_type,
                ctxt_bert_output_dim,
                cand_bert.embeddings.word_embeddings.weight.size(1),
            )}
            classification_heads_dict['mention_scores'] = MentionScoresHead(
                ctxt_bert_output_dim,
                params["mention_scoring_method"],
                params.get("max_mention_length", 10),
            )
            self.classification_heads = nn.ModuleDict(classification_heads_dict)
        elif ctxt_bert_output_dim != cand_bert.embeddings.word_embeddings.weight.size(1):
            # mapping to make the output dimensions match for dot-product similarity
            self.linear_compression = nn.Linear(ctxt_bert_output_dim, cand_bert.embeddings.word_embeddings.weight.size(1))
    def get_raw_ctxt_encoding(
        self,
        token_idx_ctxt,
        segment_idx_ctxt,
        mask_ctxt,
    ):
        """
        Gets raw, shared context embeddings from BERT,
        to be used by both mention detector and entity linker
        Returns:
            torch.FloatTensor (bsz, seqlen, embed_dim)
        """
        raw_ctxt_encoding, _, _ = self.context_encoder.bert_model(
            token_idx_ctxt, segment_idx_ctxt, mask_ctxt,
        )
        return raw_ctxt_encoding
    def get_ctxt_mention_scores(
        self,
        token_idx_ctxt,
        segment_idx_ctxt,
        mask_ctxt,
        raw_ctxt_encoding = None,
    ):
        """
        Gets mention scores using raw context encodings
        Inputs:
            raw_ctxt_encoding: torch.FloatTensor (bsz, seqlen, embed_dim)
        Returns:
            torch.FloatTensor (bsz, num_total_mentions): mention scores/logits
            torch.IntTensor (bsz, num_total_mentions): mention boundaries
        """
        # (bsz, seqlen, embed_dim)
        if raw_ctxt_encoding is None:
            # encoding not precomputed by the caller: run BERT here
            raw_ctxt_encoding = self.get_raw_ctxt_encoding(
                token_idx_ctxt, segment_idx_ctxt, mask_ctxt,
            )
        # (num_total_mentions,); (num_total_mentions,)
        return self.classification_heads['mention_scores'](
            raw_ctxt_encoding, mask_ctxt,
        )
    def prune_ctxt_mentions(
        self,
        mention_logits,
        mention_bounds,
        num_cand_mentions,
        threshold,
    ):
        '''
        Prunes mentions based on mention scores/logits (by either
        `threshold` or `num_cand_mentions`, whichever yields less candidates)
        Inputs:
            mention_logits: torch.FloatTensor (bsz, num_total_mentions)
            mention_bounds: torch.IntTensor (bsz, num_total_mentions)
            num_cand_mentions: int
            threshold: float
        Returns:
            torch.FloatTensor(bsz, max_num_pred_mentions): top mention scores/logits
            torch.IntTensor(bsz, max_num_pred_mentions, 2): top mention boundaries
            torch.BoolTensor(bsz, max_num_pred_mentions): mask on top mentions
            torch.BoolTensor(bsz, total_possible_mentions): mask for reshaping from total possible mentions -> max # pred mentions
        '''
        # (bsz, num_cand_mentions); (bsz, num_cand_mentions)
        top_mention_logits, mention_pos = mention_logits.topk(num_cand_mentions, sorted=True)
        # (bsz, num_cand_mentions, 2)
        #   [:,:,0]: index of batch
        #   [:,:,1]: index into top mention in mention_bounds
        mention_pos = torch.stack([torch.arange(mention_pos.size(0)).to(mention_pos.device).unsqueeze(-1).expand_as(mention_pos), mention_pos], dim=-1)
        # (bsz, num_cand_mentions)
        # keep mentions whose log-sigmoid score clears the threshold
        top_mention_pos_mask = torch.sigmoid(top_mention_logits).log() > threshold
        # (total_possible_mentions, 2)
        #   tuples of [index of batch, index into mention_bounds] of what mentions to include
        mention_pos = mention_pos[top_mention_pos_mask | (
            # 2nd part of OR: if nothing is > threshold, use topK that are > -inf
            ((top_mention_pos_mask.sum(1) == 0).unsqueeze(-1)) & (top_mention_logits > -float("inf"))
        )]
        mention_pos = mention_pos.view(-1, 2)
        # (bsz, total_possible_mentions)
        #   mask of possible logits
        mention_pos_mask = torch.zeros(mention_logits.size(), dtype=torch.bool).to(mention_pos.device)
        mention_pos_mask[mention_pos[:,0], mention_pos[:,1]] = 1
        # (bsz, max_num_pred_mentions, 2)
        chosen_mention_bounds, chosen_mention_mask = batch_reshape_mask_left(mention_bounds, mention_pos_mask, pad_idx=0)
        # (bsz, max_num_pred_mentions)
        chosen_mention_logits, _ = batch_reshape_mask_left(mention_logits, mention_pos_mask, pad_idx=-float("inf"), left_align_mask=chosen_mention_mask)
        return chosen_mention_logits, chosen_mention_bounds, chosen_mention_mask, mention_pos_mask
    def get_ctxt_embeds(
        self,
        raw_ctxt_encoding,
        mention_bounds,
    ):
        """
        Get candidate scores + embeddings associated with passed-in mention_bounds
        Input
            raw_ctxt_encoding: torch.FloatTensor (bsz, seqlen, embed_dim)
                shared embeddings straight from BERT
            mention_bounds: torch.IntTensor (bsz, max_num_pred_mentions, 2)
                top mention boundaries
        Returns
            torch.FloatTensor (bsz, max_num_pred_mentions, embed_dim)
        """
        # (bs, max_num_pred_mentions, embed_dim)
        embedding_ctxt = self.classification_heads['get_context_embeds'](raw_ctxt_encoding, mention_bounds)
        if self.linear_compression is not None:
            # project to the candidate-encoder dimension
            embedding_ctxt = self.linear_compression(embedding_ctxt)
        return embedding_ctxt
    def forward_ctxt(
        self,
        token_idx_ctxt,
        segment_idx_ctxt,
        mask_ctxt,
        gold_mention_bounds=None,
        gold_mention_bounds_mask=None,
        num_cand_mentions=50,
        topK_threshold=-4.5,
        get_mention_scores=True,
    ):
        """
        If gold_mention_bounds is set, returns mention embeddings of passed-in mention bounds
        Otherwise, uses top-scoring mentions
        """
        if self.mention_aggregation_type is None:
            '''
            OLD system: don't do mention aggregation (use tokens around mention)
            '''
            embedding_ctxt = self.context_encoder(
                token_idx_ctxt, segment_idx_ctxt, mask_ctxt,
            )
            # linear mapping to correct context length
            if self.linear_compression is not None:
                embedding_ctxt = self.linear_compression(embedding_ctxt)
            return embedding_ctxt, None, None, None
        else:
            '''
            NEW system: aggregate mention tokens
            '''
            # (bs, seqlen, embed_size)
            raw_ctxt_encoding = self.get_raw_ctxt_encoding(
                token_idx_ctxt, segment_idx_ctxt, mask_ctxt,
            )
            top_mention_bounds = None
            top_mention_logits = None
            extra_rets = {}
            if get_mention_scores:
                mention_logits, mention_bounds = self.get_ctxt_mention_scores(
                    token_idx_ctxt, segment_idx_ctxt, mask_ctxt, raw_ctxt_encoding,
                )
                extra_rets['all_mention_logits'] = mention_logits
                extra_rets['all_mention_bounds'] = mention_bounds
                if gold_mention_bounds is None:
                    # no gold spans given: keep only top-scoring mentions
                    (
                        top_mention_logits, top_mention_bounds, top_mention_mask, all_mention_mask,
                    ) = self.prune_ctxt_mentions(
                        mention_logits, mention_bounds, num_cand_mentions, topK_threshold,
                    )
                    extra_rets['mention_logits'] = top_mention_logits.view(-1)
                    extra_rets['all_mention_mask'] = all_mention_mask
            if top_mention_bounds is None:
                # use gold mention
                top_mention_bounds = gold_mention_bounds
                top_mention_mask = gold_mention_bounds_mask
            assert top_mention_bounds is not None
            assert top_mention_mask is not None
            # (bs, num_pred_mentions OR num_gold_mentions, embed_size)
            embedding_ctxt = self.get_ctxt_embeds(
                raw_ctxt_encoding, top_mention_bounds,
            )
            # for merging dataparallel, only 1st dimension can differ...
            return {
                "mention_reps": embedding_ctxt.view(-1, embedding_ctxt.size(-1)),
                "mention_bounds": top_mention_bounds.view(-1, top_mention_bounds.size(-1)),
                "mention_masks": top_mention_mask.view(-1),
                "mention_dims": torch.tensor(top_mention_mask.size()).unsqueeze(0).to(embedding_ctxt.device),
                **extra_rets
            }
    def forward_candidate(
        self,
        token_idx_cands,
        segment_idx_cands,
        mask_cands,
    ):
        """
        Encodes candidate entity tokens with the candidate encoder.
        """
        try:
            return self.cand_encoder(
                token_idx_cands, segment_idx_cands, mask_cands
            )
        except:
            # NOTE(review): bare except that falls back to *random* embeddings.
            # This silently masks any encoder error (shape bugs, OOM, ...) and
            # looks like a debugging hack -- it should probably re-raise.
            print(token_idx_cands.size())
            print(segment_idx_cands.size())
            print(mask_cands.size())
            return torch.rand(token_idx_cands.size()).to(token_idx_cands.device)
    def forward(
        self,
        token_idx_ctxt,
        segment_idx_ctxt,
        mask_ctxt,
        token_idx_cands,
        segment_idx_cands,
        mask_cands,
        gold_mention_bounds=None,
        gold_mention_bounds_mask=None,
        num_cand_mentions=50,
        topK_threshold=-4.5,
        get_mention_scores=True,
    ):
        """
        If gold_mention_bounds is set, returns mention embeddings of passed-in mention bounds
        Otherwise, uses top-scoring mentions
        Either side (context or candidate) may be skipped by passing None
        for its token ids; the corresponding output is then None.
        """
        # NOTE(review): the locals below are assigned but never read
        embedding_ctxt = embedding_cands = top_mention_mask = \
                top_mention_logits = top_mention_bounds = all_mention_mask = \
                all_mention_logits = all_mention_bounds = max_num_pred_mentions = None
        context_outs = None
        cand_outs = None
        if token_idx_ctxt is not None:
            context_outs = self.forward_ctxt(
                token_idx_ctxt, segment_idx_ctxt, mask_ctxt,
                gold_mention_bounds=gold_mention_bounds,
                gold_mention_bounds_mask=gold_mention_bounds_mask,
                num_cand_mentions=num_cand_mentions, topK_threshold=topK_threshold,
                get_mention_scores=get_mention_scores,
            )
        if token_idx_cands is not None:
            cand_outs = self.forward_candidate(
                token_idx_cands, segment_idx_cands, mask_cands
            )
        return context_outs, cand_outs
    def upgrade_state_dict_named(self, state_dict):
        """
        Reconcile a checkpoint's classification heads with this model's
        (modifies `state_dict` in place): deletes checkpoint heads not
        present on the current model, and copies current weights for heads
        missing from the checkpoint.
        """
        prefix = ''
        current_head_names = [] if not hasattr(self, 'classification_heads') else \
            self.classification_heads.keys()
        # Handle new classification heads present in the state dict.
        keys_to_delete = []
        for k in state_dict.keys():
            if not k.startswith(prefix + 'classification_heads.'):
                continue
            head_name = k[len(prefix + 'classification_heads.'):].split('.')[0]
            if head_name not in current_head_names:
                print(
                    'WARNING: deleting classification head ({}) from checkpoint '
                    'not present in current model: {}'.format(head_name, k)
                )
                keys_to_delete.append(k)
        for k in keys_to_delete:
            del state_dict[k]
        # Copy any newly-added classification heads into the state dict
        # with their current weights.
        if hasattr(self, 'classification_heads'):
            cur_state = self.classification_heads.state_dict()
            for k, v in cur_state.items():
                if prefix + 'classification_heads.' + k not in state_dict:
                    print('Overwriting', prefix + 'classification_heads.' + k)
                    state_dict[prefix + 'classification_heads.' + k] = v
class BiEncoderRanker(torch.nn.Module):
    """
    Wraps BiEncoderModule with a tokenizer, checkpoint load/save, optimizer
    construction, and the scoring/loss logic for training and inference.
    """
    def __init__(self, params, shared=None):
        # shared: accepted for API compatibility; not used in this body
        super(BiEncoderRanker, self).__init__()
        self.params = params
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() and not params["no_cuda"] else "cpu"
        )
        self.n_gpu = torch.cuda.device_count()
        # init tokenizer
        self.NULL_IDX = 0
        self.START_TOKEN = "[CLS]"
        self.END_TOKEN = "[SEP]"
        self.tokenizer = BertTokenizer.from_pretrained(
            params["bert_model"], do_lower_case=params["lowercase"]
        )
        # init model
        self.build_model()
        model_path = params.get("path_to_model", None)
        if model_path is not None:
            self.load_model(
                model_path,
                cand_enc_only=params.get("load_cand_enc_only", False),
            )
        self.model = self.model.to(self.device)
        self.data_parallel = params.get("data_parallel")
        if self.data_parallel:
            self.model = torch.nn.DataParallel(self.model)
    def load_model(self, fname, cpu=False, cand_enc_only=False):
        """
        Load a checkpoint. With cand_enc_only, only the candidate encoder's
        weights are restored; otherwise the full state dict is loaded after
        reconciling classification heads.
        """
        if cpu or not torch.cuda.is_available():
            state_dict = torch.load(fname, map_location=torch.device("cpu"))
        else:
            state_dict = torch.load(fname)
        if cand_enc_only:
            cand_state_dict = get_submodel_from_state_dict(state_dict, 'cand_encoder')
            self.model.cand_encoder.load_state_dict(cand_state_dict)
        else:
            self.model.upgrade_state_dict_named(state_dict)
            self.model.load_state_dict(state_dict)
    def build_model(self):
        # Instantiate the underlying bi-encoder from self.params.
        self.model = BiEncoderModule(self.params)
    def save_model(self, output_dir):
        """Save model weights and config under output_dir (created if needed)."""
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        model_to_save = get_model_obj(self.model)
        output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(output_dir, CONFIG_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
    def get_optimizer(self, optim_states=None, saved_optim_type=None):
        # NOTE(review): optim_states and saved_optim_type are accepted but
        # never used here.
        return get_bert_optimizer(
            [self.model],
            self.params["type_optimization"],
            self.params["learning_rate"],
            fp16=self.params.get("fp16"),
        )
    def encode_context(
        self, cands, gold_mention_bounds=None, gold_mention_bounds_mask=None,
        num_cand_mentions=50, topK_threshold=-4.5,
        get_mention_scores=True,
    ):
        """
        if gold_mention_bounds specified, selects according to gold_mention_bounds,
        otherwise selects according to top-scoring mentions
        Returns: Dictionary
            mention_reps: torch.FloatTensor (bsz, max_num_pred_mentions, embed_dim): mention embeddings
            mention_masks: torch.BoolTensor (bsz, max_num_pred_mentions): mention padding mask
            mention_bounds: torch.LongTensor (bsz, max_num_pred_mentions, 2)
            (
                mention_logits: torch.FloatTensor (bsz, max_num_pred_mentions): mention scores/logits
                all_mention_mask: torch.BoolTensor ((bsz, all_cand_mentions)
                all_mention_logits: torch.FloatTensor (bsz, all_cand_mentions): all mention scores/logits
                all_mention_bounds: torch.LongTensor (bsz, all_cand_mentions, 2): all mention bounds
            )
        """
        token_idx_cands, segment_idx_cands, mask_cands = to_bert_input(
            cands, self.NULL_IDX
        )
        context_outs, _ = self.model(
            token_idx_cands, segment_idx_cands, mask_cands,
            None, None, None,
            gold_mention_bounds=gold_mention_bounds,
            gold_mention_bounds_mask=gold_mention_bounds_mask,
            num_cand_mentions=num_cand_mentions,
            topK_threshold=topK_threshold,
            get_mention_scores=get_mention_scores
        )
        # single device: mention_dims has one row, reshape flat outputs directly
        if context_outs['mention_dims'].size(0) <= 1:
            for key in context_outs:
                if 'all' in key or key == 'mention_dims':
                    continue
                context_outs[key] = context_outs[key].view([context_outs['mention_dims'][0,0], -1] + list(context_outs[key].size()[1:]))
            return context_outs
        '''
        Reshape to (bs, num_mentions, *), iterating across GPUs
        '''
        def init_tensor(shape, dtype, init_value):
            # padded output buffer pre-filled with init_value
            return init_value * torch.ones(
                shape
            ).to(dtype=dtype, device=context_outs['mention_dims'].device)
        bs = cands.size(0)
        # widest per-GPU mention count determines the padded width
        n_pred_mentions = context_outs['mention_dims'][:,1].max()
        context_outs_reshape = {}
        for key in context_outs:
            if 'all' in key or key == 'mention_dims':
                context_outs_reshape[key] = context_outs[key]
                continue
            # (bsz, max_num_pred_mentions, *)
            context_outs_reshape[key] = init_tensor(
                [bs, n_pred_mentions] + list(context_outs[key].size()[1:]),
                context_outs[key].dtype,
                -float("inf") if 'logit' in key else 0,
            )
        for idx in range(len(context_outs['mention_dims'])):
            # reshape: copy each GPU's flat slice into its padded rows
            gpu_bs = context_outs['mention_dims'][idx, 0]
            b_width = context_outs['mention_dims'][idx, 1]
            start_idx = (context_outs['mention_dims'][:idx, 0] * context_outs['mention_dims'][:idx, 1]).sum()
            end_idx = start_idx + b_width * gpu_bs
            s_reshape = context_outs['mention_dims'][:idx, 0].sum()
            e_reshape = s_reshape + gpu_bs
            for key in context_outs_reshape:
                if 'all' in key or key == 'mention_dims':
                    continue
                if len(context_outs[key].size()) == 1:
                    target_tensor = context_outs[key][start_idx:end_idx].view(gpu_bs, b_width)
                else:
                    target_tensor = context_outs[key][start_idx:end_idx].view(gpu_bs, b_width, -1)
                context_outs_reshape[key][s_reshape:e_reshape, :b_width] = target_tensor
        return context_outs_reshape
    def encode_candidate(self, cands):
        """Encode candidate token ids into candidate embeddings."""
        token_idx_cands, segment_idx_cands, mask_cands = to_bert_input(
            cands, self.NULL_IDX
        )
        _, embedding_cands = self.model(
            None, None, None,
            token_idx_cands, segment_idx_cands, mask_cands
        )
        return embedding_cands
    # Score candidates given context input and label input
    # If text_encs/cand_encs is provided (pre-computed), text_vecs/cand_vecs is ignored
    def score_candidate(
        self,
        text_vecs,
        cand_vecs,
        text_encs=None,  # pre-computed mention encoding
        cand_encs=None,  # pre-computed candidate encoding.
        gold_mention_bounds=None,
        gold_mention_bounds_mask=None,
        num_cand_mentions=50,
        mention_threshold=-4.5,
        get_mention_scores=True,
        hard_negs=False,  # (if training) passed in a subset of hard negatives
        hard_negs_mask=None,  # (if hard negs training) mask for gold candidate mentions on all inputs (pos + negs)
    ):
        """
        text_vecs (bs, max_ctxt_size):
        cand_vecs (bs, max_num_gold_mentions, 1, max_cand_size):
        text_encs (batch_num_mentions, embed_size): Pre-encoded mention vectors, masked before input
        cand_encs (num_ents_to_match [batch_num_total_ents/all_ents], embed_size): Pre-encoded candidate vectors, masked before input
        """
        '''
        Compute context representations and/or get mention scores
        '''
        if text_encs is None or get_mention_scores:
            # embedding_ctxt: (bs, num_gold_mentions/num_pred_mentions, embed_size)
            context_outs = self.encode_context(
                text_vecs, gold_mention_bounds=gold_mention_bounds,
                gold_mention_bounds_mask=gold_mention_bounds_mask,
                num_cand_mentions=num_cand_mentions,
                topK_threshold=mention_threshold,
                get_mention_scores=get_mention_scores,
            )
        mention_logits = None
        mention_bounds = None
        if get_mention_scores:
            mention_logits = context_outs['all_mention_logits']
            mention_bounds = context_outs['all_mention_bounds']
        if text_encs is None:
            if gold_mention_bounds is None:
                # (all_batch_pred_mentions, embed_size)
                embedding_ctxt = context_outs['mention_reps'][context_outs['mention_masks']]
            else:
                # (all_batch_pred_mentions, embed_size)
                embedding_ctxt = context_outs['mention_reps'][gold_mention_bounds_mask]
        else:
            # Context encoding is given, do not need to re-compute
            embedding_ctxt = text_encs
        '''
        Compute candidate representations
        '''
        if cand_encs is None:
            # Train time: Compute candidates in batch and compare in-batch negatives
            # cand_vecs: (bs, num_gold_mentions, 1, cand_width) -> (batch_num_gold_mentions, cand_width)
            cand_vecs = cand_vecs[gold_mention_bounds_mask].squeeze(1)
            # (batch_num_gold_mentions, embed_dim)
            embedding_cands = self.encode_candidate(cand_vecs)
        else:
            # (batch_num_gold_mentions, embed_dim)
            embedding_cands = cand_encs
        '''
        Do inner-product search, or obtain scores on hard-negative entities
        '''
        if hard_negs:
            assert hard_negs_mask is not None
            # (num_mention_in_batch, embed_dim)
            embedding_ctxt = embedding_ctxt[hard_negs_mask]
            embedding_cands = embedding_cands[hard_negs_mask]
            embedding_ctxt = embedding_ctxt.unsqueeze(1)  # num_mention_in_batch x 1 x embed_size
            embedding_cands = embedding_cands.unsqueeze(2)  # num_mention_in_batch x embed_size x 1
            scores = torch.bmm(embedding_ctxt, embedding_cands)  # num_mention_in_batch x 1 x 1
            scores = torch.squeeze(scores)
            # (num_mention_in_batch,)
            return scores, mention_logits, mention_bounds
        else:
            # matmul across all cand_encs (in-batch, if cand_encs is None, or across all cand_encs)
            # (all_batch_pred_mentions, num_cands)
            # similarity score between ctxt i and cand j
            all_scores = embedding_ctxt.mm(embedding_cands.t())
            return all_scores, mention_logits, mention_bounds
    # label_input -- negatives provided
    # If label_input is None, train on in-batch negatives
    def forward(
        self, context_input, cand_input,
        text_encs=None,  # pre-computed mention encoding.
        cand_encs=None,  # pre-computed candidate embeddings
        mention_logits=None,  # pre-computed mention logits
        mention_bounds=None,  # pre-computed mention bounds
        label_input=None,  # labels for passed-in (if hard negatives training)
        gold_mention_bounds=None,
        gold_mention_bounds_mask=None,
        hard_negs_mask=None,  # should be non-none if we are using negs
        return_loss=True,
    ):
        """
        text_encs/cand_encs/label_inputs masked before training
        In-batch negs training: cand_encs None, label_inputs None, return_loss True
        Hard negs training: cand_encs non-None, label_inputs non-None, return_loss True
            cand_encs = all entities in batch + additional hard negatives
        Inference: cand_encs non-None, label_inputs None, return_loss False
            cand_encs = all entities in DB
        cand_encs
            non-None: set of candidate encodings to search in
            None: compute in-batch candidate vectors (used as negatives if train mode)
        label_inputs
            non-None: labels to use for hard negatives training
            None: random negatives training and/or inference
        """
        hard_negs = label_input is not None
        '''
        GET CANDIDATE SCORES
        '''
        scores, out_mention_logits, out_mention_bounds = self.score_candidate(
            context_input, cand_input,
            hard_negs=hard_negs,
            cand_encs=cand_encs,
            text_encs=text_encs,
            gold_mention_bounds=gold_mention_bounds,
            gold_mention_bounds_mask=gold_mention_bounds_mask,
            hard_negs_mask=hard_negs_mask,
            get_mention_scores=(return_loss and (mention_logits is None or mention_bounds is None)),
        )
        if mention_logits is None:
            mention_logits = out_mention_logits
        if mention_bounds is None:
            mention_bounds = out_mention_bounds
        if not return_loss:
            return None, scores, mention_logits, mention_bounds
        '''
        COMPUTE MENTION LOSS (TRAINING MODE)
        '''
        span_loss = 0
        if mention_logits is not None and mention_bounds is not None:
            N = context_input.size(0)  # batch size
            M = gold_mention_bounds.size(1)  # num_mentions per instance (just 1, so far)
            # 1 value
            span_loss = self.get_span_loss(
                gold_mention_bounds=gold_mention_bounds,
                gold_mention_bounds_mask=gold_mention_bounds_mask,
                mention_logits=mention_logits, mention_bounds=mention_bounds,
            )
        '''
        COMPUTE EL LOSS (TRAINING MODE)
        '''
        if hard_negs:
            '''
            Hard negatives (negatives passed in)
            '''
            loss_fct = nn.BCEWithLogitsLoss(reduction="mean")
            label_input = label_input[hard_negs_mask]
            # scores: (num_mentions_in_batch,); label_input: (num_mentions_in_batch,)
            loss = loss_fct(scores, label_input.float()) + span_loss
        else:
            '''
            Random negatives (use in-batch negatives)
            '''
            # scores: (bs*num_mentions [filtered], bs*num_mentions [filtered])
            # NOTE(review): torch.LongTensor(torch.arange(...)) is redundant --
            # torch.arange already returns an integer tensor
            target = torch.LongTensor(torch.arange(scores.size(1)))
            target = target.to(self.device)
            # log P(entity|mention) + log P(mention) = log [P(entity|mention)P(mention)]
            loss = F.cross_entropy(scores, target, reduction="mean") + span_loss
        return loss, scores, mention_logits, mention_bounds
    def get_span_loss(
        self, gold_mention_bounds, gold_mention_bounds_mask, mention_logits, mention_bounds,
    ):
        """
        gold_mention_bounds (bs, num_mentions, 2)
        gold_mention_bounds_mask (bs, num_mentions):
        mention_logits (bs, all_mentions)
        menion_bounds (bs, all_mentions, 2)
        """
        loss_fct = nn.BCEWithLogitsLoss(reduction="mean")
        # NOTE(review): mutates the caller's gold_mention_bounds tensor in place
        gold_mention_bounds[~gold_mention_bounds_mask] = -1  # ensure don't select masked to score
        # triples of [ex in batch, mention_idx in gold_mention_bounds, idx in mention_bounds]
        # use 1st, 2nd to index into gold_mention_bounds, 1st, 3rd to index into mention_bounds
        gold_mention_pos_idx = ((
            mention_bounds.unsqueeze(1) - gold_mention_bounds.unsqueeze(2)  # (bs, num_mentions, start_pos * end_pos, 2)
        ).abs().sum(-1) == 0).nonzero()
        # gold_mention_pos_idx should have 1 entry per masked element
        # (num_gold_mentions [~gold_mention_bounds_mask])
        gold_mention_pos = gold_mention_pos_idx[:,2]
        # (bs, total_possible_spans)
        gold_mention_binary = torch.zeros(mention_logits.size(), dtype=mention_logits.dtype).to(gold_mention_bounds.device)
        gold_mention_binary[gold_mention_pos_idx[:,0], gold_mention_pos_idx[:,2]] = 1
        # prune masked spans
        mask = mention_logits != -float("inf")
        masked_mention_logits = mention_logits[mask]
        masked_gold_mention_binary = gold_mention_binary[mask]
        # (bs, total_possible_spans)
        span_loss = loss_fct(masked_mention_logits, masked_gold_mention_binary)
        return span_loss
def to_bert_input(token_idx, null_idx):
    """
    Turn a 2-D int tensor of token ids into the BERT input triple
    (token ids, segment ids, attention mask). Positions equal to
    `null_idx` are treated as padding.
    """
    # padding mask: True where there is a real token
    mask = token_idx != null_idx
    # single-sentence input: every segment id is zero
    segment_idx = torch.zeros_like(token_idx)
    # zero out padding positions in case null_idx was not 0
    token_idx = token_idx * mask.long()
    return token_idx, segment_idx, mask
|
import os
from sys import argv
from getpass import getuser
from argparse import ArgumentParser
def user_defaults(mode):
    """
    Build the default-argument table for the current user and run mode
    ('train' or anything else for eval-style defaults).
    """
    defaults = {}
    tless = 'tless' in argv
    who = getuser()
    ###########################################################################
    ## Add your username here if you want to change default args
    ###########################################################################
    if who == "nate":
        defaults["workers"] = 4
        defaults["batch_size"] = 16 if tless else 2
        defaults["truncate_obj"] = defaults["batch_size"] if tless else 16
    elif who == "rpng":
        defaults["workers"] = 8
        # NOTE: Need to change if using sim data. This is for the primesense
        # data which has 1 object per image.
        defaults["batch_size"] = 56 if tless else 12
        defaults["truncate_obj"] = defaults["batch_size"] if tless else 64
    elif who == "zuox":
        defaults["workers"] = 12
        defaults["batch_size"] = 128 if tless else 24
        defaults["truncate_obj"] = defaults["batch_size"] if tless else 128
    # User-independent args
    defaults["dataset"] = "ycbv"
    if mode == 'train':
        defaults["checkpoint_path"] = None
        defaults["detection_type"] = "gt+noise"
    else:
        defaults["checkpoint_path"] = "results/pkpnet_09-28-2021@15-54-39/model_best.pth.tar"
        defaults["detection_type"] = "saved"
    return defaults
def get_args(mode='train'):
    """
    Build and parse command-line arguments for training ('train') or
    evaluation ('eval'). Per-user defaults come from user_defaults();
    returns the parsed argparse.Namespace with `data_root` attached.
    """
    assert mode in ['train', 'eval']
    parser = ArgumentParser(description=('Evaluate' if mode=='eval' else 'Train') + ' PkpNet')
    defaults = user_defaults(mode)
    # Args shared by both modes
    parser.add_argument('--checkpoint_path', '-c', default=defaults["checkpoint_path"],
        help=f'Path to the checkpoint file to load (resume for training or run for eval). '
             + f'(default={defaults["checkpoint_path"]})')
    parser.add_argument('--dataset', '-d', default=defaults["dataset"],
        choices=['ycbv', 'tless'], help=f'Dataset type (default={defaults["dataset"]}).')
    parser.add_argument('--no_network_cov', '-u', action='store_true',
        help='If set, ignore the uncertainty predicted by the net and '
             + 'skip MLE loss if training.')
    parser.add_argument('--show_viz', action='store_true',
        help='If set, visualize the results while training or testing.')
    det_choices = ['gt','gt+noise']
    if mode != "train":
        # 'saved' detections only make sense at eval time
        det_choices.append('saved')
    parser.add_argument('--detection_type', '-t', default=defaults["detection_type"],
        choices=det_choices,
        help=f'Type of detection to feed network (default={defaults["detection_type"]}).')
    # NOTE(review): looks for a literal 'tless' token in sys.argv (pre-parse),
    # not the parsed --dataset value
    default_split = 'primesense' if 'tless' in argv else 'real+synt'
    # Mode-specific args
    if mode == 'train':
        parser.add_argument('--workers', '-j', type=int, default=defaults["workers"],
            help=f'Maximum number of workers in dataloader (some eval may use less).')
        parser.add_argument('--batch_size', '-b', type=int, default=defaults["batch_size"],
            help=f'Maximum batch_size for machine (some eval may use less). '
                 + 'Note that multiple objects are loaded for each batch, so the '
                 + 'true batch size run through the keypoint model will be larger than this.')
        parser.add_argument('--epochs', type=int, default=1000 if 'tless' in argv else 30,
            help=f'Number of epochs to train.')
        parser.add_argument('--lr', type=float, default=1e-3,
            help=f'Learning rate.')
        parser.add_argument('--ext', default="",
            help=f'Extension to place on the directory name for organizational purposes. '
                 'Also, if a directory matching ext is already found, then resume from the '
                 'most recent one (unless no_resume is set)')
        parser.add_argument('--no_resume', action='store_true',
            help='The training code typically looks for the most recent directory matching '
                 'the current args for resuming. If no_resume is set, then train from scrath.')
        parser.add_argument('--pretrain', default=None,
            help=f'Path to the checkpoint file to use for a pretrained '
                 'network without resuming.')
        parser.add_argument('--data_split', default=default_split,
            help=f'"+"-separated list of the training splits to use. '
                 'Can be any combination of "real", "synt", and "pbr". See the BOP '
                 f'website for more details on what these are (default={default_split}).')
        parser.add_argument('--truncate_obj', type=int, default=defaults["truncate_obj"],
            help=f'Truncate a batch to this many objects so you can leave the batch size '
                 + 'larger but avoid memory overflow. Warnings will be printed if more than '
                 + 'a few objects are truncated.')
        parser.add_argument('--mask_occluded', action='store_true',
            help='If set, train the network to only detect visible keypoints.')
        parser.add_argument('--no_augmentations', action='store_true',
            help='If set, skip the training data augmentations.')
    else:
        parser.add_argument('--nviews', type=int, default=-1,
            help=f'Number of views to eval with. --nviews=1 returns just the single-view '
                 'PnP results (with covariance-weighted refinement if using covariance), '
                 '--nviews=N for some N>1 (typically small like 5 or 8) will perform a SfM type '
                 'evaluation. In this case, the SfM problem will be run separately for each '
                 'view with the current view in the dataset as the global frame in the problem. '
                 'This will be rather slow to process because of this. Set to -1 to eval all '
                 'views sequentially in SLAM fashion. (default=-1).')
        # TODO I feel like there's a better way to do the viz args, like some sort of list
        # of things you want to viz.
        parser.add_argument('--no_viz', action='store_true',
            help='If set, save some time by skipping visualizations altogether. '
                 'Visualizations typically take 200ms or so per image, so it\'s alot.')
        parser.add_argument('--viz_cov', action='store_true',
            help='If set, visualize the covariance in the images (may be visually busy).')
        parser.add_argument('--do_viz_extra', action='store_true',
            help='If set, add extra visualizations of each individual object.')
        parser.add_argument('--no_prior_det', '-p', action='store_true',
            help='If set, skip the prior detections which disambiguate multiview results.')
        parser.add_argument('--debug_gt_kp', action='store_true',
            help='If set, debug the evaluation with the GT keypoints instead of estimated. '
                 'Note that there is some noise added to the GT keypoints to keep the information '
                 'matrix of the PnP and BA well-conditioned, so the result is not "perfect"')
        parser.add_argument('--gt_cam_pose', action='store_true',
            help='If set, give the SLAM system the GT camera poses.')
        parser.add_argument('--debug_saved_only', action='store_true',
            help='If set, just eval the saved detections to debug the AUC of ADD(-S).')
        parser.add_argument('--give_all_prior', action='store_true',
            help='If set, consider all objects the prior detection regardless of '
                 'symmetric or not. If gt_cam_pose is not set, then other methods will '
                 'try and estimate the camera pose from the bboxes or a constant velocity model.')
    # reads sys.argv
    args = parser.parse_args()
    # Make sure all data downloaded is in ./data (can be a symlink)
    setattr(args, "data_root", os.path.join(os.getcwd(), 'data/bop_datasets/', args.dataset))
    return args
|
<filename>chapter06/demo_6.4.py
#!/usr/bin/python2.7
# -*- coding:utf-8 -*-
# Author: NetworkRanger
# Date: 2018/12/8 3:59 PM
# 6.4 A single-layer neural network with TensorFlow (book-section demo)
# 1. Create a computational-graph session and import the required libraries
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets
# 2. Load the Iris dataset. The first three columns are the features and the
#    fourth column (petal width) is the regression target; then start a session.
iris = datasets.load_iris()
x_vals = np.array([x[0:3] for x in iris.data])
y_vals = np.array([x[3] for x in iris.data])
sess = tf.Session()
# 3. The dataset is small, so fix both seeds to make results reproducible
seed = 5
tf.set_random_seed(seed)
np.random.seed(seed)
# 4. Prepare the data: an 80/20 train/test split; features are later min-max
#    scaled into the [0, 1] range
train_indices = np.random.choice(len(x_vals), round(len(x_vals)*0.8), replace=False)
test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
x_vals_train = x_vals[train_indices]
x_vals_test = x_vals[test_indices]
y_vals_train = y_vals[train_indices]
y_vals_test = y_vals[test_indices]
def normailize_cols(m):
    """Min-max scale every column of *m* into the [0, 1] range.

    A column with zero spread divides by zero (NaN/inf); the caller
    cleans that up with ``np.nan_to_num``.
    """
    col_lo, col_hi = m.min(axis=0), m.max(axis=0)
    return (m - col_lo) / (col_hi - col_lo)
x_vals_train = np.nan_to_num(normailize_cols(x_vals_train))
# NOTE(review): the test set is scaled with its *own* column min/max rather
# than the training set's statistics — test-set leakage; confirm intended.
x_vals_test = np.nan_to_num(normailize_cols(x_vals_test))
# 5. Declare the batch size and the placeholders for the data and target
batch_size = 50
x_data = tf.placeholder(shape=[None, 3], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
# 6. Declare model variables with the appropriate shapes. The hidden layer can
#    be any size; here it has five nodes
hidden_layer_nodes = 5
A1 = tf.Variable(tf.random_normal(shape=[3,hidden_layer_nodes]))
b1 = tf.Variable(tf.random_normal(shape=[hidden_layer_nodes]))
A2 = tf.Variable(tf.random_normal(shape=[hidden_layer_nodes,1]))
b2 = tf.Variable(tf.random_normal(shape=[1]))
# 7. Declare the model in two steps: first the hidden-layer output, then the
#    final output. (Original note below: three features, five hidden nodes,
#    one output value.)
"""
注意,本例中的模型有三个特征、五个隐藏节点和一个输出结果值。
"""
hidden_output = tf.nn.relu(tf.add(tf.matmul(x_data, A1), b1))
final_output = tf.nn.relu(tf.add(tf.matmul(hidden_output, A2), b2))
# 8. Use mean squared error as the loss function
loss = tf.reduce_mean(tf.square(y_target - final_output))
# 9. Declare the optimizer and initialize the model variables
my_opt = tf.train.GradientDescentOptimizer(0.005)
train_step = my_opt.minimize(loss)
init = tf.global_variables_initializer()
sess.run(init)
# 10. Training loop. Keep two lists for train/test losses; each iteration
#     fits the model on a random batch of training data
# First we initialize the loss vectors for storage.
loss_vec = []
test_loss = []
for i in range(500):
    # First we select a random set of indices for the batch.
    rand_index = np.random.choice(len(x_vals_train), size=batch_size)
    # We then select the training values
    rand_x = x_vals_train[rand_index]
    rand_y = np.transpose([y_vals_train[rand_index]])
    # Now we run the training step
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    # We save the training loss (sqrt of MSE, i.e. RMSE)
    temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
    loss_vec.append(np.sqrt(temp_loss))
    # Finally, we run the test-set loss and save it.
    test_temp_loss = sess.run(loss, feed_dict={x_data: x_vals_test, y_target: np.transpose([y_vals_test])})
    test_loss.append(np.sqrt(test_temp_loss))
    if (i+1)%50 == 0:
        print('Generation: ' + str(i+1) + '. Loss = ' + str(temp_loss))
# 11. Plot the loss curves with matplotlib
plt.plot(loss_vec, 'k-', label='Train Loss')
plt.plot(test_loss, 'r--', label='Test Loss')
plt.title('Loss (MSE) per Generation')
plt.xlabel('Generation')
plt.ylabel('Loss')
plt.legend(loc='upper right')
plt.show()
|
import sys
sys.dont_write_bytecode = True
import os
import json
import torch
import torchvision
import torch.nn.parallel
import torch.optim as optim
import numpy as np
from tensorboardX import SummaryWriter
import core.utils.opts as opts
from core.data.datasets.dataset import VideoDataSet, ProposalDataSet
from core.models.models import TEM, PEM
from core.loss.loss_function import TEM_loss_function, PEM_loss_function
import pandas as pd
from core.models.pgm import PGM_proposal_generation, PGM_feature_generation
from core.utils.post_processing import BSN_post_processing
from tools.eval import evaluation_proposal
def train_TEM(data_loader, model, optimizer, epoch, writer, opt):
    """Train the Temporal Evaluation Module for one epoch.

    Accumulates the action/start/end losses and the combined cost over all
    batches, logs their running means to TensorBoard, and prints a summary.
    """
    model.train()
    sums = {"loss_action": 0.0, "loss_start": 0.0, "loss_end": 0.0, "cost": 0.0}
    n_batches = 0
    for input_data, label_action, label_start, label_end in data_loader:
        TEM_output = model(input_data)
        loss = TEM_loss_function(label_action, label_start, label_end,
                                 TEM_output, opt)
        # Backpropagate the combined cost only.
        optimizer.zero_grad()
        loss["cost"].backward()
        optimizer.step()
        n_batches += 1
        for key in sums:
            sums[key] += loss[key].cpu().detach().numpy()
    writer.add_scalars('data/action',
                       {'train': sums["loss_action"] / n_batches}, epoch)
    writer.add_scalars('data/start',
                       {'train': sums["loss_start"] / n_batches}, epoch)
    writer.add_scalars('data/end', {'train': sums["loss_end"] / n_batches},
                       epoch)
    writer.add_scalars('data/cost', {'train': sums["cost"] / n_batches},
                       epoch)
    print(
        "TEM training loss(epoch %d): action - %.03f, start - %.03f, end - %.03f"
        % (epoch, sums["loss_action"] / n_batches,
           sums["loss_start"] / n_batches, sums["loss_end"] / n_batches))
def test_TEM(data_loader, model, epoch, writer, opt):
    """Evaluate the TEM on the validation set for one epoch.

    Logs the mean action/start/end/cost losses to TensorBoard, always writes
    a rolling checkpoint, and snapshots the model whenever the summed epoch
    cost improves on the best seen so far.
    """
    model.eval()
    epoch_action_loss = 0
    epoch_start_loss = 0
    epoch_end_loss = 0
    epoch_cost = 0
    # Improvement: evaluation previously ran with autograd enabled, building
    # graphs that were never used; no_grad() removes that memory/compute cost
    # without changing any logged value.
    with torch.no_grad():
        for n_iter, (input_data, label_action, label_start,
                     label_end) in enumerate(data_loader):
            TEM_output = model(input_data)
            loss = TEM_loss_function(label_action, label_start, label_end,
                                     TEM_output, opt)
            epoch_action_loss += loss["loss_action"].cpu().detach().numpy()
            epoch_start_loss += loss["loss_start"].cpu().detach().numpy()
            epoch_end_loss += loss["loss_end"].cpu().detach().numpy()
            epoch_cost += loss["cost"].cpu().detach().numpy()
    writer.add_scalars('data/action',
                       {'test': epoch_action_loss / (n_iter + 1)}, epoch)
    writer.add_scalars('data/start', {'test': epoch_start_loss / (n_iter + 1)},
                       epoch)
    writer.add_scalars('data/end', {'test': epoch_end_loss / (n_iter + 1)},
                       epoch)
    writer.add_scalars('data/cost', {'test': epoch_cost / (n_iter + 1)}, epoch)
    print(
        "TEM testing loss(epoch %d): action - %.03f, start - %.03f, end - %.03f"
        % (epoch, epoch_action_loss / (n_iter + 1), epoch_start_loss /
           (n_iter + 1), epoch_end_loss / (n_iter + 1)))
    state = {'epoch': epoch + 1, 'state_dict': model.state_dict()}
    torch.save(state, opt["checkpoint_path"] + "/tem_checkpoint.pth.tar")
    # NOTE(review): the comparison uses the *summed* epoch cost, not the mean
    # (np.mean of a scalar is a no-op); with drop_last=True the batch count
    # is constant, so the comparison stays consistent across epochs.
    if epoch_cost < model.module.tem_best_loss:
        model.module.tem_best_loss = np.mean(epoch_cost)
        torch.save(state, opt["checkpoint_path"] + "/tem_best.pth.tar")
def train_PEM(data_loader, model, optimizer, epoch, writer, opt):
    """Train the Proposal Evaluation Module for one epoch and log the mean IoU loss."""
    model.train()
    running_loss = 0.0
    batch_count = 0
    for input_data, label_iou in data_loader:
        pred_iou = model(input_data)
        batch_loss = PEM_loss_function(pred_iou, label_iou, model, opt)
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()
        running_loss += batch_loss.cpu().detach().numpy()
        batch_count += 1
    writer.add_scalars('data/iou_loss',
                       {'train': running_loss / batch_count}, epoch)
    print("PEM training loss(epoch %d): iou - %.04f" % (epoch, running_loss /
                                                        batch_count))
def test_PEM(data_loader, model, epoch, writer, opt):
    """Validate the PEM for one epoch, log the mean IoU loss, and checkpoint.

    Always writes a rolling checkpoint; additionally snapshots the model
    when the summed epoch IoU loss beats the best seen so far.
    """
    model.eval()
    epoch_iou_loss = 0
    # Improvement: evaluation previously ran with autograd enabled; no_grad()
    # avoids building unused graphs without changing any logged value.
    with torch.no_grad():
        for n_iter, (input_data, label_iou) in enumerate(data_loader):
            PEM_output = model(input_data)
            iou_loss = PEM_loss_function(PEM_output, label_iou, model, opt)
            epoch_iou_loss += iou_loss.cpu().detach().numpy()
    writer.add_scalars('data/iou_loss',
                       {'validation': epoch_iou_loss / (n_iter + 1)}, epoch)
    print("PEM testing loss(epoch %d): iou - %.04f" % (epoch, epoch_iou_loss /
                                                       (n_iter + 1)))
    state = {'epoch': epoch + 1, 'state_dict': model.state_dict()}
    torch.save(state, opt["checkpoint_path"] + "/pem_checkpoint.pth.tar")
    # NOTE(review): compares the summed loss (np.mean of a scalar is a no-op);
    # consistent across epochs because drop_last=True fixes the batch count.
    if epoch_iou_loss < model.module.pem_best_loss:
        model.module.pem_best_loss = np.mean(epoch_iou_loss)
        torch.save(state, opt["checkpoint_path"] + "/pem_best.pth.tar")
def BSN_Train_TEM(opt):
    """Train the Temporal Evaluation Module with per-epoch validation."""
    writer = SummaryWriter()
    model = TEM(opt)
    # NOTE(review): device ids are hard-coded to GPUs 0 and 1.
    model = torch.nn.DataParallel(model, device_ids=[0,1]).cuda()
    optimizer = optim.Adam(model.parameters(),
                           lr=opt["tem_training_lr"],
                           weight_decay=opt["tem_weight_decay"])
    train_loader = torch.utils.data.DataLoader(
        VideoDataSet(opt, subset="train"),
        batch_size=model.module.batch_size,
        shuffle=True,
        num_workers=8,
        pin_memory=True,
        drop_last=True)
    test_loader = torch.utils.data.DataLoader(
        VideoDataSet(opt, subset="validation"),
        batch_size=model.module.batch_size,
        shuffle=False,
        num_workers=8,
        pin_memory=True,
        drop_last=True)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=opt["tem_step_size"],
                                                gamma=opt["tem_step_gamma"])
    for epoch in range(opt["tem_epoch"]):
        train_TEM(train_loader, model, optimizer, epoch, writer, opt)
        test_TEM(test_loader, model, epoch, writer, opt)
        # Bug fix: scheduler.step() used to run *before* training each epoch,
        # which decays the learning rate one epoch early. PyTorch >= 1.1
        # requires stepping the scheduler after the epoch's optimizer steps.
        scheduler.step()
    writer.close()
def BSN_Train_PEM(opt):
    """Train the Proposal Evaluation Module with per-epoch validation."""
    writer = SummaryWriter()
    model = PEM(opt)
    model = torch.nn.DataParallel(model, device_ids=[0,1]).cuda()
    optimizer = optim.Adam(model.parameters(),
                           lr=opt["pem_training_lr"],
                           weight_decay=opt["pem_weight_decay"])
    def collate_fn(batch):
        # Each video contributes a variable number of proposals, so samples
        # are concatenated along dim 0 rather than stacked.
        batch_data = torch.cat([x[0] for x in batch])
        batch_iou = torch.cat([x[1] for x in batch])
        return batch_data, batch_iou
    train_loader = torch.utils.data.DataLoader(
        ProposalDataSet(opt, subset="train"),
        batch_size=model.module.batch_size,
        shuffle=True,
        num_workers=8,
        pin_memory=True,
        drop_last=True,
        collate_fn=collate_fn)
    # NOTE(review): shuffle=True on the *validation* loader combined with
    # drop_last=True means a different tail of samples is dropped each
    # epoch — confirm this is intended.
    test_loader = torch.utils.data.DataLoader(
        ProposalDataSet(opt, subset="validation"),
        batch_size=model.module.batch_size,
        shuffle=True,
        num_workers=8,
        pin_memory=True,
        drop_last=True,
        collate_fn=collate_fn)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=opt["pem_step_size"],
                                                gamma=opt["pem_step_gamma"])
    for epoch in range(opt["pem_epoch"]):
        train_PEM(train_loader, model, optimizer, epoch, writer, opt)
        test_PEM(test_loader, model, epoch, writer, opt)
        # Bug fix: scheduler.step() used to run before training each epoch,
        # decaying the learning rate one epoch early (PyTorch >= 1.1 order).
        scheduler.step()
    writer.close()
def BSN_inference_TEM(opt):
    """Run the best TEM checkpoint over the full subset.

    Writes one CSV per video containing per-snippet action/start/end scores
    together with the temporal anchors (xmin/xmax).
    """
    model = TEM(opt)
    checkpoint = torch.load(opt["checkpoint_path"] + "/tem_best.pth.tar")
    # Strip the 'module.' prefix that DataParallel adds to parameter names.
    base_dict = {
        '.'.join(k.split('.')[1:]): v
        for k, v in list(checkpoint['state_dict'].items())
    }
    model.load_state_dict(base_dict)
    model = torch.nn.DataParallel(model, device_ids=[0,1]).cuda()
    model.eval()
    test_loader = torch.utils.data.DataLoader(
        VideoDataSet(opt, subset="full"),
        batch_size=model.module.batch_size,
        shuffle=False,
        num_workers=8,
        pin_memory=True,
        drop_last=False)
    columns = ["action", "start", "end", "xmin", "xmax"]
    # Improvement: inference previously ran with autograd enabled; outputs
    # were immediately detached anyway, so no_grad() only saves memory.
    with torch.no_grad():
        for index_list, input_data, anchor_xmin, anchor_xmax in test_loader:
            TEM_output = model(input_data).detach().cpu().numpy()
            # Channel layout matches the CSV columns: action, start, end.
            batch_action = TEM_output[:, 0, :]
            batch_start = TEM_output[:, 1, :]
            batch_end = TEM_output[:, 2, :]
            index_list = index_list.numpy()
            anchor_xmin = np.array([x.numpy()[0] for x in anchor_xmin])
            anchor_xmax = np.array([x.numpy()[0] for x in anchor_xmax])
            for batch_idx, full_idx in enumerate(index_list):
                video = list(test_loader.dataset.video_list)[full_idx]
                video_action = batch_action[batch_idx]
                video_start = batch_start[batch_idx]
                video_end = batch_end[batch_idx]
                video_result = np.stack((video_action, video_start, video_end,
                                         anchor_xmin, anchor_xmax),
                                        axis=1)
                video_df = pd.DataFrame(video_result, columns=columns)
                video_df.to_csv("./output/TEM_results/" + video + ".csv",
                                index=False)
def BSN_inference_PEM(opt):
    """Score proposals with the best PEM checkpoint and write per-video CSVs."""
    model = PEM(opt)
    checkpoint = torch.load(opt["checkpoint_path"] + "/pem_best.pth.tar")
    # Strip the 'module.' prefix that DataParallel adds to parameter names.
    base_dict = {
        '.'.join(k.split('.')[1:]): v
        for k, v in list(checkpoint['state_dict'].items())
    }
    model.load_state_dict(base_dict)
    model = torch.nn.DataParallel(model, device_ids=[0,1]).cuda()
    model.eval()
    test_loader = torch.utils.data.DataLoader(
        ProposalDataSet(opt, subset=opt["pem_inference_subset"]),
        batch_size=model.module.batch_size,
        shuffle=False,
        num_workers=8,
        pin_memory=True,
        drop_last=False)
    # Improvement: inference previously ran with autograd enabled; no_grad()
    # only removes unused graph tracking.
    with torch.no_grad():
        for idx, (video_feature, video_xmin, video_xmax, video_xmin_score,
                  video_xmax_score) in enumerate(test_loader):
            # NOTE(review): assumes each batch holds exactly one video so the
            # loader index lines up with dataset.video_list — confirm.
            video_name = test_loader.dataset.video_list[idx]
            video_conf = model(video_feature).view(-1).detach().cpu().numpy()
            video_xmin = video_xmin.view(-1).cpu().numpy()
            video_xmax = video_xmax.view(-1).cpu().numpy()
            video_xmin_score = video_xmin_score.view(-1).cpu().numpy()
            video_xmax_score = video_xmax_score.view(-1).cpu().numpy()
            df = pd.DataFrame()
            df["xmin"] = video_xmin
            df["xmax"] = video_xmax
            df["xmin_score"] = video_xmin_score
            df["xmax_score"] = video_xmax_score
            df["iou_score"] = video_conf
            df.to_csv("./output/PEM_results/" + video_name + ".csv", index=False)
def main(opt):
    """Dispatch to the BSN pipeline stage selected by opt["module"].

    Stages: TEM (train/inference), PGM (proposal + BSP feature generation),
    PEM (train/inference), Post_processing, Evaluation. Output directories
    are created on demand before inference/generation stages.
    """
    if opt["module"] == "TEM":
        if opt["mode"] == "train":
            print("TEM training start")
            BSN_Train_TEM(opt)
            print("TEM training finished")
        elif opt["mode"] == "inference":
            print("TEM inference start")
            if not os.path.exists("output/TEM_results"):
                os.makedirs("output/TEM_results")
            BSN_inference_TEM(opt)
            print("TEM inference finished")
        else:
            print("Wrong mode. TEM has two modes: train and inference")
    elif opt["module"] == "PGM":
        # PGM has no train/inference modes: it always generates proposals
        # and then BSP features.
        if not os.path.exists("output/PGM_proposals"):
            os.makedirs("output/PGM_proposals")
        print("PGM: start generate proposals")
        PGM_proposal_generation(opt)
        print("PGM: finish generate proposals")
        if not os.path.exists("output/PGM_feature"):
            os.makedirs("output/PGM_feature")
        print("PGM: start generate BSP feature")
        PGM_feature_generation(opt)
        print("PGM: finish generate BSP feature")
    elif opt["module"] == "PEM":
        if opt["mode"] == "train":
            print("PEM training start")
            BSN_Train_PEM(opt)
            print("PEM training finished")
        elif opt["mode"] == "inference":
            if not os.path.exists("output/PEM_results"):
                os.makedirs("output/PEM_results")
            print("PEM inference start")
            BSN_inference_PEM(opt)
            print("PEM inference finished")
        else:
            print("Wrong mode. PEM has two modes: train and inference")
    elif opt["module"] == "Post_processing":
        print("Post processing start")
        BSN_post_processing(opt)
        print("Post processing finished")
    elif opt["module"] == "Evaluation":
        evaluation_proposal(opt)
    print("")
if __name__ == '__main__':
    opt = opts.parse_opt()
    opt = vars(opt)
    if not os.path.exists(opt["checkpoint_path"]):
        os.makedirs(opt["checkpoint_path"])
    # Persist the run configuration next to the checkpoints. The context
    # manager guarantees the file is closed even if json.dump raises
    # (the original opened/closed the file manually).
    with open(opt["checkpoint_path"] + "/opts.json", "w") as opt_file:
        json.dump(opt, opt_file)
    main(opt)
#<NAME>
# Banker's algorithm (deadlock-avoidance safety check) — educational demo
# Number of processes
P = 5
# Number of resource types
R = 3
def calculateprecisa(precisa, max, alocacao):
    """Fill *precisa* in place with each process's outstanding need.

    need[i][j] = maximum claim of process i on resource j minus what is
    already allocated to it.
    """
    for proc in range(P):
        for res in range(R):
            precisa[proc][res] = max[proc][res] - alocacao[proc][res]
def isSafe(processes, acessivel, max, alocacao):
    """Banker's algorithm safety check.

    Prints and returns True with a safe execution sequence if one exists,
    otherwise prints a warning and returns False.

    Bug fixed: the original tested ``if (j == R - 1)`` after the inner
    resource loop to decide whether all needs were met — that condition is
    also true when the loop *breaks* on the last resource, wrongly treating
    an unsatisfiable process as runnable. A for/else now detects a
    break-free (fully satisfied) scan unambiguously.
    """
    # need[i][j] = maximum claim minus current allocation
    precisa = [[0] * R for _ in range(P)]
    calculateprecisa(precisa, max, alocacao)
    # finalizar[p] == 1 once process p has been shown able to finish
    finalizar = [0] * P
    # Safe execution order being built
    safeSeq = [0] * P
    # Resources currently available ("work" vector), seeded from acessivel
    trabalho = list(acessivel)
    count = 0
    while count < P:
        # Look for an unfinished process whose full need fits in trabalho.
        found = False
        for p in range(P):
            if finalizar[p]:
                continue
            for j in range(R):
                if precisa[p][j] > trabalho[j]:
                    break
            else:
                # Every need of p is satisfiable: let it run to completion
                # and release its allocation back into the work pool.
                for k in range(R):
                    trabalho[k] += alocacao[p][k]
                safeSeq[count] = p
                count += 1
                finalizar[p] = 1
                found = True
        # No runnable process was found this pass: the state is unsafe.
        if not found:
            print("O sistema não está em estado seguro")
            return False
    # System is in a safe state; report the sequence found.
    print("O sistema está em estado seguro.",
          "\nO estado seguro é: ", end = " ")
    print(*safeSeq)
    return True
# Entry point: classic Banker's algorithm example instance.
if __name__ == "__main__":
    processes = [0, 1, 2, 3, 4]
    # Instances of each resource currently available
    acessivel = [3, 3, 2]
    # Maximum claim of each process on each resource
    max = [[7, 5, 3], [3, 2, 2],
           [9, 0, 2], [2, 2, 2],
           [4, 3, 3]]
    # Resources currently allocated to each process
    alocacao = [[0, 1, 0], [2, 0, 0],
                [3, 0, 2], [2, 1, 1],
                [0, 0, 2]]
    # Report whether the system is in a safe state
    isSafe(processes, acessivel, max, alocacao)
    # Expected output: safe state, sequence 1 3 4 0 2
|
<reponame>AllVides/DB_EDD_G9
from tkinter import *
from tkinter import ttk
from tkinter import messagebox # message box
from LoadData import Data as Cargar
from tkinter import filedialog
from tkinter import Image
class StorageGui(Frame):
    """Main Tkinter GUI for the TitusDB demo: menu, CSV load, DB selection.

    NOTE(review): most widgets are created without an explicit parent, so
    they attach to Tk's default root window — restructuring would change
    which window they land in.
    """
    def __init__(self, master=None):
        """Attach to *master* and immediately build the main menu screen."""
        super().__init__(master)
        self.master = master
        #self.pack(fill='both', expand=1)
        self.WinPrincipal()
    def WinPrincipal(self):
        """Build the welcome/menu screen on the default root window."""
        label1=Label( text="¡ Welcome To TitusDB!")
        label1.config(font=("Verdana",50))
        label1.pack(anchor=CENTER)
        label2=Label( text="¡ Select your option : ")
        label2.config(font=("Verdana",28))
        label2.pack(anchor=CENTER)
        label3=Label( text="\n\n1.option 1\n2.option 2\n3.option 3\n4. ISAM \n5.option 5")
        label3.config(font=("Verdana",23))
        label3.pack(anchor=CENTER)
        # NOTE(review): label3 is rebound here, dropping the reference to the
        # options label created just above (the widget itself stays packed).
        label3=Label( text="\n\nInsert Number")
        label3.config(font=("Verdana",15))
        label3.pack()
        # NOTE(review): the entered number is never read — viewDb is opened
        # regardless of the value; confirm intended.
        entry = Entry()
        entry.pack(anchor=CENTER)
        button = Button(text= 'Accept', padx= 15, pady=6, bg= 'grey',fg='white', command=self.viewDb)
        button.pack(anchor=CENTER)
    def say_hi(self):
        """Debug helper; not wired to any widget in this class."""
        print("hi there, everyone!")
    def viewDb(self):
        """Hide the main window and open the CSV-loading window."""
        self.master.withdraw()
        Win2 = Toplevel()
        Win2.geometry("1000x600")
        Win2.title(" [EDD] Fase-1" )
        Win2.configure(bg='#2C3E50')
        label1=Label(Win2, text="\nLOADING DATA BASE\n")
        label1.place(x=200,y=250,width=200,height=200)
        label1.config(font=("Verdana",50))
        label1.pack(anchor=CENTER)
        label2=Label(Win2, text=" insert load CSV ")
        label2.config(font=("Verdana",15))
        label2.pack()
        self.pathCSV = Entry(Win2)
        self.pathCSV.pack()
        # self.master now points at this Toplevel so later withdraw() calls
        # hide it instead of the original root.
        self.master=Win2
        button = Button(Win2, text= 'Accept', padx= 15, pady=6, bg= 'grey',fg='white',command=self.Open_Archive)
        button.pack(anchor=CENTER)
    def window3(self):
        """Open the database-selection window.

        NOTE(review): uses Tk() rather than Toplevel(), creating a second
        Tk root — usually unintended in a single-root app; confirm.
        """
        Win3 = Tk()
        Win3.geometry("1000x600")
        Win3.title(" [EDD] Fase-1" )
        Win3.configure(bg='#2C3E50')
        label1=Label(Win3,text="\nSelect your Data Base\n\n\n")
        label1.config(font=("Verdana",15))
        label1.pack(anchor=CENTER)
        comboExample = ttk.Combobox (Win3,
                                     values=[
                                         "January",
                                         "February",
                                         "March",
                                         "April"])
        comboExample.pack()
        button = Button(Win3, text= 'Accept', padx= 15, pady=6, bg= 'grey',fg='white',command=self.Open_Archive)
        button.pack(anchor=CENTER)
        # img = PhotoImage(file="TytusLogo.gif")
        # panel = Label(Win3, image = img).place(x=100,y=500)
        self.master=Win3
    def loadtable(self,path):
        """Load the chosen file into the data layer, then show window3."""
        Cargar.CargarArchivo(path)
        self.master.withdraw()
        self.window3()
    def Open_Archive(self):
        """Ask the user for a file and hand its path to loadtable.

        NOTE(review): the dialog's named filter says 'jpeg files' although
        the surrounding UI asks for a CSV — confirm intended.
        """
        archive=filedialog.askopenfilename(initialdir="/home/msaban",
                                           title="seleccione Archivo",filetypes=(("jpeg files","*jpg"),
                                                                                 ("all files","*.*")))
        print( archive)
        messagebox.showinfo("Loading data","DATA SAVED IN ISAM")
        self.loadtable(archive)
class NavBar(Frame):
    """Navigation bar frame holding shortcut buttons."""
    def __init__(self, master=None):
        """Attach the frame to *master*, fill the window, and build buttons."""
        super().__init__(master)
        self.master = master
        self.pack(fill='both', expand=1)
        self.create_butons()
    def create_butons(self):
        # NOTE(review): name keeps the original 'butons' spelling since
        # external code may call it.
        self.prueba = Button(self, text="DBs",fg="black", bg="white")
        self.prueba.place(x=40,y=40,width=50,height=30)
|
<filename>tests/test_adapter.py<gh_stars>0
import random
import unittest
import torch
from datasets import load_dataset
from tests.test_adapter_embeddings import EmbeddingTestMixin
from transformers import (
AutoModel,
AutoModelForSeq2SeqLM,
BartConfig,
BertConfig,
DistilBertConfig,
EncoderDecoderConfig,
EncoderDecoderModel,
GlueDataset,
GlueDataTrainingArguments,
GPT2Config,
MBartConfig,
RobertaConfig,
T5Config,
XLMRobertaConfig,
)
from transformers.testing_utils import require_torch, torch_device
from .test_adapter_common import AdapterModelTestMixin
from .test_adapter_composition import ParallelAdapterInferenceTestMixin, ParallelTrainingMixin
from .test_adapter_conversion import ModelClassConversionTestMixin
from .test_adapter_fusion_common import AdapterFusionModelTestMixin
from .test_adapter_heads import PredictionHeadModelTestMixin
from .test_adapter_training import AdapterTrainingTestMixin
def make_config(config_class, **kwargs):
    """Wrap ``config_class(**kwargs)`` in a staticmethod factory.

    Lets test-base classes declare ``config = make_config(...)`` as a class
    attribute and call ``self.config()`` to get a fresh config each time.
    """
    def _factory():
        return config_class(**kwargs)
    return staticmethod(_factory)
class AdapterTestBase:
    """Shared helpers for the adapter test suites: model, inputs, head, data."""
    # If not overridden by subclass, AutoModel should be used.
    model_class = AutoModel
    def get_model(self):
        """Instantiate self.config() with the configured model class on torch_device."""
        if self.model_class == AutoModel:
            model = AutoModel.from_config(self.config())
        else:
            model = self.model_class(self.config())
        model.to(torch_device)
        return model
    def get_input_samples(self, shape, vocab_size=5000, config=None):
        """Build a random ``input_ids`` batch of *shape*.

        For encoder-decoder configs, ``decoder_input_ids`` is a clone of
        the encoder inputs.
        """
        total_dims = 1
        for dim in shape:
            total_dims *= dim
        values = []
        for _ in range(total_dims):
            values.append(random.randint(0, vocab_size - 1))
        input_ids = torch.tensor(data=values, dtype=torch.long, device=torch_device).view(shape).contiguous()
        # this is needed e.g. for BART: remove stray EOS tokens and force EOS
        # into the final position of every sequence
        if config and config.eos_token_id is not None and config.eos_token_id < vocab_size:
            input_ids[input_ids == config.eos_token_id] = random.randint(0, config.eos_token_id - 1)
            input_ids[:, -1] = config.eos_token_id
        in_data = {"input_ids": input_ids}
        if config and config.is_encoder_decoder:
            in_data["decoder_input_ids"] = input_ids.clone()
        return in_data
    def add_head(self, model, name, **kwargs):
        """Attach a classification head (seq2seq bases override this)."""
        model.add_classification_head(name, **kwargs)
    def dataset(self, tokenizer):
        """Small MRPC training set used by the adapter-training mixins."""
        data_args = GlueDataTrainingArguments(
            task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
        )
        return GlueDataset(data_args, tokenizer=tokenizer, mode="train")
class BertAdapterTestBase(AdapterTestBase):
    """Adapter test base with a tiny BERT config."""
    config_class = BertConfig
    config = make_config(
        BertConfig,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
    )
    tokenizer_name = "bert-base-uncased"
@require_torch
class BertAdapterTest(
    EmbeddingTestMixin,
    AdapterModelTestMixin,
    AdapterFusionModelTestMixin,
    PredictionHeadModelTestMixin,
    AdapterTrainingTestMixin,
    ParallelAdapterInferenceTestMixin,
    ParallelTrainingMixin,
    BertAdapterTestBase,
    unittest.TestCase,
):
    """Runs the full adapter mixin suite against the tiny BERT config."""
    pass
@require_torch
class BertClassConversionTest(
    ModelClassConversionTestMixin,
    BertAdapterTestBase,
    unittest.TestCase,
):
    """Model-class conversion tests for the tiny BERT config."""
    pass
class RobertaAdapterTestBase(AdapterTestBase):
    """Adapter test base with a tiny RoBERTa config."""
    config_class = RobertaConfig
    config = make_config(
        RobertaConfig,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
    )
@require_torch
class RobertaAdapterTest(
    AdapterModelTestMixin,
    AdapterFusionModelTestMixin,
    PredictionHeadModelTestMixin,
    ParallelAdapterInferenceTestMixin,
    RobertaAdapterTestBase,
    unittest.TestCase,
):
    """Adapter mixin suite for the tiny RoBERTa config."""
    pass
@require_torch
class RobertaClassConversionTest(
    ModelClassConversionTestMixin,
    RobertaAdapterTestBase,
    unittest.TestCase,
):
    """Model-class conversion tests for the tiny RoBERTa config."""
    pass
@require_torch
class XLMRobertaClassConversionTest(
    ModelClassConversionTestMixin,
    AdapterTestBase,
    unittest.TestCase,
):
    """Model-class conversion tests for a tiny XLM-RoBERTa config."""
    config_class = XLMRobertaConfig
    config = make_config(
        XLMRobertaConfig,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
    )
class DistilBertAdapterTestBase(AdapterTestBase):
    """Adapter test base with a tiny DistilBERT config."""
    config_class = DistilBertConfig
    config = make_config(
        DistilBertConfig,
        dim=32,
        n_layers=4,
        n_heads=4,
        hidden_dim=37,
    )
    tokenizer_name = "distilbert-base-uncased"
@require_torch
class DistilBertAdapterTest(
    AdapterModelTestMixin,
    EmbeddingTestMixin,
    AdapterFusionModelTestMixin,
    PredictionHeadModelTestMixin,
    AdapterTrainingTestMixin,
    ParallelAdapterInferenceTestMixin,
    ParallelTrainingMixin,
    DistilBertAdapterTestBase,
    unittest.TestCase,
):
    """Full adapter mixin suite for the tiny DistilBERT config."""
    pass
@require_torch
class DistilBertClassConversionTest(
    ModelClassConversionTestMixin,
    DistilBertAdapterTestBase,
    unittest.TestCase,
):
    """Model-class conversion tests for the tiny DistilBERT config."""
    pass
class BartAdapterTestBase(AdapterTestBase):
    """Adapter test base with a tiny BART config."""
    config_class = BartConfig
    config = make_config(
        BartConfig,
        d_model=16,
        encoder_layers=2,
        decoder_layers=2,
        encoder_attention_heads=4,
        decoder_attention_heads=4,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
    )
    tokenizer_name = "facebook/bart-base"
@require_torch
class BartAdapterTest(
    AdapterModelTestMixin,
    AdapterFusionModelTestMixin,
    EmbeddingTestMixin,
    PredictionHeadModelTestMixin,
    AdapterTrainingTestMixin,
    ParallelAdapterInferenceTestMixin,
    ParallelTrainingMixin,
    BartAdapterTestBase,
    unittest.TestCase,
):
    """Full adapter mixin suite for the tiny BART config."""
    pass
@require_torch
class BartClassConversionTest(
    ModelClassConversionTestMixin,
    BartAdapterTestBase,
    unittest.TestCase,
):
    """Model-class conversion tests for the tiny BART config."""
    pass
class MBartAdapterTestBase(AdapterTestBase):
    """Adapter test base with a tiny mBART config."""
    config_class = MBartConfig
    config = make_config(
        MBartConfig,
        d_model=16,
        encoder_layers=2,
        decoder_layers=2,
        encoder_attention_heads=4,
        decoder_attention_heads=4,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
    )
@require_torch
class MBartAdapterTest(
    AdapterModelTestMixin,
    AdapterFusionModelTestMixin,
    PredictionHeadModelTestMixin,
    ParallelAdapterInferenceTestMixin,
    MBartAdapterTestBase,
    unittest.TestCase,
):
    """Adapter mixin suite for the tiny mBART config."""
    pass
@require_torch
class MBartClassConversionTest(
    ModelClassConversionTestMixin,
    MBartAdapterTestBase,
    unittest.TestCase,
):
    """Model-class conversion tests for the tiny mBART config."""
    pass
class GPT2AdapterTestBase(AdapterTestBase):
    """Adapter test base with a tiny GPT-2 config."""
    config_class = GPT2Config
    config = make_config(
        GPT2Config,
        n_embd=32,
        n_layer=4,
        n_head=4,
        # set pad token to eos token
        pad_token_id=50256,
    )
    tokenizer_name = "gpt2"
@require_torch
class GPT2AdapterTest(
    AdapterModelTestMixin,
    EmbeddingTestMixin,
    AdapterFusionModelTestMixin,
    PredictionHeadModelTestMixin,
    AdapterTrainingTestMixin,
    ParallelAdapterInferenceTestMixin,
    ParallelTrainingMixin,
    GPT2AdapterTestBase,
    unittest.TestCase,
):
    """Full adapter mixin suite for the tiny GPT-2 config."""
    pass
@require_torch
class GPT2ClassConversionTest(
    ModelClassConversionTestMixin,
    GPT2AdapterTestBase,
    unittest.TestCase,
):
    """Model-class conversion tests for the tiny GPT-2 config."""
    pass
class EncoderDecoderAdapterTestBase(AdapterTestBase):
    """Adapter test base for a tiny BERT2BERT encoder-decoder model."""
    model_class = EncoderDecoderModel
    config_class = EncoderDecoderConfig
    config = staticmethod(
        lambda: EncoderDecoderConfig.from_encoder_decoder_configs(
            BertConfig(
                hidden_size=32,
                num_hidden_layers=4,
                num_attention_heads=4,
                intermediate_size=37,
            ),
            BertConfig(
                hidden_size=32,
                num_hidden_layers=4,
                num_attention_heads=4,
                intermediate_size=37,
                is_decoder=True,
                add_cross_attention=True,
            ),
        )
    )
    tokenizer_name = "bert-base-uncased"
@require_torch
class EncoderDecoderAdapterTest(
    AdapterModelTestMixin,
    AdapterFusionModelTestMixin,
    EncoderDecoderAdapterTestBase,
    unittest.TestCase,
):
    """Adapter mixin suite for the encoder-decoder model, plus a custom
    invertible-adapter test."""
    def test_invertible_adapter_with_head(self):
        """This test class is copied and adapted from the identically-named test in test_adapter_heads.py."""
        model = AutoModelForSeq2SeqLM.from_config(self.config())
        model.add_adapter("test", config="pfeiffer+inv")
        model.set_active_adapters("test")
        # Set a hook before the invertible adapter to make sure it's actually called twice:
        # Once after the embedding layer and once in the prediction head.
        calls = 0
        def forward_pre_hook(module, input):
            nonlocal calls
            calls += 1
        inv_adapter = model.base_model.get_invertible_adapter()
        self.assertIsNotNone(inv_adapter)
        inv_adapter.register_forward_pre_hook(forward_pre_hook)
        in_data = self.get_input_samples((1, 128), config=model.config)
        out = model(**in_data)
        self.assertEqual((1, 128, model.config.decoder.vocab_size), out[0].shape)
        self.assertEqual(2, calls)
# NOTE(review): @require_torch on a test *base* is unusual — the sibling
# bases leave the decorator to the concrete test classes; confirm intended.
@require_torch
class T5AdapterTestBase(AdapterTestBase):
    """Adapter test base with a tiny T5 config and an XSum-sample seq2seq dataset."""
    config_class = T5Config
    config = make_config(
        T5Config,
        d_model=16,
        encoder_layers=2,
        decoder_layers=2,
        encoder_attention_heads=4,
        decoder_attention_heads=4,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        tie_word_embeddings=False,
        decoder_start_token_id=0,
    )
    tokenizer_name = "t5-base"
    def add_head(self, model, name, **kwargs):
        # NOTE(review): kwargs are accepted but not forwarded to the head —
        # confirm intentional.
        model.add_seq2seq_lm_head(name)
    def dataset(self, tokenizer):
        """Tokenized XSum sample for summarization training tests."""
        def preprocess_function(examples):
            inputs = examples["document"]
            targets = examples["summary"]
            inputs = ["Summarize: " + inp for inp in inputs]
            model_inputs = tokenizer(inputs, padding=True, truncation=True)
            # Setup the tokenizer for targets
            with tokenizer.as_target_tokenizer():
                labels = tokenizer(targets, padding=True, truncation=True)
            # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
            # padding in the loss.
            labels["input_ids"] = [
                [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
            ]
            model_inputs["labels"] = labels["input_ids"]
            return model_inputs
        data_args = {
            "task_name": "xsum",
            "path": "./tests/fixtures/tests_samples/xsum/sample.json",
        }
        dataset = load_dataset("json", data_files=data_args["path"])
        train_dataset = dataset["train"]
        train_dataset = train_dataset.map(
            preprocess_function,
            batched=True,
            desc="Running tokenizer on train dataset",
        )
        return train_dataset
@require_torch
class T5AdapterTest(
    T5AdapterTestBase,
    EmbeddingTestMixin,
    ParallelAdapterInferenceTestMixin,
    ParallelTrainingMixin,
    AdapterModelTestMixin,
    AdapterFusionModelTestMixin,
    AdapterTrainingTestMixin,
    PredictionHeadModelTestMixin,
    AdapterTestBase,
    unittest.TestCase,
):
    """Full adapter mixin suite for the tiny T5 config."""
    # NOTE(review): AdapterTestBase in the bases is redundant —
    # T5AdapterTestBase already inherits from it.
    pass
@require_torch
class T5ClassConversionTest(
    ModelClassConversionTestMixin,
    T5AdapterTestBase,
    unittest.TestCase,
):
    """Model-class conversion tests for the tiny T5 config."""
    pass
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'addSubjectDialogUi.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_AddSubject(object):
def setupUi(self, AddSubject):
AddSubject.setObjectName("AddSubject")
AddSubject.resize(519, 312)
self.gridLayout = QtWidgets.QGridLayout(AddSubject)
self.gridLayout.setObjectName("gridLayout")
self.labelAddSubjects = QtWidgets.QLabel(AddSubject)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.labelAddSubjects.setFont(font)
self.labelAddSubjects.setObjectName("labelAddSubjects")
self.gridLayout.addWidget(self.labelAddSubjects, 0, 0, 1, 2)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
self.pushButtonCancel = QtWidgets.QPushButton(AddSubject)
self.pushButtonCancel.setObjectName("pushButtonCancel")
self.horizontalLayout_2.addWidget(self.pushButtonCancel)
self.pushButtonOk = QtWidgets.QPushButton(AddSubject)
self.pushButtonOk.setObjectName("pushButtonOk")
self.horizontalLayout_2.addWidget(self.pushButtonOk)
self.gridLayout.addLayout(self.horizontalLayout_2, 2, 0, 1, 3)
self.scrollArea = QtWidgets.QScrollArea(AddSubject)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 499, 240))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.gridLayout_2 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)
self.gridLayout_2.setObjectName("gridLayout_2")
self.pushButtonRemove = QtWidgets.QPushButton(self.scrollAreaWidgetContents)
self.pushButtonRemove.setEnabled(False)
self.pushButtonRemove.setObjectName("pushButtonRemove")
self.gridLayout_2.addWidget(self.pushButtonRemove, 2, 0, 1, 1)
self.listWidgetFileNames = QtWidgets.QListWidget(self.scrollAreaWidgetContents)
self.listWidgetFileNames.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.listWidgetFileNames.setObjectName("listWidgetFileNames")
self.gridLayout_2.addWidget(self.listWidgetFileNames, 0, 0, 1, 1)
self.pushButtonBrowse = QtWidgets.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButtonBrowse.sizePolicy().hasHeightForWidth())
self.pushButtonBrowse.setSizePolicy(sizePolicy)
self.pushButtonBrowse.setObjectName("pushButtonBrowse")
self.gridLayout_2.addWidget(self.pushButtonBrowse, 1, 0, 1, 1)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.gridLayout.addWidget(self.scrollArea, 1, 0, 1, 3)
self.retranslateUi(AddSubject)
self.pushButtonCancel.clicked.connect(AddSubject.reject)
self.pushButtonOk.clicked.connect(AddSubject.accept)
QtCore.QMetaObject.connectSlotsByName(AddSubject)
def retranslateUi(self, AddSubject):
    """Set every user-visible string on the AddSubject dialog.

    Qt-generated method (called from setupUi). All texts go through
    QCoreApplication.translate so Qt's translation system can swap them
    at runtime.
    """
    _translate = QtCore.QCoreApplication.translate
    AddSubject.setWindowTitle(_translate("AddSubject", "Meggie - Add subjects"))
    self.labelAddSubjects.setText(_translate("AddSubject", "Add subjects (raw files) to the experiment:"))
    self.pushButtonCancel.setText(_translate("AddSubject", "Cancel"))
    self.pushButtonOk.setText(_translate("AddSubject", "Ok"))
    self.pushButtonRemove.setText(_translate("AddSubject", "Remove"))
    self.pushButtonBrowse.setText(_translate("AddSubject", "Browse..."))
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import sys, os
import argparse
import re
import zipfile
import tempfile
import logging
from evmlab import reproduce, utils
from evmlab import vm as VMUtils
logger = logging.getLogger(__name__)
# Flask is optional: web mode is only enabled when it can be imported.
try:
    import flask
    # Templates live next to this script; resolve relative to __file__ so the
    # webapp works regardless of the current working directory.
    app = flask.Flask(__name__, template_folder=os.path.join(os.path.dirname(__file__), 'templates'))
    logger.info("Flask init: template_folder: %s" % os.path.join(os.path.dirname(__file__), 'templates'))
except ImportError:
    logger.warning("Flask not installed, disabling web mode")
    app = None
# All artefacts (genesis files, traces, zip archives) are written below here.
OUTPUT_DIR = tempfile.mkdtemp(prefix="evmlab")
def create_zip_archive(input_files, output_archive):
    """
    Bundles artefacts into a zip-file.

    @param input_files - list of (location, name) tuples; `location` is the
        path on disk, `name` is the archive-internal name to store it under
    @param output_archive - path of the zip file to create
    """
    # BUG FIX: the original format string contained a %s placeholder but
    # passed no argument, so the log line printed a literal "%s".
    logger.debug("creating zip archive %s for input artefacts:" % output_archive)
    # Context manager guarantees the archive is closed even if a write fails.
    with zipfile.ZipFile(output_archive, 'w', zipfile.ZIP_DEFLATED) as zipf:
        for location, name in input_files:
            logger.debug("adding %s as %s to archive..." % (location, name))
            zipf.write(location, name)
if app:
    # A 32-byte transaction hash: "0x" followed by 64 lowercase hex chars.
    hash_regexp = re.compile("0x[0-9a-f]{64}")

    @app.route("/")
    def test():
        """Render the landing page.

        NOTE: the name `test` is later shadowed by the module-level
        test(vm, api) helper; Flask has already captured this view function,
        so routing is unaffected.
        """
        logger.debug("rendering index...")
        return flask.render_template("index.html")

    @app.route('/reproduce/<txhash>')
    def reproduce_tx(txhash):
        """Reproduce transaction `txhash`, save artefacts and render the result."""
        logger.debug("reproducing transaction %s" % txhash)
        # Verify input
        if not hash_regexp.match(txhash):
            logger.debug("rendering index(invalid tx hash)...")
            return flask.render_template("index.html", message="Invalid tx hash")
        try:
            artefacts, vm_args = reproduce.reproduceTx(txhash, app.vm, app.api)
            logger.debug("done reproducing transaction trace...")
        except Exception as e:
            logger.exception("exception thrown while reproducing transaction...")
            return flask.render_template("index.html", message=str(e))
        logger.debug("saving artefacts to %s" % OUTPUT_DIR)
        saved_files = utils.saveFiles(OUTPUT_DIR, artefacts)
        # Some tricks to get the right command for local replay
        p_gen = saved_files['parity genesis']['name']  # unused; kept as a presence check for the parity genesis
        g_gen = saved_files['geth genesis']['name']
        vm_args['genesis'] = g_gen
        command = app.vm.makeCommand(**vm_args)
        logger.debug("vm command: %s" % command)
        logger.debug("creating zip archive for artefacts")
        prefix = txhash[:8]
        output_archive = os.path.join(OUTPUT_DIR, "%s.zip" % prefix)
        # BUG FIX: iterate the dict *values* (file records). Iterating the dict
        # itself yields string keys, and v['path'] would raise TypeError.
        input_files = [(os.path.join(v['path'], v['name']), v['name'])
                       for v in saved_files.values()]
        create_zip_archive(input_files=input_files, output_archive=output_archive)
        logger.debug("rendering reproduce_tx...")
        return flask.render_template("index.html",
                                     files=saved_files, zipfile="%s.zip" % prefix,
                                     message="Transaction tracing seems to have been successfull. Use the following command to execute locally",
                                     code=" \\\n\t".join(command))

    @app.route('/download/<path:filename>')
    def download_file(filename):
        """Serve a previously generated artefact from OUTPUT_DIR as a download."""
        logger.debug("rendering download_file...")
        return flask.send_from_directory(OUTPUT_DIR, filename, as_attachment=True)
def test(vm, api):
    """Run a canned reproduction as a smoke test.

    NOTE(review): the first two `tx` assignments are dead stores, apparently
    kept as convenient alternatives to swap in by hand; only the last one
    is actually used.
    """
    print("Doing tests")
    # Jumpdest-analysis attack
    tx = ""
    # Parity-wallet attack
    tx = "0x9dbf0326a03a2a3719c27be4fa69aacc9857fd231a8d9dcaede4bb083def75ec"
    # tenx token transfer (should include SLOADS)
    tx = "0xd6d519043d40691a36c9e718e47110309590e6f47084ac0ec00b53718e449fd3"
    return reproduce.reproduceTx(tx, vm, api)
def main():
    """Parse CLI arguments and dispatch to test / webapp / one-shot modes."""
    description = """
Tool to reproduce on-chain events locally.
This can run either as a command-line tool, or as a webapp using a built-in flask interface.
"""
    examples = """
Examples
# Reproduce a tx with a local evm binary
python3 reproducer.py --no-docker -g ~/go/src/github.com/ethereum/go-ethereum/build/bin/evm --hash 0xd6d519043d40691a36c9e718e47110309590e6f47084ac0ec00b53718e449fd3
# Reproduce a tx with a docker evm
python3 reproducer.py -g holiman/gethvm --hash 0xd6d519043d40691a36c9e718e47110309590e6f47084ac0ec00b53718e449fd3
# Start the reproducer webapp using the default geth docker image:
python3 reproducer.py -w localhost
Unfinished:
* This does not _quite_ work with parity, yet, because parity does not load the code in genesis for the 'to'
-account, it expects the code to be given as an argument.
"""
    parser = argparse.ArgumentParser(description=description, epilog=examples,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    evmchoice = parser.add_mutually_exclusive_group()
    evmchoice.add_argument('-g', '--geth-evm', type=str,
                           help="Geth EVM binary or docker image name", default="holiman/gethvm")
    evmchoice.add_argument('-p', '--parity-evm', type=str, default=None,
                           help="Parity EVM binary or docker image name")
    parser.add_argument("--no-docker", action="store_true",
                        help="Set to true if using a local binary instead of a docker image")
    web_or_direct = parser.add_mutually_exclusive_group()
    web_or_direct.add_argument('-x', '--hash', type=str,
                               help="Don't run webapp, just lookup hash")
    if app:
        web_or_direct.add_argument('-w', '--www', type=str, help="Run webapp on given interface (interface:port)")
        parser.add_argument('-d', '--debug', action="store_true", default=False,
                            help="Run flask in debug mode (WARNING: debug on in production is insecure)")
    parser.add_argument('-t', '--test', action="store_true", default=False,
                        help="Dont run webapp, only local tests")
    web3settings = parser.add_argument_group('Web3', 'Settings about where to fetch information from (default infura)')
    # FIX: closed the unbalanced parenthesis in the help text
    web3settings.add_argument("--web3", type=str, default="https://mainnet.infura.io/",
                              help="Web3 API url to fetch info from (default 'https://mainnet.infura.io/')")
    args = parser.parse_args()
    # end of arg handling
    if args.parity_evm:
        vm = VMUtils.ParityVM(args.parity_evm, not args.no_docker)
    else:
        vm = VMUtils.GethVM(args.geth_evm, not args.no_docker)
    api = utils.getApi(args.web3)
    if args.test:
        artefacts = test(vm, api)
        import pprint
        pprint.PrettyPrinter().pprint(artefacts)
        sys.exit(0)
    if app and args.www:
        if ':' in args.www:
            host, port_str = args.www.split(':')
            # BUG FIX: the original kept the port as a string (and had a
            # pointless `port = port` statement); Flask expects an int.
            port = int(port_str)
        else:
            host = args.www
            port = 5000
        app.debug = args.debug
        app.api = api
        app.vm = vm
        app.run(host=host, port=port)
    elif args.hash:
        artefacts, vm_args = reproduce.reproduceTx(args.hash, vm, api)
        saved_files = utils.saveFiles(OUTPUT_DIR, artefacts)
        # Some tricks to get the right command for local replay
        p_gen = saved_files['parity genesis']['name']  # unused; kept as a presence check
        g_gen = saved_files['geth genesis']['name']
        vm_args['genesis'] = "%s/%s" % (OUTPUT_DIR, g_gen)
        print("\nCommand to execute locally (geth):\n")
        print("%s" % " ".join(vm.makeCommand(**vm_args)))
        print("\nWith memory:\n")
        vm_args['memory'] = True
        print("%s" % " ".join(vm.makeCommand(**vm_args)))
        vm_args.pop('json', None)
        vm_args.pop('memory', None)
        vm_args['statdump'] = "true"
        print("\nFor benchmarking:\n")
        print("%s" % " ".join(vm.makeCommand(**vm_args)))
        print("\nFor opviewing:\n")
        print("python3 opviewer.py -f %s/%s" % (saved_files['json-trace']['path'], saved_files['json-trace']['name']))
        print("\nFor opviewing with sources:\n")
        print(
            "python3 opviewer.py -f %s/%s --web3 '%s' -s path_to_contract_dir -j path_to_solc_combined_json --hash %s" % (
                saved_files['json-trace']['path'], saved_files['json-trace']['name'], args.web3, args.hash))
        logger.debug("creating zip archive for artefacts")
        prefix = args.hash[:8]
        output_archive = os.path.join(OUTPUT_DIR, "%s.zip" % prefix)
        # BUG FIX: iterate dict values, not string keys (v['path'] on a str
        # would raise TypeError).
        input_files = [(os.path.join(v['path'], v['name']), v['name'])
                       for v in saved_files.values()]
        create_zip_archive(input_files=input_files, output_archive=output_archive)
        print("\nZipped files into %s" % output_archive)
    else:
        parser.print_usage()
if __name__ == '__main__':
    # Configure root logging once, only when run as a script.
    logging.basicConfig(format='[%(filename)s - %(funcName)20s() ][%(levelname)8s] %(message)s',
                        level=logging.INFO)
    main()
|
<filename>chip8/cpu.py
from .display import Display
from . import instr
import urllib.request
from time import sleep
class Registers:
    """CHIP-8 register file.

    BUG FIX: the registers used to be *class* attributes, so the mutable
    `V` bytearray was shared by every Registers instance. They are now
    per-instance state initialised in __init__.
    """

    def __init__(self) -> None:
        self.V = bytearray(16)  # 16 general purpose 8-bit registers V0..VF
        self.I = 0x00           # Address register
        self.PC = 0x200         # Program counter (programs are loaded at 0x200)
        self.SP = 0x00          # Stack pointer
        # Sound and delay timers
        self.ST = 0x00
        self.DT = 0x00
class CPU(object):
    """CHIP-8 interpreter: owns memory, stack, registers and the display,
    and runs the fetch/decode/execute loop."""

    def __init__(
        self,
        file,
        fgcolor=(255, 255, 255),
        bgcolor=(0, 0, 0),
        keys="<KEY>",
        scale=10,
        url=False,
    ) -> None:
        """Set up memory/stack/registers, load the ROM and start the loop.

        Args:
            file: path of the ROM to run (or a URL when `url` is True).
            fgcolor: foreground display colour (RGB tuple).
            bgcolor: background display colour (RGB tuple).
            keys: key-mapping string passed through to the Display.
            speed: steps per second; each loop iteration sleeps 1/speed s.
            scale: display scaling factor.
            url: when True, `file` is fetched over HTTP instead of opened.
        """
        # Initialize the memory (4096 bytes), stack and registers
        self.memory = bytearray(16 ** 3)
        self.stack = [0] * 16
        self.registers = Registers()
        self.sleep_time = 1 / speed
        self.load_rom(file, url)
        self.display = Display(fgcolor, bgcolor, keys, scale)
        # Load sprites and start event loop
        self.load_sprites()
        self.loop()

    def load_sprites(self):
        """Write the 16 built-in 5-byte hex-digit sprites to 0x000-0x04f.

        This is the standard CHIP-8 font table, written with explicit hex
        escapes (the original literal mixed printable chars like ' ', '`',
        '@' into the bytes, which was hard to read and easy to corrupt).
        """
        sprites = (
            b'\xf0\x90\x90\x90\xf0'  # 0
            b'\x20\x60\x20\x20\x70'  # 1
            b'\xf0\x10\xf0\x80\xf0'  # 2
            b'\xf0\x10\xf0\x10\xf0'  # 3
            b'\x90\x90\xf0\x10\x10'  # 4
            b'\xf0\x80\xf0\x10\xf0'  # 5
            b'\xf0\x80\xf0\x90\xf0'  # 6
            b'\xf0\x10\x20\x40\x40'  # 7
            b'\xf0\x90\xf0\x90\xf0'  # 8
            b'\xf0\x90\xf0\x10\xf0'  # 9
            b'\xf0\x90\xf0\x90\x90'  # A
            b'\xe0\x90\xe0\x90\xe0'  # B
            b'\xf0\x80\x80\x80\xf0'  # C
            b'\xe0\x90\x90\x90\xe0'  # D
            b'\xf0\x80\xf0\x80\xf0'  # E
            b'\xf0\x80\xf0\x80\x80'  # F
        )
        # Slice assignment copies the whole sprite table in one step.
        self.memory[:len(sprites)] = sprites

    def loop(self):
        """Fetch/decode/execute loop.

        Returns when the display is closed or a 0x0000 opcode is reached.
        """
        while True:
            # Try to update display - raises if the display has been quit
            try:
                self.display.update()  # Handle display events
            except Exception:
                # Display gone (window closed / backend error): stop cleanly.
                return
            # Fetch the 2-byte opcode at PC. Named `opcode` (not `instr`) so
            # the module-level `instr` import is not shadowed.
            opcode = self.memory[self.registers.PC : self.registers.PC + 2]
            # Stop execution if instruction is 0x00 0x00
            if opcode.hex() == "0000":
                self.display.quit()
                return
            # Decrement the delay timer if it is nonzero
            if self.registers.DT:
                self.registers.DT -= 1
            self.handle(opcode)  # Handle the instruction
            sleep(self.sleep_time)
            self.registers.PC += 2  # Advance past the opcode just executed

    def handle(self, opcode):
        """Handles the instruction

        Args:
            opcode (bytearray[2]): A bytearray of length 2, containing
                the bytes for the instruction opcode
        """
        # Parse the instruction into operation name and arguments
        opname, args = instr.parse(opcode)
        # Call the function associated with the instruction
        instr.call(self, opname, args)

    def load_rom(self, rom_loc, url):
        """Load the ROM bytes into memory starting at 0x200.

        Args:
            rom_loc: file path, or URL when `url` is True.
            url: selects HTTP fetch vs. local file read.
        """
        if not url:
            with open(rom_loc, "rb") as f:
                data = f.read()
        else:
            # NOTE(review): the URL is fetched without validation; trusted
            # input is assumed here.
            with urllib.request.urlopen(rom_loc) as resp:
                data = resp.read()
        # Write to memory starting at 0x200 in one slice assignment
        self.memory[0x200:0x200 + len(data)] = data
|
# # Desafio Semantix
# Responda as seguintes questões devem ser desenvolvidas em Spark utilizando a sua linguagem de preferência.
#
#
# 1) Número de hosts únicos.
# r) 161884
# In[2]:
# Sanity-check that the Spark context variable is available (pyspark shell)
sc
# In[3]:
# Load the July access log
base = sc.textFile("C:/Users/h_eiz/desafioSemantix/Arquivos/access_log_Jul95.txt")
base
# In[4]:
# Inspect the shape of one row
base.first()
# In[5]:
# Split on ' - - ' to separate the host part from the rest of the line
temporariaA = base.flatMap(lambda k: k.split(" - - "))
temporariaA
# In[20]:
# Keep only the host fragments and build a (host, count) map.
# NOTE(review): counting distinct hosts per month and then summing the two
# months double-counts hosts that appear in both July and August — the
# stated answer (161884) matches this approach; TODO confirm intent.
temporariaB = temporariaA.filter(lambda line: 'HTTP' not in line).map(lambda m: (m,1)).reduceByKey(lambda a, b: a + b)
HsJul = temporariaB.count()
HsJul
# In[13]:
# Check the map contents
temporariaB.first()
# In[18]:
# Repeat the same steps for August
base2 = sc.textFile("C:/Users/h_eiz/desafioSemantix/Arquivos/access_log_Aug95.txt")
temporariaC= base2.flatMap(lambda k: k.split(" - - ")).filter(lambda line: 'HTTP' not in line).map(lambda m: (m,1)).reduceByKey(lambda a, b: a + b)
HsAug = temporariaC.count()
# In[21]:
# Result of question 1
Resposta1 = HsJul + HsAug
Resposta1
# # Question 2
# 2. The total number of 404 errors.
# r)20901
# In[23]:
# Scan line by line for 404 errors.
# NOTE(review): split('/n') is almost certainly a typo for '\n'; it is
# harmless here because textFile() already yields one record per line,
# making the flatMap a no-op — TODO confirm.
Err404Jul = base.flatMap(lambda k: k.split('/n')).filter(lambda line: ' 404 ' in line).count()
Err404Aug = base2.flatMap(lambda k: k.split('/n')).filter(lambda line: ' 404 ' in line).count()
# In[26]:
# Result of question 2
Resposta2 = Err404Jul + Err404Aug
Resposta2
# # Question 3
# The 5 hosts that caused the most 404 errors.
# R)[('dialip-217.den.mmc.com', 56),
#  ('172.16.17.32', 40),
#  ('ts8-1.westwood.ts.ucla.edu', 37),
#  ('192.168.3.11', 36),
#  ('maz3.maz.net', 36)]
# In[65]:
# Filter the 404 lines, isolate the host fragment, and count per host
Err404topJul = base.flatMap(lambda k: k.split('/n')).filter(lambda line: ' 404 ' in line).flatMap(lambda k: k.split(" - - ")).filter(lambda line: 'HTTP' not in line).filter(lambda line: ' ' not in line).map(lambda m: (m,1)).reduceByKey(lambda a, b: a + b)
Err404topAug = base2.flatMap(lambda k: k.split('/n')).filter(lambda line: ' 404 ' in line).flatMap(lambda k: k.split(" - - ")).filter(lambda line: 'HTTP' not in line).filter(lambda line: ' ' not in line).map(lambda m: (m,1)).reduceByKey(lambda a, b: a + b)
# In[66]:
# Per-month rankings, largest count first (kept for inspection)
SortAug = Err404topAug.sortBy(lambda x: x[1],False)
SortJul = Err404topJul.sortBy(lambda x: x[1],False)
# In[67]:
# BUG FIX: the original took take(5) on the *union* of two independently
# sorted RDDs, which is not globally sorted and also counts a host active
# in both months twice. Merge the per-host counts across months first,
# then sort globally and take the top 5.
tempFim = (Err404topJul + Err404topAug).reduceByKey(lambda a, b: a + b).sortBy(lambda x: x[1], False)
Resposta3 = tempFim.take(5)
Resposta3
# # Questão 4
# Quantidade de erros 404 por dia
# r)[('[28/Jul', 93),
# ('[27/Jul', 319),
# ('[26/Jul', 329),
# ('[25/Jul', 439),
# ('[24/Jul', 323),
# ('[23/Jul', 224),
# ('[22/Jul', 191),
# ('[21/Jul', 316),
# ('[20/Jul', 408),
# ('[19/Jul', 623),
# ('[18/Jul', 445),
# ('[17/Jul', 394),
# ('[16/Jul', 252),
# ('[15/Jul', 239),
# ('[14/Jul', 386),
# ('[13/Jul', 514),
# ('[12/Jul', 454),
# ('[11/Jul', 453),
# ('[10/Jul', 381),
# ('[09/Jul', 338),
# ('[08/Jul', 291),
# ('[07/Jul', 541),
# ('[06/Jul', 614),
# ('[05/Jul', 472),
# ('[04/Jul', 341),
# ('[03/Jul', 452),
# ('[02/Jul', 279),
# ('[01/Jul', 304),
# ('[31/Aug', 513),
# ('[30/Aug', 546),
# ('[29/Aug', 408),
# ('[28/Aug', 383),
# ('[27/Aug', 364),
# ('[26/Aug', 357),
# ('[25/Aug', 403),
# ('[24/Aug', 406),
# ('[23/Aug', 336),
# ('[22/Aug', 270),
# ('[21/Aug', 296),
# ('[20/Aug', 295),
# ('[19/Aug', 202),
# ('[18/Aug', 245),
# ('[17/Aug', 261),
# ('[16/Aug', 252),
# ('[15/Aug', 316),
# ('[14/Aug', 281),
# ('[13/Aug', 212),
# ('[12/Aug', 187),
# ('[11/Aug', 249),
# ('[10/Aug', 308),
# ('[09/Aug', 274),
# ('[08/Aug', 371),
# ('[07/Aug', 366),
# ('[06/Aug', 206),
# ('[05/Aug', 227),
# ('[04/Aug', 330),
# ('[03/Aug', 289),
# ('[01/Aug', 235)]
# In[74]:
# Filter the 404 lines, extract the '[dd/Mon' day token from the timestamp
# and count 404s per day (descending by the day token)
diaJul = base.flatMap(lambda k: k.split('/n')).filter(lambda line: ' 404 ' in line).flatMap(lambda k: k.split(' ')).filter(lambda line: '[' in line).flatMap(lambda line: line.split('/1995')).filter(lambda line: '/' in line).map(lambda m: (m,1)).reduceByKey(lambda a, b: a + b).sortBy(lambda x: x[0],False)
diaAug = base2.flatMap(lambda k: k.split('/n')).filter(lambda line: ' 404 ' in line).flatMap(lambda k: k.split(' ')).filter(lambda line: '[' in line).flatMap(lambda line: line.split('/1995')).filter(lambda line: '/' in line).map(lambda m: (m,1)).reduceByKey(lambda a, b: a + b).sortBy(lambda x: x[0],False)
# In[ ]:
# Union of the two months (days never overlap across months, so no re-reduce needed)
Resposta4 = diaJul + diaAug
Resposta4.take(80)
# # Quetão 5
# O total de bytes retornados.
# r) 65123227715 bytes
# In[109]:
#Tratando e separando dados
#Mapeador split o resultado e elimina não inteiros
def pLine(l):
    """Map one pre-processed log fragment to a (key, byte_count) pair.

    The second whitespace-separated field is expected to be the byte count.
    Missing or non-numeric values (e.g. '-') contribute 0 bytes. The key is
    always 1 so a single reduceByKey sums all byte counts into one total.
    """
    campos = l.split(" ")
    try:
        return (1, int(campos[1]))
    except (IndexError, ValueError):
        # No second field, or not an integer: contribute 0 bytes.
        return (1, 0)
# Filter and concatenate the two months, then sum the byte counts.
# NOTE(review): the filters `r'^[0-9]' not in line` use a regex *string* as a
# plain substring test — they only drop lines literally containing "^[0-9]",
# which is almost certainly not what was intended; TODO confirm.
bytesJul = base.flatMap(lambda k: k.split('/n')).flatMap(lambda k: k.split(" - - ")).filter(lambda line: '/1995' in line).flatMap(lambda k: k.split('HTTP')).filter(lambda line: '/1995' not in line).flatMap(lambda k: k.split('" ')).filter(lambda line: r'^[0-9]' not in line).filter(lambda line: '.' not in line)
bytesAug = base2.flatMap(lambda k: k.split('/n')).flatMap(lambda k: k.split(" - - ")).filter(lambda line: '/1995' in line).flatMap(lambda k: k.split('HTTP')).filter(lambda line: '/1995' not in line).flatMap(lambda k: k.split('" ')).filter(lambda line: r'^[0-9]' not in line).filter(lambda line: '.' not in line)
bytesJoin = bytesJul + bytesAug
# All keys are 1, so reduceByKey yields a single (1, total_bytes) pair
tt1 = bytesJoin.map(pLine).reduceByKey(lambda a, b: a + b)
# In[110]:
tt1.take(10)
|
<reponame>jorisvandenbossche/pydov
# -*- coding: utf-8 -*-
"""Module containing the search classes to retrieve DOV borehole data."""
import pandas as pd
from pydov.search.abstract import AbstractSearch
from pydov.types.fields import _WfsInjectedField
from pydov.types.grondmonster import Grondmonster
from pydov.util import owsutil
class GrondmonsterSearch(AbstractSearch):
    """Search class to retrieve the grain size distribution of
    ground samples ('grondmonster')"""

    # Class-level caches: the WFS schema/namespace and CSW metadata are
    # fetched once per process and shared by all instances. Note the
    # double-underscore names are name-mangled to _GrondmonsterSearch__*.
    __wfs_schema = None
    __wfs_namespace = None
    __md_metadata = None
    __fc_featurecatalogue = None
    __xsd_schemas = None

    def __init__(self, objecttype=Grondmonster):
        """Initialisation.

        Parameters
        ----------
        objecttype : subclass of pydov.types.abstract.AbstractDovType
            Reference to a class representing the Grondmonster type.
            Optional: defaults to the Grondmonster type containing the fields
            described in the documentation.

        """
        super(GrondmonsterSearch, self).\
            __init__('boringen:grondmonsters', objecttype)

    def _init_namespace(self):
        """Initialise the WFS namespace associated with the layer."""
        # Fetch only once; subsequent instances reuse the class-level cache.
        if GrondmonsterSearch.__wfs_namespace is None:
            GrondmonsterSearch.__wfs_namespace = self._get_namespace()

    def _init_fields(self):
        """Initialise the fields and their metadata available in this search
        class."""
        if self._fields is None:
            # Lazily populate the shared caches on first use.
            if GrondmonsterSearch.__wfs_schema is None:
                GrondmonsterSearch.__wfs_schema = self._get_schema()
            if GrondmonsterSearch.__md_metadata is None:
                GrondmonsterSearch.__md_metadata = \
                    self._get_remote_metadata()
            if GrondmonsterSearch.__fc_featurecatalogue is None:
                csw_url = self._get_csw_base_url()
                fc_uuid = owsutil.get_featurecatalogue_uuid(
                    GrondmonsterSearch.__md_metadata)
                GrondmonsterSearch.__fc_featurecatalogue = \
                    owsutil.get_remote_featurecatalogue(csw_url, fc_uuid)
            if GrondmonsterSearch.__xsd_schemas is None:
                GrondmonsterSearch.__xsd_schemas = \
                    self._get_remote_xsd_schemas()

            # First pass: discover WFS-only fields and inject them into the
            # object type so they are part of the known field set.
            fields = self._build_fields(
                GrondmonsterSearch.__wfs_schema,
                GrondmonsterSearch.__fc_featurecatalogue,
                GrondmonsterSearch.__xsd_schemas)

            for field in fields.values():
                if field['name'] not in self._type.get_field_names(
                        include_wfs_injected=True):
                    self._type.fields.append(
                        _WfsInjectedField(name=field['name'],
                                          datatype=field['type']))

            # Second pass: rebuild so the injected fields are included.
            self._fields = self._build_fields(
                GrondmonsterSearch.__wfs_schema,
                GrondmonsterSearch.__fc_featurecatalogue,
                GrondmonsterSearch.__xsd_schemas)

    def search(self, location=None, query=None, sort_by=None,
               return_fields=None, max_features=None):
        """Search for ground samples (Grondmonster). Provide either
        `location` or `query`. When `return_fields` is None,
        all fields are returned.

        Parameters
        ----------
        location : pydov.util.location.AbstractLocationFilter or \
                   owslib.fes.BinaryLogicOpType<AbstractLocationFilter> or \
                   owslib.fes.UnaryLogicOpType<AbstractLocationFilter>
            Location filter limiting the features to retrieve. Can either be a
            single instance of a subclass of AbstractLocationFilter, or a
            combination using And, Or, Not of AbstractLocationFilters.
        query : owslib.fes.OgcExpression
            OGC filter expression to use for searching. This can contain any
            combination of filter elements defined in owslib.fes. The query
            should use the fields provided in `get_fields()`. Note that not
            all fields are currently supported as a search parameter.
        sort_by : owslib.fes.SortBy, optional
            List of properties to sort by.
        return_fields : list<str> or tuple<str> or set<str>
            A list of fields to be returned in the output data. This should
            be a subset of the fields provided in `get_fields()`. Note that
            not all fields are currently supported as return fields.
        max_features : int
            Limit the maximum number of features to request.

        Returns
        -------
        pandas.core.frame.DataFrame
            DataFrame containing the output of the search query.

        Raises
        ------
        pydov.util.errors.InvalidSearchParameterError
            When not one of `location`, `query` or `max_features` is provided.
        pydov.util.errors.InvalidFieldError
            When at least one of the fields in `return_fields` is unknown.
            When a field that is only accessible as return field is used as
            a query parameter.
            When a field that can only be used as a query parameter is used as
            a return field.
        pydov.util.errors.FeatureOverflowError
            When the number of features to be returned is equal to the
            maxFeatures limit of the WFS server.
        AttributeError
            When the argument supplied as return_fields is not a list,
            tuple or set.

        """
        self._pre_search_validation(location, query, sort_by, return_fields,
                                    max_features)

        fts = self._search(location=location, query=query, sort_by=sort_by,
                           return_fields=return_fields,
                           max_features=max_features)

        # Parse the raw WFS features into typed objects, then flatten into
        # a DataFrame with one column per requested field.
        grondmonster = self._type.from_wfs(fts, self.__wfs_namespace)

        df = pd.DataFrame(
            data=self._type.to_df_array(grondmonster, return_fields),
            columns=self._type.get_field_names(return_fields))
        return df
|
### after training with bce resnet101, try to use lstm to further improve the performance
import logging
from datetime import datetime
import argparse
import os
from munkres import Munkres
from scipy.stats import logistic
from future.utils import iteritems
import numpy as np
from collections import OrderedDict
from sklearn.metrics import precision_recall_fscore_support
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch
import torch.nn as nn
from torch.utils.data.dataloader import default_collate
from model import CNN_Encoder
from model import TransformerModel
from dataset import COCOMultiLabel
from dataset import categories
"""Initialize loss and m function"""
criterion = nn.CrossEntropyLoss()
m = Munkres()
def get_logger(filename, verbosity=1, name=None):
    """Build a logger writing both to `filename` and to the console.

    verbosity selects the level: 0 -> DEBUG, 1 -> INFO, 2 -> WARNING.
    """
    levels = {0: logging.DEBUG, 1: logging.INFO, 2: logging.WARNING}
    fmt = logging.Formatter(
        "[%(asctime)s][%(filename)s][line:%(lineno)d][%(levelname)s] %(message)s")
    log = logging.getLogger(name)
    log.setLevel(levels[verbosity])
    # Attach a file handler (truncating mode) and a console handler,
    # both with the same format — file first, console second.
    for handler in (logging.FileHandler(filename, "w"), logging.StreamHandler()):
        handler.setFormatter(fmt)
        log.addHandler(handler)
    return log
"""Create log file"""
now = datetime.now()
timestr = now.strftime("%Y%m%d%H%M")
tim_str_file= 'save_model/exp' + "_" + timestr + ".log"
logger = get_logger(tim_str_file)
def my_collate(batch):
    """Collate wrapper that drops samples which failed to load (None)."""
    kept = list(filter(lambda sample: sample is not None, batch))
    return default_collate(kept)
def adjust_learning_rate(optimizer, shrink_factor):
    """Scale the learning rate of every parameter group by `shrink_factor`."""
    logger.info("DECAYING learning rate.")
    for group in optimizer.param_groups:
        group['lr'] *= shrink_factor
    logger.info("The new learning rate is {}".format(optimizer.param_groups[0]['lr']))
def convert_weights(state_dict):
    """Strip the 'module.' prefix that nn.DataParallel adds to parameter names.

    Returns a new OrderedDict loadable by a non-DataParallel model.
    """
    tmp_weights = OrderedDict()
    # Native py3 dict iteration; the original used future.utils.iteritems,
    # a py2-compat shim that adds a dependency for no benefit.
    for name, params in state_dict.items():
        tmp_weights[name.replace('module.', '')] = params
    return tmp_weights
def set_args():
    """Build and parse the command-line arguments for training/evaluation."""
    ap = argparse.ArgumentParser()
    # --- training schedule and data locations ---
    ap.add_argument('-batch_size', type=int, default=32, metavar='N', help='input batch size for training (default: 32)')
    ap.add_argument('-epochs', type=int, default=30, metavar='N', help='number of epochs to train (default: 30)')
    ap.add_argument('-log_interval', type=int, default=50, metavar='N', help='how many batches to wait before logging training status')
    ap.add_argument('-image_path', type=str, default='/home/notebook/code/personal/S9031003/DATASET/MSCOCO2014/', help='path for the training and validation folders')
    ap.add_argument('-save_path', type=str, default='save_model/', help='save training models')
    ap.add_argument('-encoder_weights', type=str, default='save_model/resnet101_model_ft.pt', help='pretrained resnet model')
    # --- optimisation hyper-parameters ---
    ap.add_argument('-decoder_lr', default=1e-4, type=float, help='learning rate for transformer encoder')
    ap.add_argument('-encoder_lr', default=1e-4, type=float, help='learning rate for cnn')
    ap.add_argument('-max_length', default=15, type=int, help='set maximum number of labels for each image')
    ap.add_argument('-dropout', type=float, default=0.1, help='dropout coefficient')
    ap.add_argument('-num_workers', default=6, type=int, help='number of workers')
    ap.add_argument('-coeff', default=0.5, type=float, help='learning rate decrease coefficient')
    # --- transformer model dimensions ---
    ap.add_argument('-num_layers', default=6, type=int, help="number of transformer encoder")
    ap.add_argument('-input_fea_size', default=2048, type=int, help='initial input feature size of transformer')
    ap.add_argument('-d_ff', default=1024, type=int, help='dimension of feedforward')
    ap.add_argument('-embed_size', default=512, type=int, help='dimension of embedded size')
    ap.add_argument('-use_bn', default=0, type=int, help='used when embedding cnn features')
    ap.add_argument('-vocab_size', default=80, type=int, help='label category size')
    # --- loss / evaluation settings ---
    ap.add_argument('-threshold', type=float, default=0.5, help='threshold for the evaluation (default: 0.5)')
    ap.add_argument('-ef', type=float, default=0.9, help='the trade-off of classification and distance loss')
    ap.add_argument('-C', type=float, default=7.0, help='margin in distrance loss function')
    # --- evaluation-only mode ---
    ap.add_argument('-use_eval', default=False, type=bool, help='open when only evaluation')
    ap.add_argument('-use_model', type=str, default='save_model/trained_model.pt.tar', help='trained model only for evaluation')
    return ap.parse_args()
def get_dis_loss(embs, label_mask, label_glove, loss_hinge):
    """Compute the embedding-distance loss.

    Pulls the predicted embedding towards the GloVe vectors of positive
    labels (mean positive distance) and pushes it away from negatives via
    the hinge loss.

    Args:
        embs: (batch, dim) predicted embeddings.
        label_mask: (batch, label_size) 0/1 ground-truth label indicator.
        label_glove: (batch, label_size, dim) per-label GloVe vectors.
        loss_hinge: a torch.nn.HingeEmbeddingLoss instance.

    Returns:
        Scalar tensor: mean positive distance plus the hinge term.
    """
    batch_size, label_size, dim = label_mask.size(0), label_mask.size(1), label_glove.size(2)
    # (removed dead store: the original computed `mask_n = 1 - label_mask`
    # and never used it)
    # Broadcast the embedding against every label slot: (batch, label_size, dim)
    embs = embs.unsqueeze(1).expand(batch_size, label_size, dim)
    # Hinge targets: +1 for positive labels, -1 for negatives
    mask = label_mask * 2 - 1
    # Euclidean distance between the embedding and each label's GloVe vector
    dis = torch.sqrt(torch.sum((embs - label_glove) ** 2, dim=2))
    # Mean distance over the positive labels only
    dis_p = torch.sum(dis * label_mask) / torch.sum(label_mask)
    return dis_p + loss_hinge(dis, mask)
def train(args, encoder, decoder, train_loader, encoder_optimizer, decoder_optimizer, epoch, loss_hinge):
    """Run one training epoch over `train_loader`.

    Feeds images through the CNN encoder and transformer decoder, combines
    the BCE classification loss with the embedding-distance loss (weighted
    by args.ef), and updates both optimizers.
    """
    encoder.train()
    decoder.train()
    for batch_idx, (data, label_cls, label_steps, label_length, label_glove, img_name) in enumerate(train_loader):
        ### set input data format, send the data to cnn and then to transformer encoder
        data, label_cls, label_steps, label_length, label_glove = data.cuda(), label_cls.cuda(), label_steps.cuda(), label_length.cuda(), label_glove.cuda()
        encoder_optimizer.zero_grad()
        decoder_optimizer.zero_grad()
        att_feats, fc_feats = encoder(data)
        att_feats = att_feats.view(fc_feats.size(0), -1, fc_feats.size(1))
        pre_label, pre_emb = decoder(fc_feats, att_feats, label_steps)
        ### calculate loss and update parameters
        emb_loss = get_dis_loss(pre_emb, label_cls, label_glove, loss_hinge)
        cls_loss = F.binary_cross_entropy_with_logits(pre_label, label_cls)
        loss = cls_loss + args.ef * emb_loss
        # BUG FIX: backward() must run before the optimizer steps. The
        # original called encoder_optimizer.step() *before* loss.backward(),
        # i.e. on gradients that had just been zeroed, so the encoder never
        # received this batch's gradient update.
        loss.backward()
        encoder_optimizer.step()
        decoder_optimizer.step()
        ### log for check performance
        if batch_idx % args.log_interval == 0 and batch_idx != 0:
            logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.item()))
def test(args, encoder, decoder, test_loader, threshold, epoch):
    """Evaluate on the test set and return the micro-averaged F1 score.

    Thresholds the sigmoid of the decoder logits at `threshold` and compares
    with the ground-truth multi-label targets. When args.use_eval is set,
    also writes per-image predicted/ground-truth label names to a text file.
    """
    encoder.eval()
    decoder.eval()
    ### when in evaluation mode, dump per-image results to a text file
    if args.use_eval:
        res_fh = open('test_result_raw_coco_vte_drl.txt', 'w')
    with torch.no_grad():
        for batch_idx, (data, label_cls, label_steps, label_length, label_glove, img_name) in enumerate(test_loader):
            ### set input data format, send the data to cnn and then to transformer encoder
            data, label_cls, label_steps, label_glove = data.cuda(), label_cls.cuda(), label_steps.cuda(), label_glove.cuda()
            att_feats, fc_feats = encoder(data)
            att_feats = att_feats.view(fc_feats.size(0), -1, fc_feats.size(1))
            output, pre_emb = decoder(fc_feats, att_feats, label_steps)
            ### get final predicted label: sigmoid then hard threshold
            output_arr = output.data.cpu().numpy()
            output_arr = logistic.cdf(output_arr)
            junk = output_arr.copy()  # NOTE(review): dead store, never used
            output_arr[output_arr >= threshold] = 1
            output_arr[output_arr < threshold] = 0
            # Accumulate predictions and targets across batches
            if batch_idx == 0:
                labels = label_cls.data.cpu().numpy()
                outputs = output_arr
            else:
                labels = np.concatenate((labels, label_cls.data.cpu().numpy()), axis=0)
                outputs = np.concatenate((outputs, output_arr), axis=0)
            ### write output predicted and ground truth labels to .txt file
            if args.use_eval:
                for i in range(len(img_name)):
                    pred_labels = list([categories[j] for j in range(args.vocab_size) if output_arr[i][j] > 0])
                    gt_labels = list([categories[j] for j in range(args.vocab_size) if label_cls[i][j] > 0])
                    res_fh.write('{}\t{}\t{}\n'.format(img_name[i], ','.join(pred_labels), ','.join(gt_labels)))
            ### log for check performance
            if batch_idx % args.log_interval == 0 and batch_idx != 0:
                logger.info('Val Epoch: {}[{}/{} ({:.0f}%)]'.format( epoch, batch_idx * len(data), len(test_loader.dataset),100. * batch_idx / len(test_loader)))
    ### Calculate precision/recall and print log.
    # NOTE(review): sklearn's signature is (y_true, y_pred); here `outputs`
    # (predictions) is passed as y_true — and the log line below also prints
    # `recall` under the "prec:" label. The two swaps may partially cancel
    # (precision/recall are exchanged, not lost) — TODO confirm intent.
    prec, recall, _, _ = precision_recall_fscore_support(outputs,labels,average='macro')
    f1 = 2 * prec * recall / (prec + recall)
    logger.info('\nMACRO prec: {:.2f}, recall: {:.2f}, f1: {:.2f}\n'.format(100*recall, 100*prec, 100*f1))
    prec, recall, f1, _ = precision_recall_fscore_support(outputs,labels,average='micro')
    logger.info('\nMICRO prec: {:.2f}, recall: {:.2f}, f1: {:.2f}\n'.format(100*recall, 100*prec, 100*f1))
    ### close write file
    if args.use_eval:
        res_fh.close()
    return f1
### main function
def main():
    """Entry point: set up data/models, then train (or evaluate only)."""
    ### settings
    args = set_args()
    save_path = args.save_path
    if not os.path.isdir(save_path):
        os.makedirs(save_path)
    logger.info(args)
    ### prepare for data
    train_dataset = COCOMultiLabel(args, train=True, image_path=args.image_path)
    test_dataset = COCOMultiLabel(args, train=False, image_path=args.image_path)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size,
                              num_workers=args.num_workers, pin_memory=True,
                              shuffle=True, drop_last=True, collate_fn=my_collate)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size,
                             num_workers=args.num_workers, pin_memory=True,
                             shuffle=False, drop_last=False, collate_fn=my_collate)
    ## prepare for models
    encoder = CNN_Encoder().cuda()
    decoder = TransformerModel(args).cuda()
    ## load full checkpoint for evaluation, or just pretrained CNN for training
    if args.use_eval:
        weights_dic = torch.load(args.use_model)
        encoder.load_state_dict(convert_weights(weights_dic['encoder_state_dict']))
        decoder.load_state_dict(convert_weights(weights_dic['decoder_state_dict']))
    else:
        encoder.load_state_dict(convert_weights(torch.load(args.encoder_weights)))
    encoder_optimizer = torch.optim.Adam(encoder.parameters(), lr=args.encoder_lr)
    decoder_optimizer = torch.optim.Adam(decoder.parameters(), lr=args.decoder_lr)
    ## wrap with DataParallel when several GPUs are available
    if torch.cuda.device_count() > 1:
        encoder = nn.DataParallel(encoder)
        decoder = nn.DataParallel(decoder)
    ## hinge loss used by the embedding-distance term
    loss_hinge = torch.nn.HingeEmbeddingLoss(margin=args.C, size_average=None, reduce=None, reduction='mean')
    ## if only evaluation, run once and return
    if args.use_eval:
        f1 = test(args, encoder, decoder, test_loader, args.threshold, 1)
        return
    ## training stage
    highest_f1 = 0
    epochs_without_improve = 0
    for epoch in range(args.epochs):
        ## train and test
        train(args, encoder, decoder, train_loader, encoder_optimizer, decoder_optimizer, epoch, loss_hinge)
        f1 = test(args, encoder, decoder, test_loader, args.threshold, epoch)
        ### save parameter
        save_dict = {'encoder_state_dict': encoder.state_dict(),
                     'decoder_state_dict': decoder.state_dict(),
                     'epoch': epoch, 'f1': f1,
                     'decoder_optimizer_state_dict': decoder_optimizer.state_dict(),
                     'encoder_optimizer_state_dict': encoder_optimizer.state_dict(),
                     'epochs_without_improve': epochs_without_improve}
        ### save models (always the latest; additionally the best so far)
        torch.save(save_dict, args.save_path + "/checkpoint_" + timestr + '.pt.tar')
        if f1 > highest_f1:
            torch.save(save_dict, args.save_path + "/BEST_checkpoint_" + timestr + '.pt.tar')
            logger.info("Now the highest f1 is {}, it was {}".format(100*f1, 100*highest_f1))
            highest_f1 = f1
            epochs_without_improve = 0
        else:
            epochs_without_improve += 1
            if epochs_without_improve == 3:
                adjust_learning_rate(decoder_optimizer, args.coeff)
                adjust_learning_rate(encoder_optimizer, args.coeff)
                # BUG FIX: the original assigned to a misspelled name
                # 'epochs_without_imp', so the counter was never reset and
                # the learning rate decayed on every subsequent epoch
                # without improvement instead of every third one.
                epochs_without_improve = 0
|
import logging
from typing import Union
class CodingString:
    def __init__(self, hex_string: str, endian: str = "little") -> None:
        """
        Work with a hex string at byte/bit level.

        :param hex_string: e.g. "23A4"; must have an even number of hex digits
                           (two digits per byte)
        :param endian: "little" - bits are counted from right to left
                       "big"    - bits are counted from left to right
                       (any other value raises on read/write access)
        :raises ValueError: if hex_string has an odd length
        """
        if len(hex_string) % 2 != 0:
            raise ValueError("hex_string has to be an even-number")
        self.hex_string = hex_string
        self.endian = endian
        # Per-byte views of the input: ["A4", ...] and ["10100100", ...]
        self.hex_list = self.__convert2hex(hex_string)
        self.binary_list = self.__convert2binary(hex_string)
        # endian flags
        self.little_endian = (self.endian == "little")
        self.big_endian = (self.endian == "big")

    def read_value(self, byte: int, bit: int, bit_length: int, convert_to_dec: bool = True):
        """
        Read a bit field from one byte.

        :param byte: index of the byte within the hex string
        :param bit: start bit within the byte, int value 0...7
        :param bit_length: number of bits to read
        :param convert_to_dec: return the field as int when True, else as bit string
        :return: int value of the field, or the raw bit string
        :raises AttributeError: if the endian is neither "little" nor "big"
        """
        byte_value = self.binary_list[byte]
        if self.little_endian:
            # Bits count from the right, so use negative slice indices:
            # start_bit = -(bit + 1 + bit_length - 1)
            start_bit = -(bit + bit_length)
            stop_bit = start_bit + bit_length
            # BUGFIX: when the field ends at bit 0, stop_bit is 0 and the
            # slice [start_bit:0] was empty (int('', 2) raised ValueError).
            # None means "up to the end of the string".
            if stop_bit == 0:
                stop_bit = None
            bit_value = byte_value[start_bit:stop_bit]
        elif self.big_endian:
            # count positive from bit : bit + bit_length
            bit_value = byte_value[bit:bit + bit_length]
        else:
            raise AttributeError(f"Don't know the Endian {self.endian}")
        # convert bit string to decimal if requested
        if convert_to_dec:
            return int(bit_value, 2)
        else:
            return bit_value

    def write_value(self, byte: int, bit: int, bit_length: int, value: int):
        """
        Write a value to Object.

        NOTE(review): this method looks unfinished — `value` is never used and
        nothing is written back; for little endian stop_bit < start_bit so the
        slice is always empty. Confirm intended behaviour before relying on it.

        :param byte: index of the byte within the hex string
        :param bit: start bit within the byte
        :param bit_length: number of bits
        :param value: value to write (currently unused)
        :return: the (possibly empty) bit string at the addressed position
        """
        byte_value = self.binary_list[byte]
        start_bit = bit
        if self.little_endian:
            start_bit = -(1 + bit)
            stop_bit = start_bit - (bit_length -1)
        elif self.big_endian:
            start_bit = bit
            stop_bit = bit + (bit_length -1)
        else:
            raise AttributeError(f"Don't know the Endian {self.endian}")
        bit_value = byte_value[start_bit:stop_bit]
        return bit_value

    def combine_bytes(self, start_byte, number_of_bytes):
        """
        Concatenate the binary strings of consecutive bytes.

        :param start_byte: index of the first byte
        :param number_of_bytes: how many bytes to concatenate
        :return: str: the joined bit string
        """
        # renamed local (was `bytes`, shadowing the builtin)
        combined = self.binary_list[start_byte]
        for offset in range(1, number_of_bytes):
            combined += self.binary_list[start_byte + offset]
        return combined

    def __convert2hex(self, hex_string) -> list:
        """
        Split String in a list of hex : ["A5", "B3", "12"]
        :return: list of two-character hex strings
        """
        n = 2
        hexlist = [hex_string[i: i + n] for i in range(0, len(hex_string), n)]
        return hexlist

    def __convert2binary(self, hex_string) -> list:
        """
        Convert the hexstring to a list of 8-character binary strings.
        :return: list of bit strings, one per byte
        """
        hex_scale = 16
        sign_in_byte = 8
        # remove 0b at first and fill bits to 8
        binary_list = [bin(int(myhex, hex_scale))[2:].zfill(sign_in_byte) for myhex in self.__convert2hex(hex_string)]
        return binary_list
|
<gh_stars>100-1000
"""
Utilities for populating the dataset for bounding box collection.
You will need to insert an image and category collection into the database.
The instructions dict consists of:
{
id : str
title : str
description : str
instructions: url
examples: [url]
}
Where instructions is a url to a website (like a Google Slides presentation) where you have more info about the task.
`examples` is a list of image urls that will be rendered on the start screen. The height for these images should
be 500px.
A bounding box task dict consists of:
{
id : str
image_ids : [str]
instructions_id : str,
category_id : str
}
Where image ids point to normal image objects.
The result of a worker completing the task is:
{
time : float
task_id : str
date : str
worker_id : str
results : [bbox_result]
}
Where bbox_result looks like:
{
time : float
annotations : [annotation]
image : image
}
Where `image` and `annotation` are the standard image and annotation objects.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import random
import uuid
from annotation_tools.annotation_tools import get_db
def drop_bbox_collections(db):
    """Remove the bbox task, instructions and result collections from `db`."""
    for collection_name in ('bbox_task', 'bbox_task_instructions', 'bbox_task_result'):
        db.drop_collection(collection_name)
def ensure_bbox_indices(db):
    """Create a unique `id` index on the bbox task and instructions collections."""
    for collection in (db.bbox_task, db.bbox_task_instructions):
        collection.create_index("id", unique=True)
def insert_bbox_tasks(db, tasks):
    """
    Insert bounding box task documents.

    Args:
        db: a pymongo database connection
        tasks: [{
            id : task_id,
            image_ids : [image_ids],
            instructions_id : instructions_id,
            category_id : str
        }] A list of bbox task dicts.
    Returns:
        The pymongo InsertManyResult, or None when the insert failed
        (presumably duplicate ids against the unique index — TODO confirm).
    """
    # BUGFIX: `response` was unbound when insert_many raised, so the
    # `return response` below crashed with UnboundLocalError.
    response = None
    try:
        response = db.bbox_task.insert_many(tasks, ordered=False)
    except Exception:
        # Deliberate best-effort insert; was a bare `except:` which also
        # swallowed KeyboardInterrupt/SystemExit.
        pass
    return response
def insert_bbox_task_instructions(db, task_instructions):
    """ Store the instructions for the bbox task.
    Args:
        db: a pymongo database connection
        task_instructions: [{
            'id' :
            'title' :
            'description' :
            'instructions':
            'examples'
        }] A list of bbox task instructions
    Returns:
        The pymongo InsertManyResult, or None when the insert failed
        (presumably duplicate ids against the unique index — TODO confirm).
    """
    # BUGFIX: `response` was unbound when insert_many raised, so the
    # `return response` below crashed with UnboundLocalError.
    response = None
    try:
        response = db.bbox_task_instructions.insert_many(task_instructions, ordered=False)
    except Exception:
        # Deliberate best-effort insert; was a bare `except:`.
        pass
    return response
def create_bbox_tasks_for_all_images(db, category_id, instructions_id, num_images_per_task=20):
    """Insert all images into a bounding box task. This is a convenience function.

    Shuffles every image id in the database, chunks them into groups of
    `num_images_per_task`, and inserts one bbox task per group.

    Returns:
        [<bbox task dict>] a list of the tasks created.
    """
    ensure_bbox_indices(db)
    image_ids = [doc['id'] for doc in db.image.find({}, {'id' : True})]
    random.shuffle(image_ids)
    bbox_tasks = [
        {
            'id' : str(uuid.uuid1()),
            'image_ids': image_ids[start:start + num_images_per_task],
            'instructions_id' : instructions_id,
            'category_id' : category_id
        }
        for start in range(0, len(image_ids), num_images_per_task)
    ]
    insert_bbox_tasks(db, bbox_tasks)
    return bbox_tasks
def load_tasks(db, task_data):
    """Insert tasks (and optional instructions) into the database.

    Args:
        db: a pymongo database connection
        task_data: {
            'tasks' : [bbox_task],
            'instructions' : [bbox_task_instructions]   # optional
        }
    """
    assert 'tasks' in task_data, "Failed to find `tasks` in task_data object."
    if 'instructions' in task_data:
        instructions = task_data['instructions']
        print("Inserting %d instructions." % (len(instructions),))
        response = insert_bbox_task_instructions(db, instructions)
        # BUGFIX: message read "instuctions"
        print("Successfully inserted %d instructions." % (len(response.inserted_ids),))
    tasks = task_data['tasks']
    print("Inserting %d tasks." % (len(tasks),))
    response = insert_bbox_tasks(db, tasks)
    print("Successfully inserted %d tasks." % (len(response.inserted_ids),))
def export_task_results(db, task_data=None, denormalize=False):
    """ Export the bbox task results.

    Args:
        db: a pymongo database connection
        task_data: optional dict with a `tasks` list; when given, only results
            for those task ids are exported, otherwise all results are.
        denormalize: Should the annotations be stored in image (pixel)
            coordinates? Assumes stored bboxes are normalized [x, y, w, h].
    Returns:
        [<task result dict>] the exported results (without Mongo `_id`).
    """
    # Idiom fix: compare against None with `is not None`, not `!=`.
    if task_data is not None:
        assert 'tasks' in task_data, "Failed to find `tasks` in task_data object."
        task_ids = list(set([task['id'] for task in task_data['tasks']]))
        task_results = list(db.bbox_task_result.find({'task_id' : {"$in" : task_ids}}, projection={'_id' : False}))
    else:
        task_results = list(db.bbox_task_result.find(projection={'_id' : False}))
    if denormalize:
        for task_result in task_results:
            for image_result in task_result['results']:
                image = image_result['image']
                width = image['width']
                height = image['height']
                for anno in image_result['annotations']:
                    x, y, w, h = anno['bbox']
                    anno['bbox'] = [x * width, y * height, w * width, h * height]
    return task_results
def parse_args():
    """Parse command-line options for the dataset loading/exporting utility."""
    arg_parser = argparse.ArgumentParser(description='Dataset loading and exporting utilities.')
    arg_parser.add_argument('-a', '--action', dest='action', required=True,
                            choices=['drop', 'load', 'export'],
                            help='The action you would like to perform.')
    arg_parser.add_argument('-t', '--tasks', dest='task_path', type=str,
                            required=False, default=None,
                            help='Path to a json task file containing bbox tasks and (optionally) instructions. Used with the `load` and `export` action.')
    arg_parser.add_argument('-u', '--denormalize', dest='denormalize',
                            action='store_true', required=False, default=False,
                            help='Denormalize the annotations when exporting the database. Used with the `export` action.')
    arg_parser.add_argument('-o', '--output', dest='output_path', type=str,
                            required=False,
                            help='Save path for the json dataset. Used with the `export` action.')
    return arg_parser.parse_args()
def main():
    """Run the requested database action: drop, load, or export."""
    args = parse_args()
    db = get_db()
    action = args.action
    if action == 'drop':
        drop_bbox_collections(db)
    elif action == 'load':
        with open(args.task_path) as f:
            task_data = json.load(f)
        ensure_bbox_indices(db)
        load_tasks(db, task_data)
    elif action == 'export':
        # Idiom fix: `is not None` rather than `!= None`.
        if args.task_path is not None:
            with open(args.task_path) as f:
                task_data = json.load(f)
        else:
            task_data = None
        results = export_task_results(db, task_data, denormalize=args.denormalize)
        with open(args.output_path, 'w') as f:
            json.dump(results, f)

if __name__ == '__main__':
    main()
|
<gh_stars>0
"""
**CollectionCost.py**
- Created by <NAME> for Offshore BOS
- Refactored by <NAME> for LandBOSSE
NREL - 05/31/2019
This module consists of two classes:
- The first class in this module is the parent class Cable, with a subclass Array that inherits from Cable
- The second class is the ArraySystem class that instantiates the Array class and determines the wind farm layout and calculates total collection system cost
"""
import math
import numpy as np
import traceback
import pandas as pd
from .CostModule import CostModule
from .WeatherDelay import WeatherDelay as WD
class Cable:
    """
    A single cable type (array or export).

    Parameters
    ----------
    cable_specs : dict
        Cable specifications; keys as read in ``__init__``:
        'Current Capacity (A)', 'Rated Voltage (V)', 'AC Resistance (Ohms/km)',
        'Inductance (mH/km)', 'Capacitance (nF/km)', 'Cost (USD/LF)'
    addl_specs : dict
        Additional user inputs; must contain 'line_frequency_hz'.

    Attributes
    ----------
    current_capacity : float
        Cable current rating, Amps
    rated_voltage : float
        Cable rated voltage (units per input key — TODO confirm V vs kV)
    ac_resistance : float
        Cable resistance for AC current, Ohms/km
    inductance : float
        Cable inductance, mH/km
    capacitance : float
        Cable capacitance, nF/km
    cost : float
        Cable cost, USD per linear foot (per input key)
    char_impedance : complex
        Characteristic impedance of equivalent cable circuit, Ohms
    power_factor : float
        Power factor of AC current in cable (nondim)
    cable_power : float
        Maximum 3-phase power dissipated in cable, MW
    """

    def __init__(self, cable_specs, addl_specs):
        """Read the spec dictionaries and derive the electrical properties."""
        self.current_capacity = cable_specs['Current Capacity (A)']
        self.rated_voltage = cable_specs['Rated Voltage (V)']
        self.ac_resistance = cable_specs['AC Resistance (Ohms/km)']
        self.inductance = cable_specs['Inductance (mH/km)']
        self.capacitance = cable_specs['Capacitance (nF/km)']
        self.cost = cable_specs['Cost (USD/LF)']
        self.line_frequency_hz = addl_specs['line_frequency_hz']
        # Derive the remaining electrical properties from the raw specs.
        self.calc_char_impedance(self.line_frequency_hz)
        self.calc_power_factor()
        self.calc_cable_power()

    def calc_char_impedance(self, line_frequency_hz):
        """
        Compute the characteristic impedance of the cable, Ohms.

        Parameters
        ----------
        line_frequency_hz : int
            Frequency of AC current, Hz
        """
        omega = 2 * math.pi * line_frequency_hz
        conductance = 1 / self.ac_resistance
        numerator = complex(self.ac_resistance, omega * self.inductance)
        denominator = complex(conductance, omega * self.capacitance)
        self.char_impedance = np.sqrt(numerator / denominator)

    def calc_power_factor(self):
        """Compute the power factor (cosine of the impedance phase angle)."""
        real_part = np.real(self.char_impedance)
        imag_part = np.imag(self.char_impedance)
        self.power_factor = math.cos(math.atan(imag_part / real_part))

    def calc_cable_power(self):
        """Compute maximum power transfer through the 3-phase cable, MW."""
        # TODO: Verify eqn is correct
        self.cable_power = np.sqrt(3) * self.rated_voltage * self.current_capacity * self.power_factor / 1000
class Array(Cable):
    """Array cable: a Cable plus string-layout bookkeeping.

    There may be multiple instances of different-capacity cables in a string.

    Attributes set on construction
    ------------------------------
    max_turb_per_cable : float
        Maximum number of turbines (at turbine_rating_MW) this cable can carry
    num_turb_per_cable : float
        Number of turbines this cable actually carries in a string
    turb_sequence : float
        Position of the cable in the string (smallest cable first)
    downstream_connection : int
        Extra section count for joining different-sized cables
        (-1 for the first cable in a string, else 0)
    array_cable_len : float
        Length of this cable per string, km
    """

    def __init__(self, cable_specs, addl_inputs):
        """
        Parameters
        ----------
        cable_specs : dict
            Cable specifications (see Cable).
        addl_inputs : dict
            Must contain: 'line_frequency_hz', 'turbine_rating_MW',
            'upstream_turb', 'turb_sequence',
            'turbine_spacing_rotor_diameters', 'rotor_diameter_m'.
        """
        super().__init__(cable_specs, addl_inputs)
        self.line_frequency_hz = addl_inputs['line_frequency_hz']
        self.calc_max_turb_per_cable(addl_inputs)
        self.calc_num_turb_per_cable(addl_inputs)
        self.calc_array_cable_len(addl_inputs)

    def calc_max_turb_per_cable(self, addl_inputs):
        """Number of turbines this cable's power rating can support."""
        rating = addl_inputs['turbine_rating_MW']
        self.max_turb_per_cable = np.floor(self.cable_power / rating)

    def calc_num_turb_per_cable(self, addl_inputs):
        """Turbines actually carried by this cable, net of upstream turbines."""
        upstream = addl_inputs['upstream_turb']
        self.turb_sequence = addl_inputs['turb_sequence']
        self.num_turb_per_cable = self.max_turb_per_cable - upstream
        # The first cable in a string needs one fewer inter-turbine section.
        self.downstream_connection = -1 if upstream == 0 else 0

    def calc_array_cable_len(self, addl_inputs):
        """Array cable length per string, km."""
        spacing = addl_inputs['turbine_spacing_rotor_diameters']
        diameter = addl_inputs['rotor_diameter_m']
        self.calc_turb_section_len(spacing, diameter)
        self.array_cable_len = (self.num_turb_per_cable + self.downstream_connection) * self.turb_section_length

    def calc_turb_section_len(self, turbine_spacing_rotor_diameters, rotor_diameter_m):
        """
        Cable section length between two adjacent turbines, km.

        Section length equals trench length for that section.

        Parameters
        ----------
        turbine_spacing_rotor_diameters : int
            Spacing between turbines in string, # of rotor diameters
        rotor_diameter_m : int or float
            Rotor diameter, m

        Returns
        -------
        float
            Length of array cable between two turbines, km
        """
        self.turb_section_length = turbine_spacing_rotor_diameters * rotor_diameter_m / 1000
        return self.turb_section_length
class ArraySystem(CostModule):
"""
\nThis module:
* Calculates cable length to substation
* Calculates number of strings in a subarray
    * Calculates number of strings
* Calculates total cable length for each cable type
* Calculates total trench length
* Calculates total collection system cost based on amount of material, amount of labor, price data, cable length, and trench length.
**Keys in the input dictionary are the following:**
* Given below are attributes that define each cable type:
* conductor_size
(int) cross-sectional diameter of cable [in mm]
"""
    def __init__(self, input_dict, output_dict, project_name):
        """
        Store the shared input/output dictionaries and initialize the running
        totals used while building the collection system.

        Parameters
        ----------
        input_dict : dict
            Project-level inputs (cable specs, turbine counts, spacing, ...).
        output_dict : dict
            Dictionary this module writes its results into (mutated in place).
        project_name : str
            Project name, used in warning messages.
        """
        self.input_dict = input_dict
        self.output_dict = output_dict
        self.project_name = project_name
        self.output_dict['total_cable_len_km'] = 0
        self._km_to_LF = 0.0003048 #Units: [km/LF] Conversion factor for converting from km to linear foot.
        self._total_cable_cost = 0
        self._total_turbine_counter = 0
        self.turbines_on_cable = []  # cumulative turbine count per cable type (filled in create_ArraySystem)
        self._cable_length_km = dict()  # total cable length keyed by cable name
        self.check_terminal = 0  # terminal-cable sentinel updated by calc_total_cable_length
def calc_num_strings(self):
"""
Calculate number of full and partial strings to support full plant
capacity.
Parameters
----------
available cables : dict
Dictionary of cable types
plant_capacity : int | float
Total capcity of wind plant (MW)
turbine_capacity : int | float
Nameplate capacity of individual turbines (MW)
Returns
-------
self.output_dict['total_turb_per_string'] : float
Number of turbines on each string
self.output_dict['num_full_strings'] : float
Number of complete strings in array
turb_per_partial_string : float
Number of turbines in the partial string (if applicable)
self.output_dict['num_partial_strings'] : float
Number of partial strings (if applicable, 0 or 1)
perc_full_string : list
Percentage of maximum number of turbines per cable type on
partial string
self.output_dict['num_turb_per_cable'] : list
Number of turbines on each cable type in string
"""
# Calculate total number of individual turbines in wind plant
self.output_dict['total_turb'] = self.input_dict['num_turbines']
# Calculate the number of turbines on each cable type in a string
self.output_dict['num_turb_per_cable'] = [cable.num_turb_per_cable for cable in self.cables.values()]
# Calculate the total number of turbines per string
self.output_dict['total_turb_per_string'] = sum(self.output_dict['num_turb_per_cable'])
# Calculate number of full strings and any remainder required to
# support the total number of turbines
self.output_dict['num_full_strings'] = np.floor(self.output_dict['total_turb'] / self.output_dict['total_turb_per_string'])
self.output_dict['num_leftover_turb'] = self.output_dict['total_turb'] % self.output_dict['total_turb_per_string']
# Calculate number of turbines on a remaining partial string
# Note: self.output_dict['turb_per_partial_string'] is only set if
# calc_num_turb_partial_strings()
# is called, which isn't always the case, as seen in the if...else construct below
#
# This means that self.output_dict['turb_per_partial_string'] cannot
# be used an output value for the details output.
if self.output_dict['num_leftover_turb'] > 0:
self.output_dict['num_partial_strings'] = 1
self.output_dict['perc_partial_string'] = self.calc_num_turb_partial_strings(self.output_dict['num_leftover_turb'], self.output_dict['num_turb_per_cable'])
else:
self.output_dict['num_partial_strings'] = 0
self.output_dict['perc_partial_string'] = np.zeros(len(self.output_dict['num_turb_per_cable']))
return (self.output_dict['total_turb_per_string'], self.output_dict['num_full_strings'], self.output_dict['num_partial_strings'],
self.output_dict['perc_partial_string'], self.output_dict['num_turb_per_cable'])
def calc_num_turb_partial_strings(self, num_leftover_turb, num_turb_per_cable):
"""
If a partial string exists, calculate the percentage of turbines on
each cable relative to a full string
Parameters
----------
self.output_dict['num_leftover_turb'] : float
Number of turbines in partial string
self.output_dict['num_turb_per_cable'] : list
List of number of turbines per cable type on a full string
Returns
-------
np.array
Array of percent of turbines per cable type on partial string
relative to full string
"""
num_remaining = num_leftover_turb
turb_per_partial_string = []
# Loop through each cable type in the string. Determine how many
# turbines are required for each cable type on the partial string
for max_turb in num_turb_per_cable:
if num_remaining > 0:
turb_per_partial_string.append(min(num_remaining, max_turb))
else:
turb_per_partial_string.append(0.0)
num_remaining -= max_turb
perc_partial_string = np.divide(turb_per_partial_string, num_turb_per_cable)
# Check to make sure there aren't any zeros in num_turbines_per_cable, which is used as the denominator
# in the division above (this happens when not all of the cable types in the input sheet need to be used).
# If there is a zero, then print a warning and change NaN to 0 in perc_partial_string.
if 0.0 in num_turb_per_cable:
print(
f'Warning: {self.project_name} CollectionCost module generates number of turbines per string that '
f'includes a zero entry. Please confirm that there not all cable types need to be used for the number of turbines that are being run.'
f' num_turbines={self.input_dict["num_turbines"]} rating_MW={self.input_dict["turbine_rating_MW"]}'
f' num_turb_per_cable: {num_turb_per_cable}')
perc_partial_string = np.nan_to_num(perc_partial_string)
self.output_dict['turb_per_partial_string'] = turb_per_partial_string
return perc_partial_string
#TODO: change length_to_substation calculation as a user defined input?
@staticmethod
def calc_cable_len_to_substation(distance_to_grid, turbine_spacing_rotor_diameters, row_spacing_rotor_diameters,
num_strings):
"""
Calculate the distance for the largest cable run to substation
Assumes substation is in the center of the layout, 1 row spacing in
front of first row
Parameters
----------
turbine_spacing_rotor_diameters : int or float
Spacing between turbines in a row, # of rotor diameters
row_spacing_rotor_diameters : int or float
Spacing between rows in wind plant, # of rotor diameters
num_strings : int
Total number of strings
Returns
-------
len_to_substation : int or float
Total length of largest array cable required to connect each string
to substation, km
"""
string_to_substation_length = []
if num_strings > 1:
# Define spacing terms for even or odd number of strings
# Even number: substation centered between middle two strings
# Odd number : substation centered on middle string
if (num_strings % 2) == 0:
n_max = int(num_strings / 2)
turb_space_scaling = 0.5
range_strings = range(1, n_max + 1)
else:
n_max = int((num_strings - 1) / 2)
turb_space_scaling = 1
range_strings = range(n_max + 1)
# Calculate hypotenuse length of each string to substation
for idx in range_strings:
if idx == 0:
c = 1
else:
c = 2
string_to_substation_length.append(c * np.sqrt(row_spacing_rotor_diameters ** 2 +
(turb_space_scaling * idx *
turbine_spacing_rotor_diameters) ** 2))
else:
string_to_substation_length.append(distance_to_grid)
# Sum up total length to substation
len_to_substation = np.sum(string_to_substation_length)
return len_to_substation
#TODO: Add parameter info in docstrings
    @staticmethod
    def calc_total_cable_length(total_turbines, count, check_terminal, turbines_per_cable, cable, cable_specs,
                                num_full_strings, num_partial_strings, len_to_substation, perc_partial_string):
        """
        Calculate the total length of one cable type, km. Mutates `cable`
        (array_cable_len, total_length, num_turb_per_cable) in some branches.

        Parameters
        ----------
        total_turbines : int or float
            Total turbines in the project
        count : int
            Zero-based index of this cable in the iteration order
        check_terminal : int or float
            Sequence index of the terminal cable found so far
            (9999 means "terminal cable not yet reached")
        turbines_per_cable : list
            Cumulative turbine counts per cable type
        cable : object
            Instance of the individual cable type (mutated)
        cable_specs : dict
            Dictionary of all cable specifications (its length gives the
            number of cable types)
        num_full_strings : float
            Number of complete strings in array
        num_partial_strings : float
            Number of partial strings (if applicable, 0 or 1)
        len_to_substation : int or float
            Cable length required to connect the strings to the substation, km
        perc_partial_string : float
            Fraction of this cable type used on the partial string

        Returns
        -------
        tuple
            (total_cable_len, terminal_flag): total length of this cable type
            in km, and either the terminal cable's sequence index, 9999 when
            the terminal cable has not been reached, or 0 when this cable is
            past the terminal cable.
        """
        # If the terminal cable has already been accounted for, zero this
        # cable out and skip any further calculation.
        if (cable.turb_sequence - 1) > check_terminal:
            cable.array_cable_len = 0
            cable.total_length = 0
            cable.num_turb_per_cable = 0
            return 0, 0
        # If num full strings < 1, find which cable the final turbine is on
        # and compute the total cable length (including the run to the
        # substation) up to that cable. This branch is effectively the switch
        # for distributed wind.
        elif num_full_strings < 1 and num_partial_strings >= 0:
            terminal_string = cable.turb_sequence - 1  # flag: this cable is the actual terminal cable
            if (cable.turb_sequence - 1) == 0:  # i.e. cable #1 alone can hold
                # more turbines than specified by the user -> it is terminal
                cable.num_turb_per_cable = total_turbines
                cable.array_cable_len = ((cable.num_turb_per_cable + cable.downstream_connection)
                                         * cable.turb_section_length)
                total_cable_len = ((num_full_strings * cable.array_cable_len) +
                                   (num_partial_strings * cable.array_cable_len)) + len_to_substation
            else:
                # Turbines left for this cable = total minus those already
                # carried by the upstream cables (cumulative count).
                cable.num_turb_per_cable = total_turbines - turbines_per_cable[(count - 1)]
                cable.array_cable_len = ((cable.num_turb_per_cable + cable.downstream_connection) *
                                         cable.turb_section_length)
                total_cable_len = ((num_full_strings * cable.array_cable_len) +
                                   (num_partial_strings * cable.array_cable_len)) + len_to_substation
            return total_cable_len, terminal_string
        else:  # switch for utility-scale LandBOSSE
            if cable.turb_sequence == len(cable_specs):
                # Only the final cable in the string carries len_to_substation.
                total_cable_len = (num_full_strings * cable.array_cable_len +
                                   num_partial_strings * (cable.array_cable_len * perc_partial_string) +
                                   len_to_substation)
            else:
                total_cable_len = (num_full_strings * cable.array_cable_len +
                                   num_partial_strings * (cable.array_cable_len * perc_partial_string))
            # 9999 == flag that the terminal cable has NOT been reached yet
            # and calculations should continue for each remaining cable.
            return total_cable_len, 9999
    def create_ArraySystem(self):
        """
        Build the Array cable objects, size the strings, and accumulate the
        total cable length and cost into output_dict.

        Reads cable specs from input_dict['cable_specs_pd'] and writes (among
        others) output_dict['cables'], the string counts,
        'distance_to_grid_connection_km', 'total_cable_len_km', and the
        per-cable total_length / total_cost attributes.
        """
        # Inputs shared with the Cable/Array constructors:
        self.addl_specs = dict()
        self.addl_specs['turbine_rating_MW'] = self.input_dict['turbine_rating_MW']
        self.addl_specs['upstream_turb'] = 0
        self.addl_specs['turb_sequence'] = 1
        self.addl_specs['turbine_spacing_rotor_diameters'] = self.input_dict['turbine_spacing_rotor_diameters']
        self.addl_specs['rotor_diameter_m'] = self.input_dict['rotor_diameter_m']
        self.addl_specs['line_frequency_hz'] = self.input_dict['line_frequency_hz']
        # NOTE(review): `system` is not referenced anywhere below in this
        # method — confirm whether it can be removed.
        system = {
            'upstream_turb': self.addl_specs['upstream_turb'],
            'turb_sequence': self.addl_specs['turb_sequence'],
            'turbine_rating_MW' : self.addl_specs['turbine_rating_MW'],
            'turbine_spacing_rotor_diameters': self.addl_specs['turbine_spacing_rotor_diameters'],
            'rotor_diameter_m': self.addl_specs['rotor_diameter_m']
        }
        # Loop through all user-defined array cable types, composing them
        # in ArraySystem.
        self.cables = {}
        self.input_dict['cable_specs'] = self.input_dict['cable_specs_pd'].T.to_dict()
        n=0 #to keep tab of number of cables input by user.
        while n<len(self.input_dict['cable_specs']):
            specs = self.input_dict['cable_specs'][n]
            # Create an instance of each cable and assign it to ArraySystem.cables
            cable = Array(specs, self.addl_specs)
            n+=1
            # self.cables[name] stores a new instantiation of an Array object.
            self.cables[specs['Array Cable']] = cable
            self.output_dict['cables'] = self.cables
            # Update the number of upstream turbines on the string so the next
            # (larger) cable accounts for the turbines already connected.
            self.addl_specs['upstream_turb'] += cable.num_turb_per_cable
            self.addl_specs['turb_sequence'] += 1
        # Calculate number of required strings to support plant capacity
        self.output_dict['turb_per_string'], \
        self.output_dict['num_full_strings'], \
        self.output_dict['num_partial_strings'], \
        self.output_dict['perc_partial_string'], \
        self.output_dict['num_turb_per_cable'] = self.calc_num_strings()
        # Calculate total length of cable run to substation
        self.output_dict['num_strings'] = self.output_dict[
            'num_full_strings'] + self.output_dict['num_partial_strings']
        if self.input_dict['user_defined_distance_to_grid_connection'] == 0:  # where (0 = No) and (1 = Yes)
            # This only gets used if number of strings is <= 1 :
            distributed_wind_distance_to_grid = (self.input_dict[
                'turbine_spacing_rotor_diameters'] * self.input_dict['rotor_diameter_m']) / 1000
            self.output_dict['distance_to_grid_connection_km'] = self.\
                calc_cable_len_to_substation(distributed_wind_distance_to_grid,
                                             self.input_dict['turbine_spacing_rotor_diameters'],
                                             self.input_dict['row_spacing_rotor_diameters'],
                                             self.output_dict['num_strings'])
        else:
            self.output_dict['distance_to_grid_connection_km'] = self.input_dict['distance_to_grid_connection_km']
        self.output_dict['cable_len_to_grid_connection_km'] = self.output_dict[
            'distance_to_grid_connection_km']  # assumes 3 conductors and fiber and neutral
        cable_sequence = 0
        # Build the cumulative list of how many turbines each cable carries.
        for _, (name, cable) in enumerate(self.cables.items()):
            if cable_sequence == 0:
                self.turbines_on_cable.append(cable.num_turb_per_cable)
            else:
                self.turbines_on_cable.append(cable.num_turb_per_cable + self.turbines_on_cable[(cable_sequence - 1)])
            # turbines_on_cable[cable_sequence] += cable.num_turb_per_cable
            cable_sequence += 1
        self.__turbines_on_cable = self.turbines_on_cable
        # Calculate total length of each cable type, and the total cost of
        # that calculated length of cable:
        count = 0
        for idx, (name, cable) in enumerate(self.cables.items()):
            total_cable_len, self.check_terminal = self.calc_total_cable_length(self.output_dict['total_turb'], count,
                                                                                self.check_terminal,
                                                                                self.__turbines_on_cable,
                                                                                cable, self.input_dict['cable_specs'],
                                                                                self.output_dict['num_full_strings'],
                                                                                self.output_dict['num_partial_strings'],
                                                                                self.output_dict[
                                                                                    'distance_to_grid_connection_km'],
                                                                                self.output_dict['perc_partial_string'][
                                                                                    idx],
                                                                                )
            count += 1
            # self._total_turbine_counter = turbine_tally
            self._cable_length_km[name] = total_cable_len
            cable.total_length = total_cable_len
            self.output_dict['total_cable_len_km'] += total_cable_len
            # Cost is tabulated in USD per linear foot, hence the km->LF conversion.
            cable.total_cost = (total_cable_len / self._km_to_LF) * cable.cost
            self._total_cable_cost += cable.total_cost  # Keep running tally of total cable cost used in wind farm.
        # Repopulate the turbines-per-cable sequence to make sure it reflects
        # any changes made by calc_total_cable_length above.
        self.output_dict['num_turb_per_cable'] = [cable.num_turb_per_cable for cable in self.cables.values()]
        self.output_dict['total_turb_per_string'] = sum(self.output_dict['num_turb_per_cable'])
def calculate_trench_properties(self, trench_properties_input, trench_properties_output):
"""
Calculates the length of trench needed based on cable length and width of mulcher.
"""
# units of cubic meters
trench_properties_output['trench_length_km'] = trench_properties_output['total_cable_len_km']
def calculate_weather_delay(self, weather_delay_input_data, weather_delay_output_data):
"""Calculates wind delays for roads"""
# construct WeatherDelay module
WD(weather_delay_input_data, weather_delay_output_data)
# compute weather delay
wind_delay = pd.DataFrame(weather_delay_output_data['wind_delays'])
# if greater than 4 hour delay, then shut down for full day (10 hours)
wind_delay[(wind_delay > 4)] = 10
weather_delay_output_data['wind_delay_time'] = float(wind_delay.sum())
return weather_delay_output_data
def estimate_construction_time(self, construction_time_input_data, construction_time_output_data):
"""
Function to estimate construction time on per turbine basis.
Parameters
-------
duration_construction
pd.DataFrame
rsmeans
pd.DataFrame
trench_length_km
Returns
-------
(pd.DataFrame) operation_data
"""
collection_construction_time = construction_time_input_data['construct_duration'] * 1 / 3 # assumes collection construction occurs for one-third of project duration
throughput_operations = construction_time_input_data['rsmeans']
trench_length_km = construction_time_output_data['trench_length_km']
if construction_time_input_data['turbine_rating_MW'] >= 0.1:
operation_data = throughput_operations.where(throughput_operations['Module'] == 'Collection').dropna(
thresh=4)
# from rsmeans data, only read in Collection related data and filter out the rest:
cable_trenching = throughput_operations[throughput_operations.Module == 'Collection']
else: #switch for small DW
operation_data = throughput_operations.where(
throughput_operations['Module'] == 'Small DW Collection').dropna(thresh=4)
# from rsmeans data, only read in Collection related data and filter out the rest:
cable_trenching = throughput_operations[throughput_operations.Module == 'Small DW Collection']
# operation_data = pd.merge()
# from rsmeans data, only read in Collection related data and filter out the rest:
cable_trenching = throughput_operations[throughput_operations.Module == 'Collection']
# Storing data with labor related inputs:
trenching_labor = cable_trenching[cable_trenching.values == 'Labor']
trenching_labor_usd_per_hr = trenching_labor['Rate USD per unit'].sum()
construction_time_output_data['trenching_labor_usd_per_hr']=trenching_labor_usd_per_hr
trenching_labor_daily_output = trenching_labor['Daily output'].values[0] # Units: LF/day -> where LF = Linear Foot
trenching_labor_num_workers = trenching_labor['Number of workers'].sum()
# Storing data with equipment related inputs:
trenching_equipment = cable_trenching[cable_trenching.values == 'Equipment']
trenching_cable_equipment_usd_per_hr = trenching_equipment['Rate USD per unit'].sum()
construction_time_output_data['trenching_cable_equipment_usd_per_hr']=trenching_cable_equipment_usd_per_hr
trenching_equipment_daily_output = trenching_equipment['Daily output'].values[0] # Units: LF/day -> where LF = Linear Foot
construction_time_output_data['trenching_labor_daily_output'] = trenching_labor_daily_output
construction_time_output_data['trenching_equipment_daily_output'] = trenching_equipment_daily_output
operation_data['Number of days taken by single crew'] = ((trench_length_km / self._km_to_LF) / trenching_labor_daily_output)
operation_data['Number of crews'] = np.ceil((operation_data['Number of days taken by single crew'] / 30) / collection_construction_time)
operation_data['Cost USD without weather delays'] = ((trench_length_km / self._km_to_LF) / trenching_labor_daily_output) * (operation_data['Rate USD per unit'] * construction_time_input_data['operational_hrs_per_day'])
alpha = operation_data[operation_data['Type of cost'] == 'Collection']
operation_data_id_days_crews_workers = alpha[['Operation ID', 'Number of days taken by single crew', 'Number of crews', 'Number of workers']]
alpha = operation_data[operation_data['Type of cost'] == 'Labor']
operation_data_id_days_crews_workers = alpha[['Operation ID', 'Number of days taken by single crew', 'Number of crews', 'Number of workers']]
# if more than one crew needed to complete within construction duration then assume that all construction
# happens within that window and use that timeframe for weather delays;
# if not, use the number of days calculated
operation_data['time_construct_bool'] = operation_data['Number of days taken by single crew'] > collection_construction_time * 30
boolean_dictionary = {True: collection_construction_time * 30, False: np.NAN}
operation_data['time_construct_bool'] = operation_data['time_construct_bool'].map(boolean_dictionary)
operation_data['Time construct days'] = operation_data[['time_construct_bool', 'Number of days taken by single crew']].min(axis=1)
num_days = operation_data['Time construct days'].max()
# No 'management crew' in small DW
if construction_time_input_data['turbine_rating_MW'] >= 0.1:
# pull out management data
crew_cost = self.input_dict['crew_cost']
crew = self.input_dict['crew'][self.input_dict['crew']['Crew type ID'].str.contains('M0')]
management_crew = pd.merge(crew_cost, crew, on=['Labor type ID'])
management_crew = management_crew.assign(per_diem_total=management_crew['Per diem USD per day'] * management_crew['Number of workers'] * num_days)
management_crew = management_crew.assign(hourly_costs_total=management_crew['Hourly rate USD per hour'] * self.input_dict['hour_day'][self.input_dict['time_construct']] * num_days)
management_crew = management_crew.assign(total_crew_cost_before_wind_delay=management_crew['per_diem_total'] + management_crew['hourly_costs_total'])
self.output_dict['management_crew'] = management_crew
self.output_dict['managament_crew_cost_before_wind_delay'] = management_crew['total_crew_cost_before_wind_delay'].sum()
else:
self.output_dict['managament_crew_cost_before_wind_delay'] = 0.0
construction_time_output_data['operation_data_id_days_crews_workers'] = operation_data_id_days_crews_workers
construction_time_output_data['operation_data_entire_farm'] = operation_data
return construction_time_output_data['operation_data_entire_farm']
    def calculate_costs(self, calculate_costs_input_dict, calculate_costs_output_dict):
        """
        Aggregate the collection-system cost items (equipment, labor, cable
        materials and mobilization) into a single dataframe.

        Parameters
        ----------
        calculate_costs_input_dict : dict
            Reads 'rsmeans_per_diem', 'operational_hrs_per_day',
            'overtime_multiplier', 'turbine_rating_MW' and 'num_turbines'.
        calculate_costs_output_dict : dict
            Intermediate results from earlier stages; computed cost
            components are written back into this dict.

        Returns
        -------
        pd.DataFrame
            collection_cost with columns 'Type of cost', 'Cost USD' and
            'Phase of construction'; also stored under
            calculate_costs_output_dict['total_collection_cost'].

        Raises
        ------
        ValueError
            If the computed wind delay exceeds 100% of construction time.
        """
        #read in rsmeans data:
        # rsmeans = calculate_costs_input_dict['rsmeans']
        operation_data = calculate_costs_output_dict['operation_data_entire_farm']
        # Per diem: workers * crews * (construction days + one rest day per 7) * daily rate.
        per_diem = operation_data['Number of workers'] * operation_data['Number of crews'] * (operation_data['Time construct days'] + np.ceil(operation_data['Time construct days'] / 7)) * calculate_costs_input_dict['rsmeans_per_diem']
        per_diem = per_diem.dropna()
        calculate_costs_output_dict['time_construct_days'] = (calculate_costs_output_dict['trench_length_km'] / self._km_to_LF) / calculate_costs_output_dict['trenching_labor_daily_output']
        wind_delay_fraction = (calculate_costs_output_dict['wind_delay_time'] / calculate_costs_input_dict['operational_hrs_per_day']) / calculate_costs_output_dict['time_construct_days']
        # check if wind_delay_fraction is greater than 1, which would mean weather delays are longer than they can possibily be for the input data
        if wind_delay_fraction > 1:
            raise ValueError('{}: Error: Wind delay greater than 100%'.format(type(self).__name__))
        calculate_costs_output_dict['wind_multiplier'] = 1 / (1 - wind_delay_fraction)
        #Calculating trenching cost:
        # NOTE(review): the key 'Equipment cost of trenching per day {usd/day)'
        # has mismatched brackets, but it is used consistently below and may be
        # read downstream, so it is deliberately left unchanged.
        calculate_costs_output_dict['Days taken for trenching (equipment)'] = (calculate_costs_output_dict['trench_length_km'] / self._km_to_LF) / calculate_costs_output_dict['trenching_equipment_daily_output']
        calculate_costs_output_dict['Equipment cost of trenching per day {usd/day)'] = calculate_costs_output_dict['trenching_cable_equipment_usd_per_hr'] * calculate_costs_input_dict['operational_hrs_per_day']
        calculate_costs_output_dict['Equipment Cost USD without weather delays'] = (calculate_costs_output_dict['Days taken for trenching (equipment)'] * calculate_costs_output_dict['Equipment cost of trenching per day {usd/day)'])
        calculate_costs_output_dict['Equipment Cost USD with weather delays'] = calculate_costs_output_dict['Equipment Cost USD without weather delays'] * calculate_costs_output_dict['wind_multiplier']
        if calculate_costs_input_dict['turbine_rating_MW'] >= 0.1:
            trenching_equipment_rental_cost_df = pd.DataFrame([['Equipment rental', calculate_costs_output_dict[
                'Equipment Cost USD with weather delays'], 'Collection']],
                                                              columns=['Type of cost', 'Cost USD',
                                                                       'Phase of construction'])
        # switch for small DW
        else:
            # Small DW: enforce a minimum of one day of equipment rental.
            if calculate_costs_output_dict['Equipment Cost USD with weather delays'] < 137:
                calculate_costs_output_dict['Equipment Cost USD with weather delays'] = 137 #cost of renting for a day
                trenching_equipment_rental_cost_df = pd.DataFrame([['Equipment rental', calculate_costs_output_dict[
                    'Equipment Cost USD with weather delays'], 'Collection']],
                                                                  columns=['Type of cost', 'Cost USD',
                                                                           'Phase of construction'])
            else:
                trenching_equipment_rental_cost_df = pd.DataFrame([['Equipment rental', calculate_costs_output_dict[
                    'Equipment Cost USD with weather delays'], 'Small DW Collection']],
                                                                  columns=['Type of cost', 'Cost USD',
                                                                           'Phase of construction'])
        #Calculating labor cost:
        calculate_costs_output_dict['Days taken for trenching (labor)'] = ((calculate_costs_output_dict['trench_length_km'] / self._km_to_LF) / calculate_costs_output_dict['trenching_labor_daily_output'])
        calculate_costs_output_dict['Labor cost of trenching per day (usd/day)'] = (calculate_costs_output_dict['trenching_labor_usd_per_hr'] * calculate_costs_input_dict['operational_hrs_per_day'] * calculate_costs_input_dict['overtime_multiplier'])
        calculate_costs_output_dict['Total per diem costs (USD)'] = per_diem.sum()
        # NOTE(review): 'managament' is misspelled but matches the key written
        # by estimate_construction_time; do not "fix" one without the other.
        calculate_costs_output_dict['Labor Cost USD without weather delays'] =((calculate_costs_output_dict['Days taken for trenching (labor)'] * calculate_costs_output_dict['Labor cost of trenching per day (usd/day)']) + (calculate_costs_output_dict['Total per diem costs (USD)'] + calculate_costs_output_dict['managament_crew_cost_before_wind_delay']))
        calculate_costs_output_dict['Labor Cost USD with weather delays'] = calculate_costs_output_dict['Labor Cost USD without weather delays'] * calculate_costs_output_dict['wind_multiplier']
        if calculate_costs_input_dict['turbine_rating_MW'] >= 0.1:
            trenching_labor_cost_df = pd.DataFrame(
                [['Labor', calculate_costs_output_dict['Labor Cost USD with weather delays'], 'Collection']],
                columns=['Type of cost', 'Cost USD', 'Phase of construction'])
        # switch for small DW
        else:
            trenching_labor_cost_df = pd.DataFrame(
                [['Labor', calculate_costs_output_dict['Labor Cost USD with weather delays'], 'Small DW Collection']],
                columns=['Type of cost', 'Cost USD', 'Phase of construction'])
        #Calculate cable cost:
        cable_cost_usd_per_LF_df = pd.DataFrame([['Materials',self._total_cable_cost, 'Collection']],
                                                columns = ['Type of cost', 'Cost USD', 'Phase of construction'])
        # Combine all calculated cost items into the 'collection_cost' dataframe:
        collection_cost = pd.DataFrame([],columns = ['Type of cost', 'Cost USD', 'Phase of construction'])
        collection_cost = pd.concat( (collection_cost,
                                      trenching_equipment_rental_cost_df,
                                      trenching_labor_cost_df,
                                      cable_cost_usd_per_LF_df) )
        # Calculate Mobilization Cost and add to collection_cost dataframe.
        # For utility scale plants, mobilization is assumed to be 5% of the sum of labor, equipment, and material costs.
        # For distributed mode, mobilization is a calculated % that is a function of turbine size.
        if calculate_costs_input_dict['num_turbines'] > 10:
            calculate_costs_output_dict['mob_cost'] = collection_cost['Cost USD'].sum() * 0.05
        else:
            if calculate_costs_input_dict['turbine_rating_MW'] >= 0.1:
                calculate_costs_output_dict['mob_cost'] = collection_cost[
                    'Cost USD'].sum() * self.mobilization_cost_multiplier(calculate_costs_input_dict['turbine_rating_MW'])
            # switch for small DW
            else: # mobilization cost included in equipment rental cost
                calculate_costs_output_dict['mob_cost'] = 0.0
        mobilization_cost = pd.DataFrame([['Mobilization', calculate_costs_output_dict['mob_cost'], 'Collection']],
                                         columns=['Type of cost', 'Cost USD', 'Phase of construction'])
        collection_cost = pd.concat((collection_cost, mobilization_cost))
        calculate_costs_output_dict['total_collection_cost'] = collection_cost
        return collection_cost
def outputs_for_detailed_tab(self, input_dict, output_dict):
"""
Creates a list of dictionaries which can be used on their own or
used to make a dataframe.
Returns
-------
list(dict)
A list of dicts, with each dict representing a row of the data.
"""
result = []
module = 'Collection Cost'
result.append({
'unit': '',
'type': 'variable',
'variable_df_key_col_name': 'Total Number of Turbines',
'value': float(self.output_dict['total_turb'])
})
result.append({
'unit': 'km',
'type': 'variable',
'variable_df_key_col_name': 'Total trench length',
'value': float(self.output_dict['trench_length_km'])
})
result.append({
'unit': 'km',
'type': 'variable',
'variable_df_key_col_name': 'Total cable length',
'value': float(self.output_dict['total_cable_len_km'])
})
result.append({
'unit': '',
'type': 'variable',
'variable_df_key_col_name': 'Number of Turbines Per String in Full String',
'value': float(self.output_dict['total_turb_per_string'])
})
result.append({
'unit': '',
'type': 'variable',
'variable_df_key_col_name': 'Number of Full Strings',
'value': float(self.output_dict['num_full_strings'])
})
result.append({
'unit': '',
'type': 'variable',
'variable_df_key_col_name': 'Number of Turbines in Partial String',
'value': float(self.output_dict['num_leftover_turb'])
})
result.append({
'unit': '',
'type': 'variable',
'variable_df_key_col_name': 'Number of Partial Strings',
'value': float(self.output_dict['num_partial_strings'])
})
result.append({
'unit': '',
'type': 'variable',
'variable_df_key_col_name': 'Total number of strings full + partial',
'value': float(self.output_dict['num_full_strings'] + self.output_dict['num_partial_strings'])
})
result.append({
'unit': '',
'type': 'variable',
'variable_df_key_col_name': 'Trench Length to Substation (km)',
'value': float(self.output_dict['distance_to_grid_connection_km'])
})
result.append({
'unit': '',
'type': 'variable',
'variable_df_key_col_name': 'Cable Length to Substation (km)',
'value': float(self.output_dict['cable_len_to_grid_connection_km'])
})
cables = ''
n = 1 # to keep tab of number of cables input by user.
for cable, specs in self.output_dict['cables'].items():
if n == len(self.output_dict['cables']):
cables += str(cable)
else:
cables += str(cable) + ' , '
for variable, value in specs.__dict__.items():
if variable == 'array_cable_len':
result.append({
'unit': 'km',
'type': 'variable',
'variable_df_key_col_name': 'Array cable length for cable ' + cable,
'value': float(value)
})
elif variable == 'total_length':
result.append({
'unit': 'km',
'type': 'variable',
'variable_df_key_col_name': 'Total cable length for cable ' + cable,
'value': float(value)
})
elif variable == 'total_cost':
result.append({
'unit': 'usd',
'type': 'variable',
'variable_df_key_col_name': 'Total cable cost for cable ' + cable,
'value': float(value)
})
n += 1
result.append({
'unit': '',
'type': 'list',
'variable_df_key_col_name': 'Number of turbines per cable type in full strings [' + cables + ']',
'value': str(self.output_dict['num_turb_per_cable'])
})
if self.input_dict['turbine_rating_MW'] > 0.1:
for row in self.output_dict['management_crew'].itertuples():
dashed_row = ' <--> '.join(str(x) for x in list(row))
result.append({
'unit': '',
'type': 'dataframe',
'variable_df_key_col_name': 'Labor type ID <--> Hourly rate USD per hour <--> Per diem USD per day <--> Operation <--> Crew type <--> Crew name <--> Number of workers <--> Per Diem Total <--> Hourly costs total <--> Crew total cost ',
'value': dashed_row
})
result.append({
'unit': '',
'type': 'list',
'variable_df_key_col_name': 'Percent length of cable in partial string [' + cables + ']',
'value': str(self.output_dict['perc_partial_string'])
})
for row in self.output_dict['total_collection_cost'].itertuples():
dashed_row = '{} <--> {} <--> {}'.format(row[1], row[3], math.ceil(row[2]))
result.append({
'unit': '',
'type': 'dataframe',
'variable_df_key_col_name': 'Type of Cost <--> Phase of Construction <--> Cost in USD ',
'value': dashed_row,
'last_number': row[2]
})
for _dict in result:
_dict['project_id_with_serial'] = self.project_name
_dict['module'] = module
self.output_dict['collection_cost_csv'] = result
return result
def run_module(self):
"""
Runs the CollectionCost module and populates the IO dictionaries with calculated values.
"""
try:
self.create_ArraySystem()
self.calculate_trench_properties(self.input_dict, self.output_dict)
operation_data = self.estimate_construction_time(self.input_dict, self.output_dict)
# pull only global inputs for weather delay from input_dict
weather_data_keys = ('wind_shear_exponent',
'weather_window')
# specify collection-specific weather delay inputs
self.weather_input_dict = dict(
[(i, self.input_dict[i]) for i in self.input_dict if i in set(weather_data_keys)])
self.weather_input_dict[
'start_delay_hours'] = 0 # assume zero start for when collection construction begins (start at beginning of construction time)
self.weather_input_dict[
'critical_wind_speed_m_per_s'] = self.input_dict['critical_speed_non_erection_wind_delays_m_per_s']
self.weather_input_dict[
'wind_height_of_interest_m'] = self.input_dict['critical_height_non_erection_wind_delays_m']
# Compute the duration of the construction for electrical collection
duration_construction = operation_data['Time construct days'].max(skipna=True)
days_per_month = 30
duration_construction_months = duration_construction / days_per_month
self.output_dict['collection_construction_months'] = duration_construction_months
# compute and specify weather delay mission time for roads
operational_hrs_per_day = self.input_dict['hour_day'][self.input_dict['time_construct']]
mission_time_hrs = duration_construction * operational_hrs_per_day
self.weather_input_dict['mission_time_hours'] = int(mission_time_hrs)
self.calculate_weather_delay(self.weather_input_dict, self.output_dict)
self.calculate_costs(self.input_dict, self.output_dict)
self.outputs_for_detailed_tab(self.input_dict, self.output_dict)
self.output_dict['collection_cost_module_type_operation'] = self.outputs_for_costs_by_module_type_operation(
input_df=self.output_dict['total_collection_cost'],
project_id=self.project_name,
total_or_turbine=True
)
return 0, 0 # module ran successfully
except Exception as error:
traceback.print_exc()
print(f"Fail {self.project_name} CollectionCost")
return 1, error # module did not run successfully
|
<reponame>lidofinance/AVotesParser<gh_stars>0
"""
Decoding payload of aragon votes.
"""
from dataclasses import dataclass
from typing import (
Union, Tuple,
List, Any,
Dict, Optional
)
import web3
from .ABI.storage import (
CachedStorage, ABI, ABIKey
)
from .pretty_printed import PrettyPrinted
from .spec import HEX_PREFIX, PRETTY_PRINT_NEXT_LEVEL_OFFSET
# ============================================================================
# ======================= Decoding stage structures ==========================
# ============================================================================
@dataclass
class FuncInput(PrettyPrinted):
    """
    Single function input: its name, Solidity type and decoded value.
    """
    name: str
    type: str
    value: Any

    def __post_init__(self):
        """Conversion from raw bytes to string for bytes values."""
        hex_method = getattr(self.value, 'hex', None)
        if callable(hex_method):
            self.value = hex_method()

    def pretty_print(self, *_, **kwargs) -> str:
        """Get human-readable representation."""
        offset_size = kwargs.pop('offset', 0)
        offset = PrettyPrinted.get_tabular(offset_size)

        if isinstance(self.value, list):
            # Nested printables are rendered one level deeper; plain
            # entries fall back to str().
            rendered_entries = [
                entry.pretty_print(
                    offset=offset_size + PRETTY_PRINT_NEXT_LEVEL_OFFSET,
                    **kwargs
                ) if isinstance(entry, PrettyPrinted) else str(entry)
                for entry in self.value
            ]
            joined = '\n'.join(rendered_entries)
            value_repr = f'[\n{joined}\n]'
        else:
            value_repr = str(self.value)

        return f'{offset}{self.name}: {self.type} = {value_repr}'

    def __repr__(self) -> str:
        """Get human-readable representation."""
        return self.pretty_print(offset=0)
@dataclass
class Call(PrettyPrinted):
    """
    Single function call: target contract, signature, name, decoded
    inputs, ABI properties and declared outputs.
    """
    contract_address: str
    function_signature: str
    function_name: str
    inputs: List[FuncInput]
    properties: Dict[str, Any]
    outputs: Optional[List[Any]]

    def pretty_print(self, *_, **kwargs) -> str:
        """Get human-readable representation."""
        offset_size = kwargs.pop('offset', 0)
        offset = PrettyPrinted.get_tabular(offset_size)

        header = (
            f'{offset}Function call\n'
            f'{offset}Contract: {self.contract_address}\n'
            f'{offset}Signature: {self.function_signature}\n'
            f'{offset}Name: {self.function_name}'
        )
        rendered = '\n'.join(
            inp.pretty_print(offset=offset_size, **kwargs)
            if isinstance(inp, PrettyPrinted) else repr(inp)
            for inp in self.inputs
        )
        inputs = (
            f'{offset}Inputs:\n'
            f'{rendered}'
        )
        return (
            f'{header}\n'
            f'{inputs}'
        )

    def __repr__(self) -> str:
        """Get human-readable representation."""
        return self.pretty_print(offset=0)
# ============================================================================
# ======================= Decoding stage actions =============================
# ============================================================================
# Cache type used by the decoder: maps an ABI lookup key (or a pair of
# keys, e.g. proxy + implementation) to a parsed ABI object.
_CacheT = CachedStorage[Union[ABIKey, Tuple[ABIKey, ABIKey]], ABI]
def decode_function_call(
        contract_address: str, function_signature: str,
        call_data: str, abi_storage: _CacheT
) -> Optional[Call]:
    """
    Decode function call.

    :param contract_address: str, contract address.
    :param function_signature: str, the first fourth bytes
                               of function signature
    :param call_data: str, encoded call data.
    :param abi_storage: CachedStorage, storage of contracts ABI.
    :return: Call, decoded description of function calling, or None
             when the signature is not present in the contract's ABI.
    """
    abi = abi_storage[ABIKey(contract_address, function_signature)]
    description = abi.func_storage.get(function_signature, None)
    if description is None:
        return None

    checksum_address = web3.Web3.toChecksumAddress(contract_address)
    contract = web3.Web3().eth.contract(
        address=checksum_address, abi=abi.raw
    )

    # Strip the '0x' prefix before re-attaching the selector.
    if call_data.startswith(HEX_PREFIX):
        call_data = call_data[len(HEX_PREFIX):]
    _, decoded_arguments = contract.decode_function_input(
        f'{function_signature}{call_data}'
    )

    inputs = [
        FuncInput(spec['name'], spec['type'], decoded_arguments[spec['name']])
        for spec in description['inputs']
    ]
    properties = {
        prop: description.get(prop, 'unknown')
        for prop in ('constant', 'payable', 'stateMutability', 'type')
    }
    return Call(
        contract_address, function_signature,
        description.get('name', 'unknown'), inputs,
        properties, description['outputs']
    )
|
# Copyright 2018 Tile, Inc. All Rights Reserved.
#
# The MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import html
from itertools import chain
from mistletoe import block_token, span_token
from mistletoe.base_renderer import BaseRenderer
import re
import sys
class JIRARenderer(BaseRenderer):
    """
    JIRA renderer class.

    Renders a mistletoe token tree into JIRA (Atlassian wiki) markup.
    See mistletoe.base_renderer module for more info.
    """
    def __init__(self, *extras):
        """
        Args:
            extras (list): allows subclasses to add even more custom tokens.
        """
        # Stack of list markers ('#' ordered, '*' unordered); the current
        # stack contents form the prefix of a nested list item.
        self.listTokens = []
        # Stack of the last child of each quote being rendered, so
        # _block_eol can suppress the trailing blank line inside quotes.
        self.lastChildOfQuotes = []
        super().__init__(*chain([block_token.HTMLBlock, span_token.HTMLSpan], extras))

    def render_strong(self, token):
        template = '*{}*'
        return template.format(self.render_inner(token))

    def render_emphasis(self, token):
        template = '_{}_'
        return template.format(self.render_inner(token))

    def render_inline_code(self, token):
        # '{{' escapes a literal brace in str.format, so this emits {{...}}.
        template = '{{{{{}}}}}'
        return template.format(self.render_inner(token))

    def render_strikethrough(self, token):
        template = '-{}-'
        return template.format(self.render_inner(token))

    def render_image(self, token):
        # JIRA image markup carries only the source URL; the alt text (the
        # token's children) has no representation. (An unused render of the
        # children was removed here.)
        template = '!{src}!'
        return template.format(src=token.src)

    def render_link(self, token):
        template = '[{inner}|{target}]'
        target = escape_url(token.target)
        inner = self.render_inner(token)
        return template.format(target=target, inner=inner)

    def render_auto_link(self, token):
        template = '[{target}]'
        target = escape_url(token.target)
        return template.format(target=target)

    def render_escape_sequence(self, token):
        return self.render_inner(token)

    def render_raw_text(self, token, escape=True):
        """Render plain text, escaping JIRA special characters unless
        *escape* is False (used for code-block content)."""
        if escape:
            def repl(match):
                return '\\' + match.group(0)
            # The following regex tries to find special chars that are one of the following:
            # 1. the whole string (typically in an EscapeSequence)
            # 2. just after a non-whitespace
            # 3. just before a non-whitespace
            re_esc_chars = r'[{}\[\]\-*_+^~]'
            re_find = r'(^{esc_chars}$)|((?<=\S)({esc_chars}))|(({esc_chars})(?=\S))'.format(esc_chars=re_esc_chars)
            return re.sub(re_find, repl, token.content)
        else:
            return token.content

    @staticmethod
    def render_html_span(token):
        return token.content

    def render_heading(self, token):
        template = 'h{level}. {inner}'
        inner = self.render_inner(token)
        return template.format(level=token.level, inner=inner) + self._block_eol(token)

    def render_quote(self, token):
        # Track this quote's last child so nested blocks can suppress their
        # trailing blank line (see _block_eol).
        self.lastChildOfQuotes.append(token.children[-1])
        inner = self.render_inner(token)
        del (self.lastChildOfQuotes[-1])
        if len(token.children) == 1 and isinstance(token.children[0], block_token.Paragraph):
            # A single paragraph can use the short 'bq.' form.
            template = 'bq. {inner}' + self._block_eol(token)[0:-1]
        else:
            template = '{{quote}}\n{inner}{{quote}}' + self._block_eol(token)
        return template.format(inner=inner)

    def render_paragraph(self, token):
        return '{}'.format(self.render_inner(token)) + self._block_eol(token)

    def render_block_code(self, token):
        template = '{{code{attr}}}\n{inner}{{code}}' + self._block_eol(token)
        if token.language:
            attr = ':{}'.format(token.language)
        else:
            attr = ''
        # Code content must not be escaped.
        inner = self.render_raw_text(token.children[0], False)
        return template.format(attr=attr, inner=inner)

    def render_list(self, token):
        inner = self.render_inner(token)
        return inner + self._block_eol(token)[0:-1]

    def render_list_item(self, token):
        template = '{prefix} {inner}'
        prefix = ''.join(self.listTokens)
        result = template.format(prefix=prefix, inner=self.render_inner(token))
        return result

    def render_inner(self, token):
        # Push this list level's marker before rendering children and pop it
        # afterwards, so nested list items receive the right prefix.
        if isinstance(token, block_token.List):
            if token.start:
                self.listTokens.append('#')
            else:
                self.listTokens.append('*')
        rendered = [self.render(child) for child in token.children]
        if isinstance(token, block_token.List):
            del (self.listTokens[-1])
        return ''.join(rendered)

    def render_table(self, token):
        # This is actually gross and I wonder if there's a better way to do it.
        #
        # The primary difficulty seems to be passing down alignment options to
        # reach individual cells.
        template = '{inner}\n'
        if hasattr(token, 'header'):
            head_template = '{inner}'
            header = token.header
            head_inner = self.render_table_row(header, True)
            head_rendered = head_template.format(inner=head_inner)
        else:
            head_rendered = ''
        body_template = '{inner}'
        body_inner = self.render_inner(token)
        body_rendered = body_template.format(inner=body_inner)
        return template.format(inner=head_rendered+body_rendered)

    def render_table_row(self, token, is_header=False):
        # Header rows use '||' separators, body rows use '|'.
        if is_header:
            template = '{inner}||\n'
        else:
            template = '{inner}|\n'
        inner = ''.join([self.render_table_cell(child, is_header)
                         for child in token.children])
        return template.format(inner=inner)

    def render_table_cell(self, token, in_header=False):
        if in_header:
            template = '||{inner}'
        else:
            template = '|{inner}'
        inner = self.render_inner(token)
        return template.format(inner=inner)

    @staticmethod
    def render_thematic_break(token):
        return '----\n'

    @staticmethod
    def render_line_break(token):
        # Note: In Jira, outputting just '\n' instead of '\\\n' should be usually sufficient as well.
        # It is not clear when it wouldn't be sufficient though, so we use the longer variant for sure.
        return ' ' if token.soft else '\\\\\n'

    @staticmethod
    def render_html_block(token):
        return token.content

    def render_document(self, token):
        self.footnotes.update(token.footnotes)
        return self.render_inner(token)

    def _block_eol(self, token):
        """
        Jira syntax is very limited when it comes to lists: whenever
        we put an empty line anywhere in a list, it gets terminated
        and there seems to be no workaround for this. Also to have blocks
        like paragraphs really vertically separated, we need to put
        an empty line between them. This function handles these two cases.
        """
        return '\n' if len(self.listTokens) > 0 or (len(self.lastChildOfQuotes) > 0 and token is self.lastChildOfQuotes[-1]) else '\n\n'
def escape_url(raw):
    """
    Escape urls to prevent code injection craziness. (Hopefully.)

    Percent-encodes everything except unreserved characters and
    '/', '#' and ':'.
    """
    from urllib.parse import quote as _quote
    return _quote(raw, safe='/#:')
|
<filename>top_daily_run.py
"""
Top level python launcher
User must define a few constant:
_PYTHON : the complete path to your Python exe
_ROOT_DIR: the complete path to the location of this repository
"""
from datetime import datetime
import os
import key as pconst
import top_runner_util as util
_ROOT_DIR = os.getcwd() #pconst.ROOT_DIR
print(_ROOT_DIR)
today = datetime.today()

# ------------ Main Script ----------------------
print("Data downloader launcher")
os.chdir(_ROOT_DIR)

# ----------- List your script here----------------
"""
Examples:
- To run a script on every business day, use run_daily(subfolder_name, script_name, timezone_name).
  - New York timezone is "US/Eastern". London timezone is "GMT"
  - Example: run_daily("\\Commodity\\", "comodityDailyRun_A.py", "US/Eastern")
- To run a script on specific day of a week on weekly basis, use run_day_of_week(subfolder_name, script_name, day_of_week)
  - day_of_week: a integer. 0 is Monday, 6 is Sunday
  - Example: run_day_of_week("\\market_breadth\\", "run.py", 5)
- To run a script regardless of schedule, use run_script(subfolder_name, script_name)
  - Example: run_script("\\Commodity\\", "run.py")
"""
util.log_info("======= Start of daily run =======", 1)
#run_script("\\market_breadth\\", "run.py")

# import SHFE_A as mySHFE_A
# import LME_weekly_traderReport as myLME_weeklyTraderReport
# import LME_A as myLME_A
# import LME_daily_volume as myLME_dailyVol
# import LME_daily_openInterest_E as myLME_daily_openInterest
# import COMEX_A as myCOMEX_A
# import COMEX_daily_openInterest_VOL as myCOMEX_dailyVolOI

#----Saturday --------
# util.run_script("\\Commodity\\", "SHFE_A.py")
# util.run_script("\\Commodity\\", "LME_weekly_traderReport.py")
# util.run_script("\\Commodity\\", "LME_A.py")
# util.run_script("\\Commodity\\", "LME_daily_volume.py")
# util.run_script("\\Commodity\\", "LME_daily_openInterest_E.py")
# util.run_script("\\SP500_Ratios\\", "Guru_shiller_sectors.py")
# util.run_script("\\Commodity\\", "COMEX_A.py")
# util.run_script("\\Commodity\\", "COMEX_daily_openInterest_VOL.py")

#----Saturday --------
if util.isToday_Saturday():
    util.run_script("\\Commodity\\", "SHFE_A.py")
    util.run_script("\\Commodity\\", "LME_weekly_traderReport.py")

#----LME daily --------
if util.is_uk_business_day():
    util.run_script("\\Commodity\\", "LME_A.py")
    util.run_script("\\Commodity\\", "LME_daily_volume.py")
    util.run_script("\\Commodity\\", "LME_daily_openInterest_E.py")

if util.is_us_business_day():
    #----Thursday --------
    # BUG FIX: the schedule predicate must be *called*. The bare function
    # object `util.is_COMEX_thursday_run` is always truthy, so the
    # Thursday-only scripts previously ran on every US business day.
    if util.is_COMEX_thursday_run():
        util.run_script("\\Commodity\\", "COMEX_A_gainStock_Tuesday_weekly_Run.py")
        util.run_script("\\SP500_Ratios\\", "Guru_shiller_sectors.py")

    # util.run_script("\\Commodity\\", "test.py")
    util.run_script("\\Commodity\\", "COMEX_A.py")
    util.run_script("\\Commodity\\", "COMEX_daily_openInterest_VOL.py")

# Schedule monthly tasks. If the scheduled date is a holiday, task will be run on the next business day. US only!
# day_on_every_month = 5
# if util.is_US_biz_day_of_month(day_on_every_month):
#     util.log_info("Running monthly US procedure on day {} of every month".format(day_on_every_month), fname_prefix="monthly_log_")

# # Schedule tasks based on fixed day of nth week of certain month.
# # For example, 四巫日, is third Friday of 3,6,9,12 month
# day_of_week=4 # Friday
# week_of_month=3 # Third week
# months=[3,6,9,12]
# if util.is_day_of_nweek(day_of_week, week_of_month, months):
#     util.log_info("Running tasks for every third Friday of {} months".format(months), fname_prefix="quarterly_log_")

#FINRA generally publishes updates to the Margin Statistics on the third week of the month following the reference month.
# # TODO: this to be run monthly
# util.run_script("\\FINRA\\", "FINRA_Margin_Statistics.py")
# # TODO: 13F is updated quaterly over a few weeks time. Schedule needs special treatment.
# util.run_script("\\SEC_13F\\", "SEC_13F_sina.py")
# util.run_script("\\Commodity\\", "COMEX_A_bondsFuture_Delivered_Q_Run.py")
|
<reponame>wtsi-hgi/warden
import json
import urllib.request
import datetime
import base64
import ldap
import socket
import flask
# Flask application serving the Warden UI; static assets live under the
# /treeserve/static prefix because the app is mounted at /treeserve/.
app = flask.Flask(__name__, static_url_path="/treeserve/static")
# group:IP mapping for active instances, rebuilt by getGroupTable()
ACTIVE_INSTANCES = {}
def isUserHumgen():
    """
    Determines whether the user is a member of the Human Genetics
    or the Tree of Life Genomics department.

    The username is read from the X-Forwarded-User header (set by the
    authenticating reverse proxy) and looked up in the Sanger LDAP
    directory to obtain its 'sangerBomArea' attribute.

    Returns
    -------
    True
        User is a member of the specified departments
    False
        User is not a member of the specified departments, could not be
        identified, or was not found in LDAP
    """
    try:
        username = flask.request.headers['X-Forwarded-User']
    except KeyError:
        return False
    # identity check with 'is None' rather than '== None' (PEP 8)
    if username is None or username == "":
        return False
    conn = ldap.initialize("ldap://ldap-ro.internal.sanger.ac.uk:389")
    try:
        # anonymous bind is sufficient for the read-only directory
        conn.bind('','')
        result = conn.search_s("ou=people,dc=sanger,dc=ac,dc=uk",
            ldap.SCOPE_ONELEVEL, "(uid={})".format(username), ['sangerBomArea'])
    finally:
        # BUG FIX: the connection was never released before
        conn.unbind()
    # BUG FIX: an unknown uid yields an empty result list (and some entries
    # may lack the attribute); indexing blindly raised IndexError/KeyError
    # and produced a 500 instead of an access denial.
    if not result or 'sangerBomArea' not in result[0][1]:
        return False
    # extracts user's BoM area from the LDAP results object
    area = result[0][1]['sangerBomArea'][0].decode('UTF-8')
    return area in ("Human Genetics", "Tree of Life Genomics")
@app.route('/treeserve/')
def index():
    """
    Builds and returns the page that a user is served when they
    go to [IP Address]/treeserve/

    Returns
    -------
    resp
        index.html template and fed group list
    """
    # access is restricted to the departments checked by isUserHumgen()
    if not isUserHumgen():
        return 'Sorry, Human Genetics/Tree of Life faculty only.'
    # log the visitor; a KeyError means the reverse proxy did not set the
    # expected headers (malformed or unauthenticated request)
    try:
        print("[{:%Y-%m-%d %H:%M:%S}] New request:\n\tUser: {}\n\tUser Agent: {}"
        .format(datetime.datetime.now(),
        flask.request.headers["X-Forwarded-User"],
        flask.request.headers["User-Agent"]))
    except KeyError:
        print("[{:%Y-%m-%d %H:%M:%S}] New request: Malformed request header!"
        .format(datetime.datetime.now()))
    # fetch the current group table from the local Arboretum daemon
    req = urllib.request.urlopen("http://localhost:8000/groups")
    groups = json.loads(req.read())
    resp = flask.make_response(
        flask.render_template('index.html', groups=groups, arboretum='Arboretum'))
    # cookie stops POST requests from doing anything unless the user visits
    # the root page first
    resp.set_cookie('warden_active_session', 'humgen')
    return resp
@app.route('/treeserve/create/<group>', methods = ['POST'])
def createInstance(group):
    """
    Creates a treeserve instance

    Parameters
    ----------
    group
        Name of the UNIX group to start an instance for

    Returns
    -------
    str
        'OK' once the request has been forwarded to the Arboretum daemon,
        or an error message when the session cookie is missing
    """
    # cookie set by index() guards against direct POSTs
    if not flask.request.cookies.get('warden_active_session'):
        return 'This URL should not be accessed directly.'
    # fire-and-forget: the local Arboretum daemon does the actual creation
    req = urllib.request.urlopen("http://localhost:8000/create?group={}"
                                 .format(group))
    return 'OK'
@app.route('/treeserve/destroy/<group>', methods = ['POST'])
def destroyInstance(group):
    """
    Destroys a treeserve instance

    Parameters
    ----------
    group
        Name of the UNIX group to stop the instance for

    Returns
    -------
    str
        'OK' once the request has been forwarded to the Arboretum daemon,
        or an error message when the session cookie is missing
    """
    # cookie set by index() guards against direct POSTs
    if not flask.request.cookies.get('warden_active_session'):
        return 'This URL should not be accessed directly.'
    # fire-and-forget: the local Arboretum daemon does the actual teardown
    req = urllib.request.urlopen("http://localhost:8000/destroy?group={}"
                                 .format(group))
    return 'OK'
@app.route('/treeserve/status')
def checkArboretumStatus():
    """
    Checks whether the Arboretum daemon is active, which is required for
    Warden to function.

    Returns
    -------
    str
        A double-quoted JSON string: '"up"' when every sub-daemon reports
        up, '"partial"' when at least one does not, '"down"' when the
        daemon's socket refuses the connection.
    """
    # The Arboretum daemon is expected to have an open socket on localhost
    # at port 4510. As of the time of writing, 127.0.0.1:4510 is hardcoded
    # into the daemon, so that's what we'll query.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        try:
            sock.connect(('127.0.0.1', 4510))
        except ConnectionRefusedError:
            return '"down"'
        sock.send(b'status')
        # Response is in the form "subdaemon=status" ie, "prune_process=up"
        # where each entry is separated by a space
        data = sock.recv(1024).decode("UTF-8")
        # Removed the unused 'problems' dict: the first non-up sub-daemon
        # short-circuits the check, so nothing was ever stored in it.
        for item in data.split():
            _, status = item.split("=")
            if status != "up":
                return '"partial"'
    return '"up"'
@app.route('/treeserve/update')
def getGroupTable():
    """
    Getter for group table

    Returns
    -------
    response
        Dictionary of the updated stamp and list of groups, or the JSON
        string '"OK"' when the client's stamp is already current.
        NOTE(review): returning a plain dict relies on Flask (>=1.1)
        auto-serializing it as JSON -- verify the deployed Flask version.
    """
    # cookie set by index() guards against direct access
    if not flask.request.cookies.get('warden_active_session'):
        return 'This URL should not be accessed directly.'
    # stamp the client last saw; only resend the table when it has changed
    js_stamp = flask.request.args.get('stamp')
    req = urllib.request.urlopen("http://localhost:8000/lastmodified")
    arboretum_stamp = json.loads(req.read())
    if js_stamp != arboretum_stamp:
        req = urllib.request.urlopen("http://localhost:8000/groups")
        groups = json.loads(req.read())
        response = {'stamp': arboretum_stamp, 'groups': groups}
        # rebuild the module-level group -> IP map from the fresh table
        global ACTIVE_INSTANCES
        ACTIVE_INSTANCES = {}
        for name, group in groups.items():
            if group['status'] == "up":
                ACTIVE_INSTANCES[name] = group['instance_ip']
        return response
    else:
        # double quoted to be valid json (which this function is expected to
        # return)
        return '"OK"'
@app.route('/treeserve/view/<group>/<path:path>', methods=['GET', 'POST'])
def proxy(group, path):
    """
    Proxy for treeserve instance requests.

    Parameters
    ----------
    group
        A UNIX group
    path
        A path subsequent to the group

    Returns
    -------
    response
        Flask response dependent on the request method
    """
    if not isUserHumgen():
        return 'Sorry, Human Genetics faculty only.'
    req = urllib.request.urlopen("http://localhost:8000/activegroups")
    active = json.loads(req.read())
    if group not in active.keys():
        return 'NOT ACTIVE'
    group_ip = active[group]['instance_ip']
    group_url = "http://{}/{}".format(group_ip, path)
    if path[:3] == "api":
        # Treeserve's API parameters get caught by Flask, so the URL has to
        # be reconstructed
        depth = flask.request.args.get('depth')
        directory = flask.request.args.get('path')
        group_url = "{}?depth={}&path={}".format(group_url, depth, directory)
    # hop-by-hop headers that must not be forwarded verbatim
    excluded_headers = ['content-encoding', 'content-length',
                        'transfer-encoding', 'connection']
    if flask.request.method == 'POST':
        # BUG FIX: urllib.request.Request() has no 'json' keyword (that is
        # the third-party 'requests' API), and a Request object exposes no
        # .getheaders()/.code/.read() until it has been opened. Encode the
        # JSON body by hand and actually perform the request via urlopen().
        # The route previously accepted only GET, making this branch
        # unreachable; methods=['GET', 'POST'] has been added above.
        body = json.dumps(flask.request.get_json()).encode("UTF-8")
        upstream = urllib.request.Request(group_url, data=body,
            headers={'Content-Type': 'application/json'})
        resp = urllib.request.urlopen(upstream)
    else:
        resp = urllib.request.urlopen(group_url)
    headers = [(name, value) for (name, value) in resp.getheaders()
               if name.lower() not in excluded_headers]
    code = resp.code
    content = resp.read()
    if flask.request.method == 'GET' and path[:3] == "api":
        # API responses are JSON; make the mimetype explicit
        response = flask.Response(content, code, headers,
                                  mimetype="application/json")
    else:
        response = flask.Response(content, code, headers)
    return response
|
<filename>models/NASFPN/builder.py
import mxnet as mx
import mxnext as X
from mxnext.complicate import normalizer_factory
from symbol.builder import Neck
def merge_sum(f1, f2, name):
    """
    Element-wise sum fusion of two feature maps.

    :param f1: feature 1
    :param f2: feature 2, major feature
    :param name: name
    :return: sum(f1, f2), feature map size is the same as f2
    """
    # resize f1 so both operands share f2's spatial resolution
    resized = mx.sym.contrib.BilinearResize2D(
        data=f1, like=f2, mode='like', name=name + '_resize')
    return mx.sym.ElementWiseSum(resized, f2, name=name + '_sum')
def merge_gp(f1, f2, name):
    """
    the input feature layers are adjusted to the output resolution
    by nearest neighbor upsampling or max pooling if needed before
    applying the binary operation

    :param f1: feature 1, attention feature, prefered high level feature I guess
    :param f2: feature 2, major feature
    :param name: name
    :return: global pooling fusion of f1 and f2, feature map size is the same as f2
    """
    # bring f1 to f2's spatial resolution before fusing
    f1 = mx.sym.contrib.BilinearResize2D(data=f1, like=f2, mode='like', name=name + '_resize')
    # global max pool -> sigmoid gives a per-channel attention gate from f1
    gp = mx.sym.Pooling(f1, name=name + '_gp', kernel=(1, 1), pool_type="max", global_pool=True)
    gp = mx.sym.Activation(gp, act_type='sigmoid', name=name + '_sigmoid')
    # gate the major feature, then add the (resized) attention feature back
    fuse_mul = mx.sym.broadcast_mul(f2, gp, name=name + '_mul')
    fuse_sum = mx.sym.ElementWiseSum(f1, fuse_mul, name=name + '_sum')
    return fuse_sum
def reluconvbn(data, num_filter, init, norm, name, prefix):
    """
    :param data: data
    :param num_filter: number of convolution filter
    :param init: init method of conv weight
    :param norm: normalizer
    :param name: name
    :param prefix: stage prefix prepended to the conv parameter names
    :return: relu-3x3conv-bn
    """
    data = mx.sym.Activation(data, name=name+'_relu', act_type='relu')
    # conv parameters are created explicitly so their names carry the stage
    # prefix (weights are NOT shared across stacked NAS-FPN stages)
    weight = mx.sym.var(name=prefix + name + "_weight", init=init)
    bias = mx.sym.var(name=prefix + name + "_bias", init=X.zero_init())
    data = mx.sym.Convolution(data, name=prefix + name, weight=weight, bias=bias, num_filter=num_filter, kernel=(3, 3), pad=(1, 1), stride=(1, 1))
    data = norm(data, name=name+'_bn')
    return data
class NASFPNNeck(Neck):
    """NAS-FPN neck: builds P3-P7 pyramid features from backbone features,
    then stacks `num_stage` merging cells of searched wiring (merge_sum /
    merge_gp fusions followed by relu-conv-bn blocks).
    """
    def __init__(self, pNeck):
        super(NASFPNNeck, self).__init__(pNeck)
        # normalizer factory, output channel count and number of stacked
        # merging cells, all taken from the neck config object
        self.norm = self.pNeck.normalizer
        self.dim_reduced = self.pNeck.dim_reduced
        self.num_stage = self.pNeck.num_stage
    @staticmethod
    def get_P0_features(c_features, p_names, dim_reduced, init, norm):
        """Project each backbone feature to `dim_reduced` channels
        (conv + norm, kernel size per X.conv's default) to form the
        stage-0 pyramid levels, keyed by name in a dict."""
        p_features = {}
        for c_feature, p_name in zip(c_features, p_names):
            p = X.conv(
                data=c_feature,
                filter=dim_reduced,
                no_bias=False,
                weight=X.var(name=p_name + "_weight", init=init),
                bias=X.var(name=p_name + "_bias", init=X.zero_init()),
                name=p_name
            )
            p = norm(p, name=p_name + '_bn')
            p_features[p_name] = p
        return p_features
    @staticmethod
    def get_fused_P_feature(p_features, stage, dim_reduced, init, norm):
        """Run one NAS-FPN merging cell: fuse the previous stage's P3-P7
        into this stage's P3-P7 using the searched wiring below."""
        prefix = "S{}_".format(stage)
        with mx.name.Prefix(prefix):
            P3_0 = p_features['S{}_P3'.format(stage-1)] # s8
            P4_0 = p_features['S{}_P4'.format(stage-1)] # s16
            P5_0 = p_features['S{}_P5'.format(stage-1)] # s32
            P6_0 = p_features['S{}_P6'.format(stage-1)] # s64
            P7_0 = p_features['S{}_P7'.format(stage-1)] # s128
            # P4_1 = gp(P6_0, P4_0)
            P4_1 = merge_gp(P6_0, P4_0, name="gp_P6_0_P4_0")
            P4_1 = reluconvbn(P4_1, dim_reduced, init, norm, name="P4_1", prefix=prefix)
            # P4_2 = sum(P4_0, P4_1)
            P4_2 = merge_sum(P4_0, P4_1, name="sum_P4_0_P4_1")
            P4_2 = reluconvbn(P4_2, dim_reduced, init, norm, name="P4_2", prefix=prefix)
            # P3_3 = sum(P4_2, P3_0) end node
            P3_3 = merge_sum(P4_2, P3_0, name="sum_P4_2_P3_0")
            P3_3 = reluconvbn(P3_3, dim_reduced, init, norm, name="P3_3", prefix=prefix)
            P3 = P3_3
            # P4_4 = sum(P3_3, P4_2) end node
            P4_4 = merge_sum(P3_3, P4_2, name="sum_P3_3_P4_2")
            P4_4 = reluconvbn(P4_4, dim_reduced, init, norm, name="P4_4", prefix=prefix)
            P4 = P4_4
            # P5_5 = sum(gp(P3_3, P4_4), P5_0) end node
            gp_P3_3_P4_4 = merge_gp(P3_3, P4_4, name="gp_P3_3_P4_4")
            P5_5 = merge_sum(gp_P3_3_P4_4, P5_0, name="sum_[gp_P3_3_P4_4]_P5_0")
            P5_5 = reluconvbn(P5_5, dim_reduced, init, norm, name="P5_5", prefix=prefix)
            P5 = P5_5
            # P7_6 = sum(gp(P4_2, P5_5), P7_0) end node
            gp_P4_2_P5_5 = merge_gp(P4_2, P5_5, name="gp_p4_2_P5_5")
            P7_6 = merge_sum(gp_P4_2_P5_5, P7_0, name="sum_[gp_P4_2_P5_5]_P7_0")
            P7_6 = reluconvbn(P7_6, dim_reduced, init, norm, name="P7_6", prefix=prefix)
            P7 = P7_6
            # P6_7 = gp(P7_6, P5_5) end node
            # both operands are explicitly resized to P6's resolution first
            P7_6_to_P6 = mx.sym.contrib.BilinearResize2D(data=P7_6, like=P6_0, mode='like', name='P7_6_to_P6_0_resize')
            P5_5_to_P6 = mx.sym.contrib.BilinearResize2D(data=P5_5, like=P6_0, mode='like', name='P5_5_to_P6_0_resize')
            P6_7 = merge_gp(P7_6_to_P6, P5_5_to_P6, name="gp_P7_6_to_P6_P5_5_to_P6")
            P6_7 = reluconvbn(P6_7, dim_reduced, init, norm, name="P6_7", prefix=prefix)
            P6 = P6_7
            return {'S{}_P3'.format(stage): P3,
                    'S{}_P4'.format(stage): P4,
                    'S{}_P5'.format(stage): P5,
                    'S{}_P6'.format(stage): P6,
                    'S{}_P7'.format(stage): P7}
    def get_nasfpn_neck(self, data):
        """Build the full neck: C3-C5 (plus pooled C6/C7) -> stage-0
        projections -> `num_stage` stacked merging cells -> final P3-P7.
        Note: c2 is received but intentionally unused (pyramid starts at P3).
        """
        dim_reduced = self.dim_reduced
        norm = self.norm
        num_stage = self.num_stage
        # NOTE(review): redundant local import -- mx is already imported at
        # module level
        import mxnet as mx
        xavier_init = mx.init.Xavier(factor_type="in", rnd_type="uniform", magnitude=3)
        c2, c3, c4, c5 = data
        # extra levels C6 (stride-2 pool) and C7 (stride-4 pool) from C5
        c6 = X.pool(data=c5, name="C6", kernel=3, stride=2, pool_type="max")
        c7 = X.pool(data=c5, name="C7", kernel=5, stride=4, pool_type="max")
        c_features = [c3, c4, c5, c6, c7]
        # the 0 stage
        p0_names = ['S0_P3', 'S0_P4', 'S0_P5', 'S0_P6', 'S0_P7']
        p_features = self.get_P0_features(c_features, p0_names, dim_reduced, xavier_init, norm)
        # stack stage
        for i in range(num_stage):
            p_features = self.get_fused_P_feature(p_features, i + 1, dim_reduced, xavier_init, norm)
        return p_features['S{}_P3'.format(num_stage)], \
               p_features['S{}_P4'.format(num_stage)], \
               p_features['S{}_P5'.format(num_stage)], \
               p_features['S{}_P6'.format(num_stage)], \
               p_features['S{}_P7'.format(num_stage)]
    def get_rpn_feature(self, rpn_feat):
        """RPN consumes the same pyramid as RCNN (see get_nasfpn_neck)."""
        return self.get_nasfpn_neck(rpn_feat)
    def get_rcnn_feature(self, rcnn_feat):
        """RCNN consumes the same pyramid as RPN (see get_nasfpn_neck)."""
        return self.get_nasfpn_neck(rcnn_feat)
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017, Data61
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# ABN 41 687 119 230.
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(DATA61_BSD)
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
import os, six, sys, unittest
ME = os.path.abspath(__file__)
# Make CAmkES importable
sys.path.append(os.path.join(os.path.dirname(ME), '../../..'))
from camkes.ast import ASTError
from camkes.internal.tests.utils import CAmkESTest
from camkes.parser import ParseError
from camkes.parser.stage0 import Reader
from camkes.parser.stage1 import Parse1
from camkes.parser.stage2 import Parse2
from camkes.parser.stage3 import Parse3
from camkes.parser.stage4 import Parse4
from camkes.parser.stage5 import Parse5
from camkes.parser.stage6 import Parse6
from camkes.parser.stage7 import Parse7
from camkes.parser.stage8 import Parse8
from camkes.parser.stage9 import Parse9
from camkes.parser.stage10 import Parse10
class TestStage10(CAmkESTest):
    """Tests for stage 10 of the CAmkES parser pipeline, which (judging by
    the cases below) covers attribute type checking and resolution of
    configuration settings, including hierarchical '<-' references."""
    def setUp(self):
        """Construct the full Reader -> Parse1..Parse10 pipeline used by
        every test in this class."""
        super(TestStage10, self).setUp()
        r = Reader()
        s1 = Parse1(r)
        s2 = Parse2(s1)
        s3 = Parse3(s2, debug=True)
        s4 = Parse4(s3)
        s5 = Parse5(s4)
        s6 = Parse6(s5)
        s7 = Parse7(s6)
        s8 = Parse8(s7)
        s9 = Parse9(s8)
        self.parser = Parse10(s9)
    def test_basic_assignment(self):
        """A well-typed string attribute assignment parses and its value is
        preserved in the assembly's configuration settings."""
        ast, _ = self.parser.parse_string('''
            connector C {
                from Procedure;
                to Procedure;
            }
            procedure P {
            }
            component Foo {
                attribute string t;
                provides P p;
            }
            component Baz {
                uses P p;
            }
            assembly {
                composition {
                    component Foo f;
                    component Baz b;
                    connection C c(from b.p, to f.p);
                }
                configuration {
                    f.t = "hello world";
                }
            }
            ''')
        self.assertLen(ast.items, 5)
        C, P, Foo, Baz, assembly = ast.items
        self.assertLen(assembly.configuration.settings, 1)
        s = assembly.configuration.settings[0]
        self.assertEqual(s.value, 'hello world')
    def test_mistyped_assignment(self):
        """Assigning an int literal to a string attribute is rejected."""
        with self.assertRaises((ASTError, ParseError)):
            ast, _ = self.parser.parse_string('''
                connector C {
                    from Procedure;
                    to Procedure;
                }
                procedure P {
                }
                component Foo {
                    attribute string t;
                }
                component Baz {
                    uses P p;
                }
                assembly {
                    composition {
                        component Foo f;
                        component Baz b;
                        connection C c(from b.p, to f.p);
                    }
                    configuration {
                        f.t = 2;
                    }
                }
                ''')
    def test_mistyped_hierarchical_assignment(self):
        """An int assigned to a string attribute that is itself referenced
        by a nested component's '<-' setting is rejected."""
        with self.assertRaises((ASTError, ParseError)):
            ast, _ = self.parser.parse_string('''
                connector C {
                    from Procedure;
                    to Procedure;
                }
                procedure P {
                }
                component Foo {
                    attribute string t;
                }
                component Bar {
                    attribute string s;
                    provides P p;
                    composition {
                        component Foo f;
                    }
                    configuration {
                        f.t <- s;
                    }
                }
                component Baz {
                    uses P p;
                }
                assembly {
                    composition {
                        component Bar b1;
                        component Baz b2;
                        connection C c(from b2.p, to b1.p);
                    }
                    configuration {
                        b1.s = 2;
                    }
                }
                ''')
    def test_mistyped_attribute(self):
        """A string setting forwarded via '<-' into an int attribute is
        rejected even though the direct assignment itself is well-typed."""
        with self.assertRaises((ASTError, ParseError)):
            ast, _ = self.parser.parse_string('''
                connector C {
                    from Procedure;
                    to Procedure;
                }
                procedure P {
                }
                component Foo {
                    attribute int t;
                }
                component Bar {
                    attribute string s;
                    provides P p;
                    composition {
                        component Foo f;
                    }
                    configuration {
                        f.t <- s;
                    }
                }
                component Baz {
                    uses P p;
                }
                assembly {
                    composition {
                        component Bar b1;
                        component Baz b2;
                        connection C c(from b2.p, to b1.p);
                    }
                    configuration {
                        b1.s = "hello world";
                    }
                }
                ''')
    def test_setting_duplication_bug(self):
        '''
        There was a bug in an early implementation of the parser that led to
        settings in non-trivial hierarchical specs being potentially duplicated
        as they were lifted up the AST. This tests whether such a bug has been
        re-introduced. If it has, this test should throw a ParseError with a
        message about duplicate settings of the attribute string_to_append.
        '''
        spec = '''
            /* This spec lifted from the hierarchical components example at time of
             * writing.
             */
            connector seL4RPC {
                from Procedure;
                to Procedure;
            }
            procedure StringProcessor {
                void process(in string input);
            };
            component Client {
                control;
                uses StringProcessor o1;
                uses StringProcessor o2;
            }
            component Server {
                provides StringProcessor i;
            }
            component UpperCase {
                provides StringProcessor i;
                uses StringProcessor o;
            }
            component Reverse {
                provides StringProcessor i;
                uses StringProcessor o;
            }
            component Append {
                provides StringProcessor i;
                uses StringProcessor o;
                attribute string string_to_append;
            }
            component SubPipeline {
                provides StringProcessor i;
                uses StringProcessor o;
                composition {
                    component UpperCase uc;
                    component Reverse r;
                    connection seL4RPC internal(from uc.o, to r.i);
                    export r.o -> o;
                    export uc.i -> i;
                }
            }
            component Pipeline {
                provides StringProcessor i;
                uses StringProcessor o;
                provides StringProcessor extra;
                composition {
                    component SubPipeline sp;
                    component Append a;
                    connection seL4RPC internal1(from a.o, to sp.i);
                    export sp.o -> o;
                    export a.i -> i;
                }
                configuration {
                    a.string_to_append = "world";
                }
            }
            assembly {
                composition {
                    component Client c;
                    component Server s;
                    component Pipeline p1;
                    component Pipeline p2;
                    connection seL4RPC client_external(from c.o1, to p1.i);
                    connection seL4RPC pipeline_connection(from p1.o, to p2.i);
                    connection seL4RPC server_external(from p2.o, to s.i);
                    connection seL4RPC extra_external(from c.o2, to p1.extra);
                }
            }
            '''
        self.parser.parse_string(spec)
    def test_attribute_default_values_in_settings(self):
        '''
        Test that an attribute without its value set has its default value
        accessible through the configuration settings.
        '''
        spec = '''
            connector C {
                from Procedures;
                to Procedure;
            }
            procedure P {}
            component Foo {
                attribute string x = "hello world";
                attribute int y;
                uses P p;
            }
            component Bar {
                provides P p;
            }
            assembly {
                composition {
                    component Foo f1;
                    component Foo f2;
                    component Bar b;
                    connection C conn1(from f1.p, from f2.p, to b.p);
                }
                configuration {
                    f1.x = "moo cow";
                    f1.y = 1;
                    f2.y = 2;
                }
            }
            '''
        ast, _ = self.parser.parse_string(spec)
        conf = ast.assembly.configuration
        self.assertIn('f1', conf)
        self.assertIn('x', conf['f1'])
        self.assertEqual(conf['f1']['x'], 'moo cow')
        self.assertIn('y', conf['f1'])
        self.assertEqual(conf['f1']['y'], 1)
        self.assertIn('f2', conf)
        self.assertIn('x', conf['f2'])
        # f2.x was never set, so the attribute's declared default applies
        self.assertEqual(conf['f2']['x'], 'hello world')
        self.assertIn('y', conf['f2'])
        self.assertEqual(conf['f2']['y'], 2)
    def test_attribute_c_keyword(self):
        '''
        Confirm that we can't use an attribute name that is a C keyword.
        '''
        spec = '''
            connector C {
                from Procedure;
                to Procedure;
            }
            procedure P {
            }
            component Foo {
                attribute string for;
                provides P p;
            }
            component Baz {
                uses P p;
            }
            assembly {
                composition {
                    component Foo f;
                    component Baz b;
                    connection C c(from b.p, to f.p);
                }
            }
            '''
        with self.assertRaises(ASTError):
            self.parser.parse_string(spec)
    def test_setting_c_keyword(self):
        '''
        Confirm that we can't set an undeclared attribute with the name of a C
        keyword.
        '''
        spec = '''
            connector C {
                from Procedure;
                to Procedure;
            }
            procedure P {
            }
            component Foo {
                provides P p;
            }
            component Baz {
                uses P p;
            }
            assembly {
                composition {
                    component Foo f;
                    component Baz b;
                    connection C c(from b.p, to f.p);
                }
                configuration {
                    f.for = "hello world";
                }
            }
            '''
        with self.assertRaises(ASTError):
            self.parser.parse_string(spec)
    def test_both_sides(self):
        """Using one interface on both sides of two connections is reported
        as a duplicate use (the deprecated form of N-way connections)."""
        with six.assertRaisesRegex(self, ASTError, r'.*duplicate use of interface f.p1 \(deprecated form of N-way connections\?\)'):
            self.parser.parse_string('''
                connector C {
                    from Dataports;
                    to Dataports;
                }
                component Foo {
                    dataport Buf p1;
                    dataport Buf p2;
                }
                component Baz {
                    dataport Buf p1;
                    dataport Buf p2;
                }
                assembly {
                    composition {
                        component Foo f;
                        component Baz b;
                        connection C c1(from b.p1, to f.p1);
                        connection C c2(from f.p1, to b.p2);
                    }
                }
                ''')
# Allow running this test module directly (as well as via a test runner).
if __name__ == '__main__':
    unittest.main()
|
<reponame>seznam/flexp
"""Inspector has the ability to print out data flowing through any module.
Usage: Chain([inspect(MyModule())])
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import collections
import pprint
from flexp.flow import Chain
from flexp.utils import get_logger
# Module-level logger; Inspector writes its reports here at INFO level.
log = get_logger(__name__)
def inspect(module, stream=False, depth=3):
    """Wrap `module` in an Inspector that logs its data flow.

    The statistics are based on the module's optional `requires` and
    `provides` key lists and are written via the standard `logging`
    library at level `INFO`.

    :param object|function module: Module that should be inspected
    :param bool stream: if `True` then log out stats for every record otherwise (default) at the end
    :param int depth: how many submersions to do while printing out data
    """
    wrapped = Inspector(module, stream=stream, depth=depth)
    return wrapped
class Inspector(object):
    """Inspect dataflow based on optional requires/provides and log out statistics as `INFO`.

    The inspector summarizes data structure into a comprehensive form because it takes into account
    that some structures will be wide.
    There is currently no change recording between rounds but will be added one day.

    :param bool stream: if `True` then log out stats for every record otherwise (default) at the end
    :param int depth: how many submersions to do while printing out data
    """
    class Metrics:
        """Enum of counter keys."""
        KEY = 0  # counts observed key-set transitions "pre -> post"
        LEN = 1  # counts observed "key: len(value)" occurrences
    def __init__(self, module, stream=False, depth=3):
        # name and the declared requires/provides contract of the module
        self.name = Chain.module_name(module)
        self.requires = getattr(module, "requires", [])
        self.provides = getattr(module, "provides", [])
        self.relevant = self.requires + self.provides
        self._module = module
        self._stream = stream
        self._depth = depth
        # module may be a callable or expose a .process() method
        self._process = getattr(module, "process", module)
        self._prev = None
        # one Counter per Metrics entry (KEY, LEN)
        self._counters = [collections.Counter() for _ in range(2)]
        self._calls = 0
        # lazily captured summary of the first record's structure
        self._structure = None
    def relevant_data(self, data):
        """Pick up only requires/provides keys if available."""
        if not self.relevant:
            return data
        return dict([(relevant, data[relevant]) for relevant in self.relevant])
    def process(self, data):
        """Run the wrapped module on `data` and record flow statistics."""
        self._calls += 1
        pre_keys = sorted(data.keys())
        self._process(data)
        post_keys = sorted(data.keys())
        # record how the set of keys changed across the module call
        self._counters[Inspector.Metrics.KEY]["{!s} -> {!s}".format(pre_keys, post_keys)] += 1
        relevant = self.relevant_data(data)
        for key, value in relevant.items():
            if hasattr(value, "__len__"):
                self._counters[Inspector.Metrics.LEN]["{}: {:d}".format(key, len(value))] += 1
        if self._structure is None:
            self._structure = dict([(key, self._inspect_structure(val)) for key, val in relevant.items()])
        # TODO: track memory usage via psutil
        if self._prev is not None:
            self._inspect_changes(self._prev, data)
        self._prev = relevant
        if self._stream:
            self.print_log()
    def _inspect_structure(self, data, d=0):
        """Recursively summarize `data` down to the configured depth `d`,
        collapsing wide dicts and long lists into compact descriptions."""
        if d >= self._depth:
            # depth limit reached: emit a one-line summary instead of recursing
            if isinstance(data, dict) and len(data.keys()) > 10:
                key = list(data.keys())[0]
                return {
                    "{:d} keys of type {!s}; ex: ({!s})".format(len(data.keys()), type(key), key):
                        "{!s} ({!s})".format(type(data[key]), data[key])
                }
            if isinstance(data, (tuple, list)):
                if len(data) > 0:
                    return "[list of {!s}]".format(type(data[0]))
                return "[empty]"
            return data
        if isinstance(data, (list, tuple)):
            # summarize sequences by their first element only
            if len(data) > 0:
                return {"[len={:d}]".format(len(data)): self._inspect_structure(data[0], d + 1)}
            return {"[]": "empty"}
        if isinstance(data, dict):
            if len(data.keys()) > 10:
                # very likely a dict used as a list - inspect only one item
                key = list(data.keys())[0]
                return {
                    "{!s}#{:d} times ({!s})".format(type(key), len(data.keys()), key):
                        self._inspect_structure(data[key], d + 1)
                }
            return dict([(key, self._inspect_structure(data[key], d + 1)) for key in data])
        return data
    def _inspect_changes(self, prev, curr):
        """Placeholder for change tracking between consecutive records
        (not implemented yet, see class docstring)."""
        pass
    def print_log(self):
        """Log the captured structure summary and reset it."""
        log.info("Data flow structure")
        log.info(pprint.pformat(self._structure, indent=4, width=200))
        log.info("End of data flow structure")
        self._structure = None
    def close(self):
        """Close the wrapped module (if closable) and flush the final log."""
        if hasattr(self._module, "close"):
            self._module.close()
        if not self._stream:
            self.print_log()
|
import sys
# Default constants for the simulation / inference parameter checkers below.
seed = 42
# numerical floors: smallest positive normalized float and its exponent
eps = sys.float_info.min
log_eps = sys.float_info.min_exp
# default binning range and resolution
# NOTE(review): units/meaning of x inferred from usage; confirm upstream
min_x = 0.001
max_x = 3.501
n_bins = 10
n_gals = 4
# catastrophic-outlier population defaults (rate, mean, sigma)
cat_out_rate = 0.1
cat_out_mean = 1.
cat_out_sigma = 0.01
# default Gaussian component width and systematic bias
constant_sigma = 0.03
constant_bias = 0.003
# sampler defaults: Gelman-Rubin threshold; sample counts are 10**n
gr_threshold = 1.2
n_accepted = 3
n_burned = 2
plot_colors = 5
dpi = 250
def check_sim_params(params=None):
    """
    Checks simulation parameter dictionary for various keywords and sets to
    default values if not present

    Parameters
    ----------
    params: dict, optional
        dictionary containing initial key/value pairs for simulation of catalog

    Returns
    -------
    params: dict
        dictionary containing final key/value pairs for simulation of catalog
    """
    # BUG FIX: the default used to be a mutable literal (params={}). The
    # checkers below mutate the dict in place, so defaults written during one
    # call leaked into every later call that omitted the argument.
    if params is None:
        params = {}
    params = check_basic_setup(params)
    params = check_bias_params(params)
    params = check_variable_sigmas(params)
    params = check_catastrophic_outliers(params)
    return params
def check_basic_setup(params):
    """
    Sets parameter values pertaining to basic constants of simulation

    Parameters
    ----------
    params: dict
        dictionary containing key/value pairs for simulation

    Returns
    -------
    params: dict
        dictionary containing key/value pairs for simulation
    """
    # key -> (cast applied to params[key][0], lazily-evaluated default)
    spec = {
        'n_gals': (int, lambda: n_gals),
        'n_bins': (int, lambda: n_bins),
        'bin_min': (float, lambda: min_x),
        'bin_max': (float, lambda: max_x),
    }
    for key, (cast, fallback) in spec.items():
        if key in params:
            params[key] = cast(params[key][0])
        else:
            params[key] = fallback()
    return params
def check_bias_params(params):
    """
    Sets parameter values pertaining to presence of a systematic bias

    Parameters
    ----------
    params: dict
        dictionary containing key/value pairs for simulation

    Returns
    -------
    params: dict
        dictionary containing key/value pairs for simulation
    """
    # key -> (cast applied to params[key][0], lazily-evaluated default)
    spec = {
        'ez_bias': (lambda v: bool(int(v)), lambda: False),
        'ez_bias_val': (float, lambda: constant_bias),
        'variable_bias': (lambda v: bool(int(v)), lambda: False),
    }
    for key, (cast, fallback) in spec.items():
        if key in params:
            params[key] = cast(params[key][0])
        else:
            params[key] = fallback()
    return params
def check_variable_sigmas(params):
    """
    Sets parameter values pertaining to widths of Gaussian PDF components

    Parameters
    ----------
    params: dict
        dictionary containing key/value pairs for simulation

    Returns
    -------
    params: dict
        dictionary containing key/value pairs for simulation

    Notes
    -----
    rms_scatter --> variable_sigmas
    """
    # key -> (cast applied to params[key][0], lazily-evaluated default)
    spec = {
        'constant_sigma': (float, lambda: constant_sigma),
        'variable_sigmas': (int, lambda: 0),
    }
    for key, (cast, fallback) in spec.items():
        if key in params:
            params[key] = cast(params[key][0])
        else:
            params[key] = fallback()
    return params
def check_catastrophic_outliers(params):
    """
    Sets parameter values pertaining to presence of a catastrophic outlier
    population

    Parameters
    ----------
    params: dict
        dictionary containing key/value pairs for simulation

    Returns
    -------
    params: dict
        dictionary containing key/value pairs for simulation
    """
    # key -> (cast applied to params[key][0], lazily-evaluated default)
    spec = {
        'catastrophic_outliers': (str, lambda: '0'),
        'outlier_fraction': (float, lambda: cat_out_rate),
        'outlier_mean': (float, lambda: cat_out_mean),
        'outlier_sigma': (float, lambda: cat_out_sigma),
    }
    for key, (cast, fallback) in spec.items():
        if key in params:
            params[key] = cast(params[key][0])
        else:
            params[key] = fallback()
    return params
def check_inf_params(params=None):
    """
    Checks inference parameter dictionary for various keywords and sets to
    default values if not present

    Parameters
    ----------
    params: dict, optional
        dictionary containing initial key/value pairs for inference

    Returns
    -------
    params: dict
        dictionary containing final key/value pairs for inference
    """
    # BUG FIX: the default used to be a mutable literal (params={}), which
    # check_sampler_params mutates in place, leaking state between calls.
    if params is None:
        params = {}
    params = check_sampler_params(params)
    return params
def check_sampler_params(params):
    """
    Sets parameter values pertaining to basic constants of inference

    Parameters
    ----------
    params: dict
        dictionary containing key/value pairs for inference

    Returns
    -------
    params: dict
        dictionary containing key/value pairs for inference
    """
    # key -> (cast applied to params[key][0], lazily-evaluated default);
    # sample counts are expressed as powers of ten
    spec = {
        'gr_threshold': (float, lambda: gr_threshold),
        'n_accepted': (lambda v: 10 ** int(v), lambda: 10 ** n_accepted),
        'n_burned': (lambda v: 10 ** int(v), lambda: 10 ** n_burned),
        'n_walkers': (int, lambda: None),
    }
    for key, (cast, fallback) in spec.items():
        if key in params:
            params[key] = cast(params[key][0])
        else:
            params[key] = fallback()
    return params
|
<reponame>kalekundert/linersock
class Conversation:
    """
    Manages a messaging system that allows two participants to carry out
    brief conversations.  Each participant can move back and forth between
    sending requests and waiting for responses; these transitions are
    configured in advance (as exchange objects) and played out once the
    conversation starts.
    """

    def __init__(self, pipe, *exchanges):
        self.pipe = pipe
        self.exchanges = exchanges
        self.closed = False

    def get_pipe(self):
        return self.pipe

    def configure(self, *exchanges):
        # append additional exchanges to those already registered
        self.exchanges += exchanges

    def start(self, *exchanges):
        # take exclusive ownership of the pipe for this conversation
        self.pipe.lock()
        self.configure(*exchanges)

    def update(self):
        """Drive one round of the conversation; returns True once done."""
        if not self.finished():
            self.update_outgoing()
            self.update_incoming()
            self.update_finished()
        return self.finished()

    def update_outgoing(self):
        # collect and ship any pending outbound messages
        for exchange in self.exchanges:
            outbound = exchange.send()
            if outbound is not None:
                self.pipe.send(outbound)
        self.pipe.deliver()
        self.update_exchanges()

    def update_incoming(self):
        # offer every received message to every active exchange
        for incoming in self.pipe.receive():
            for exchange in self.exchanges:
                exchange.receive(incoming)
        self.update_exchanges()

    def update_exchanges(self):
        # drop finished exchanges and advance the remaining ones
        still_open = []
        for exchange in self.exchanges:
            if not exchange.finish():
                still_open.append(exchange.next())
        self.exchanges = still_open

    def update_finished(self):
        # the conversation ends once no exchanges remain and the pipe idles
        if not self.exchanges and self.pipe.idle():
            self.finish()

    def finish(self):
        self.pipe.unlock()
        self.exchanges = []
        self.closed = True

    def finished(self):
        return self.closed
class SimpleSend(Conversation):
    """
    Sends a single message and finishes without waiting for a response.
    This class is intended only for brief exchanges with SimpleReceive. It
    should not be used in more complex protocols.
    """
    def __init__(self, pipe, message):
        # Single transition: send the message, then immediately finish.
        # NOTE(review): Send/Finish are exchange types defined elsewhere in
        # this module.
        send = Send(message); finish = Finish()
        send.transition(finish)
        Conversation.__init__(self, pipe, send)
class SimpleReceive(Conversation):
    """
    Waits to receive a single message, then finishes. This class is meant to
    work with SimpleSend and should not be used in more complicated protocols.
    """
    def __init__(self, pipe, flavor, callback=lambda message: None):
        # Wait for one message of the given flavor, invoke the callback on
        # it, then finish. The Receive exchange is kept so get_message()
        # can expose the message afterwards.
        self.receive = Receive(); finish = Finish()
        self.receive.transition(finish, flavor, callback)
        Conversation.__init__(self, pipe, self.receive)
    def get_message(self):
        # the message that was received (available once finished)
        return self.receive.get_message()
class SimpleRequest(Conversation):
    """
    Sends a single message and waits to receive a response. This class is can
    be used in conjunction with SimpleResponse to request that information be
    sent over the network.
    """
    def __init__(self, pipe, message, flavor, callback):
        # send -> wait for a response of the given flavor -> finish
        request = Send(message)
        response = Receive()
        finish = Finish()
        request.transition(response)
        response.transition(finish, flavor, callback)
        Conversation.__init__(self, pipe, request)
        # kept so get_response() can expose the reply afterwards
        self.response = response
    def get_response(self):
        # the response message (available once finished)
        return self.response.get_message()
class SimpleResponse(Conversation):
    """
    Wait to receive a request, then respond with a predefined response. This
    exchange is specifically meant to be used with SimpleRequest.
    """
    def __init__(self, pipe, flavor, message):
        # wait for a request of the given flavor, answer with the canned
        # message, then finish
        request = Receive()
        response = Send(message)
        finish = Finish()
        request.transition(response, flavor)
        request.transition(finish)
        Conversation.__init__(self, pipe, request)
class FullRequest(Conversation):
    """
    Send a request and wait for the far side to either accept or reject it.
    This wraps a conversation plus several exchanges into a convenient
    package for the most common situations; it is also a useful model when
    writing a custom conversation.
    """
    def __init__(self, pipe, message, accept_flavor, reject_flavor):
        # Wire up this side of the conversation. After the request is sent,
        # the conversation listens for a confirmation from its partner; the
        # reply's flavor decides whether the outcome is recorded as an
        # accept or a reject.
        outgoing = Send(message)
        reply = Receive()
        def on_accept():
            self.result = True
        def on_reject():
            self.result = False
        def remember(message):
            self.response = message
        outgoing.transition(reply)
        reply.transition(Finish(on_accept), accept_flavor, remember)
        reply.transition(Finish(on_reject), reject_flavor, remember)
        # The second constructor argument makes sending the request the very
        # first exchange of the conversation.
        Conversation.__init__(self, pipe, outgoing)
        self.result = False
        self.response = None
    def get_accepted(self):
        """Return True if the request was accepted (only valid once
        finished() is True)."""
        assert self.finished()
        return self.finished() and self.result
    def get_rejected(self):
        """Return True if the request was rejected (only valid once
        finished() is True)."""
        assert self.finished()
        return self.finished() and not self.result
    def get_response(self):
        """Return the raw reply message (only valid once finished() is
        True)."""
        assert self.finished()
        return self.response
class FullResponse(Conversation):
    """
    Wait for a request to arrive and decide whether to accept it. Companion
    to FullRequest above; typically the request comes from the client side
    and this response runs on the server side.
    """
    def __init__(self, pipe, flavor_callback, accept_message, reject_message):
        # Each incoming request is classified by flavor_callback: True means
        # accept and finish the conversation, False means send the rejection
        # and go back to waiting for another request.
        def remember(message):
            self.request = message
        request = Receive(flavor_callback)
        accept = Send(accept_message)
        reject = Send(reject_message)
        request.transition(accept, True, remember)
        request.transition(reject, False, remember)
        accept.transition(Finish())
        reject.transition(request)
        Conversation.__init__(self, pipe, request)
        self.request = None
    def get_request(self):
        """Return the request that was accepted (only valid once finished()
        is True)."""
        assert self.finished()
        return self.request
# The classes beyond this point are primarily intended for use within the
# classes above this point. Some of these classes can still be used on their
# own, but are only necessary in unusual situations, while others should never
# be directly used. Just be sure you know what you are doing.
class Exchange:
    """
    One step of a conversation. The basic kinds -- sending a message,
    receiving a message, and ending the conversation -- are implemented by
    the subclasses below; chaining several exchanges together produces a
    complex conversation.
    """
    def send(self):
        """
        Return a message to transmit to the far side, or None. Be careful:
        this is called on every update cycle while the exchange is active.
        """
        return None
    def receive(self, message):
        """
        Handle a message that arrived from the far side. The message may be
        irrelevant to this exchange, but in many cases it will trigger a
        transition.
        """
        pass
    def next(self):
        """
        Return the exchange to run on the next update cycle. Returning self
        keeps the conversation in this exchange.
        """
        raise NotImplementedError
    def finish(self):
        """
        Return True once this side of the conversation is over. The
        conversation itself keeps updating until every outgoing and incoming
        message has been fully sent and received.
        """
        return False
class Send(Exchange):
    """
    Emit a message and immediately hand control to another exchange. The
    successor must be configured (via transition) before the conversation
    starts.
    """
    def __init__(self, message):
        self.exchange = None
        self.message = message
    def send(self):
        """Return the message to transmit."""
        return self.message
    def transition(self, exchange):
        """Configure the exchange to switch to after sending."""
        self.exchange = exchange
    def next(self):
        """Hand control to the configured successor."""
        return self.exchange
class Receive(Exchange):
    """
    Wait until a message arrives, then move the conversation to another
    exchange chosen by the message's content. Each message type ("flavor")
    may trigger a different transition; by default the flavor is the class
    of the message, but a custom callback can override that.
    """
    def __init__(self, flavor=lambda message: type(message)):
        self.flavor = flavor
        # Last message received, regardless of type. Lets receive() hand the
        # message over to next().
        self.received = None
        # Every message that was received and recognized, newest first:
        # index 0 is always the most recent one.
        self.messages = []
        self.exchanges = {}
        self.callbacks = {}
    def get_message(self, index=0):
        """Return a recognized message; index 0 is the most recent."""
        return self.messages[index]
    def get_messages(self):
        """Return all recognized messages, newest first."""
        return self.messages
    def receive(self, message):
        self.received = message
    def transition(self, exchange, flavor, callback=lambda message: None):
        """Register the successor exchange and callback for one flavor."""
        self.exchanges[flavor] = exchange
        self.callbacks[flavor] = callback
    def next(self):
        message = self.received
        self.received = None
        if message is None:
            return self
        flavor = self.flavor(message)
        successor = self.exchanges.get(flavor, self)
        if successor is self:
            # Unrecognized flavor: stay put and ignore the message.
            return self
        self.callbacks[flavor](message)
        self.messages.insert(0, message)
        return successor
class Finish(Exchange):
    """
    End the conversation without sending or receiving anything. Note that
    this only ends this side; the conversation running on the far side of
    the connection keeps going.
    """
    def __init__(self, callback=lambda: None):
        self.callback = callback
    def finish(self):
        """Invoke the callback and report that this side is done."""
        self.callback()
        return True
|
from pyrosetta import init, create_score_function
from pyrosetta import rosetta
from pyrosetta.rosetta.core.pose import setPoseExtraScore
from pyrosetta import pose_from_file
from pyrosetta.rosetta.core.scoring import CA_rmsd
from pyrosetta.rosetta.protocols import rosetta_scripts
from pyrosetta.rosetta.core.scoring import all_atom_rmsd
from pyrosetta.rosetta.core.pack.task import TaskFactory
from pyrosetta.rosetta.core.pack.task import operation
from pyrosetta.rosetta.core.pack.task import residue_selector
from pyrosetta.rosetta.core import select
from pyrosetta.rosetta.protocols.minimization_packing import RotamerTrialsMover
from roseasy import pipeline
from roseasy import big_jobs
from roseasy.utils.mover_utils import setup_movemap_from_resselectors
import os, sys, subprocess, gzip
import json
from roseasy.movers import fastdesign
from roseasy.utils import numeric
#from roseasy.standard_params.filters import FilterContainer
def get_workspace(root_dir, step):
    """Return the DesignWorkspace rooted at root_dir for the given step."""
    workspace = pipeline.DesignWorkspace(root_dir, step)
    return workspace
def boollist_to_vector1_bool(a):
    """Convert a Python iterable of booleans into a Rosetta vector1_bool.

    (Should move to a utils module eventually.)
    """
    from pyrosetta.rosetta.utility import vector1_bool
    out = vector1_bool()
    for flag in a:
        out.append(flag)
    return out
def size_list_to_res_selector(list_of_residues, pose):
    """Build a vector1_bool residue mask for a pose.

    Args:
        list_of_residues: Iterable of pose residue numbers. Rosetta numbers
            residues starting at 1.
        pose: The pose whose size determines the mask length.

    Returns:
        A vector1_bool with True at each listed residue position.

    (Should move to a utils module eventually.)
    """
    # Bug fix: Rosetta poses and vector1 containers are 1-indexed, but the
    # original looped over range(0, pose.size()). That shifted every flag by
    # one position and never covered the final residue. Also dropped the
    # unused pdb_info() local.
    bool_list = [res in list_of_residues for res in range(1, pose.size() + 1)]
    return boollist_to_vector1_bool(bool_list)
def strlist_to_vector1_str(strlist):
    """Convert a Python iterable of strings into a Rosetta vector1 of
    std::string. (Should move to a utils module eventually.)
    """
    from pyrosetta.rosetta.utility import vector1_std_string
    out = vector1_std_string()
    for s in strlist:
        out.append(s)
    return out
def get_insertion(pdbpath):
    """Load the LUCS insertion record that matches a model PDB.

    The insertion JSON lives in a hardcoded directory for now; the model
    number is parsed from the PDB filename (last underscore-separated field
    of the stem).
    """
    base = os.path.join(
        os.environ['HOME'],
        'cas', 'dels', 'rec2_helix',
        'lucs', 'data', 'compatible'
    )
    model_number = os.path.basename(pdbpath).split('.')[0].split('_')[-1]
    json_path = os.path.join(
        base,
        'insertion_points_{}.json'.format(model_number)
    )
    with open(json_path, 'r') as f:
        insertions = json.load(f)
    # Only the first insertion matters for this script; there is only one.
    return insertions[0]
def clash_based_taskfactory(pdbpath, pose):
    """Build a TaskFactory and MoveMap for designing the LUCS insertion.

    Designable positions are the insertion loop itself plus a fixed set of
    positions relative to the insertion end; a clash-based shell around them
    is repacked and everything else is frozen.

    Args:
        pdbpath: Path to the model PDB; used to look up its insertion record.
        pose: The pose the task operations are applied to.

    Returns:
        (tf, movemap): the TaskFactory and a MoveMap built from the final
        packer task's design/repack masks.
    """
    insertion = get_insertion(pdbpath)
    # How much to add to insertion['stop'] to get the residue number to
    # design
    lucs_relative_resfile = [4, 5, 6, 8, 12, 16, 20, 23, 24, 26, 27, 30]
    # subtract 1 because LUCS loop definitions stop on the residue after the
    # insertion, whereas normal loop definition stops at the end of the
    # loop.
    lucs_relative_resfile = [i + insertion['stop'] - 1 for i in lucs_relative_resfile]
    # This should give us all designed positions
    loop_list = [j for j in range(insertion['start'], insertion['stop'])]
    lucs_relative_resfile += loop_list
    design_mask = size_list_to_res_selector(lucs_relative_resfile, pose)
    design_selector = select.residue_selector.ResidueIndexSelector(
        numeric.intlist_to_vector1_size(lucs_relative_resfile)
    )
    # Takes a resfile and returns a task factory with a clash-based
    # repack shell.
    tf = TaskFactory()
    cl = operation.InitializeFromCommandline()
    # Forbid cysteine and histidine at the designed positions.
    notaa = operation.ProhibitSpecifiedBaseResidueTypes(
        strlist_to_vector1_str(['C', 'H']),
        design_selector)
    # read = operation.ReadResfile(resfile)
    tf.push_back(cl)
    tf.push_back(notaa)
    # tf.push_back(read)
    # Select repackable residues from designed residues
    repack_only = operation.RestrictToRepackingRLT()
    repack = operation.OperateOnResidueSubset(repack_only,
            design_selector, False)
    # flip_subset(True) applies "repack only" to everything *except* the
    # designed residues.
    repack.flip_subset(True)
    tf.push_back(repack)
    # Clash-based shell (2 shells) around the design mask; invert(True) so
    # the subset below selects everything *outside* focus + shells.
    all_selector = residue_selector.ClashBasedShellSelector(design_mask)
    all_selector.set_num_shells(2)
    all_selector.set_include_focus(True)
    all_selector.invert(True)
    no_packing = operation.PreventRepackingRLT()
    static = operation.OperateOnResidueSubset(no_packing, all_selector,
            False)
    # NOTE(review): this packer task is rebuilt below after 'static' and the
    # LayerDesign operation are pushed; this first call appears redundant.
    packertask = tf.create_task_and_apply_taskoperations(pose)
    # Layer-aware amino-acid restrictions for the terminal layers.
    ld = rosetta_scripts.XmlObjects.static_get_task_operation(
        '''<LayerDesign name="layer_all" layer="core_boundary_surface_Nterm_Cterm" use_sidechain_neighbors="True">
        <Nterm>
            <all append="DEGHKNQRST" />
            <all exclude="CAFILMPVWY" />
        </Nterm>
        <Cterm>
            <all append="DEGHKNQRST" />
            <all exclude="CAFILMPVWY" />
        </Cterm>
    </LayerDesign>''')
    tf.push_back(static)
    tf.push_back(ld)
    packertask = tf.create_task_and_apply_taskoperations(pose)
    print('PRINTING PACKERTASK')
    print(packertask)
    # Derive the movemap from the final task's repack/design residue masks.
    repack_mask = packertask.repacking_residues()
    design_mask = packertask.designing_residues()
    movemap = setup_movemap_from_resselectors(design_mask, repack_mask)
    return tf, movemap
if __name__=='__main__':
    # Entry point: design one model with RotamerTrials followed by a
    # FastDesign XML protocol, then score, filter, and dump the result.
    test=False
    # test = True
    if not test:
        # Production path: job parameters come from the cluster job runner.
        workspace, job_info = big_jobs.initiate()
        test_run = job_info.get('test_run', False)
        pdbpath = workspace.input_path(job_info)
    else:
        # Local debugging path with a hardcoded input model.
        # NOTE(review): this branch never defines job_info, so the
        # workspace.get_filters(...) call below will raise NameError in test
        # mode -- confirm before relying on test=True.
        workspace = pipeline.workspace_from_dir(sys.argv[1])
        pdbpath = '02_designs/inputs/model_0.pdb.gz'
        test_run = True
        task_id = 1
    init('-total_threads 1 -packing:ex1 -packing:ex2 -packing:ex1aro '\
            '-use_input_sc')
    pose = pose_from_file(pdbpath)
    # Build the clash-based design task factory and matching movemap.
    task_factory, movemap = clash_based_taskfactory(pdbpath,
            pose)
    ref = create_score_function('ref2015')
    # Quick rotamer optimization of the packable shell before FastDesign.
    rot = RotamerTrialsMover(ref, task_factory)
    print('APPLYING ROTAMERTRIALS')
    rot.apply(pose)
    # cst_gen_str = '''
    # <CoordinateConstraintGenerator name="backbone"
    # ca_only="true"
    # '''
    # backbone_cst = rosetta_scripts.XmlObjects.static_get_mover(cst_gen_str)
    # csts = rosetta.protocols.constraint_generator.CoordinateConstraintGenerator()
    # csts.set_ca_only(True)
    # fd = fastdesign.FastDesign()
    xml = rosetta_scripts.XmlObjects.create_from_file(os.path.join(
        workspace.find_path('fastdesign_cst.xml')
        ))
    protocol = xml.get_mover('ParsedProtocol')
    # NOTE(review): init_args is assembled below but never passed to init(),
    # which already ran above -- these flags are currently unused.
    init_args = []
    # NOTE(review): 'DAlpahBall' mirrors the (misspelled) directory name that
    # ships in the Rosetta source tree; verify before "correcting" it.
    dalphaball_path = os.path.join(workspace.rosetta_dir, 'source',
            'external', 'DAlpahBall', 'DAlphaBall.gcc')
    init_args.append('-holes:dalphaball {} -in:file:s {}'.format(dalphaball_path, pdbpath))
    init_args.append('-total_threads 1')
    init_args.append('-packing:ex1 -packing:ex2')
    init_args.append('-packing:ex1aro')
    init_args.append('-use_input_sc')
    init_args.append('-relax:constrain_relax_to_start_coords')
    # Mover index 2 is assumed to be the FastDesign stage of the parsed
    # protocol -- TODO confirm against fastdesign_cst.xml.
    if test_run:
        protocol.get_mover(2).rounds = 1
    # fd.rounds = 1
    # fd.pose = pose
    # Warning: This is an all-atom movemap. Constrain to input coords if
    # you don't want things to move around a lot.
    # loop = workspace.largest_loop
    protocol.get_mover(2).set_task_factory(task_factory)
    protocol.get_mover(2).set_movemap(movemap)
    # print('PRINTING MOVEMAP AND TASK FACTORY')
    # print(fd.movemap)
    # print(fd.task_factory)
    # fd.mover.add_constraint_generator(csts)
    # fd.mover.constrain_relax_to_start_coords(True)
    # fd.mover.ramp_down_constraints(False)
    # Before we apply FastDesign, also setup and run RotamerTrials
    # fd.apply()
    protocol.apply(pose)
    # This will compare it to the input to the step
    input_pose = pose_from_file(pdbpath)
    # But you can uncomment this to compare it to the input to the
    # project
    #input_pose = pose_from_file(workspace.input_pdb_path)
    ca_rmsd = CA_rmsd(pose, input_pose)
    # NOTE(review): this rebinds the imported all_atom_rmsd function's name
    # to its float result; harmless here since it is called only once, but
    # rename the variable if a second call is ever needed.
    all_atom_rmsd = all_atom_rmsd(pose, input_pose)
    # score_fragments = os.path.exists(workspace.loops_path)
    score_fragments=False
    filters = workspace.get_filters(pose,
            task_id=job_info['task_id'], score_fragments=score_fragments,
            test_run=test_run)
    filters.run_filters()
    # Output name: <prefix><input stem><suffix>.pdb.gz in the workspace.
    input_name = os.path.basename(pdbpath).split(".")[0]
    out = workspace.output_prefix(job_info) + input_name + workspace.output_suffix(job_info) + '.pdb.gz'
    setPoseExtraScore(pose, 'EXTRA_METRIC_CA_RMSD', ca_rmsd)
    setPoseExtraScore(pose, 'EXTRA_METRIC_AllAtom_RMSD', all_atom_rmsd)
    pose.dump_pdb(out)
|
# coding=utf-8
# Copyright 2019 The SEED Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SEED agent using Keras for continuous control tasks."""
import collections
import gin
from seed_rl.agents.policy_gradient.modules import input_normalization
from seed_rl.agents.policy_gradient.modules import running_statistics
from seed_rl.common import utils
import tensorflow as tf
# Network output: the sampled action, the distribution parameters it was
# drawn from ('policy_logits'), and the value-function baseline.
AgentOutput = collections.namedtuple('AgentOutput',
                                     'action policy_logits baseline')
@gin.configurable
class ContinuousControlAgent(tf.Module):
  """Agent for continuous control tasks.

  Wraps policy and value MLPs (optionally with a shared torso and/or a
  stacked-LSTM core) behind a single __call__ that maps environment outputs
  to an AgentOutput (sampled action, policy parameters, baseline).
  """
  def __init__(self,
               parametric_action_distribution,
               observation_normalizer=None,
               num_layers_policy=3,
               num_layers_value=3,
               num_layers_rnn=0,
               num_units_policy=256,
               num_units_value=256,
               num_units_rnn=256,
               layer_normalizer=None,
               shared=False,
               residual_connections=False,
               activation=None,
               kernel_init='glorot_uniform',
               last_kernel_init_value=None,
               last_kernel_init_value_scaling=None,
               last_kernel_init_policy=None,
               last_kernel_init_policy_scaling=None,
               correct_observations=False,
               std_independent_of_input=False,
               input_clipping=None):
    """Creates the ContinuousControlAgent.
    Args:
      parametric_action_distribution: SEED distribution used for the actions.
      observation_normalizer: InputNormalization instance used to normalize
        observations or None for no normalization.
      num_layers_policy: Integer with the number of hidden layers in the policy
        MLP. Needs to be the same as `num_layers_value` if shared=True.
      num_layers_value: Integer with the number of hidden layers in the value
        MLP. If None, the number of layers is the same as in the policy.
        Needs to be the same as `num_layers_policy` if shared=True.
      num_layers_rnn: Number of RNN layers.
      num_units_policy: Integer with the number of hidden units in the policy
        MLP. Needs to be the same as `num_units_value` if shared=True.
      num_units_value: Integer with the number of hidden units in the value
        MLP. If None, the number of units is the same as in the policy.
        Needs to be the same as `num_units_policy` if shared=True.
      num_units_rnn: Integer with the number of hidden units in the RNN.
      layer_normalizer: Function that returns a tf.keras.Layer instance used to
        normalize observations or None for no layer normalization.
      shared: Boolean indicating whether the MLPs (except the heads) should be
        shared for the value and the policy networks.
      residual_connections: Boolean indicating whether residual connections
        should be added to all the layers except the first and last ones in the
        MLPs.
      activation: Activation function to be passed to the dense layers in the
        MLPs or None (in which case the swish activation function is used).
      kernel_init: tf.keras.initializers.Initializer instance used to initialize
        the dense layers of the MLPs.
      last_kernel_init_value: tf.keras.initializers.Initializer instance used to
        initialize the last dense layers of the value MLP or None (in which case
        `kernel_init` is used).
      last_kernel_init_value_scaling: None or a float that is used to rescale
        the initial weights of the value network.
      last_kernel_init_policy: tf.keras.initializers.Initializer instance used
        to initialize the last dense layers of the policy MLP or None (in which
        case `kernel_init` is used).
      last_kernel_init_policy_scaling: None or a float that is used to rescale
        the initial weights of the policy network.
      correct_observations: Boolean indicating if changes in the
        `observation_normalizer` due to updates should be compensated in
        trainable compensation variables.
      std_independent_of_input: If a Gaussian action distribution is used,
        this parameter makes the standard deviation trainable but independent
        of the policy input.
      input_clipping: None or float that is used to clip input values to range
        [-input_clipping, input_clipping] after (potential) input normalization.
    """
    super(ContinuousControlAgent, self).__init__(name='continuous_control')
    # Default values.
    if observation_normalizer is None:
      # No input normalization.
      observation_normalizer = input_normalization.InputNormalization(
          running_statistics.FixedMeanStd())
    if activation is None:
      activation = swish
    if last_kernel_init_value is None:
      last_kernel_init_value = kernel_init
    last_kernel_init_value = _rescale_initializer(
        last_kernel_init_value, last_kernel_init_value_scaling)
    if last_kernel_init_policy is None:
      last_kernel_init_policy = kernel_init
    last_kernel_init_policy = _rescale_initializer(
        last_kernel_init_policy, last_kernel_init_policy_scaling)
    if layer_normalizer is None:
      # Identity "normalizer": a factory that returns a no-op callable.
      layer_normalizer = lambda: (lambda x: x)
    # Parameters and layers for unroll.
    self._parametric_action_distribution = parametric_action_distribution
    self.observation_normalizer = observation_normalizer
    self._correct_observations = correct_observations
    # Build the required submodules.
    self._shared = tf.keras.Sequential()
    self._policy = tf.keras.Sequential()
    self._value = tf.keras.Sequential()
    # Build the torso(s).
    num_layers_value = num_layers_value or num_layers_policy
    num_units_value = num_units_value or num_units_policy
    if shared:
      if num_layers_value != num_layers_policy:
        raise ValueError('If shared=True, num_layers_value needs to be equal to'
                         ' num_layers_policy')
      if num_units_value != num_units_policy:
        raise ValueError('If shared=True, num_units_value needs to be equal to'
                         ' num_units_policy')
      _add_layers(self._shared, num_layers_value, num_units_value, kernel_init,
                  activation, layer_normalizer, residual_connections)
    else:
      _add_layers(self._policy,
                  num_layers_policy, num_units_policy, kernel_init, activation,
                  layer_normalizer, residual_connections)
      _add_layers(self._value, num_layers_value, num_units_value, kernel_init,
                  activation, layer_normalizer, residual_connections)
    # Build the recurrent layers (if needed).
    if num_layers_rnn:
      lstm_sizes = [num_units_rnn] * num_layers_rnn
      lstm_cells = [tf.keras.layers.LSTMCell(size) for size in lstm_sizes]
      self._rnn = tf.keras.layers.StackedRNNCells(lstm_cells)
    else:
      self._rnn = None
    # Build the policy head.
    normalizer_policy = layer_normalizer()
    policy_output_size = self._parametric_action_distribution.param_size
    if std_independent_of_input:
      # Only half of the parameters (the means) come from the network; the
      # input-independent trainable half is appended below.
      policy_output_size //= 2
    self._policy.add(
        _Layer(policy_output_size,
               last_kernel_init_policy, lambda x: x, normalizer_policy, False))
    if std_independent_of_input:
      self._policy.add(_ConcatTrainableTensor(tf.zeros(policy_output_size,
                                                       tf.float32)))
    # Build the value head.
    normalizer_value = normalizer_policy if shared else layer_normalizer()
    self._value.add(
        _Layer(1, last_kernel_init_value, lambda x: x, normalizer_value, False))
    self._input_clipping = input_clipping
  @tf.function
  def initial_state(self, batch_size):
    """Returns the initial recurrent state (empty tuple when no RNN)."""
    if self._rnn is None:
      return ()
    return self._rnn.get_initial_state(batch_size=batch_size, dtype=tf.float32)
  # Not clear why, but if "@tf.function" declarator is placed directly onto
  # __call__, training fails with "uninitialized variable *baseline".
  # when running on multiple learning tpu cores.
  @tf.function
  def get_action(self, input_, core_state):
    """tf.function wrapper around __call__ (see comment above)."""
    return self.__call__(input_, core_state)
  def update_observation_normalization_statistics(self, observations):
    """Updates the observation normalization statistics.
    Args:
      observations: a batch of observations with shape [time, batch_size,
        obs_size].
    """
    self.observation_normalizer.update_normalization_statistics(observations)
  def __call__(self, input_, core_state, unroll=False, is_training=False):
    """Applies the network.
    Args:
      input_: A pair (prev_actions: <int32>[batch_size], env_outputs: EnvOutput
        structure where each tensor has a [batch_size] front dimension). When
        unroll is True, an unroll (sequence of transitions) is expected, and
        those tensors are expected to have [time, batch_size] front dimensions.
      core_state: Opaque (batched) recurrent state structure corresponding to
        the beginning of the input sequence of transitions.
      unroll: Whether the input is an unroll (sequence of transitions) or just a
        single (batched) transition.
      is_training: Enables normalization statistics updates (when unroll is
        True).
    Returns:
      A pair:
        - agent_output: AgentOutput structure. Tensors have front dimensions
          [batch_size] or [time, batch_size] depending on the value of 'unroll'.
        - core_state: Opaque (batched) recurrent state structure.
    """
    _, env_outputs = input_
    # We first handle initializing and (potentially) updating normalization
    # statistics. We only update during the gradient update steps.
    # `is_training` is slightly misleading as it is also True during inference
    # steps in the training phase. We hence also require unroll=True which
    # indicates gradient updates.
    training_model_update = is_training and unroll
    # env_outputs[2] holds the observations (see the unpacking in
    # _flat_apply_pre_lstm); only its feature size is needed here.
    data = env_outputs[2]
    if not self.observation_normalizer.initialized:
      if training_model_update:
        raise ValueError('It seems unlikely that stats should be updated in the'
                         ' same call where the stats are initialized.')
      self.observation_normalizer.init_normalization_stats(data.shape[-1])
    if self._rnn is not None:
      if unroll:
        representations = utils.batch_apply(self._flat_apply_pre_lstm,
                                            (env_outputs,))
        representations, core_state = self._apply_rnn(
            representations, core_state, env_outputs.done)
        outputs = utils.batch_apply(self._flat_apply_post_lstm,
                                    (representations,))
      else:
        # Single transition: add and strip a fake time dimension around the
        # RNN application.
        representations = self._flat_apply_pre_lstm(env_outputs)
        representations, done = tf.nest.map_structure(
            lambda t: tf.expand_dims(t, 0),
            (representations, env_outputs.done))
        representations, core_state = self._apply_rnn(
            representations, core_state, done)
        representations = tf.nest.map_structure(
            lambda t: tf.squeeze(t, 0), representations)
        outputs = self._flat_apply_post_lstm(representations)
    else:
      # Simplify.
      if unroll:
        outputs = utils.batch_apply(self._flat_apply_no_lstm, (env_outputs,))
      else:
        outputs = self._flat_apply_no_lstm(env_outputs)
    return outputs, core_state
  def _apply_rnn(self, representations, core_state, done):
    """Apply the recurrent part of the network.
    Args:
      representations: The representations coming out of the non-recurrent
        part of the network, tensor of size [num_timesteps, batch_size, depth].
      core_state: The recurrent state, given as nested structure of
        sub-states. Each sub-states is of size [batch_size, substate_depth].
      done: Tensor of size [num_timesteps, batch_size] which indicates
        the end of a trajectory.
    Returns:
      A pair holding the representations coming out of the RNN (tensor of size
      [num_timesteps, batch_size, depth]) and the updated RNN state (same size
      as the input core_state.
    """
    batch_size = tf.shape(representations)[1]
    initial_core_state = self._rnn.get_initial_state(
        batch_size=batch_size, dtype=tf.float32)
    core_output_list = []
    for input_, d in zip(tf.unstack(representations), tf.unstack(done)):
      # If the episode ended, the core state should be reset before the next.
      # (tf.where picks the fresh initial state for the batch entries where
      # done is True and keeps the carried state otherwise.)
      core_state = tf.nest.map_structure(
          lambda x, y, d=d: tf.where(
              tf.reshape(d, [d.shape[0]] + [1] * (x.shape.rank - 1)), x, y),
          initial_core_state,
          core_state)
      core_output, core_state = self._rnn(input_, core_state)
      core_output_list.append(core_output)
    outputs = tf.stack(core_output_list)
    return outputs, core_state
  def _flat_apply_pre_lstm(self, env_outputs):
    """Normalizes/clips observations and applies the shared torso (if any)."""
    _, _, observations, _, _ = env_outputs
    # Input normalization.
    observations = self.observation_normalizer.normalize(observations)
    if self._input_clipping is not None:
      observations = tf.clip_by_value(
          observations,
          -self._input_clipping,
          self._input_clipping)
    if self._correct_observations:
      observations = self.observation_normalizer.correct(observations)
    # The actual MLPs with the different heads.
    # NOTE: in the non-shared configuration self._shared has no layers and
    # passes the observations through unchanged.
    representations = self._shared(observations)
    return representations
  def _flat_apply_no_lstm(self, env_outputs):
    """Applies the modules."""
    representations = self._flat_apply_pre_lstm(env_outputs)
    return self._flat_apply_post_lstm(representations)
  def _flat_apply_post_lstm(self, representations):
    """Applies the value and policy heads and samples an action."""
    values = self._value(representations)
    logits = self._policy(representations)
    baselines = tf.squeeze(values, axis=-1)
    new_action = self._parametric_action_distribution(logits).sample(seed=None)
    return AgentOutput(new_action, logits, baselines)
@gin.configurable
def swish(input_activation):
  """Swish activation: x * sigmoid(x)."""
  return input_activation * tf.nn.sigmoid(input_activation)
def _add_layers(sequential, num_layers, num_units, kernel_init, activation,
                normalizer, residual_connections):
  """Appends num_layers dense _Layer blocks to a tf.keras.Sequential.

  A residual connection is never applied to the first layer.
  """
  for index in range(num_layers):
    use_residual = residual_connections if index > 0 else False
    sequential.add(
        _Layer(num_units, kernel_init, activation, normalizer(), use_residual))
class _Layer(tf.keras.layers.Layer):
  """Dense layer with pre-normalization and an optional residual connection."""
  def __init__(self, num_units, kernel_init, activation, normalizer,
               residual_connection):
    """Creates a _Layer."""
    super(_Layer, self).__init__()
    self.normalizer = normalizer
    self.residual_connection = residual_connection
    self.dense = tf.keras.layers.Dense(
        num_units, kernel_initializer=kernel_init, activation=activation)
  def call(self, tensor):
    transformed = self.dense(self.normalizer(tensor))
    if self.residual_connection:
      return tensor + transformed
    return transformed
class _ConcatTrainableTensor(tf.keras.layers.Layer):
  """Layer that appends a trainable vector (broadcast over the batch
  dimensions) to its input along the last axis."""
  def __init__(self, init_value):
    """Creates a layer."""
    super(_ConcatTrainableTensor, self).__init__()
    assert init_value.ndim == 1
    self.init_value = init_value
  def build(self, shape):
    self.var = tf.Variable(self.init_value, trainable=True)
  def call(self, tensor):
    broadcast = tf.broadcast_to(self.var, tensor.shape[:-1] + self.var.shape)
    return tf.concat(values=[tensor, broadcast], axis=-1)
def _rescale_initializer(initializer, rescale):
if rescale is None:
return initializer
if isinstance(initializer, str):
initializer = tf.keras.initializers.get(initializer)
def rescaled_initializer(*args, **kwargs):
return rescale*initializer(*args, **kwargs)
return rescaled_initializer
|
import logging
from claf.metric.glue import pearson_and_spearman
from claf.metric.regression import mse
from claf.model.base import ModelBase
logger = logging.getLogger(__name__)
class Regression:
    """ Regression Mixin Class """
    def make_predictions(self, output_dict):
        """
        Build the prediction dictionary from the model's output_dict.
        * Args:
            output_dict: model's output dictionary consisting of
                - sequence_embed: embedding vector of the sequence
                - class_logits: representing unnormalized log probabilities of the class
                - class_idx: target class idx
                - data_idx: data idx
                - loss: a scalar loss to be optimized
        * Returns:
            predictions: prediction dictionary consisting of
                - key: 'id' (sequence id)
                - value: dictionary consisting of
                    - score
        """
        predictions = {}
        indices = list(output_dict["data_idx"].data)
        scores = list(output_dict["logits"].data)
        for data_idx, pred_score in zip(indices, scores):
            sequence_id = self._dataset.get_id(data_idx.item())
            predictions[sequence_id] = {"score": pred_score.item()}
        return predictions
    def predict(self, output_dict, arguments, helper):
        """
        Inference by raw_feature
        * Args:
            output_dict: model's output dictionary consisting of
                - sequence_embed: embedding vector of the sequence
                - logits: model's score
            arguments: arguments dictionary consisting of user_input
            helper: dictionary to get the classification result
        * Returns: output dict (dict) consisting of
            - score: model's score
        """
        return {
            "score": output_dict["logits"],
        }
    def make_metrics(self, predictions):
        """
        Compute evaluation metrics from the prediction dictionary.
        * Args:
            predictions: prediction dictionary consisting of
                - key: 'id' (sequence id)
                - value: dictionary with the predicted score
        * Returns:
            metrics: metric dictionary consisting of
                - 'mse': Mean Squared Error
                - 'pearson': Pearson correlation coefficient
                - 'spearmanr': Spearman correlation coefficient
                - 'pearson_spearman_corr': (pearson_corr + spearman_corr) / 2
        """
        pred_scores = []
        target_scores = []
        preds = {}
        for data_id, pred in predictions.items():
            score = pred["score"]
            preds[data_id] = score
            pred_scores.append(score)
            target_scores.append(self._dataset.get_ground_truth(data_id)["score"])
        self.write_predictions(preds)
        metrics = {"mse": mse(pred_scores, target_scores) / len(target_scores)}
        metrics.update(pearson_and_spearman(pred_scores, target_scores))
        return metrics
    def write_predictions(self, predictions, ):
        try:
            super(Regression, self).write_predictions(predictions)
        except AttributeError:
            # TODO: Need to Fix
            # Mixin was not combined with a ModelBase subclass; fall back to
            # a bare ModelBase carrying over the state it needs.
            fallback = ModelBase()
            fallback._log_dir = self._log_dir
            fallback._train_counter = self._train_counter
            fallback.training = self.training
            fallback.write_predictions(predictions)
    def print_examples(self, index, inputs, predictions):
        """
        Print one evaluation example: sequence, target score, and predicted
        score.
        * Args:
            index: data index
            inputs: mini-batch inputs
            predictions: prediction dictionary consisting of
                - key: 'id' (sequence id)
                - value: dictionary with the predicted score
        """
        data_idx = inputs["labels"]["data_idx"][index].item()
        data_id = self._dataset.get_id(data_idx)
        example = self._dataset.helper["examples"][data_id]
        print()
        print("- Sequence:", example["sequence"])
        print("- Target:")
        print("    Score:", example["score"])
        print("- Predict:")
        print("    Score:", predictions[data_id]["score"])
        print()
|
<reponame>SmartDogHouse/SmartDogHouse-Software
from secret import MQTT_HOST
from pins import VALVE_PIN, WATER_SENSOR_PIN, SCALE_PIN_SCK, SCALE_PIN_DT, MOTOR_PIN, B_MOTOR_PIN, F_MOTOR_PIN, LIGHT_SENSOR_PIN
from pins import LASER_PIN, LIMIT_SWITCH_OPEN_PIN, LIMIT_SWITCH_CLOSE_PIN, DS18x20_PIN, HEARTBEAT_PIN
from static_values import CERT_FILE, KEY_FILE, MQTT_PORT, MQTT_CLIENT_ID, MQTT_FOOD_TOPIC, MQTT_RECEIVE_TOPIC
from static_values import DEF_MAX_FOOD_LVL_PERC, DEF_MAX_WATER_LVL_PERC, DEF_MIN_FOOD_LVL_PERC, DEF_MIN_WATER_LVL_PERC
from static_values import MQTT_SMARTCOLLAR_TOPIC
from scheduler import Scheduler
from mqtt_manager import MQTTManager
from smart_water_bowl_task import SmartWaterBowlTask
from smart_food_bowl_task import SmartFoodBowlTask
from check_bimotor_task import CheckBimotorTask
from smart_collar_temperature_task import SmartCollarTemperatureTask
from smart_collar_heartbeat_task import SmartCollarHeartbeatTask
from mqtt_message_checker_task import MqttMessageCheckerTask
from mqtt_message_handler_task import MqttMessageHandlerTask
import utime
print("Main Execution")
# callback for message received
def on_message_callback(topic, msg):
    """execution when a message is received, it schedules a task that reacts to that message"""
    # NOTE(review): relies on the module-level `scheduler` created at the
    # bottom of this script; a message delivered before that assignment would
    # raise NameError -- confirm connect() cannot invoke the callback early.
    scheduler.schedule_once(MqttMessageHandlerTask().get_behaviour(topic=topic, msg=msg))
# creating manager for MQTT connection and connect
mqtt_manager = MQTTManager(key=KEY_FILE, cert=CERT_FILE, port=MQTT_PORT, client_id=MQTT_CLIENT_ID,
                           server=MQTT_HOST,
                           topic=MQTT_RECEIVE_TOPIC, callback=on_message_callback)
try:
    mqtt_manager.connect()
except Exception as e:
    # Best-effort: startup continues without a broker connection.
    print('Cannot connect MQTT: ' + str(e))
# creating bowls
swb_task = SmartWaterBowlTask(valve_pin=VALVE_PIN, water_pin=WATER_SENSOR_PIN,
                              max_water_lvl_perc=DEF_MAX_WATER_LVL_PERC,
                              min_water_lvl_perc=DEF_MIN_WATER_LVL_PERC, mqtt_manager=mqtt_manager,
                              topic=MQTT_RECEIVE_TOPIC)
sfb_task = SmartFoodBowlTask(scale_pin_sck=SCALE_PIN_SCK, scale_pin_dt=SCALE_PIN_DT, bmotor_pinb=B_MOTOR_PIN,
                             bmotor_pinf=F_MOTOR_PIN,
                             motor_pin=MOTOR_PIN, light_sensor_pin=LIGHT_SENSOR_PIN, laser_pin=LASER_PIN,
                             topic=MQTT_FOOD_TOPIC,
                             mqtt_manager=mqtt_manager, min_food_lvl_perc=DEF_MIN_FOOD_LVL_PERC,
                             max_food_lvl_perc=DEF_MAX_FOOD_LVL_PERC,
                             limit_switch_close_pin=LIMIT_SWITCH_CLOSE_PIN,
                             limit_switch_open_pin=LIMIT_SWITCH_OPEN_PIN)
mqtt_msg_chk_task = MqttMessageCheckerTask(mqtt_manager=mqtt_manager)
check_bimotor_task = CheckBimotorTask(bmotor_pinb=B_MOTOR_PIN,
                                      bmotor_pinf=F_MOTOR_PIN,
                                      limit_switch_close_pin=LIMIT_SWITCH_CLOSE_PIN,
                                      limit_switch_open_pin=LIMIT_SWITCH_OPEN_PIN)
#sct_task = SmartCollarTemperatureTask(mqtt_manager=mqtt_manager, topic=MQTT_SMARTCOLLAR_TOPIC, tmp_pin=DS18x20_PIN,
#                                      using_ds18x20=True)
#sch_task = SmartCollarHeartbeatTask(mqtt_manager=mqtt_manager, topic=MQTT_SMARTCOLLAR_TOPIC, hb_pin=HEARTBEAT_PIN)
# array of tasks
print("Creating array of tasks")
# NOTE(review): sfb_task and check_bimotor_task are constructed above but not
# added to `tasks` -- confirm whether they should be scheduled here as well.
tasks = [swb_task.get_behaviour(), mqtt_msg_chk_task.get_behaviour()]  # array of coroutines
# create the scheduler and start with tasks
print("--- Starting Scheduler --- {}-{}-{} {}:{}".format(
    utime.localtime()[0], utime.localtime()[1],
    utime.localtime()[2], utime.localtime()[3], utime.localtime()[4]))
scheduler = Scheduler()
scheduler.start(tasks)
|
<gh_stars>1-10
import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import row, column
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import Slider, TextInput, CheckboxButtonGroup
from bokeh.plotting import figure
# Set up widgets
# Controls for the wave: plot title, vertical offset, amplitude, spatial
# frequency, animation speed, and which reflection curves are visible.
text = TextInput(title="title", value='Sound Interference')
offset = Slider(title="offset", value=0.0, start=-5.0, end=5.0, step=0.1)
amplitude = Slider(title="amplitude", value=1.0, start=-5.0, end=5.0, step=0.1)
freq = Slider(title="frequency", value=1.0, start=0.1, end=5.1, step=0.1)
speed = Slider(title="speed", value=0.1, start=0.0, end=2, step=0.1)
# All reflections start hidden (active=[]); update_live syncs visibility.
reflection = CheckboxButtonGroup(labels=["1st Reflection", "2nd Reflection", "3rd Reflection", "Total Interference"], active=[])
# Set up data
# Each 12-unit wave segment is sampled on its own interval but plotted over
# the shared 0-12 x window so the curves visually overlay.
N = 200
x = np.linspace(0, 12, N)
# Incident Wave
y = np.sin(x)
source = ColumnDataSource(data=dict(x=x, y=y))
# First Reflection (travels back, so the segment is flipped onto the 0-12 axis)
x1 = np.linspace(12, 24, N)
y1 = np.flip(np.sin(x1))
source1 = ColumnDataSource(data=dict(x=x, y=y1))
# Second Reflection
x2 = np.linspace(24, 36, N)
y2 = np.sin(x2)
# BUG FIX: this source previously reused y1 (first reflection) instead of y2.
source2 = ColumnDataSource(data=dict(x=x, y=y2))
# Third Reflection
x3 = np.linspace(36, 48, N)
# BUG FIX: y3 previously sampled sin over x2 (24-36) instead of x3 (36-48),
# now consistent with the update callbacks which use x3 for this segment.
y3 = np.flip(np.sin(x3))
source3 = ColumnDataSource(data=dict(x=x, y=y3))
# Total Interference: pointwise sum of the incident wave and all reflections.
y4 = y + y1 + y2 + y3
source4 = ColumnDataSource(data=dict(x=x, y=y4))
# Set up plot
# All five curves are drawn over the same 0-12 window so they overlay.
plot = figure(plot_height=600, plot_width=400, title="Sound Interference",
              tools="crosshair,pan,reset,save,wheel_zoom",
              x_range=[0, 12], y_range=[-3.5, 3.5])
# NOTE(review): `legend=` is the legacy Bokeh spelling (newer releases expect
# `legend_label=`) -- confirm against the pinned Bokeh version.
p = plot.line('x', 'y', source=source, line_width=1, line_alpha=0.6, color='blue', legend='Incident Wave')
p1 = plot.line('x', 'y', source=source1, line_width=1, line_alpha=0.6, color='red', legend='First Reflection')
p2 = plot.line('x', 'y', source=source2, line_width=1, line_alpha=0.6, color='green', legend='Second Reflection')
p3 = plot.line('x', 'y', source=source3, line_width=1, line_alpha=0.6, color='purple', legend='Third Reflection')
p4 = plot.line('x', 'y', source=source4, line_width=1, line_alpha=0.6, color='black', legend='Total Interference')
# Set up callbacks
def update_title(attrname, old, new):
    """Bokeh on_change callback: copy the text widget's value into the plot title."""
    plot.title.text = text.value
text.on_change('value', update_title)
# Global animation phase (radians); decremented each tick by update_live().
sec = 0.0
def update_data(attrname, old, new):
    """Widget on_change callback: rebuild every wave from the current
    slider values and push the new arrays into the data sources.

    Each 12-unit segment (incident wave plus three reflections) is sampled
    on its own interval, reflected segments are flipped, and all are drawn
    over the shared 0-12 window.  The Bokeh callback arguments are unused.
    """
    amp = amplitude.value
    base = offset.value
    phase = sec          # current animation phase (radians)
    wavenum = freq.value
    shared_x = np.linspace(0, 12, N)
    total = np.zeros(N)
    # (target source, segment start, segment end, reflected back?)
    segments = (
        (source, 0, 12, False),    # incident wave
        (source1, 12, 24, True),   # first reflection
        (source2, 24, 36, False),  # second reflection
        (source3, 36, 48, True),   # third reflection
    )
    for src, lo, hi, reflected in segments:
        wave = amp * np.sin(wavenum * np.linspace(lo, hi, N) + phase) + base
        if reflected:
            wave = np.flip(wave)
        src.data = dict(x=shared_x, y=wave)
        total = total + wave
    # Total interference is the pointwise sum of all four segments.
    source4.data = dict(x=shared_x, y=total)
def update_live():
    """Periodic callback (every 100 ms): redraw all waves at the current
    phase, advance the phase for the next tick, and sync each curve's
    visibility with the CheckboxButtonGroup.
    """
    global sec
    # The original inline code drew with the pre-decrement phase and then
    # advanced it; delegating to update_data (which reads the global `sec`)
    # before decrementing preserves that order while removing ~25 lines of
    # duplicated wave computation.
    update_data(None, None, None)
    sec -= speed.value
    # Visibility follows the checkbox group's active indices directly
    # (replaces four verbose if/else blocks assigning booleans).
    p1.visible = 0 in reflection.active
    p2.visible = 1 in reflection.active
    p3.visible = 2 in reflection.active
    p4.visible = 3 in reflection.active
# Wire the sliders that change the waveform to the data-update callback.
# NOTE(review): the 'speed' slider is deliberately not wired here -- it is
# read directly by update_live on each periodic tick; confirm intent.
for w in [offset, amplitude, freq]:
    w.on_change('value', update_data)
# Set up layouts and add to document
inputs = column(children=[text, offset, amplitude, freq, speed, reflection], sizing_mode='stretch_width')
curdoc().add_periodic_callback(update_live, 100)  # animate every 100 ms
curdoc().add_root(row(inputs, plot, width=800))
curdoc().title = "Sound"
|
<reponame>birkin/reporting_project
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
Django settings for reporting_project.
Environmental variables triggered in project's env_ts_rprt/bin/activate, when using runserver,
or env_ts_rprt/bin/activate_this.py, when using apache via passenger.
"""
import json, logging, os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE: every os.environ['...'] access below raises KeyError at import time
# if the variable is unset -- a deliberate fail-fast on missing configuration.
SECRET_KEY = os.environ['TS_RPRT__SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = json.loads( os.environ['TS_RPRT__DEBUG_JSON'] ) # will be True or False
ADMINS = json.loads( os.environ['TS_RPRT__ADMINS_JSON'] )
ALLOWED_HOSTS = json.loads( os.environ['TS_RPRT__ALLOWED_HOSTS'] ) # list
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.humanize',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'tech_services_reports',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = json.loads( os.environ['TS_RPRT__TEMPLATES_JSON'] ) # list of dict(s)
WSGI_APPLICATION = 'config.passenger_wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = json.loads( os.environ['TS_RPRT__DATABASES_JSON'] )
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York' # was 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True # was False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = os.environ['TS_RPRT__STATIC_URL']
STATIC_ROOT = os.environ['TS_RPRT__STATIC_ROOT'] # needed for collectstatic command
# Email
EMAIL_HOST = os.environ['TS_RPRT__EMAIL_HOST']
EMAIL_PORT = int( os.environ['TS_RPRT__EMAIL_PORT'] )
# sessions
# <https://docs.djangoproject.com/en/1.10/ref/settings/#std:setting-SESSION_SAVE_EVERY_REQUEST>
# Thinking: not that many concurrent users, and no pages where session info isn't required, so overhead is reasonable.
SESSION_SAVE_EVERY_REQUEST = True
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# logging
## disable module loggers
# existing_logger_names = logging.getLogger().manager.loggerDict.keys()
# print '- EXISTING_LOGGER_NAMES, `%s`' % existing_logger_names
# Quiet the chatty requests library; only warnings and above pass through.
logging.getLogger('requests').setLevel( logging.WARNING )
"""
logging reminder:
- loggers['processing'] will evaluate any log-message with a level higher than the specified log-level
- a log entry will only be _written_ to a file if the message's level meets a level set in a handler
"""
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'standard': {
            'format': "[%(asctime)s] %(levelname)s [%(module)s-%(funcName)s()::%(lineno)d] %(message)s",
            'datefmt': "%d/%b/%Y %H:%M:%S"
        },
    },
    'handlers': {
        'webapp_handler': {
            'level': os.environ['TS_RPRT__WEBAPP_LOG_LEVEL'],
            'class':'logging.FileHandler',
            'filename': os.environ['TS_RPRT__WEBAPP_LOG_PATH'],
            'formatter': 'standard'
        },
        'processing_detail':{
            'level': os.environ['TS_RPRT__PROCESSING_LOG_LEVEL'],
            'class': 'logging.FileHandler',
            'filename': os.environ['TS_RPRT__PROCESSING_LOG_PATH'],
            'formatter': 'standard'
        },
        'processing_serious':{
            # serious problems additionally go to a WARNING-and-up error log
            'level': 'WARNING',
            'class': 'logging.FileHandler',
            'filename': os.environ['TS_RPRT__PROCESSING_ERROR_LOG_PATH'],
            'formatter': 'standard'
        },
        'console':{
            'level':'WARNING',
            'class':'logging.StreamHandler',
            'formatter': 'standard'
        },
    },
    'loggers': {
        'webapp': {
            'handlers': ['webapp_handler'],
            'level': os.environ['TS_RPRT__WEBAPP_LOG_LEVEL'],
            'propagate': False
        },
        'processing': {
            'handlers': ['console', 'processing_detail', 'processing_serious'],
            'level': os.environ['TS_RPRT__PROCESSING_LOG_LEVEL'],
            'propagate': False
        },
    }
}
# docs <https://docs.djangoproject.com/en/1.10/topics/cache/>
CACHES = json.loads( os.environ['TS_RPRT__CACHES_JSON'] )
CSRF_TRUSTED_ORIGINS = json.loads( os.environ['TS_RPRT__CSRF_TRUSTED_ORIGINS_JSON'] )
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from deepke.model import BasicModule, Embedding
class CNN(BasicModule):
    """(P)CNN sentence encoder for relation classification.

    Embeds tokens (word + two position embeddings), applies a bank of 1-D
    convolutions with odd kernel sizes, pools — piecewise max pooling when
    ``use_pcnn`` is set, plain max pooling otherwise — and classifies with
    a two-layer MLP.
    """

    def __init__(self, vocab_size, config):
        """
        Args:
            vocab_size: size of the word vocabulary.
            config: configuration namespace providing ``model.*`` and
                ``cnn.*`` hyper-parameters plus ``relation_type`` (number
                of output classes).
        """
        super(CNN, self).__init__()
        self.model_name = 'CNN'
        self.vocab_size = vocab_size
        self.word_dim = config.model.word_dim
        self.pos_size = config.model.pos_size
        self.pos_dim = config.model.pos_dim
        self.hidden_dim = config.model.hidden_dim
        self.dropout = config.model.dropout
        self.use_pcnn = config.cnn.use_pcnn
        self.out_channels = config.cnn.out_channels
        self.kernel_size = config.cnn.kernel_size
        self.out_dim = config.relation_type

        if isinstance(self.kernel_size, int):
            self.kernel_size = [self.kernel_size]
        for k in self.kernel_size:
            # Odd kernels keep the sequence length unchanged with padding k // 2.
            assert k % 2 == 1, "kernel size has to be odd numbers."

        self.embedding = Embedding(self.vocab_size, self.word_dim, self.pos_size, self.pos_dim)
        # PCNN embedding: maps each token's piece id (0-3; presumably pad
        # plus three entity-delimited segments -- confirm upstream) to a
        # frozen one-hot-like vector scaled by 100, used to separate the
        # segments during piecewise max pooling in forward().
        self.mask_embed = nn.Embedding(4, 3)
        masks = torch.tensor([[0, 0, 0], [100, 0, 0], [0, 100, 0], [0, 0, 100]])
        self.mask_embed.weight.data.copy_(masks)
        self.mask_embed.weight.requires_grad = False

        self.input_dim = self.word_dim + self.pos_dim * 2
        self.convs = nn.ModuleList([
            nn.Conv1d(in_channels=self.input_dim,
                      out_channels=self.out_channels,
                      kernel_size=k,
                      padding=k // 2,
                      # BUG FIX: was bias=None; nn.Conv1d's `bias` is a bool
                      # and None only worked because it is falsy.
                      bias=False) for k in self.kernel_size
        ])
        self.conv_dim = len(self.kernel_size) * self.out_channels
        if self.use_pcnn:
            # Piecewise pooling concatenates one pooled vector per segment.
            self.conv_dim *= 3
        self.fc1 = nn.Linear(self.conv_dim, self.hidden_dim)
        self.fc2 = nn.Linear(self.hidden_dim, self.out_dim)
        self.dropout = nn.Dropout(self.dropout)

    def forward(self, input):
        """Encode a batch and return per-class logits.

        Args:
            input: sequence whose last element is the piece-id mask [B, L];
                the preceding elements are fed to the Embedding module.

        Returns:
            Tensor [B, relation_type] of class scores.
        """
        *x, mask = input
        x = self.embedding(x)
        mask_embed = self.mask_embed(mask)
        # [B,L,C] -> [B,C,L]
        x = torch.transpose(x, 1, 2)
        # CNN: one feature map per kernel size, concatenated on channels.
        x = [F.leaky_relu(conv(x)) for conv in self.convs]
        x = torch.cat(x, dim=1)
        # Mask padding positions to -inf so pooling never selects them.
        mask = mask.unsqueeze(1)  # B x 1 x L
        x = x.masked_fill_(mask.eq(0), float('-inf'))
        if self.use_pcnn:
            # Triple (piecewise) max pooling: add the +100 segment offsets so
            # one max over L yields a value per segment, then subtract 100.
            x = x.unsqueeze(-1).permute(0, 2, 1, 3)  # [B, L, C, 1]
            mask_embed = mask_embed.unsqueeze(-2)  # [B, L, 1, 3]
            x = x + mask_embed  # [B, L, C, 3]
            x = torch.max(x, dim=1)[0] - 100  # [B, C, 3]
            x = x.view(x.size(0), -1)  # [B, 3*C]
        else:
            # Plain max pooling over the sequence dimension.
            x = F.max_pool1d(x, x.size(-1)).squeeze(-1)  # [B, C]
        x = self.dropout(x)
        # Two-layer classifier head.
        x = F.leaky_relu(self.fc1(x))
        x = F.leaky_relu(self.fc2(x))
        return x
# No script behavior: this module is intended to be imported.
if __name__ == '__main__':
    pass
|
<filename>rbac/ledger_sync/subscriber.py
# Copyright 2018 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import logging
from sawtooth_sdk.messaging.stream import Stream
from sawtooth_sdk.protobuf import client_event_pb2
from sawtooth_sdk.protobuf import events_pb2
from sawtooth_sdk.protobuf import transaction_receipt_pb2
from sawtooth_sdk.protobuf.validator_pb2 import Message
from rbac.common import addresser
LOGGER = logging.getLogger(__name__)
class Subscriber(object):
    """Creates an object that can subscribe to state delta events using the
    Sawtooth SDK's Stream class. Handler functions can be added prior to
    subscribing, and each will be called on each delta event received.
    """

    def __init__(self, validator_url):
        """Open a Stream connection to the validator at ``validator_url``."""
        LOGGER.info("Connecting to validator: %s", validator_url)
        self._stream = Stream(validator_url)
        self._delta_handlers = []
        self._is_active = False

    def add_handler(self, event_handler):
        """Adds a handler which will be passed state delta events when they
        occur. Note that this event is mutable.
        """
        self._delta_handlers.append(event_handler)

    def clear_handlers(self):
        """Clears any delta handlers.
        """
        self._delta_handlers = []

    def start(self, known_ids=None):
        """Subscribes to state delta events, and then waits to receive deltas.
        Sends any events received to delta handlers.

        Blocks until stop() clears ``_is_active``; if the validator no longer
        knows any of ``known_ids``, restarts the subscription from genesis.

        Raises:
            RuntimeError: if the subscription is rejected by the validator.
        """
        self._stream.wait_for_ready()
        LOGGER.debug("Subscribing to client state events")
        request = client_event_pb2.ClientEventsSubscribeRequest(
            last_known_block_ids=known_ids,
            subscriptions=[
                events_pb2.EventSubscription(event_type="sawtooth/block-commit"),
                events_pb2.EventSubscription(
                    event_type="sawtooth/state-delta",
                    filters=[
                        events_pb2.EventFilter(
                            key="address",
                            # Only deltas under this transaction family's namespace.
                            match_string="^" + addresser.family.namespace + ".*",
                            filter_type=events_pb2.EventFilter.REGEX_ANY,
                        )
                    ],
                ),
            ],
        )
        response_future = self._stream.send(
            Message.CLIENT_EVENTS_SUBSCRIBE_REQUEST, request.SerializeToString()
        )
        response = client_event_pb2.ClientEventsSubscribeResponse()
        response.ParseFromString(response_future.result().content)
        # Forked all the way back to genesis, restart with no known_ids
        if (
            known_ids
            and response.status
            == client_event_pb2.ClientEventsSubscribeResponse.UNKNOWN_BLOCK
        ):
            return self.start()
        if response.status != client_event_pb2.ClientEventsSubscribeResponse.OK:
            raise RuntimeError(
                "Subscription failed with status: {}".format(
                    client_event_pb2.ClientEventsSubscribeResponse.Status.Name(
                        response.status
                    )
                )
            )
        self._is_active = True
        LOGGER.debug("Successfully subscribed to state delta events")
        # Receive loop: dispatch each non-empty delta event to every handler.
        while self._is_active:
            message_future = self._stream.receive()
            msg = message_future.result()
            if msg.message_type == Message.CLIENT_EVENTS:
                event_list = events_pb2.EventList()
                event_list.ParseFromString(msg.content)
                events = list(event_list.events)
                event = StateDeltaEvent(events)
                delta_count = len(event.state_changes)
                if delta_count > 0:
                    for handler in self._delta_handlers:
                        handler(event)

    def stop(self):
        """Stops the Subscriber, unsubscribing from state delta events and
        closing the stream's connection.
        """
        self._is_active = False
        LOGGER.debug("Unsubscribing from client events")
        # BUG FIX: previously constructed a ClientEventsUnsubscribeResponse
        # here; the validator expects a ClientEventsUnsubscribeRequest payload.
        request = client_event_pb2.ClientEventsUnsubscribeRequest()
        response_future = self._stream.send(
            Message.CLIENT_EVENTS_UNSUBSCRIBE_REQUEST, request.SerializeToString()
        )
        response = client_event_pb2.ClientEventsUnsubscribeResponse()
        response.ParseFromString(response_future.result().content)
        if response.status != client_event_pb2.ClientEventsUnsubscribeResponse.OK:
            LOGGER.warning(
                "Failed to unsubscribe with status: %s",
                client_event_pb2.ClientEventsUnsubscribeResponse.Status.Name(
                    response.status
                ),
            )
        self._stream.close()
class StateDeltaEvent:
    """Adapter that reshapes a raw event list into the legacy state-delta
    event shape: block metadata attributes plus a list of state changes."""

    def __init__(self, event_list):
        """
        Convert an event list into an object that is similar to the previous
        state delta event for compatibility.
        Raises
            KeyError
            An event was missing from the event list or an attribute was
            missing from an event.
        """
        commit = self._get_event("sawtooth/block-commit", event_list)
        # Block metadata comes straight off the block-commit event.
        self.block_id = self._get_attr(commit, "block_id")
        self.block_num = self._get_attr(commit, "block_num")
        self.previous_block_id = self._get_attr(commit, "previous_block_id")
        self.state_root_hash = self._get_attr(commit, "state_root_hash")
        # State changes are optional: a block with no matching deltas simply
        # has no state-delta event in the list.
        try:
            delta = self._get_event("sawtooth/state-delta", event_list)
            changes = transaction_receipt_pb2.StateChangeList()
            changes.ParseFromString(delta.data)
            self.state_changes = changes.state_changes
        except KeyError:
            self.state_changes = []

    @staticmethod
    def _get_attr(event, key):
        """Return the value of the first attribute named ``key``; KeyError if absent."""
        for attr in event.attributes:
            if attr.key == key:
                return attr.value
        raise KeyError("Key '%s' not found in event attributes" % key)

    @staticmethod
    def _get_event(event_type, event_list):
        """Return the first event of ``event_type``; KeyError if none exists."""
        for event in event_list:
            if event.event_type == event_type:
                return event
        raise KeyError("Event type '%s' not found" % event_type)
|
<reponame>hubbs5/public_drl_sc
# <NAME>
# 29.01.2018
# Update 16.05.2018
# Adjust the inventory portion of the state representation to be the inventory
# divided by the order required within the planning horizon.
# This function takes a network as an argument to generate schedules.
import numpy as np
from copy import copy
from ada.agents.rl_algos.rl_utils import torchToNumpy
from ada.environments.env_utils import get_current_state
def build_network_schedule(env, network, schedule=None):
    """Generate a schedule with the policy network, retrying from scratch.

    If extending the existing schedule raises ValueError (e.g. nan action
    probabilities), the schedule is rebuilt from None instead.
    Returns whatever network_scheduler returns.
    """
    try:
        return network_scheduler(env, network, schedule)
    except ValueError:
        return network_scheduler(env, network, None)
# State prediction to determine what the state will look like at a future date
# based solely on information available in the order book and the current
# schedule. This could be improved in the future to include ML/statistical
# predictions rather than simply a summation of orders in the books.
def predict_state(env, schedule=None):
    """Predict the state representation at the end of the current schedule.

    Adds scheduled-but-unbooked production to a copy of the inventory, sums
    open orders due by the last scheduled hour, and returns per-product
    inventory/demand ratios prefixed by a one-hot of the product currently
    in production.

    * Args:
        env: simulation environment (inventory, order_book, index mappings)
        schedule: numpy schedule array, or None

    * Returns:
        1-D numpy array: [one-hot current product (n_products + 1),
        inventory/demand ratios (n_products)]
    """
    # Get copy of state information
    inv_prediction = env.inventory.copy()
    # Get the last scheduled day
    last_scheduled_hour = env.sim_time
    if schedule is not None:
        last_scheduled_hour = schedule[-1,
            env.sched_indices['prod_start_time']]
    # Extract unbooked production up to the schedule limit
    # NOTE(review): if schedule is None, the indexing below raises TypeError
    # despite the None default -- confirm callers always pass a schedule.
    pred_production = schedule[np.where(
        (schedule[:,
            env.sched_indices['cure_end_time']]<=last_scheduled_hour) &
        (schedule[:,
            env.sched_indices['booked_inventory']]==0))]
    # Sum scheduled, unbooked production
    un_prod, un_prod_id = np.unique(pred_production[:,
        env.sched_indices['gmid']], return_inverse=True)
    prod_pred_qty = np.bincount(un_prod_id,
        pred_production[:,
            env.sched_indices['prod_qty']])
    # Sum off-grade production
    pred_og_prod = pred_production[:,
        env.sched_indices['off_grade_production']].sum()
    # Add scheduled production
    inv_prediction[un_prod.astype('int')] += prod_pred_qty
    # Add off-grade
    inv_prediction[0] += pred_og_prod
    # Aggregate orders on the books
    # Filtering by sim_time ensures all orders are already entered
    # Filtering by last_scheduled_hour gives an inventory prediction
    # for that specific day.
    pred_orders = env.order_book[np.where(
        (env.order_book[:,
            env.ob_indices['doc_create_date']]<=env.sim_time) &
        (env.order_book[:,
            env.ob_indices['planned_gi_date']]<=last_scheduled_hour) &
        (env.order_book[:,
            env.ob_indices['shipped']]==0))]
    un_order, un_order_id = np.unique(pred_orders[:,
        env.ob_indices['material_code']], return_inverse=True)
    order_pred_qty = np.bincount(un_order_id,
        pred_orders[:, env.ob_indices['order_qty']])
    # Calculate state prediction
    # Note: this formulation ignores off-grade as the state
    # NOTE(review): `i` is a material code used directly as an inventory
    # index while `k` indexes the unique-order arrays -- assumes material
    # codes are small consecutive integers; confirm against env layout.
    state_prediction = np.array([inv_prediction[i] / order_pred_qty[k] for
        k, i in enumerate(un_order)])
    state_pred = np.zeros(env.n_products)
    # Subtract 1 from the index to ignore off-grade levels
    state_pred[un_order-1] += state_prediction
    # Include product to be produced in state prediction as one-hot vector
    one_hot = np.zeros(env.n_products + 1)
    if schedule is None:
        current_prod = 0
        # NOTE: Should current_prod be set on prod_end_time or prod_start_time?
    else:
        current_prod = schedule[
            schedule[:,
                env.sched_indices['prod_end_time']]==last_scheduled_hour,
            env.sched_indices['gmid']].astype('int')
        # Check to see if there is nothing scheduled i.e. in case of
        # shut-down or start-up
        if current_prod.size == 0:
            current_prod = 0
    one_hot[current_prod] = 1
    state = np.hstack([one_hot, state_pred])
    return state
# Generate schedule from policy network
def network_scheduler(env, network, schedule, confidence_level=None, test=False):
    '''
    Extend the schedule day-by-day with actions from the policy network
    until the fixed planning horizon is reached.

    Inputs
    =========================================================================
    env: productionFacility object
    network: policy network object
    schedule: numpy array containing schedule
    confidence_level: float or None. If the max probability that comes
        from the policy network is below the confidence_level, then the
        schedule defaults to the heuristic.
    test: bool. When True, pick the argmax action instead of sampling.

    Returns
    =========================================================================
    schedule: the (possibly extended) schedule array
    planning_data: array of [day, heuristic flag, action probabilities]
        per decision, or None if no decisions were made
    '''
    a_probs = []
    heuristic_selection = []
    predicted_state = []
    # TODO: TEMPORARY BUG FIX!
    # Sometimes schedule is an empty array and not None.
    # Check here to see if it is an empty array, if so, set schedule = None
    # In the future, see what is causing this problem upstream
    # Note that this fails with shut_downs. Gives rise to dimensional
    # mis-match during update (i.e. one less action than reward)
    try:
        if schedule.shape[0] < 1:
            schedule = None
    except AttributeError:
        pass
    # Get last scheduled day
    if schedule is None:
        planning_day = env.sim_time
    else:
        planning_day = np.max(schedule[:,
            env.sched_indices['prod_end_time']])
    planning_limit = env.sim_time + env.fixed_planning_horizon
    while planning_day < planning_limit:
        state = get_current_state(env, schedule=schedule,
            day=planning_day)
        action_probs = torchToNumpy(network.predict(state))
        if action_probs.ndim > 1:
            action_probs = np.squeeze(action_probs)
        # nan should not appear in action probabilities
        if any(np.isnan(action_probs)):
            raise ValueError("nan found in action probability output. {}".format(
                action_probs))
        # Run heuristic
        # NOTE(review): heuristic_scheduler is not defined or imported in
        # this module -- NameError if confidence_level is ever set; confirm.
        if confidence_level is not None and action_probs.max() < confidence_level:
            action = heuristic_scheduler(env)
            heuristic_selection.append([planning_day, 1])
        elif test == True:
            action = env.action_list[np.argmax(action_probs)]
            heuristic_selection.append([planning_day, 0])
        else:
            action = np.random.choice(env.action_list, p=action_probs)
            heuristic_selection.append([planning_day, 0])
        a_probs.append(action_probs)
        predicted_state.append(state)
        schedule = env.append_schedule(schedule, action)
        # TODO: this loop leaves actions and predicted_state with one less
        # entry than it ought to in the presence of a production outtage over
        # the course of an episode. This causes problems with data logging.
        if planning_day < env.n_steps: # Log actions inside simulation horizon
            if not planning_day in env.containers.planning_day:
                env.containers.planning_day.append(planning_day)
                env.containers.actions.append(int(action))
                env.containers.predicted_state.append(state)
            else:
                # Overwrite a previously-logged decision for the same day.
                idc = env.containers.planning_day.index(planning_day)
                env.containers.actions[idc] = int(action)
                env.containers.predicted_state[idc] = state
        sched_end = np.max(schedule[:,env.sched_indices['prod_end_time']])
        planning_day = sched_end
    # Reshape probs and heuristics
    if len(heuristic_selection) == 0 or len(a_probs) == 0:
        # Occurs if schedule is unchanged
        planning_data = None
    else:
        heuristic_selection = np.vstack(heuristic_selection)
        a_probs = np.vstack(a_probs)
        planning_data = np.hstack([heuristic_selection, a_probs])
    return schedule, planning_data
# Get schedule value estimation
def estimate_schedule_value(env, network, schedule):
    """Return the value network's scalar estimate for today's state."""
    today = int(copy(env.sim_time))
    current_state = get_current_state(env, schedule=schedule, day=today)
    return network.predict(current_state).item()
# def estimate_schedule_value(env, network, schedule,
# value_estimate_array):
# current_day = int(copy(env.sim_time))
# planning_limit = env.sim_time + env.fixed_planning_horizon
# while current_day < planning_limit and current_day < env.n_steps:
# # Get the value estimate for each day from the current_day to the
# # planning horizon. Use predictions for future days as that is the
# # information the agent is making its decisions on.
# state = get_current_state(env, schedule=schedule,
# day=current_day)
# value_estimate_array[current_day] = network.predict(state).item()
# current_day += 1
# return value_estimate_array
# Generate schedule from policy network
def q_scheduler(env, network, schedule, epsilon):
    '''
    Epsilon-greedy schedule builder driven by a Q-network.

    Inputs
    =========================================================================
    env: productionFacility object
    network: Q-network object; predict(state) returns state-action values
    schedule: numpy array containing schedule
    epsilon: float exploration rate; with probability epsilon a random
        action is chosen, otherwise the greedy (argmax-Q) action.

    Returns
    =========================================================================
    schedule: the (possibly extended) schedule array
    planning_data: array of [day, random flag, q-values, predicted state]
        per decision, or None if no decisions were made
    predicted_state_: stacked predicted states, or None
    '''
    q_vals = []
    random_selection = []
    predicted_state = []
    # Sometimes schedule is an empty array rather than None (see
    # network_scheduler's TODO); normalize it here.
    try:
        if schedule.shape[0] < 1:
            schedule = None
    except AttributeError:
        pass
    # Get last scheduled day
    if schedule is None:
        planning_day = env.sim_time
    else:
        planning_day = np.max(schedule[:,
            env.sched_indices['prod_end_time']])
    planning_limit = env.sim_time + env.planning_horizon
    while planning_day < planning_limit:
        state = env.get_current_state(schedule=schedule,
            day=planning_day)
        qvals = network.predict(state)
        # nan should not appear in the state-action values.
        # BUG FIX: this check previously referenced an undefined name
        # (action_probs) and only ran after an action had been chosen.
        if any(np.isnan(qvals)):
            raise ValueError("nan found in state-action output.")
        if np.random.random() < epsilon:
            # Explore: uniform random action.
            action = np.random.choice(env.action_list)
            random_selection.append([planning_day, 1])
        else:
            # Exploit: greedy action.
            # BUG FIX: np.argmax takes axis=, not dim= (a torch-ism).
            action = np.argmax(qvals, axis=-1)
            random_selection.append([planning_day, 0])
        q_vals.append(qvals)
        predicted_state.append(state)
        schedule = env.append_schedule(schedule, action)
        # Log actions if within simulation horizon to avoid dimension
        # mismatch when updating network
        if planning_day < env.n_steps:
            if planning_day >= len(env.containers.actions):
                env.containers.actions.append(int(action))
                env.containers.predicted_state.append(state)
            else:
                env.containers.actions[int(planning_day)] = int(action)
                env.containers.predicted_state[int(planning_day)] = state
        sched_end = np.max(schedule[:,env.sched_indices['prod_end_time']])
        planning_day = sched_end
    # Reshape q-values and exploration flags
    if len(random_selection) == 0 or len(q_vals) == 0:
        # Occurs if schedule is unchanged
        planning_data = None
        # BUG FIX: predicted_state_ was previously undefined on this path,
        # raising NameError at the return statement below.
        predicted_state_ = None
    else:
        random_selection = np.vstack(random_selection)
        q_vals = np.vstack(q_vals)
        predicted_state_ = np.vstack(predicted_state)
        # BUG FIX: hstack previously used the raw Python list predicted_state
        # instead of the stacked array predicted_state_.
        planning_data = np.hstack([random_selection, q_vals,
            predicted_state_])
    return schedule, planning_data, predicted_state_
|
<reponame>JRC1995/Continuous-RvNN
class optimizer_config:
    """Shared training/optimization hyper-parameters."""

    def __init__(self):
        # Attributes are set in one grouped update; values are unchanged.
        self.__dict__.update(
            # optimization
            optimizer="ranger",
            lr=1e-3,
            weight_decay=1e-2,
            max_grad_norm=1,
            # batching
            batch_size=128,
            train_batch_size=128,
            dev_batch_size=128,
            bucket_size_factor=10,
            DataParallel=True,
            # schedule / stopping
            epochs=100,
            early_stop_patience=10,
            scheduler_patience=2,
            # model selection: keep the checkpoint with the best accuracy
            save_by="accuracy",
            metric_direction=1,
        )
class base_config(optimizer_config):
    """Model hyper-parameters layered on top of optimizer_config."""

    def __init__(self):
        super().__init__()
        self.__dict__.update(
            # word embedding
            word_embd_freeze=False,
            left_padded=False,
            # sizes
            embd_dim=128,
            hidden_size=128,
            cell_hidden_size=4 * 128,
            small_d=64,
            window_size=5,
            # encoder behaviour flags
            global_state_return=True,
            recurrent_momentum=True,
            no_modulation=False,
            # dropout
            in_dropout=0.3,
            hidden_dropout=0.1,
            out_dropout=0.2,
            # auxiliary-loss weights and stopping
            penalty_gamma=1.0,
            speed_gamma=0.0,
            entropy_gamma=0.01,
            stop_threshold=0.01,
            # misc
            hidden_activation="gelu",
            classifier_layer_num=1,
            early_stopping=True,
            encoder="FOCN",
        )
class FOCN_config(base_config):
    """FOCN encoder with default settings."""

    def __init__(self):
        super().__init__()
        self.__dict__.update(encoder="FOCN", model_name="(FOCN)")
class FOCN_LSTM_config(base_config):
    """FOCN+LSTM encoder; restates the default dropout triple explicitly."""

    def __init__(self):
        super().__init__()
        self.__dict__.update(
            in_dropout=0.3,
            hidden_dropout=0.1,
            out_dropout=0.2,
            encoder="FOCN_LSTM",
            model_name="(FOCN_LSTM)",
        )
class LR_FOCN_config(base_config):
    """Left-to-right (unidirectional) FOCN variant."""

    def __init__(self):
        super().__init__()
        self.__dict__.update(encoder="LR_FOCN", model_name="(Left to Right FOCN)")
class FOCN_no_recurrency_bias_config(base_config):
    """Ablation: FOCN without the recurrent-momentum bias."""

    def __init__(self):
        super().__init__()
        self.__dict__.update(
            encoder="FOCN",
            recurrent_momentum=False,
            model_name="(FOCN NO RECURRENCY BIAS)",
        )
class FOCN_no_modulation_config(base_config):
    """Ablation: FOCN with modulation disabled."""

    def __init__(self):
        super().__init__()
        self.__dict__.update(
            encoder="FOCN",
            no_modulation=True,
            model_name="(FOCN NO MODULATION)",
        )
class FOCN_no_gelu_config(base_config):
    """Ablation: FOCN with ReLU hidden activation instead of GELU."""

    def __init__(self):
        super().__init__()
        self.__dict__.update(
            encoder="FOCN",
            hidden_activation="relu",
            model_name="(FOCN NO GELU)",
        )
class FOCN_no_entropy_config(base_config):
    """Ablation: FOCN with the entropy regularizer switched off."""

    def __init__(self):
        super().__init__()
        self.__dict__.update(
            encoder="FOCN",
            entropy_gamma=0.0,
            model_name="(FOCN NO ENTROPY)",
        )
class ordered_memory_config(base_config):
    """Ordered Memory baseline configuration.

    Overrides the FOCN defaults with the ordered-memory encoder's own
    dropout scheme, memory geometry, and Adam-style optimization settings.
    """

    def __init__(self):
        super().__init__()
        self.dropout = 0.1
        self.output_last = True
        # Fixed: `left_padded = True` was previously assigned twice;
        # a single assignment is kept (behavior unchanged).
        self.left_padded = True
        self.memory_dropout = 0.3
        self.memory_slots = 21
        self.hidden_size = 128
        self.bidirection = False
        self.encoder = "ordered_memory"
        self.model_name = "(ordered_memory)"
        self.optimizer = "adam_"
        self.weight_decay = 1.2e-6
|
import traceback
import wx, wx.xrc
from WikiExceptions import *
# from wxHelper import *
from . import MiscEvent
from .Utilities import DUMBTHREADSTOP
from .wxHelper import GUI_ID, XrcControls, autosizeColumn, wxKeyFunctionSink
from .WikiPyparsing import buildSyntaxNode
try:
from EnchantDriver import Dict
import EnchantDriver
except (AttributeError, ImportError, WindowsError):
import ExceptionLogger
ExceptionLogger.logOptionalComponentException(
"Initialize enchant driver (spell checking)")
Dict = None
# traceback.print_exc()
# WindowsError may happen if an incomplete enchant installation is found
# in the system
from DocPages import AliasWikiPage, WikiPage
from StringOps import uniToGui, guiToUni
class SpellCheckerDialog(wx.Dialog):
    """
    Interactive spell-check dialog for the currently loaded wiki page.

    The dialog is loaded from the "SpellCheckDialog" XRC resource and walks
    the active editor's text word by word via a SpellCheckerSession,
    offering ignore/replace/add-to-dictionary actions for each misspelling.
    Legacy wxPython 2.x API (PreDialog/PostCreate two-step creation,
    wx.EVT_* binder functions); Python 2 era code (u"" literals, has_key).
    """

    def __init__(self, parent, ID, mainControl, title=None,
            pos=wx.DefaultPosition, size=wx.DefaultSize,
            style=wx.NO_3D):
        # Two-step creation required for XRC-loaded dialogs in wxPython 2.x.
        d = wx.PreDialog()
        self.PostCreate(d)
        self.mainControl = mainControl
        res = wx.xrc.XmlResource.Get()
        res.LoadOnDialog(self, parent, "SpellCheckDialog")
        if title is not None:
            self.SetTitle(title)

        # Create styled explanation; rich text controls are created in code
        # and attached to placeholders in the XRC layout.
        tfToCheck = wx.TextCtrl(self, GUI_ID.tfToCheck,
                style=wx.TE_MULTILINE|wx.TE_RICH)
        res.AttachUnknownControl("tfToCheck", tfToCheck, self)
        tfReplaceWith = wx.TextCtrl(self, GUI_ID.tfReplaceWith, style=wx.TE_RICH)
        res.AttachUnknownControl("tfReplaceWith", tfReplaceWith, self)

        self.ctrls = XrcControls(self)
        self.ctrls.btnCancel.SetId(wx.ID_CANCEL)
        self.ctrls.lbReplaceSuggestions.InsertColumn(0, "Suggestion")

        # Session holds dictionaries, ignore lists and auto-replace words.
        self.session = SpellCheckerSession(self.mainControl.getWikiDocument())

        # Currently flagged word and its character range in the editor text.
        self.currentCheckedWord = None
        self.currentStart = -1
        self.currentEnd = -1

        self.session.setCurrentDocPage(
                self.mainControl.getActiveEditor().getLoadedDocPage())

        # Fixes focus bug under Linux
        self.SetFocus()

        wx.EVT_BUTTON(self, GUI_ID.btnIgnore, self.OnIgnore)
        wx.EVT_BUTTON(self, GUI_ID.btnIgnoreAll, self.OnIgnoreAll)
        wx.EVT_BUTTON(self, GUI_ID.btnReplace, self.OnReplace)
        wx.EVT_BUTTON(self, GUI_ID.btnReplaceAll, self.OnReplaceAll)
        wx.EVT_BUTTON(self, GUI_ID.btnAddWordGlobal, self.OnAddWordGlobal)
        wx.EVT_BUTTON(self, GUI_ID.btnAddWordLocal, self.OnAddWordLocal)
        wx.EVT_BUTTON(self, wx.ID_CANCEL, self.OnClose)
        wx.EVT_CLOSE(self, self.OnClose)

        # EVT_LISTBOX(self, GUI_ID.lbReplaceSuggestions,
        #         self.OnLbReplaceSuggestions)
        wx.EVT_LIST_ITEM_SELECTED(self, GUI_ID.lbReplaceSuggestions,
                self.OnLbReplaceSuggestions)
        wx.EVT_CHAR(self.ctrls.tfReplaceWith, self.OnCharReplaceWith)
        wx.EVT_CHAR(self.ctrls.lbReplaceSuggestions, self.OnCharReplaceSuggestions)

    def _showInfo(self, msg):
        """
        Set dialog controls to show an info/error message
        """
        self.ctrls.tfToCheck.SetValue("")
        # Show message in blue
        self.ctrls.tfToCheck.SetDefaultStyle(wx.TextAttr(wx.BLUE))
        self.ctrls.tfToCheck.AppendText(uniToGui(msg))
        self.ctrls.tfToCheck.SetDefaultStyle(wx.TextAttr(wx.BLACK))
        # To scroll text to beginning
        self.ctrls.tfToCheck.SetInsertionPoint(0)

        self.ctrls.tfReplaceWith.SetValue(u"")
        self.ctrls.lbReplaceSuggestions.DeleteAllItems()
        self.ctrls.tfReplaceWith.SetFocus()

    def checkNext(self, startPos=0):
        """
        Find the next misspelled word from character position startPos on,
        possibly continuing onto subsequent wiki pages if the
        "go to next page" checkbox is set. Updates the dialog controls to
        show the word in context plus replacement suggestions.

        Returns True if a misspelling was found and displayed, False when
        checking cannot proceed (no wiki, no dictionary, nothing found).
        """
        activeEditor = self.mainControl.getActiveEditor()
        startWikiWord = self.mainControl.getCurrentWikiWord()

        if startWikiWord is None:
            # No wiki loaded or no wiki word in editor
            self._showInfo(
                    _(u"No wiki open or current page is a functional page"))
            return False

        startWikiWord = self.mainControl.getWikiDocument()\
                .getWikiPageNameForLinkTermOrAsIs(startWikiWord)

        firstCheckedWikiWord = startWikiWord

        if not self.mainControl.getWikiDocument().isDefinedWikiPageName(
                firstCheckedWikiWord):
            # This can happen if startWikiWord is a newly created, not yet
            # saved page
            if not self.ctrls.cbGoToNextPage.GetValue():
                self._showInfo(_(u"Current page is not modified yet"))
                return False

            firstCheckedWikiWord = self.session.findAndLoadNextWikiPage(None,
                    firstCheckedWikiWord)

            if firstCheckedWikiWord is None:
                self._showInfo(_(u"No (more) misspelled words found"))
                return False
        else:
            self.session.setCurrentDocPage(
                    self.mainControl.getWikiDocument().getWikiPage(
                    firstCheckedWikiWord))

        if not self.session.hasEnchantDict():
            # Only show the message when we haven't moved off the start page;
            # in any case checking cannot continue without a dictionary.
            if firstCheckedWikiWord == startWikiWord:
                self._showInfo(_(u"No dictionary found for this page"))
            return False  # No dictionary

        checkedWikiWord = firstCheckedWikiWord

        langHelper = wx.GetApp().createWikiLanguageHelper(
                self.session.getCurrentDocPage().getWikiLanguageName())

        text = activeEditor.GetText()

        self.ctrls.tfToCheck.SetValue("")

        # Scan forward until a word is found that is neither known, ignored
        # nor auto-replaced; may roll over to following pages.
        while True:
            start, end, spWord = langHelper.findNextWordForSpellcheck(text,
                    startPos, self.session.getCurrentDocPage())

            if start is None:
                # End of page reached
                if self.ctrls.cbGoToNextPage.GetValue():
                    checkedWikiWord = self.session.findAndLoadNextWikiPage(
                            firstCheckedWikiWord, checkedWikiWord)
                    if checkedWikiWord is None:
                        self._showInfo(_(u"No (more) misspelled words found"))
                        return False

                    text = self.mainControl.getWikiDocument()\
                            .getWikiPage(checkedWikiWord).getLiveText()
                    startPos = 0
                    continue
                else:
                    self._showInfo(_(u"No (more) misspelled words found"))
                    return False

            if self.session.checkWord(spWord):
                # Ignore if word is in the ignore lists or is seen as correct
                # by the spell checker
                startPos = end
                continue

            if self.session.getAutoReplaceWords().has_key(spWord):
                # Word has a "replace all" mapping -> apply silently and go on.
                activeEditor.showSelectionByCharPos(start, end)
                activeEditor.ReplaceSelection(
                        self.session.getAutoReplaceWords()[spWord])
                startPos = activeEditor.GetSelectionCharPos()[1]
                continue
            break

        if startWikiWord != checkedWikiWord:
            # The search went on to another word, so load it into editor
            self.mainControl.openWikiPage(checkedWikiWord)

        self.currentCheckedWord = spWord
        self.currentStart = start
        self.currentEnd = end

        activeEditor.showSelectionByCharPos(start, end)

        # Assemble up to 30 chars before / 60 after the word, clipped to the
        # current line, as display context.
        conStart = max(0, start - 30)

        contextPre = text[conStart:start]
        contextPost = text[end:end+60]

        contextPre = contextPre.split(u"\n")[-1]
        contextPost = contextPost.split(u"\n", 1)[0]

        # Show misspelled word in context
        self.ctrls.tfToCheck.SetDefaultStyle(wx.TextAttr(wx.BLACK))
        self.ctrls.tfToCheck.AppendText(contextPre)
        self.ctrls.tfToCheck.SetDefaultStyle(wx.TextAttr(wx.RED))
        self.ctrls.tfToCheck.AppendText(spWord)
        self.ctrls.tfToCheck.SetDefaultStyle(wx.TextAttr(wx.BLACK))
        self.ctrls.tfToCheck.AppendText(contextPost)
        # To scroll text to beginning
        self.ctrls.tfToCheck.SetInsertionPoint(0)

        # List suggestions
        sugglist = self.session.suggest(spWord)

        self.ctrls.lbReplaceSuggestions.DeleteAllItems()
        for s in sugglist:
            self.ctrls.lbReplaceSuggestions.InsertStringItem(
                    self.ctrls.lbReplaceSuggestions.GetItemCount(), s)
        # self.ctrls.lbReplaceSuggestions.SetColumnWidth(0, wx.LIST_AUTOSIZE)
        autosizeColumn(self.ctrls.lbReplaceSuggestions, 0)

        if len(sugglist) > 0:
            self.ctrls.tfReplaceWith.SetValue(uniToGui(sugglist[0]))
        else:
            self.ctrls.tfReplaceWith.SetValue(uniToGui(spWord))

        self.ctrls.tfReplaceWith.SetFocus()

        return True

    def OnClose(self, evt):
        # Detach from the main window and tear down the session.
        self.mainControl.spellChkDlg = None
        self.session.close()
        self.session = None
        self.Destroy()

    def OnIgnore(self, evt):
        # Skip the current word once and continue after the selection.
        s, e = self.mainControl.getActiveEditor().GetSelectionCharPos()
        self.checkNext(e)

    def OnIgnoreAll(self, evt):
        # Ignore this word for the rest of the session, then continue.
        self.session.addIgnoreWordSession(self.currentCheckedWord)
        self.OnIgnore(None)

    def OnReplace(self, evt):
        # Replace the selected occurrence with the replacement field content.
        activeEditor = self.mainControl.getActiveEditor()
        repl = guiToUni(self.ctrls.tfReplaceWith.GetValue())
        if repl != self.currentCheckedWord:
            activeEditor.ReplaceSelection(repl)

        s, e = self.mainControl.getActiveEditor().GetSelectionCharPos()
        self.checkNext(e)

    def OnReplaceAll(self, evt):
        # Remember the replacement for automatic application, then replace.
        self.session.addAutoReplace(self.currentCheckedWord,
                guiToUni(self.ctrls.tfReplaceWith.GetValue()))
        self.OnReplace(None)

    def _getReplSuggSelect(self):
        # Index of the selected suggestion, or -1 if none is selected.
        return self.ctrls.lbReplaceSuggestions.GetNextItem(-1,
                state=wx.LIST_STATE_SELECTED)

    def OnLbReplaceSuggestions(self, evt):
        # Copy the clicked suggestion into the replacement text field.
        sel = self._getReplSuggSelect()
        if sel == -1:
            return
        sel = self.ctrls.lbReplaceSuggestions.GetItemText(sel)
        self.ctrls.tfReplaceWith.SetValue(sel)

    def OnAddWordGlobal(self, evt):
        """
        Add word globally (application-wide)
        """
        self.session.addIgnoreWordGlobal(self.currentCheckedWord)
        self.OnIgnore(None)

    def OnAddWordLocal(self, evt):
        """
        Add word locally (wiki-wide)
        """
        self.session.addIgnoreWordLocal(self.currentCheckedWord)
        self.OnIgnore(None)

    def OnCharReplaceWith(self, evt):
        # Down arrow in the replacement field moves focus into the
        # suggestion list (selecting its first entry); Up is swallowed.
        if (evt.GetKeyCode() == wx.WXK_DOWN) and \
                not self.ctrls.lbReplaceSuggestions.GetItemCount() == 0:
            self.ctrls.lbReplaceSuggestions.SetFocus()
            self.ctrls.lbReplaceSuggestions.SetItemState(0,
                    wx.LIST_STATE_SELECTED|wx.LIST_STATE_FOCUSED,
                    wx.LIST_STATE_SELECTED|wx.LIST_STATE_FOCUSED)
            self.OnLbReplaceSuggestions(None)
        elif (evt.GetKeyCode() == wx.WXK_UP):
            pass
        else:
            evt.Skip()

    def OnCharReplaceSuggestions(self, evt):
        # Up arrow on the first suggestion moves focus back to the
        # replacement text field.
        if (evt.GetKeyCode() == wx.WXK_UP) and \
                (self._getReplSuggSelect() == 0):
            self.ctrls.tfReplaceWith.SetFocus()
            self.ctrls.lbReplaceSuggestions.SetItemState(0, 0,
                    wx.LIST_STATE_SELECTED)
        else:
            evt.Skip()
class SpellCheckerSession(MiscEvent.MiscEventSourceMixin):
    """
    Holds the state of one spell-check run: the active Enchant dictionary,
    per-session ignore and auto-replace lists, and the global (application)
    and local (wiki) personal word lists stored as functional pages.
    Fires the misc event "modified spell checker session" when its state
    changes; listens for "reread personal word list needed" from both the
    wiki document and the application.
    """

    def __init__(self, wikiDocument):
        MiscEvent.MiscEventSourceMixin.__init__(self)
        self.wikiDocument = wikiDocument
        self.currentDocPage = None
        # Enchant dictionary for the current page's language (None if none).
        self.enchantDict = None
        self.dictLanguage = None

        # For current session
        self.autoReplaceWords = {}
        self.spellChkIgnore = set()  # set of words to ignore during spell checking

        # For currently open dict file (if any)
        self.spellChkAddedGlobal = None
        self.globalPwlPage = None
        self.spellChkAddedLocal = None
        self.localPwlPage = None

        # Re-read personal word lists whenever the wiki document or the
        # application announces a change.
        self.__sinkWikiDocument = wxKeyFunctionSink((
                ("reread personal word list needed",
                    self.onRereadPersonalWordlistNeeded),
        ), self.wikiDocument.getMiscEvent())

        self.__sinkApp = wxKeyFunctionSink((
                ("reread personal word list needed",
                    self.onRereadPersonalWordlistNeeded),
        ), wx.GetApp().getMiscEvent())

        # self.currentDocPage = self.mainControl.getActiveEditor().getLoadedDocPage()
        #
        # self._refreshDictionary()

    def close(self):
        """
        Prepare for destruction
        """
        # We need to delete (all?) these references otherwise we get a small
        # (but noticable) memory leak when calling cloneForThread
        self.enchantDict = None
        self.dictLanguage = None
        self.globalPwlPage = None
        self.spellChkAddedGlobal = None
        self.spellChkAddedLocal = None
        self.localPwlPage = None
        self.wikiDocument = None
        self.__sinkWikiDocument.disconnect()
        self.__sinkApp.disconnect()

    def cloneForThread(self):
        """
        Generates a clone which can be run in a different thread independently
        of other clones.
        """
        result = SpellCheckerSession(self.wikiDocument)
        result.currentDocPage = self.currentDocPage

        # For current session
        result.autoReplaceWords = self.autoReplaceWords
        result.spellChkIgnore = self.spellChkIgnore

        result.dictLanguage = self.dictLanguage
        result.enchantDict = self.enchantDict   # Thread safety??? Dict(self.dictLanguage)

        # For currently open dict file (if any)
        result.spellChkAddedGlobal = self.spellChkAddedGlobal
        result.globalPwlPage = self.globalPwlPage
        result.spellChkAddedLocal = self.spellChkAddedLocal
        result.localPwlPage = self.localPwlPage

        return result

    def getCurrentDocPage(self):
        return self.currentDocPage

    def setCurrentDocPage(self, docPage):
        # Switching page may change the language attribute, so the
        # dictionary is refreshed.
        self.currentDocPage = docPage
        self._refreshDictionary()   # TODO Make faster?

    def hasEnchantDict(self):
        # True if a dictionary is available for the current page's language.
        return not self.enchantDict is None

    def _refreshDictionary(self):
        """
        Creates the enchant spell checker object
        """
        docPage = self.currentDocPage  # self.mainControl.getActiveEditor().getLoadedDocPage()
        if not isinstance(docPage, (AliasWikiPage, WikiPage)):
            return  # No support for functional pages

        lang = docPage.getAttributeOrGlobal(u"language", self.dictLanguage)
        try:
            if lang == u"":
                # Empty language attribute -> treat like a missing dictionary.
                raise EnchantDriver.DictNotFoundError()

            if lang != self.dictLanguage:
                self.enchantDict = Dict(lang)
                self.dictLanguage = lang
                self.rereadPersonalWordLists()
        except (UnicodeEncodeError, EnchantDriver.DictNotFoundError):
            # Reset everything; spell checking is unavailable for this page.
            self.enchantDict = None
            self.dictLanguage = None
            self.globalPwlPage = None
            self.spellChkAddedGlobal = None
            self.spellChkAddedLocal = None
            self.localPwlPage = None

    def onRereadPersonalWordlistNeeded(self, miscevt):
        # External change to a personal word list page -> reload and notify.
        self.rereadPersonalWordLists()
        self.fireMiscEventKeys(("modified spell checker session",))

    def rereadPersonalWordLists(self):
        # Personal word lists live in functional pages, one word per line.
        self.globalPwlPage = self.wikiDocument.getFuncPage("global/PWL")
        self.spellChkAddedGlobal = \
                set(self.globalPwlPage.getLiveText().split("\n"))

        self.localPwlPage = self.wikiDocument.getFuncPage("wiki/PWL")
        self.spellChkAddedLocal = \
                set(self.localPwlPage.getLiveText().split("\n"))

    def findAndLoadNextWikiPage(self, firstCheckedWikiWord, checkedWikiWord):
        """
        Walk wiki pages starting after checkedWikiWord (wrapping around)
        until one with an available dictionary is found and loaded as
        current page; return its name, or None if the walk returned to
        firstCheckedWikiWord or no pages exist.
        """
        while True:
            #Go to next page
            nw = self.wikiDocument.getWikiData().getNextWikiPageName(
                    checkedWikiWord)
            if nw is None:
                # Wrap around to the first page name.
                nw = self.wikiDocument.getWikiData().getFirstWikiPageName()

            if nw is None or nw == firstCheckedWikiWord:
                # Something went wrong or we are where we started
                return None

            checkedWikiWord = nw

            if firstCheckedWikiWord is None:
                # To avoid infinite loop
                firstCheckedWikiWord = checkedWikiWord

            self.setCurrentDocPage(self.wikiDocument.getWikiPage(checkedWikiWord))

            if self.enchantDict is None:
                # This page has no defined language or dictionary not available
                continue
            else:
                # Success
                return checkedWikiWord

    def checkWord(self, spWord):
        # True when the word is acceptable: in any ignore/personal list or
        # confirmed correct by the Enchant dictionary.
        return spWord in self.spellChkIgnore or \
                spWord in self.spellChkAddedGlobal or \
                spWord in self.spellChkAddedLocal or \
                (self.enchantDict is not None and \
                self.enchantDict.check(spWord))

    def suggest(self, spWord):
        # Replacement suggestions for a misspelled word (empty without dict).
        if self.enchantDict is None:
            return []
        return self.enchantDict.suggest(spWord)

    def getAutoReplaceWords(self):
        # Mapping of misspelled word -> replacement for "replace all".
        return self.autoReplaceWords

    def addAutoReplace(self, fromWord, toWord):
        self.autoReplaceWords[fromWord] = toWord

    def resetIgnoreListSession(self):
        """
        Clear the list of words to ignore for this session.
        """
        self.spellChkIgnore.clear()
        self.fireMiscEventKeys(("modified spell checker session",))

    def addIgnoreWordSession(self, spWord):
        self.spellChkIgnore.add(spWord)
        # For global and local ignores the changed FuncPage automatically
        # issues an event which triggers a reread of the word lists
        # and sends another event that session was modified.
        # For the session ignore list this must be done here explicitly
        self.fireMiscEventKeys(("modified spell checker session",))

    def addIgnoreWordGlobal(self, spWord):
        """
        Add spWord globally (application-wide)
        """
        if self.spellChkAddedGlobal is None:
            return  # TODO When does this happen?

        self.spellChkAddedGlobal.add(spWord)
        words = list(self.spellChkAddedGlobal)
        self.wikiDocument.getCollator().sort(words)
        self.globalPwlPage.replaceLiveText(u"\n".join(words))

    def addIgnoreWordLocal(self, spWord):
        """
        Add spWord locally (wiki-wide)
        """
        if self.spellChkAddedLocal is None:
            return  # TODO When does this happen?

        self.spellChkAddedLocal.add(spWord)
        words = list(self.spellChkAddedLocal)
        self.wikiDocument.getCollator().sort(words)
        self.localPwlPage.replaceLiveText(u"\n".join(words))

    def buildUnknownWordList(self, text, threadstop=DUMBTHREADSTOP):
        """
        Scan text and return a syntax node of type "unknownSpellList"
        containing one "unknownSpelling" terminal node per misspelled word.
        threadstop allows a background thread to abort the scan.
        """
        if not self.hasEnchantDict():
            return buildSyntaxNode([], -1, "unknownSpellList")

        docPage = self.getCurrentDocPage()
        if docPage is None:
            return buildSyntaxNode([], -1, "unknownSpellList")

        result = []

        langHelper = wx.GetApp().createWikiLanguageHelper(
                docPage.getWikiLanguageName())

        startPos = 0

        while True:
            threadstop.testValidThread()
            start, end, spWord = langHelper.findNextWordForSpellcheck(text,
                    startPos, docPage)

            if start is None:
                # End of page reached
                return buildSyntaxNode(result, -1, "unknownSpellList")

            startPos = end

            if self.checkWord(spWord):
                # Ignore if word is in the ignore lists or is seen as correct
                # by the spell checker
                continue

            # Word is unknown -> add to result
            # It is added as a WikiPyparsing.TerminalNode
            result.append(buildSyntaxNode(spWord, start, "unknownSpelling"))
def isSpellCheckSupported():
    """
    Return True iff the optional Enchant spell checking backend could be
    imported at module load time (otherwise the module-level Dict is None).
    """
    return Dict is not None
|
"""
To test the performance of Fine Tuned ResNet-50
"""
from __future__ import print_function
#To reduce verbosity
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']= '3'
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
from keras.callbacks import ReduceLROnPlateau, CSVLogger, EarlyStopping, TensorBoard
import resnet
from time import time
import pickle
import tensorflow as tf
import numpy as np
from tensorflow import FixedLenFeature
from tensorflow import keras
import keras.backend as K
from itertools import product
from functools import partial
import sklearn.metrics
from keras.callbacks import ModelCheckpoint
#For FineTuning ResNet50
from tensorflow.keras.applications.resnet50 import ResNet50
print(tf.__version__)
class Metrics(keras.callbacks.Callback):
    """
    Implementation of custom metrics: Precision, Recall, F-Measure and Confusion Matrix

    Computed on the validation data at the end of every epoch; the per-epoch
    values are printed and also accumulated for later retrieval via
    get_data().
    """

    def on_train_begin(self, logs=None):
        # Per-epoch metric dicts collected over the whole training run.
        self._data = []

    def on_epoch_end(self, batch, logs=None):
        # validation_data is (X, y) tensors supplied to fit().
        X_val, y_val = self.validation_data[0], self.validation_data[1]
        # Fixed: use the model Keras attaches to this callback (self.model)
        # instead of relying on a module-level global named `model`.
        y_predict = np.asarray(self.model.predict(X_val, steps=1))

        # y_val is a TF1 tensor; evaluate it to a numpy array.
        with tf.Session() as sess:
            y_val = sess.run(y_val)

        # Collapse one-hot / probability vectors to class indices.
        y_val = np.argmax(y_val, axis=1)
        y_predict = np.argmax(y_predict, axis=1)

        # Compute each metric once and reuse for printing and storage.
        recall = sklearn.metrics.recall_score(y_val, y_predict, pos_label=1)
        precision = sklearn.metrics.precision_score(y_val, y_predict, pos_label=1)
        f1 = sklearn.metrics.f1_score(y_val, y_predict, pos_label=1)
        roc_auc = sklearn.metrics.roc_auc_score(y_val, y_predict)

        print("\nMetrics for Epoch")
        print("Confusion Matrix:\n", sklearn.metrics.confusion_matrix(y_val, y_predict))
        print("Recall: ", recall)
        print("Precision: ", precision)
        print("F1_score: ", f1)
        print("ROC_AUC_score: ", roc_auc)
        print("\n")

        self._data.append({
            'val_recall': recall,
            'val_precision': precision,
            'val_f1_score': f1,
            'val_roc_auc_score': roc_auc,
        })
        return

    def get_data(self):
        """Return the list of per-epoch metric dicts collected so far."""
        return self._data
def _parse_function(proto):
    """
    Parser for TFRecord file

    Deserializes one example and decodes the raw image bytes to float32.
    Returns (image_tensor, label_tensor).
    """
    feature_spec = {
        'train/image': tf.FixedLenFeature([], tf.string),
        'train/label': tf.FixedLenFeature([], tf.int64),
    }
    example = tf.parse_single_example(proto, feature_spec)
    image = tf.decode_raw(example['train/image'], tf.float32)
    return image, example['train/label']
def create_dataset(filepath, batch_size, shuffle, augmentfilepath, augment, addPosPath, addPos):
    """
    Reads TFRecord and creates the dataset. Returns image and label dataset as tensors.

    Optionally concatenates an augmented TFRecord (augmentfilepath) and a
    positives-only TFRecord (addPosPath). Images are reshaped to
    224x224x3 to match the ImageNet input size; labels are one-hot (2 classes).
    """
    records = tf.data.TFRecordDataset(filepath)

    # If want to add augmented dataset, put augmentfilepath
    if augment is True:
        records = records.concatenate(tf.data.TFRecordDataset(augmentfilepath))

    # If want to add positive only dataset, put addPosPath
    if addPos is True:
        records = records.concatenate(tf.data.TFRecordDataset(addPosPath))

    records = records.map(_parse_function, num_parallel_calls=8)
    records = records.repeat()
    if shuffle is True:
        # NOTE(review): two chained shuffles (buffers 5000 then 800) as in
        # the original — presumably intentional; confirm before changing.
        records = records.shuffle(5000)
        records = records.shuffle(800)
    records = records.batch(batch_size)

    image, label = records.make_one_shot_iterator().get_next()
    # Image reshaped to 224x224x3 to match ImageNet dataset
    image = tf.reshape(image, [-1, 224, 224, 3])
    image = tf.cast(image, tf.float32)
    label = tf.one_hot(label, 2)
    return image, label
def w_categorical_crossentropy(y_true, y_pred, weights):
    """
    Implementation of Weighted Categorical Crossentropy Function for unbalanced datasets

    Scales the standard categorical crossentropy per sample by
    weights[true_class, predicted_class], where the predicted class is the
    argmax of y_pred (located via an equality mask against the row maximum).

    y_true: one-hot ground-truth tensor, shape (batch, nb_classes)
    y_pred: predicted probabilities, same shape
    weights: (nb_classes, nb_classes) array indexed [true, predicted]
    Returns the per-sample weighted crossentropy tensor.
    """
    nb_cl = len(weights)
    final_mask = K.zeros_like(y_pred[:, 0])
    # Mark each sample's predicted class: compare every entry to the row max.
    y_pred_max = K.max(y_pred, axis=1)
    y_pred_max = K.expand_dims(y_pred_max, 1)
    y_pred_max_mat = K.equal(y_pred, y_pred_max)
    # Sum weight contributions over all (predicted, true) class pairs; only
    # the pair where both masks are 1 contributes for a given sample.
    for c_p, c_t in product(range(nb_cl), range(nb_cl)):
        final_mask += (K.cast(weights[c_t, c_p],K.floatx()) * K.cast(y_pred_max_mat[:, c_p] ,K.floatx())* K.cast(y_true[:, c_t],K.floatx()))
    return K.categorical_crossentropy(y_true, y_pred) * final_mask
# --- Evaluation script: load fine-tuned ResNet-50 weights and score them ---

# Build validation and test input pipelines (no augmentation, no shuffling).
ValImage, ValLabel = create_dataset('/home/mudit/Skin Lesion Classification/TFrecord_Datasets/Imagenet/Melanoma_Validation_Imagenet.tfrecords',150, False,'',False, '', False)
# NOTE(review): TestImage/TestLabel are created but never used below.
TestImage, TestLabel = create_dataset('/home/mudit/Skin Lesion Classification/TFrecord_Datasets/Imagenet/Melanoma_Test_Imagenet.tfrecords',150, False,'',False, '', False)

# Ground-truth labels from column 1 of the ISIC 2017 CSV (header skipped).
# NOTE(review): predictions below come from ValImage while this CSV is the
# "Validation" ground truth — presumably intentional; confirm the pairing.
TestCSV = np.genfromtxt('ISIC-2017_Validation_Part3_GroundTruth.csv', delimiter=',', usecols=(1), skip_header=1)
TestCSV = TestCSV.tolist()
# Binarize: melanoma (1.0) -> 1, everything else -> 0.
TestCSV = [1 if i == 1.0 else 0 for i in TestCSV]

IMSIZE = 224
nb_classes = 2

#Weights for Weighted loss function
# w_array[true, predicted]: penalize missing a melanoma (true=1, pred=0) 6x.
w_array = np.ones((2,2))
w_array[1,0] = 6
w_array[0,1] = 1
print(w_array)

# Bind the weight matrix into the loss so Keras sees a 2-arg loss function.
ncce = partial(w_categorical_crossentropy,weights=w_array)
metrics = Metrics()

input_tensor = keras.layers.Input(shape = (224,224,3))

#Loading model
model = ResNet50(input_tensor= input_tensor,weights='imagenet',include_top=False)
# Freeze all but the last 33 layers (fine-tuning only the top of the net).
for layer in model.layers[:-33]:
    layer.trainable = False

# Replace the classification head: global average pooling + 2-way softmax.
x = model.output
x= tf.keras.layers.GlobalAveragePooling2D(data_format='channels_last')(x)
x=tf.keras.layers.Dense(nb_classes,activation ='softmax')(x)
model = tf.keras.models.Model(model.input, x)

model.compile(loss=ncce,
              optimizer=tf.keras.optimizers.SGD(lr=0.0001, momentum =0.9),
              metrics=['accuracy'])

#Printing test results
print(model.summary())
model.load_weights('weights_resnet012.h5')

# One predict step covers the whole 150-image validation batch.
Preds = np.asarray(model.predict(ValImage, steps = 1))
Preds = np.argmax(Preds, axis = 1)
print(len(Preds))
print("F1_score: ", sklearn.metrics.f1_score(TestCSV,Preds,pos_label=1))
print("ROC_AUC_score: ", sklearn.metrics.roc_auc_score(TestCSV,Preds))
|
<gh_stars>10-100
from . import SubCommand
from .common import *
from media_management_scripts.tvdb_api import TVDB
from media_management_scripts.utils import create_metadata_extractor
from media_management_scripts.support.metadata import Metadata
from media_management_scripts.support.episode_finder import extract
from media_management_scripts.renamer import rename_process
from typing import Dict
from difflib import SequenceMatcher
def _get_season_episode(tvdb_episode, use_dvd=False):
if use_dvd:
season = tvdb_episode['dvdSeason']
episode_num = tvdb_episode['dvdEpisodeNumber']
else:
season = tvdb_episode['airedSeason']
episode_num = tvdb_episode['airedEpisodeNumber']
return int(season), int(episode_num)
def _map_metadata(input_files, meta_shelve=None) -> Dict[str, Metadata]:
    """
    Extract metadata for every input file, returning {path: Metadata}.

    When a (truthy) shelve mapping is supplied it is used as a cache:
    hits are returned directly, misses are extracted and stored back.
    """
    extractor = create_metadata_extractor()
    result = {}
    for path in input_files:
        if not meta_shelve:
            # No cache available — extract directly.
            result[path] = extractor.extract(path)
        elif path in meta_shelve:
            # Cache hit.
            result[path] = meta_shelve[path]
        else:
            # Cache miss — extract and persist for subsequent runs.
            metadata = extractor.extract(path)
            meta_shelve[path] = metadata
            result[path] = metadata
    return result
class ItunesCommand(SubCommand):
    """
    'itunes' subcommand: renames iTunes-purchased TV episodes to the
    standard Plex naming scheme by matching each file's embedded metadata
    (air date, title, season/episode tags) against TVDB episode records.
    """

    @property
    def name(self):
        # Subcommand name as it appears on the command line.
        return 'itunes'

    def build_argparse(self, subparser):
        # Register the 'itunes' subparser and its options.
        itunes_parser = subparser.add_parser('itunes', parents=[parent_parser],
                                             help='Attempts to rename iTunes episodes to the standard Plex format.')
        itunes_parser.add_argument('-o', '--output', type=str, default='./', help='Directory to output to')
        itunes_parser.add_argument('--meta-shelve', type=str, default=None, dest='meta_shelve',
                                   help='A file to hold metadata information for faster subsequent runs on the same files')
        itunes_parser.add_argument('input', nargs='+', help='Input files')
        itunes_parser.add_argument('--dvd', action='store_const', default=False, const=True, help='Use DVD ordering')
        itunes_parser.add_argument('--fuzzy', action='store_const', default=False, const=True,
                                   help='Use fuzzy matching. Enables this uses less strict exact episode name matching.')

    def subexecute(self, ns):
        """Entry point: unpack parsed args and run the rename process."""
        from media_management_scripts.tvdb_api import from_config
        import os
        input_to_cmd = ns['input']
        tvdb = from_config()
        output = ns['output']
        dvd = ns['dvd']
        fuzzy = ns['fuzzy']
        meta_shelve = ns['meta_shelve']
        dry_run = ns['dry_run']
        if meta_shelve:
            # Keep the metadata cache open for the whole run.
            import shelve
            with shelve.open(meta_shelve) as meta_store:
                self.process_itunes_tv(input_to_cmd, output, tvdb, meta_shelve=meta_store, use_dvd=dvd, fuzzy=fuzzy,
                                       dry_run=dry_run)
        else:
            self.process_itunes_tv(input_to_cmd, output, tvdb, meta_shelve=None, use_dvd=dvd, fuzzy=fuzzy,
                                   dry_run=dry_run)

    def _find_match(self, metadata: Metadata, episodes, use_dvd=False, fuzzy=False):
        """
        Find the TVDB episode matching a file's metadata, trying in order:
        air date + exact (or optionally fuzzy >= .85) title match, then the
        file's season/episode tags, then season/episode extracted from the
        title. Returns the episode dict or None.
        """
        # NOTE(review): raises KeyError if 'date'/'title' tags are missing —
        # presumably all iTunes files carry them; confirm.
        date = metadata.tags['date'].split('T')[0]
        title = metadata.tags['title']
        fuzzy_match = 0
        fuzzy_episode = None
        for ep in episodes:
            ep_name = ep.get('episodeName', None)
            ep_date = ep.get('firstAired', None)
            if ep_date == date:
                if title.lower() == ep_name.lower():
                    return ep
                elif fuzzy:
                    # Track the best close-enough title on the same air date.
                    ratio = SequenceMatcher(None, title, ep_name).ratio()
                    if ratio >= .85 and ratio > fuzzy_match:
                        fuzzy_episode = ep
                        fuzzy_match = ratio
        if not fuzzy_episode:
            if 'season_number' in metadata.tags and 'episode_sort' in metadata.tags:
                season = metadata.tags['season_number']
                ep_num = metadata.tags['episode_sort']
                # NOTE(review): tag values may be strings while
                # _get_season_episode returns ints — if so this comparison
                # never matches; verify the tag types.
                for ep in episodes:
                    tvdb_season, tvdb_ep_num = _get_season_episode(ep, use_dvd)
                    if tvdb_season == season and tvdb_ep_num == ep_num:
                        return ep
        if not fuzzy_episode:
            # Last resort: parse season/episode out of the title itself.
            season, ep_num, part = extract(title)
            if part is None:
                for ep in episodes:
                    tvdb_season, tvdb_ep_num = _get_season_episode(ep, use_dvd)
                    if tvdb_season == season and tvdb_ep_num == ep_num:
                        return ep
        return fuzzy_episode

    def _output(self, matched, not_matched, output_dir, dry_run):
        """
        Compute Plex-style target names for all matched files, move them via
        _bulk_move, and list the files that could not be matched.
        """
        # NOTE(review): dry_run is accepted but not forwarded anywhere here —
        # confirm whether _bulk_move is expected to honor it.
        table = []
        for file, params in matched.items():
            new_name = rename_process(
                '{plex}', files=[file],
                output_dir=output_dir,
                params=params)[0][1]
            table.append((file, new_name))
        table.sort(key=lambda s: s[1])
        header = ('Input', 'Output')
        self._bulk_move(table, header, src_index=0, dest_index=1, print_table=True)
        if not_matched:
            print('Not Matched:')
            for file in not_matched:
                print('  {}'.format(file))

    def process_itunes_tv(self, input_files, output_dir, tvdb: TVDB, meta_shelve=None, use_dvd=False, fuzzy=False,
                          dry_run=True):
        """
        Match every input file against TVDB episodes of its (single, shared)
        show and hand the results to _output. Raises if the files belong to
        more than one show.
        """
        metadata_map = _map_metadata(input_files, meta_shelve)
        # All files must share one 'show' tag value.
        series_name = {value.tags['show'] for value in metadata_map.values()}
        if len(series_name) != 1:
            raise Exception('Input files have different shows: {}'.format(series_name))
        series_name = series_name.pop()
        series_id = tvdb.get_series_id(series_name)
        tvdb_episodes = tvdb.get_episodes(series_id)
        matched = {}
        not_matched = []
        for file, metadata in metadata_map.items():
            episode = self._find_match(metadata, tvdb_episodes, use_dvd, fuzzy)
            if episode:
                season, episode_num = _get_season_episode(episode, use_dvd)
                # Parameters consumed by the '{plex}' rename template.
                params = {
                    'show': series_name,
                    'season': season,
                    'ep_num': episode_num,
                    'name': episode['episodeName']
                }
                matched[file] = params
            else:
                not_matched.append(file)
        self._output(matched, not_matched, output_dir, dry_run)
# Register this subcommand with the CLI dispatcher at import time.
SubCommand.register(ItunesCommand)
|
<reponame>Adeikalam/projet_trading_auto<gh_stars>1-10
import os
import re
import sys
import warnings
from colorsys import hls_to_rgb, rgb_to_hls
from itertools import cycle, combinations
from functools import partial
from typing import Callable, List, Union
import numpy as np
import pandas as pd
from bokeh.colors import RGB
from bokeh.colors.named import (
lime as BULL_COLOR,
tomato as BEAR_COLOR
)
from bokeh.plotting import figure as _figure
from bokeh.models import (
CrosshairTool,
CustomJS,
ColumnDataSource,
NumeralTickFormatter,
Span,
HoverTool,
Range1d,
DatetimeTickFormatter,
FuncTickFormatter,
WheelZoomTool,
LinearColorMapper,
)
from bokeh.io import output_notebook, output_file, show
from bokeh.io.state import curstate
from bokeh.layouts import gridplot
from bokeh.palettes import Category10
from bokeh.transform import factor_cmap
from backtesting._util import _data_period, _as_list, _Indicator
# Load the JavaScript autoscale callback shipped next to this module; it is
# attached to Bokeh figures elsewhere in this file.
with open(os.path.join(os.path.dirname(__file__), 'autoscale_cb.js'),
          encoding='utf-8') as _f:
    _AUTOSCALE_JS_CALLBACK = _f.read()

# Heuristic Jupyter detection: the notebook kernel sets JPY_PARENT_PID.
IS_JUPYTER_NOTEBOOK = 'JPY_PARENT_PID' in os.environ

if IS_JUPYTER_NOTEBOOK:
    # Route Bokeh output to the notebook and tell the user how to opt out.
    warnings.warn('Jupyter Notebook detected. '
                  'Setting Bokeh output to notebook. '
                  'This may not work in Jupyter clients without JavaScript '
                  'support (e.g. PyCharm, Spyder IDE). '
                  'Reset with `backtesting.set_bokeh_output(notebook=False)`.')
    output_notebook()
def set_bokeh_output(notebook=False):
    """
    Set Bokeh to output either to a file or Jupyter notebook.

    By default, Bokeh outputs to notebook if running from within
    notebook was detected.

    notebook: True routes plots to the notebook, False to an HTML file.
    """
    global IS_JUPYTER_NOTEBOOK
    IS_JUPYTER_NOTEBOOK = notebook
def _windos_safe_filename(filename):
if sys.platform.startswith('win'):
return re.sub(r'[^a-zA-Z0-9,_-]', '_', filename.replace('=', '-'))
return filename
def _bokeh_reset(filename=None):
    """
    Reset global Bokeh state, then select the output target: an HTML file
    when a filename is given (appending '.html' if needed), otherwise the
    notebook when one was detected, otherwise nothing.
    """
    curstate().reset()
    if not filename:
        if IS_JUPYTER_NOTEBOOK:
            curstate().output_notebook()
        return
    if not filename.endswith('.html'):
        filename += '.html'
    output_file(filename, title=filename)
def colorgen():
    """Endlessly yield the ten Category10 palette colors, wrapping around."""
    while True:
        for color in Category10[10]:
            yield color
def lightness(color, lightness=.94):
    """Return *color* with its HLS lightness component replaced by *lightness*."""
    # Normalize 0-255 channels to 0-1 for colorsys.
    normalized = np.array([color.r, color.g, color.b]) / 255
    hue, _, saturation = rgb_to_hls(*normalized)
    # Rebuild the color with the requested lightness, scaled back to 0-255.
    channels = np.array(hls_to_rgb(hue, lightness, saturation)) * 255
    return RGB(*channels)
# Hard cap on candlesticks drawn; beyond this the data is resampled down
# (see _maybe_resample_data) to keep the Bokeh plot responsive.
_MAX_CANDLES = 10_000
def _maybe_resample_data(resample_rule, df, indicators, equity_data, trades):
    """
    Downsample OHLC data, indicators, equity curve and trades to a coarser
    frequency when there are too many candles to plot.

    resample_rule: an explicit pandas offset string, or a falsy/True-ish
    flag — when not a string, data is returned unchanged unless it exceeds
    _MAX_CANDLES, in which case the smallest listed frequency that fits is
    chosen automatically. Returns the (df, indicators, equity_data, trades)
    tuple, resampled consistently.
    """
    if isinstance(resample_rule, str):
        freq = resample_rule
    else:
        if resample_rule is False or len(df) <= _MAX_CANDLES:
            return df, indicators, equity_data, trades

        # Start the frequency search relative to the index resolution so we
        # never resample to something finer than the data already is.
        from_index = dict(day=-2, hour=-6, minute=1, second=0, millisecond=0,
                          microsecond=0, nanosecond=0)[df.index.resolution]
        FREQS = ('1T', '5T', '10T', '15T', '30T', '1H', '2H', '4H', '8H', '1D', '1W', '1M')
        # First frequency that brings the candle count under the cap.
        freq = next((f for f in FREQS[from_index:]
                     if len(df.resample(f)) <= _MAX_CANDLES), FREQS[-1])
        warnings.warn(f"Data contains too many candlesticks to plot; downsampling to {freq!r}. "
                      "See `Backtest.plot(resample=...)`")

    from .lib import OHLCV_AGG, TRADES_AGG, _EQUITY_AGG
    df = df.resample(freq, label='right').agg(OHLCV_AGG).dropna()

    indicators = [_Indicator(i.df.resample(freq, label='right').mean()
                             .dropna().reindex(df.index).values.T,
                             **dict(i._opts, name=i.name,
                                    # Replace saved index with the resampled one
                                    index=df.index))
                  for i in indicators]
    assert not indicators or indicators[0].df.index.equals(df.index)

    equity_data = equity_data.resample(freq, label='right').agg(_EQUITY_AGG).dropna(how='all')
    assert equity_data.index.equals(df.index)

    def _weighted_returns(s, trades=trades):
        # Size-weighted mean of per-trade returns within one resample bucket.
        df = trades.loc[s.index]
        return ((df['Size'].abs() * df['ReturnPct']) / df['Size'].abs().sum()).sum()

    def _group_trades(column):
        # Map a bucket of trades to the resampled bar nearest their mean time.
        def f(s, new_index=df.index.astype(np.int64), bars=trades[column]):
            if s.size:
                # Via int64 because on pandas recently broken datetime
                mean_time = int(bars.loc[s.index].view('i8').mean())
                new_bar_idx = new_index.get_loc(mean_time, method='nearest')
                return new_bar_idx
        return f

    if len(trades):  # Avoid pandas "resampling on Int64 index" error
        trades = trades.assign(count=1).resample(freq, on='ExitTime', label='right').agg(dict(
            TRADES_AGG,
            ReturnPct=_weighted_returns,
            count='sum',
            EntryBar=_group_trades('EntryTime'),
            ExitBar=_group_trades('ExitTime'),
        )).dropna()

    return df, indicators, equity_data, trades
def plot(*, results: pd.Series,
         df: pd.DataFrame,
         indicators: List[_Indicator],
         filename='', plot_width=None,
         plot_equity=True, plot_return=False, plot_pl=True,
         plot_volume=True, plot_drawdown=False,
         smooth_equity=False, relative_equity=True,
         superimpose=True, resample=True,
         reverse_indicators=True,
         show_legend=True, open_browser=True):
    """
    Like much of GUI code everywhere, this is a mess.

    Compose the interactive Bokeh chart for one backtest run: an OHLC
    candlestick figure plus optional equity/return, drawdown, profit/loss,
    volume and strategy-indicator panels, all sharing a single x-range.
    The assembled gridplot is passed to Bokeh's ``show()`` (writing to
    `filename` and/or opening a browser depending on `open_browser`) and
    returned to the caller.
    """
    # We need to reset global Bokeh state, otherwise subsequent runs of
    # plot() contain some previous run's cruft data (was noticed when
    # TestPlot.test_file_size() test was failing).
    if not filename and not IS_JUPYTER_NOTEBOOK:
        filename = _windos_safe_filename(str(results._strategy))
    _bokeh_reset(filename)

    COLORS = [BEAR_COLOR, BULL_COLOR]
    BAR_WIDTH = .8

    assert df.index.equals(results['_equity_curve'].index)
    equity_data = results['_equity_curve'].copy(deep=False)
    trades = results['_trades']

    # Disable panels that cannot be drawn for this run (no volume data,
    # or no trades at all).
    plot_volume = plot_volume and not df.Volume.isnull().all()
    plot_equity = plot_equity and not trades.empty
    plot_return = plot_return and not trades.empty
    plot_pl = plot_pl and not trades.empty
    is_datetime_index = isinstance(df.index, pd.DatetimeIndex)

    from .lib import OHLCV_AGG
    # ohlc df may contain many columns. We're only interested in, and pass on to Bokeh, these
    df = df[list(OHLCV_AGG.keys())].copy(deep=False)

    # Limit data to max_candles
    if is_datetime_index:
        df, indicators, equity_data, trades = _maybe_resample_data(
            resample, df, indicators, equity_data, trades)

    # Bokeh plots on a plain integer index; the original (maybe datetime)
    # index is kept in a 'datetime' column for tick/tooltip formatting.
    df.index.name = None  # Provides source name @index
    df['datetime'] = df.index  # Save original, maybe datetime index
    df = df.reset_index(drop=True)
    equity_data = equity_data.reset_index(drop=True)
    index = df.index

    new_bokeh_figure = partial(
        _figure,
        x_axis_type='linear',
        plot_width=plot_width,
        plot_height=400,
        tools="xpan,xwheel_zoom,box_zoom,undo,redo,reset,save",
        active_drag='xpan',
        active_scroll='xwheel_zoom')

    pad = (index[-1] - index[0]) / 20

    fig_ohlc = new_bokeh_figure(
        x_range=Range1d(index[0], index[-1],
                        min_interval=10,
                        bounds=(index[0] - pad,
                                index[-1] + pad)) if index.size > 1 else None)
    figs_above_ohlc, figs_below_ohlc = [], []

    source = ColumnDataSource(df)
    # 'inc' marks rising ('1') vs falling ('0') candles, as strings for
    # use with categorical factor_cmap.
    source.add((df.Close >= df.Open).values.astype(np.uint8).astype(str), 'inc')

    trade_source = ColumnDataSource(dict(
        index=trades['ExitBar'],
        datetime=trades['ExitTime'],
        exit_price=trades['ExitPrice'],
        size=trades['Size'],
        returns_positive=(trades['ReturnPct'] > 0).astype(int).astype(str),
    ))

    inc_cmap = factor_cmap('inc', COLORS, ['0', '1'])
    cmap = factor_cmap('returns_positive', COLORS, ['0', '1'])
    colors_darker = [lightness(BEAR_COLOR, .35),
                     lightness(BULL_COLOR, .35)]
    trades_cmap = factor_cmap('returns_positive', colors_darker, ['0', '1'])

    if is_datetime_index:
        # Map integer tick positions back to their datetimes client-side.
        fig_ohlc.xaxis.formatter = FuncTickFormatter(
            args=dict(axis=fig_ohlc.xaxis[0],
                      formatter=DatetimeTickFormatter(days=['%d %b', '%a %d'],
                                                      months=['%m/%Y', "%b'%y"]),
                      source=source),
            code='''
this.labels = this.labels || formatter.doFormat(ticks
                                                .map(i => source.data.datetime[i])
                                                .filter(t => t !== undefined));
return this.labels[index] || "";
''')

    NBSP = '\N{NBSP}' * 4
    ohlc_extreme_values = df[['High', 'Low']].copy(deep=False)
    ohlc_tooltips = [
        ('x, y', NBSP.join(('$index',
                            '$y{0,0.0[0000]}'))),
        ('OHLC', NBSP.join(('@Open{0,0.0[0000]}',
                            '@High{0,0.0[0000]}',
                            '@Low{0,0.0[0000]}',
                            '@Close{0,0.0[0000]}'))),
        ('Volume', '@Volume{0,0}')]

    def new_indicator_figure(**kwargs):
        # Smaller sub-figure whose x-range is linked to the OHLC figure.
        kwargs.setdefault('plot_height', 90)
        fig = new_bokeh_figure(x_range=fig_ohlc.x_range,
                               active_scroll='xwheel_zoom',
                               active_drag='xpan',
                               **kwargs)
        fig.xaxis.visible = False
        fig.yaxis.minor_tick_line_color = None
        return fig

    def set_tooltips(fig, tooltips=(), vline=True, renderers=()):
        # Attach a HoverTool, prepending a Date (or bar #) entry.
        tooltips = list(tooltips)
        renderers = list(renderers)

        if is_datetime_index:
            formatters = {'@datetime': 'datetime'}
            tooltips = [("Date", "@datetime{%c}")] + tooltips
        else:
            formatters = {}
            tooltips = [("#", "@index")] + tooltips
        fig.add_tools(HoverTool(
            point_policy='follow_mouse',
            renderers=renderers, formatters=formatters,
            tooltips=tooltips, mode='vline' if vline else 'mouse'))

    def _plot_equity_section(is_return=False):
        """Equity section"""
        # Max DD Dur. line
        equity = equity_data['Equity'].copy()
        dd_end = equity_data['DrawdownDuration'].idxmax()
        if np.isnan(dd_end):
            dd_start = dd_end = equity.index[0]
        else:
            dd_start = equity[:dd_end].idxmax()
            # If DD not extending into the future, get exact point of intersection with equity
            if dd_end != equity.index[-1]:
                dd_end = np.interp(equity[dd_start],
                                   (equity[dd_end - 1], equity[dd_end]),
                                   (dd_end - 1, dd_end))

        if smooth_equity:
            interest_points = pd.Index([
                # Beginning and end
                equity.index[0], equity.index[-1],
                # Peak equity and peak DD
                equity.idxmax(), equity_data['DrawdownPct'].idxmax(),
                # Include max dd end points. Otherwise the MaxDD line looks amiss.
                dd_start, int(dd_end), min(int(dd_end + 1), equity.size - 1),
            ])
            select = pd.Index(trades['ExitBar']).union(interest_points)
            select = select.unique().dropna()
            equity = equity.iloc[select].reindex(equity.index)
            equity.interpolate(inplace=True)

        assert equity.index.equals(equity_data.index)

        if relative_equity:
            equity /= equity.iloc[0]
        if is_return:
            equity -= equity.iloc[0]

        yaxis_label = 'Return' if is_return else 'Equity'
        source_key = 'eq_return' if is_return else 'equity'
        source.add(equity, source_key)
        fig = new_indicator_figure(
            y_axis_label=yaxis_label,
            **({} if plot_drawdown else dict(plot_height=110)))

        # High-watermark drawdown dents
        fig.patch('index', 'equity_dd',
                  source=ColumnDataSource(dict(
                      index=np.r_[index, index[::-1]],
                      equity_dd=np.r_[equity, equity.cummax()[::-1]]
                  )),
                  fill_color='#ffffea', line_color='#ffcb66')

        # Equity line
        r = fig.line('index', source_key, source=source, line_width=1.5, line_alpha=1)
        if relative_equity:
            tooltip_format = f'@{source_key}{{+0,0.[000]%}}'
            tick_format = '0,0.[00]%'
            legend_format = '{:,.0f}%'
        else:
            tooltip_format = f'@{source_key}{{$ 0,0}}'
            tick_format = '$ 0.0 a'
            legend_format = '${:,.0f}'
        set_tooltips(fig, [(yaxis_label, tooltip_format)], renderers=[r])
        fig.yaxis.formatter = NumeralTickFormatter(format=tick_format)

        # Peaks
        argmax = equity.idxmax()
        fig.scatter(argmax, equity[argmax],
                    legend_label='Peak ({})'.format(
                        legend_format.format(equity[argmax] * (100 if relative_equity else 1))),
                    color='cyan', size=8)
        fig.scatter(index[-1], equity.values[-1],
                    legend_label='Final ({})'.format(
                        legend_format.format(equity.iloc[-1] * (100 if relative_equity else 1))),
                    color='blue', size=8)

        if not plot_drawdown:
            drawdown = equity_data['DrawdownPct']
            argmax = drawdown.idxmax()
            fig.scatter(argmax, equity[argmax],
                        legend_label='Max Drawdown (-{:.1f}%)'.format(100 * drawdown[argmax]),
                        color='red', size=8)
        dd_timedelta_label = df['datetime'].iloc[int(round(dd_end))] - df['datetime'].iloc[dd_start]
        fig.line([dd_start, dd_end], equity.iloc[dd_start],
                 line_color='red', line_width=2,
                 legend_label=f'Max Dd Dur. ({dd_timedelta_label})'
                              .replace(' 00:00:00', '')
                              .replace('(0 days ', '('))

        figs_above_ohlc.append(fig)

    def _plot_drawdown_section():
        """Drawdown section"""
        fig = new_indicator_figure(y_axis_label="Drawdown")
        drawdown = equity_data['DrawdownPct']
        argmax = drawdown.idxmax()
        source.add(drawdown, 'drawdown')
        r = fig.line('index', 'drawdown', source=source, line_width=1.3)
        fig.scatter(argmax, drawdown[argmax],
                    legend_label='Peak (-{:.1f}%)'.format(100 * drawdown[argmax]),
                    color='red', size=8)
        set_tooltips(fig, [('Drawdown', '@drawdown{-0.[0]%}')], renderers=[r])
        fig.yaxis.formatter = NumeralTickFormatter(format="-0.[0]%")
        return fig

    def _plot_pl_section():
        """Profit/Loss markers section"""
        fig = new_indicator_figure(y_axis_label="Profit / Loss")
        fig.add_layout(Span(location=0, dimension='width', line_color='#666666',
                            line_dash='dashed', line_width=1))
        returns_long = np.where(trades['Size'] > 0, trades['ReturnPct'], np.nan)
        returns_short = np.where(trades['Size'] < 0, trades['ReturnPct'], np.nan)
        # Marker area scales with trade size, clamped into [8, 20] px.
        size = trades['Size'].abs()
        size = np.interp(size, (size.min(), size.max()), (8, 20))
        trade_source.add(returns_long, 'returns_long')
        trade_source.add(returns_short, 'returns_short')
        trade_source.add(size, 'marker_size')
        if 'count' in trades:
            trade_source.add(trades['count'], 'count')
        r1 = fig.scatter('index', 'returns_long', source=trade_source, fill_color=cmap,
                         marker='triangle', line_color='black', size='marker_size')
        r2 = fig.scatter('index', 'returns_short', source=trade_source, fill_color=cmap,
                         marker='inverted_triangle', line_color='black', size='marker_size')
        tooltips = [("Size", "@size{0,0}")]
        if 'count' in trades:
            tooltips.append(("Count", "@count{0,0}"))
        set_tooltips(fig, tooltips + [("P/L", "@returns_long{+0.[000]%}")],
                     vline=False, renderers=[r1])
        set_tooltips(fig, tooltips + [("P/L", "@returns_short{+0.[000]%}")],
                     vline=False, renderers=[r2])
        fig.yaxis.formatter = NumeralTickFormatter(format="0.[00]%")
        return fig

    def _plot_volume_section():
        """Volume section"""
        fig = new_indicator_figure(y_axis_label="Volume")
        fig.xaxis.formatter = fig_ohlc.xaxis[0].formatter
        fig.xaxis.visible = True
        fig_ohlc.xaxis.visible = False  # Show only Volume's xaxis
        r = fig.vbar('index', BAR_WIDTH, 'Volume', source=source, color=inc_cmap)
        set_tooltips(fig, [('Volume', '@Volume{0.00 a}')], renderers=[r])
        fig.yaxis.formatter = NumeralTickFormatter(format="0 a")
        return fig

    def _plot_superimposed_ohlc():
        """Superimposed, downsampled vbars"""
        time_resolution = pd.DatetimeIndex(df['datetime']).resolution
        resample_rule = (superimpose if isinstance(superimpose, str) else
                         dict(day='M',
                              hour='D',
                              minute='H',
                              second='T',
                              millisecond='S').get(time_resolution))
        if not resample_rule:
            warnings.warn(
                f"'Can't superimpose OHLC data with rule '{resample_rule}'"
                f"(index datetime resolution: '{time_resolution}'). Skipping.",
                stacklevel=4)
            return

        df2 = (df.assign(_width=1).set_index('datetime')
               .resample(resample_rule, label='left')
               .agg(dict(OHLCV_AGG, _width='count')))

        # Check if resampling was downsampling; error on upsampling
        orig_freq = _data_period(df['datetime'])
        resample_freq = _data_period(df2.index)
        if resample_freq < orig_freq:
            raise ValueError('Invalid value for `superimpose`: Upsampling not supported.')
        if resample_freq == orig_freq:
            warnings.warn('Superimposed OHLC plot matches the original plot. Skipping.',
                          stacklevel=4)
            return

        # Place each aggregated bar at the center of the bars it covers.
        df2.index = df2['_width'].cumsum().shift(1).fillna(0)
        df2.index += df2['_width'] / 2 - .5
        df2['_width'] -= .1  # Candles don't touch

        df2['inc'] = (df2.Close >= df2.Open).astype(int).astype(str)
        df2.index.name = None
        source2 = ColumnDataSource(df2)
        fig_ohlc.segment('index', 'High', 'index', 'Low', source=source2, color='#bbbbbb')
        colors_lighter = [lightness(BEAR_COLOR, .92),
                          lightness(BULL_COLOR, .92)]
        fig_ohlc.vbar('index', '_width', 'Open', 'Close', source=source2, line_color=None,
                      fill_color=factor_cmap('inc', colors_lighter, ['0', '1']))

    def _plot_ohlc():
        """Main OHLC bars"""
        fig_ohlc.segment('index', 'High', 'index', 'Low', source=source, color="black")
        r = fig_ohlc.vbar('index', BAR_WIDTH, 'Open', 'Close', source=source,
                          line_color="black", fill_color=inc_cmap)
        return r

    def _plot_ohlc_trades():
        """Trade entry / exit markers on OHLC plot"""
        trade_source.add(trades[['EntryBar', 'ExitBar']].values.tolist(), 'position_lines_xs')
        trade_source.add(trades[['EntryPrice', 'ExitPrice']].values.tolist(), 'position_lines_ys')
        fig_ohlc.multi_line(xs='position_lines_xs', ys='position_lines_ys',
                            source=trade_source, line_color=trades_cmap,
                            legend_label=f'Trades ({len(trades)})',
                            line_width=8, line_alpha=1, line_dash='dotted')

    def _plot_indicators():
        """Strategy indicators"""

        def _too_many_dims(value):
            assert value.ndim >= 2
            if value.ndim > 2:
                warnings.warn(f"Can't plot indicators with >2D ('{value.name}')",
                              stacklevel=5)
                return True
            return False

        class LegendStr(str):
            # The legend string is such a string that only matches
            # itself if it's the exact same object. This ensures
            # legend items are listed separately even when they have the
            # same string contents. Otherwise, Bokeh would always consider
            # equal strings as one and the same legend item.
            def __eq__(self, other):
                return self is other

        ohlc_colors = colorgen()
        indicator_figs = []

        for i, value in enumerate(indicators):
            value = np.atleast_2d(value)

            # Use .get()! A user might have assigned a Strategy.data-evolved
            # _Array without Strategy.I()
            if not value._opts.get('plot') or _too_many_dims(value):
                continue

            is_overlay = value._opts['overlay']
            is_scatter = value._opts['scatter']
            if is_overlay:
                fig = fig_ohlc
            else:
                fig = new_indicator_figure()
                indicator_figs.append(fig)

            tooltips = []
            colors = value._opts['color']
            colors = colors and cycle(_as_list(colors)) or (
                cycle([next(ohlc_colors)]) if is_overlay else colorgen())

            legend_label = LegendStr(value.name)
            # One renderer per indicator row (a 2D indicator plots as
            # several lines/scatters sharing one legend label).
            for j, arr in enumerate(value, 1):
                color = next(colors)
                source_name = f'{legend_label}_{i}_{j}'
                if arr.dtype == bool:
                    arr = arr.astype(int)
                source.add(arr, source_name)
                tooltips.append(f'@{{{source_name}}}{{0,0.0[0000]}}')
                if is_overlay:
                    ohlc_extreme_values[source_name] = arr
                    if is_scatter:
                        fig.scatter(
                            'index', source_name, source=source,
                            legend_label=legend_label, color=color,
                            line_color='black', fill_alpha=.8,
                            marker='circle', radius=BAR_WIDTH / 2 * 1.5)
                    else:
                        fig.line(
                            'index', source_name, source=source,
                            legend_label=legend_label, line_color=color,
                            line_width=1.3)
                else:
                    if is_scatter:
                        r = fig.scatter(
                            'index', source_name, source=source,
                            legend_label=LegendStr(legend_label), color=color,
                            marker='circle', radius=BAR_WIDTH / 2 * .9)
                    else:
                        r = fig.line(
                            'index', source_name, source=source,
                            legend_label=LegendStr(legend_label), line_color=color,
                            line_width=1.3)
                    # Add dashed centerline just because
                    mean = float(pd.Series(arr).mean())
                    if not np.isnan(mean) and (abs(mean) < .1 or
                                               round(abs(mean), 1) == .5 or
                                               round(abs(mean), -1) in (50, 100, 200)):
                        fig.add_layout(Span(location=float(mean), dimension='width',
                                            line_color='#666666', line_dash='dashed',
                                            line_width=.5))

            if is_overlay:
                ohlc_tooltips.append((legend_label, NBSP.join(tooltips)))
            else:
                set_tooltips(fig, [(legend_label, NBSP.join(tooltips))], vline=True, renderers=[r])
                # If the sole indicator line on this figure,
                # have the legend only contain text without the glyph
                if len(value) == 1:
                    fig.legend.glyph_width = 0

        return indicator_figs

    # Construct figure ...

    if plot_equity:
        _plot_equity_section()

    if plot_return:
        _plot_equity_section(is_return=True)

    if plot_drawdown:
        figs_above_ohlc.append(_plot_drawdown_section())

    if plot_pl:
        figs_above_ohlc.append(_plot_pl_section())

    if plot_volume:
        fig_volume = _plot_volume_section()
        figs_below_ohlc.append(fig_volume)

    if superimpose and is_datetime_index:
        _plot_superimposed_ohlc()

    ohlc_bars = _plot_ohlc()
    _plot_ohlc_trades()
    indicator_figs = _plot_indicators()
    if reverse_indicators:
        indicator_figs = indicator_figs[::-1]
    figs_below_ohlc.extend(indicator_figs)

    set_tooltips(fig_ohlc, ohlc_tooltips, vline=True, renderers=[ohlc_bars])

    # Extremes over OHLC plus any overlaid indicators; drives y autoscaling.
    source.add(ohlc_extreme_values.min(1), 'ohlc_low')
    source.add(ohlc_extreme_values.max(1), 'ohlc_high')

    custom_js_args = dict(ohlc_range=fig_ohlc.y_range,
                          source=source)
    if plot_volume:
        custom_js_args.update(volume_range=fig_volume.y_range)

    fig_ohlc.x_range.js_on_change('end', CustomJS(args=custom_js_args,
                                                  code=_AUTOSCALE_JS_CALLBACK))

    plots = figs_above_ohlc + [fig_ohlc] + figs_below_ohlc
    linked_crosshair = CrosshairTool(dimensions='both')

    # Apply the shared cosmetic defaults to every sub-figure.
    for f in plots:
        if f.legend:
            f.legend.visible = show_legend
            f.legend.location = 'top_left'
            f.legend.border_line_width = 1
            f.legend.border_line_color = '#333333'
            f.legend.padding = 5
            f.legend.spacing = 0
            f.legend.margin = 0
            f.legend.label_text_font_size = '8pt'
            f.legend.click_policy = "hide"

        f.min_border_left = 0
        f.min_border_top = 3
        f.min_border_bottom = 6
        f.min_border_right = 10
        f.outline_line_color = '#666666'

        f.add_tools(linked_crosshair)
        wheelzoom_tool = next(wz for wz in f.tools if isinstance(wz, WheelZoomTool))
        wheelzoom_tool.maintain_focus = False

    kwargs = {}
    if plot_width is None:
        kwargs['sizing_mode'] = 'stretch_width'

    fig = gridplot(
        plots,
        ncols=1,
        toolbar_location='right',
        toolbar_options=dict(logo=None),
        merge_tools=True,
        **kwargs
    )
    show(fig, browser=None if open_browser else 'none')
    return fig
def plot_heatmaps(heatmap: pd.Series, agg: Union[Callable, str], ncols: int,
                  filename: str = '', plot_width: int = 1200, open_browser: bool = True):
    """
    Plot a grid of 2D heatmaps, one per pair of optimization parameters
    from `heatmap`'s MultiIndex, aggregating values over the remaining
    dimensions with `agg`. The assembled gridplot is shown (optionally
    opening a browser) and returned.
    """
    if not (isinstance(heatmap, pd.Series) and
            isinstance(heatmap.index, pd.MultiIndex)):
        raise ValueError('heatmap must be heatmap Series as returned by '
                         '`Backtest.optimize(..., return_heatmap=True)`')

    _bokeh_reset(filename)

    # One aggregated frame per unordered pair of parameter dimensions.
    param_combinations = combinations(heatmap.index.names, 2)
    dfs = [heatmap.groupby(list(dims)).agg(agg).to_frame(name='_Value')
           for dims in param_combinations]
    plots = []
    # A single color scale spanning all subplots keeps values comparable.
    cmap = LinearColorMapper(palette='Viridis256',
                             low=min(df.min().min() for df in dfs),
                             high=max(df.max().max() for df in dfs),
                             nan_color='white')
    for df in dfs:
        name1, name2 = df.index.names
        level1 = df.index.levels[0].astype(str).tolist()
        level2 = df.index.levels[1].astype(str).tolist()
        df = df.reset_index()
        # Categorical (factor) axes require string-typed coordinates.
        df[name1] = df[name1].astype('str')
        df[name2] = df[name2].astype('str')
        fig = _figure(x_range=level1,
                      y_range=level2,
                      x_axis_label=name1,
                      y_axis_label=name2,
                      plot_width=plot_width // ncols,
                      plot_height=plot_width // ncols,
                      tools='box_zoom,reset,save',
                      tooltips=[(name1, '@' + name1),
                                (name2, '@' + name2),
                                ('Value', '@_Value{0.[000]}')])
        fig.grid.grid_line_color = None
        fig.axis.axis_line_color = None
        fig.axis.major_tick_line_color = None
        fig.axis.major_label_standoff = 0

        fig.rect(x=name1,
                 y=name2,
                 width=1,
                 height=1,
                 source=df,
                 line_color=None,
                 fill_color=dict(field='_Value',
                                 transform=cmap))
        plots.append(fig)

    fig = gridplot(
        plots,
        ncols=ncols,
        toolbar_options=dict(logo=None),
        toolbar_location='above',
        merge_tools=True,
    )

    show(fig, browser=None if open_browser else 'none')
    return fig
|
import boto3
'''
How to use:
Modify the event and profile variable definitions and execute the script
python3 ./Create_MediaPackage_Channel.py
What does it do:
This script will create a MediaLive Input, one of two prerequisites for
creating a MediaLive Channel.
Dependencies:
This script assumes an appropriate Lambda execution IAM role that has access
to use the MediaLive service.
Inputs:
Expects an input dictionary provided to the Lambda function
input =
{
"ID": 'ID for the channel',
"input_type": 'RTP_PUSH' | 'RTMP_PUSH' | 'RTMP_PULL' |
'URL_PULL' | 'MP4_FILE' | 'MEDIACONNECT'
"source_urls": ["list of", "source urls"]
'bitrate': 'MAX_10_MBPS' | 'MAX_20_MBPS' | 'MAX_50_MBPS'
'resolution': 'SD' | 'HD' | 'UHD'
'mediaconnect_flows': ["list of", "mediaconnectflows"],
}
'''
# # create a rtp push MediaLive input
def rtp_push(client, Id, sg):
    """Create an RTP push MediaLive input and return the API response."""
    return client.create_input(
        Type="RTP_PUSH",
        InputSecurityGroups=[sg],
        Name=Id,
    )
# Deprecated
# create the udp push (legacy) input
# def udp_push(client, Id, sg):
# response = client.create_input(
# Type="UDP_PUSH",
# InputSecurityGroups=[sg],
# Name=Id
# )
# return response
# create a rtmp pull MediaLive input
def rtmp_pull(client, Id, sg, source_a, source_b):
    """Create an RTMP pull MediaLive input with two source URLs."""
    sources = [{'Url': url} for url in (source_a, source_b)]
    return client.create_input(
        Type="RTMP_PULL",
        InputSecurityGroups=[sg],
        Name=Id,
        Sources=sources,
    )
# create a rtmp push MediaLive input
def rtmp_push(client, Id, sg):
    """Create an RTMP push MediaLive input with two stream destinations."""
    destinations = [{'StreamName': "%s-%d" % (Id, n)} for n in (1, 2)]
    return client.create_input(
        Type="RTMP_PUSH",
        InputSecurityGroups=[sg],
        Destinations=destinations,
        Name=Id,
    )
# create a url pull MediaLive input
def url_pull(client, Id, sg, source_a, source_b):
    """Create a URL pull MediaLive input with two source URLs."""
    sources = [{'Url': url} for url in (source_a, source_b)]
    return client.create_input(
        Type="URL_PULL",
        InputSecurityGroups=[sg],
        Name=Id,
        Sources=sources,
    )
# create a MP4 file MediaLive input
def mp4_file(client, Id, sg, input_url_a, input_url_b):
    """Create an MP4 file MediaLive input from two file URLs."""
    sources = [{'Url': url} for url in (input_url_a, input_url_b)]
    return client.create_input(
        Type="MP4_FILE",
        InputSecurityGroups=[sg],
        Name=Id,
        Sources=sources,
    )
# create a MediaConnect input for MediaLive
def mediaconnect(client, Id, sg, flows, arn):
    """Create a MediaConnect-backed MediaLive input using the given flows and role ARN."""
    return client.create_input(
        Type="MEDIACONNECT",
        MediaConnectFlows=flows,
        InputSecurityGroups=[sg],
        Name=Id,
        RoleArn=arn,
    )
# validates that a pull-based inputs have exactly two sources defined.
# returns a properly formatted list of source urls
# validates that a pull-based input has exactly two sources defined.
# returns a properly formatted list of exactly two source urls
def url_validator(input):
    """Return exactly two source URLs from input['source_urls'].

    A single URL is duplicated; extra URLs beyond the first two are
    dropped. Exits the process if the key is missing or the list is empty.
    """
    try:
        urls = input['source_urls']
    except KeyError:
        print("No Source URLs provided, with input, exiting now")
        exit(1)
    if len(urls) == 0:
        print("No Sources defined, exiting now")
        exit(1)
    if len(urls) > 2:
        # BUG FIX: the original truncated to two URLs and then *still*
        # appended urls[0], returning three sources. Only truncate here.
        del urls[2:]
    elif len(urls) == 1:
        # Duplicate the single source so both pipelines have a URL.
        urls.append(urls[0])
    return urls
# validates that a MEDIACONNECT input has MediaConnect flows defined for it.
# returns properly formatted MediaConnect flow ARNs
# validates that a MEDIACONNECT input has MediaConnect flows defined for it.
# returns properly formatted MediaConnect flow ARNs
def media_connect_validator(input):
    """Return up to two MediaConnect flow dicts ({'FlowArn': ...}) from input.

    Accepts either the 'mediaconnect_flows' key (as documented in the
    module header) or the legacy 'mediaconnectflows' spelling the code
    historically used. Exits the process when no flows are provided.
    """
    flows = input.get('mediaconnect_flows', input.get('mediaconnectflows'))
    if flows is None:
        print("No MediaConnect Flows provided, with mediaconnect input, exiting now")
        exit(1)
    if len(flows) == 0:
        print("No MediaConnect flows provided, exiting now")
        exit(1)
    if len(flows) > 2:
        # MediaLive accepts at most two flows (one per pipeline).
        print("Too many MediaConnect flows provided, only using the first two")
        del flows[2:]
    return [{"FlowArn": arn} for arn in flows]
# TODO: replace with better code for providing an input security group
# or force the creation of a new Input security group each time
# TODO: replace with better code for providing an input security group
# or force the creation of a new Input security group each time
def input_sg(client):
    """Return an existing input security group Id, creating a
    fully-open (0.0.0.0/0) group if none exists yet."""
    try:
        groups = client.list_input_security_groups()['InputSecurityGroups']
        return groups[0]['Id']
    except (KeyError, IndexError):
        created = client.create_input_security_group(
            WhitelistRules=[{"Cidr": "0.0.0.0/0"}])
        return created['SecurityGroup']['Id']
def lambda_handler(event, context):
    """Create the MediaLive input described by `event` and return its Id.

    See the module header for the expected event keys ('ID',
    'input_type', plus type-specific keys). Exits the process on an
    unknown input type.
    """
    # The client initiated below is for production Lambda usage and
    # assumes an appropriate IAM role is assigned to the Lambda
    profile = boto3.session.Session()
    live = profile.client('medialive', region_name='us-west-2')
    ID = event['ID']
    input_type = event['input_type']

    # create the specified input
    # (the original also kept unused `destinations`/`sources` locals;
    # only the created input's Id is ever returned)
    if input_type == 'RTP_PUSH':
        created = rtp_push(live, ID, input_sg(live))
    elif input_type == 'RTMP_PUSH':
        created = rtmp_push(live, ID, input_sg(live))
    elif input_type == 'RTMP_PULL':
        urls = url_validator(event)
        created = rtmp_pull(live, ID, input_sg(live), urls[0], urls[1])
    elif input_type == 'URL_PULL':
        urls = url_validator(event)
        created = url_pull(live, ID, input_sg(live), urls[0], urls[1])
    elif input_type == 'MP4_FILE':
        urls = url_validator(event)
        created = mp4_file(live, ID, input_sg(live), urls[0], urls[1])
    elif input_type == 'MEDIACONNECT':
        flows = media_connect_validator(event)
        # BUG FIX: the original referenced an undefined name `arn` here,
        # which raised NameError for every MEDIACONNECT request. Read the
        # role ARN from the event instead.
        # NOTE(review): event key name assumed — confirm against callers.
        role_arn = event.get('role_arn')
        created = mediaconnect(live, ID, input_sg(live), flows, role_arn)
    else:
        print("No valid input type specified, exiting now")
        exit(1)

    return created['Input']['Id']
|
<filename>convert.py<gh_stars>1-10
# Quick to write and slow to run Doxygen to XML Comment converter.
# <NAME> 2011
def endComment():
    """
    @brief Reset the parser state for the next comment block.
    """
    global sEType, sEVar, sEData, iIndent
    # Back to the defaults: expecting a @brief, no variable, no data,
    # and an as-yet-unknown indentation level.
    sEType, sEVar, sEData, iIndent = BRIEF, None, "", -1
def handleExistingData(iIndent):
    """
    @brief Write out any existing data as an XML doc comment.
    @param iIndent The indent level.
    """
    global sEType, sEVar, sEData
    # If none, quit.
    if not sEType:
        return
    # Skip if we have no data.
    if not sEData:
        return
    # Insert tab level and comments into a header.
    sHead = (" " * iIndent) + "/// "
    # Sanitise data.
    # BUG FIX: the original called sEData.rstrip() and discarded the
    # result (str.rstrip returns a new string); assign it back.
    sEData = sEData.rstrip()
    # Swap breaks for heads.
    sEData = sEData.replace(BREAK, "\n" + sHead)
    # Write out the respective blocks.
    # (The original had a second, unreachable `elif sEType == RETURN`
    # branch duplicating the first; it has been removed.)
    if sEType == BRIEF:
        pOutFile.write(sHead + "<summary>\n")
        pOutFile.write(sHead + sEData + "\n")
        pOutFile.write(sHead + "</summary>\n")
    elif sEType == PARAM:
        pOutFile.write(sHead + "<param name=\"" + str(sEVar) + "\">" + str(sEData) + "</param>\n")
    elif sEType == RETURN:
        pOutFile.write(sHead + "<returns>" + str(sEData) + "</returns>\n")
    elif sEType == AUTHOR:
        pOutFile.write(sHead + "<author>" + str(sEData) + "</author>\n")
    elif sEType == DATE:
        pOutFile.write(sHead + "<date>" + str(sEData) + "</date>\n")
    elif sEType == REMARK:
        pOutFile.write(sHead + str(sEData) + "\n")
    # Zap any leftover data.
    sEType = None
    sEVar = None
    sEData = ""
def dataFromString(sString, iStart = 0):
    """
    @brief Parse data out of a line which may or may not end in an '*/'.
    @param sString The string to parse.
    @param iStart The starting index to parse from. Default = 0 (start of string).
    @return The data, without any trailing '*/' and right-stripped.
    """
    # Cut at the comment terminator when present, else take the rest.
    iCut = sString.find(CLOSE_COMMENT)
    if iCut < 0:
        iCut = len(sString)
    return sString[iStart:iCut].rstrip()
def dataFromLine(sLine):
    """
    @brief Parse data from a comment line.
    @param sLine The comment line to parse.
    @return The rstrip'ed text following the '* ' marker, or "" if absent.
    """
    iMark = sLine.find("* ")
    if iMark < 0:
        return ""
    # Skip over the two marker characters and parse the remainder.
    return dataFromString(sLine, iMark + 2)
def handleCommentLine(sLine, iLine):
    """
    @brief Write data from a comment line back to the thingy.
    @param sLine The line data.
    @param iLine The line number.
    @return Is the end of the comment block on this line.
    """
    global sEType, sEVar, sEData, iIndent
    # Work out the indentation level to operate at.
    # This is only done once for each comment block.
    if iIndent < 0:
        # NOTE(review): integer division under Python 2 (this file uses
        # py2 print statements); under Python 3 this would yield a float.
        iIndent = (len(sLine) - len(sLine.lstrip())) / 4
    # If there is no '@' symbol, save as much data as we can from the commentline.
    if START_SYMBOL not in sLine:
        # If we are a directive which only accepts single line values then anything extra is a remark.
        if sEType in (PARAM, RETURN, AUTHOR, DATE):
            handleExistingData(iIndent)
            sEType = REMARK
            sEData = ""
        # Get the data from the line and append it if it is exists.
        sData = dataFromLine(sLine)
        if len(sData) > 0:
            # If we already have data, insert a breakline.
            if sEData:
                sEData += BREAK + sData
            # Otherwise do not.
            else:
                sEData = sData
        # If we have an end comment on this line, exit the comment by returning false.
        if CLOSE_COMMENT in sLine:
            handleExistingData(iIndent)
            endComment()
            return False
        return True
    # Since the line does contain an '@' symbol, push any existing data.
    handleExistingData(iIndent)
    # If this line contains an '@' symbol then work out what is after it.
    sEType = sLine.split(START_SYMBOL)[1].split(" ")[0]
    # If the comment data type is BRIEF
    if sEType == BRIEF:
        sEData = dataFromString(sLine, sLine.find(BRIEF) + len(BRIEF) + 1)
    elif sEType == PARAM:
        # A @param line is "<name> <description>"; split at the first space.
        sTemp = dataFromString(sLine, sLine.find(PARAM) + len(PARAM) + 1)
        iChop = sTemp.find(" ") + 1
        sEData = sTemp[iChop:]
        sEVar = sTemp[:iChop].rstrip()
    elif sEType == RETURN:
        sEData = dataFromString(sLine, sLine.find(RETURN) + len(RETURN) + 1)
    elif sEType == DATE:
        sEData = dataFromString(sLine, sLine.find(DATE) + len(DATE) + 1)
    elif sEType == AUTHOR:
        sEData = dataFromString(sLine, sLine.find(AUTHOR) + len(AUTHOR) + 1)
    # If we have an end comment on this line, exit the comment by returning false.
    if CLOSE_COMMENT in sLine:
        handleExistingData(iIndent)
        endComment()
        return False
    return True
## Modules
import time
import shutil
import os

## Constants
# Doxygen directive marker and C-style block-comment delimiters.
START_SYMBOL = "@"
OPEN_COMMENT = "/**"
CLOSE_COMMENT = "*/"
# Recognised Doxygen directive names.
BRIEF = "brief"
PARAM = "param"
RETURN = "return"
AUTHOR = "author"
DATE = "date"
REMARK = "remark"
# Internal sentinel used to join multi-line data before output.
BREAK = "!BREAK!"

## Define globals.
# NOTE(review): a `global` statement at module level is a no-op; the
# shared parser state actually works via the `global` declarations
# inside the functions above.
global sEType, sEVar, sEData, pOutFile
## Main function.
def convert(sInFile, sOutFile = None, bReport = True):
"""
@brief A function which will convert the contents of one file and write them to an output file.
@param sInFile The file to convert from doxycomments to xml comments.
@param sOutFile OPTIONAL The file to save them in. Default is a _d appended version of the old one.
@param bReport Report the number of comments and time it took etc.
"""
# Globals
global pOutFile
# File jiggery.
if not sOutFile:
sOutFile = sInFile.replace(".", "_dtemp.")
# Some initial state and a line counter.
endComment()
bInComment = False
iLine = 0
iComments = 0
iStartTime = time.clock()
# Open the files.
pOutFile = open(sOutFile, "w")
with open(sInFile) as pIn:
# For each line in the file.
for sLine in pIn:
# Increment counter.
iLine += 1
# If we are in a comment, handle the line.
if bInComment:
bInComment = handleCommentLine(sLine, iLine)
# Check the new line to see if it opens a comment line.
elif OPEN_COMMENT in sLine:
iComments += 1
bInComment = handleCommentLine(sLine, iLine)
# We are neither a comment so write the line back to the source.
else:
pOutFile.write(sLine)
# Close the output file.
pOutFile.close()
# Backup the old file.
#shutil.copy(sInFile, sInFile + "_dbackup")
# Copy the new file over the old file.
shutil.copy(sOutFile, sInFile)
os.remove(sOutFile)
# Report.
if bReport:
print sInFile
print str(iComments) + " comment blocks converted within "+str(iLine)+" lines in approx "+str(round(time.clock() - iStartTime, 2))+" seconds."
# Command-line entry point: convert each file named on the command line
# in place, then wait for a keypress before exiting.
if __name__ == "__main__":
    import sys
    if len(sys.argv) == 1:
        print "Please specify an input file."
    else:
        lFiles = sys.argv[1:]
        for sFile in lFiles:
            convert(sFile)
            print "-----"
    raw_input("Done")
|
import sys, pickle, os, time, yaml

# Make the bundled scholar.py module importable from the sibling directory.
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(cur_dir, 'scholar_py'))
import scholar

# On-disk cache locations; see the read_cache/save_cache aliases below
# for which backend is active.
cache_file = os.path.join(cur_dir, 'papers.pkl')
yaml_cache_file = os.path.join(cur_dir, 'papers_cache.yml')
def get_paper_data(querier, paper):
if type(paper) is dict:
title = paper.get('title')
cluster_id = paper.get('cluster_id')
elif type(paper) is str:
title = paper
else:
raise "Input arg paper is of an invalid format %s" % repr(paper)
if cluster_id:
print 'Query by cluster_id'
query = scholar.ClusterScholarQuery(cluster = cluster_id)
else:
print 'Query by title "%s"' % title
query = scholar.SearchScholarQuery()
query.set_phrase(title)
query.set_num_page_results(1)
# This is important, set this to 1 can reduce the possiblility of get blocked by google
querier.send_query(query)
scholar.txt(querier, with_globals=True)
articles = querier.articles
time.sleep(1)
# for art in articles:
# print(encode(art.as_txt()) + '\n')
return articles[0] # Only return the top result
def get_scholar_data(paper_list):
    # Build a querier configured to export BibTeX citations.
    querier = scholar.ScholarQuerier()
    settings = scholar.ScholarSettings()
    settings.set_citation_format(scholar.ScholarSettings.CITFORM_BIBTEX)
    querier.apply_settings(settings)
    scholar.ScholarConf.LOG_LEVEL = 3

    cache = read_cache(cache_file)
    assert(cache != None)
    if cache.get('paper_list') == paper_list:
        print 'Use cache from file %s' % cache_file
        # Use cache to reduce the number of google scholar request
    else:
        # Update cache, instead of flushing a complete new one
        print 'Get data from google scholar'
        # Only query Scholar for papers whose titles are not cached yet.
        cache_paper_title = [p['title'] for p in cache['paper_list']]
        missing_paper = [p for p in paper_list if p['title'] not in cache_paper_title]
        missing_scholar_data = [get_paper_data(querier, v) for v in missing_paper]
        # update cache
        cache['paper_list'] += missing_paper
        cache['scholar_data'] += missing_scholar_data
        save_cache(cache_file, cache)
    save_cache(cache_file, cache)  # Enforce to flush cache
    return cache['scholar_data']
def read_pickle_cache(cache_file):
# Use pickle to implement cache
print 'Load cache from file %s' % cache_file
if not os.path.isfile(cache_file):
empty_db = dict(paper_list = [], scholar_data = [])
return empty_db
with open(cache_file, 'r') as f:
db = pickle.load(f)
assert(db.get('paper_list'))
assert(db.get('scholar_data'))
assert(len(db['paper_list']) == len(db['scholar_data']))
return db
def save_pickle_cache(cache_file, obj):
print 'Save obj to cache %s' % cache_file
with open(cache_file, 'w') as f:
pickle.dump(obj, f)
# Select the active cache backend (pickle; YAML helpers are unused below).
read_cache = read_pickle_cache
save_cache = save_pickle_cache
def read_yaml_cache(cache_file):
print 'Load cache from file %s' % cache_file
if not os.path.isfile(cache_file):
return None
with open(cache_file, 'r') as f:
return yaml.load(f)
def save_yaml_cache(cache_file, obj):
print 'Save obj to cache %s' % cache_file
with open(cache_file, 'w') as f:
yaml.dump(obj, f)
|
#!/usr/bin/python3
'''A set of convenience functions for converting among different phone codes.
Usage:
import phondler
    print(phondler.CODES)      # the known phone codes
    print(phondler.LANGUAGES)  # the known languages
s1 = phondler.convert(s0, code0, code1, language)
# s0 and s1 are strings containing individual symbols
# code0 and code1 must be members of phondler.CODES, of course
# language must be a member of phondler.LANGUAGES, of course
# (but not all languages are known for all phone codes)
l1 = phondler.convertlist(l0, code0, code1, language)
# l0, l1 are lists of symbols
phondler.vowels
phondler.consonants
# list known IPA symbols of vowels, consonants.
# for other tables, see phondler_tables.py
'''
import re,sys
import phondler.phondler_tables as phodic_tables

# Supported phone-code systems and three-letter language codes.
CODES=set(('ipa','arpabet','xsampa','disc','callhome'))
LANGUAGES=set(('eng','deu','nld','arz','cmn','spa','yue','lao','vie'))

# Known IPA vowel and consonant symbols, re-exported from the tables module.
vowels = phodic_tables._ipa_vowels
consonants = phodic_tables._ipa_consonants
#####################################################################
def translate_string(s, d):
    '''(tl,ttf)=translate_string(s,d):
    Translate the string, s, using symbols from dict, d, as:
    1. Min # untranslatable symbols, then 2. Min # symbols.
    tl = list of translated or untranslated symbols.
    ttf[n] = True if tl[n] was translated, else ttf[n]=False.
    '''
    length = len(s)
    sym_cost = 1    # path cost per translated symbol
    oov_cost = 10   # path cost per untranslatable symbol
    longest = max(len(key) for key in d)  # longest dictionary symbol
    # lattice[i] = (best cost covering s[:i], backpointer index,
    #               emitted symbol, translated flag)
    lattice = [(0, 0, '', True)]
    for end in range(1, length + 1):
        # Default hypothesis: s[end-1] is a lone untranslatable character.
        best = (oov_cost + lattice[end - 1][0], end - 1, s[end - 1:end], False)
        # Try every dictionary symbol that ends at position `end`.
        for span in range(1, min(end, longest) + 1):
            piece = s[end - span:end]
            candidate = sym_cost + lattice[end - span][0]
            if piece in d and candidate < best[0]:
                best = (candidate, end - span, d[piece], True)
        lattice.append(best)
    # Follow backpointers from the end, then reverse into reading order.
    symbols = []
    flags = []
    pos = length
    while pos > 0:
        symbols.append(lattice[pos][2])
        flags.append(lattice[pos][3])
        pos = lattice[pos][1]
    return (symbols[::-1], flags[::-1])
def attach_tones_to_vowels(il, tones, vowels, searchstep, catdir):
    '''Return a copy of il, with each tone attached to nearest vowel if any.
    searchstep=1 means search for next vowel, searchstep=-1 means prev vowel.
    catdir>=0 means concatenate after vowel, catdir<0 means cat before vowel.
    Tones are not combined, except those also included in the vowels set.
    '''
    ol = il.copy()
    # v scans the list in the direction of searchstep; t remembers the index
    # of a pending tone symbol (-1 = no tone waiting to be attached).
    v = 0 if searchstep>0 else len(ol)-1
    t = -1
    while 0<=v and v<len(ol):
        # A vowel — or a multi-character entry that starts with a vowel,
        # i.e. a vowel that already carries a mark — absorbs the pending tone.
        if (ol[v] in vowels or (len(ol[v])>1 and ol[v][0] in vowels)) and t>=0:
            ol[v]= ol[v]+ol[t] if catdir>=0 else ol[t]+ol[v]
            ol = ol[0:t] + ol[(t+1):] # Remove the tone
            # NOTE(review): removing index t shifts every later element one
            # position left while v is unchanged — confirm the intended next
            # element is still visited when searchstep == 1.
            t = -1 # Done with that tone
        if v<len(ol) and ol[v] in tones:
            t = v
        v += searchstep
    return(ol)
#####################################################################
# X-SAMPA
def ipa2xsampa(x,language):
    '''Attempt to return X-SAMPA equivalent of an IPA phone x.'''
    symbols, _ = translate_string(x, phodic_tables._ipa2xsampa)
    return ''.join(symbols)
def xsampa2ipa(x,language):
    '''Return the IPA equivalent of X-SAMPA phone x.'''
    symbols, _ = translate_string(x, phodic_tables._xsampa_and_diac2ipa)
    return ''.join(symbols)
######################################################################
# Language-dependent lexical tones and stress markers
def tone2ipa(n, language):
    '''Return the IPA tone symbol for tone code n (first character ignored,
    remainder parsed as the tone number) in the given language.'''
    tone_number = int(n[1:])
    return phodic_tables._tone2ipa[language][tone_number]
#####################################################################
# DISC, the system used by CELEX
def disc2ipa(x, L):
    '''Convert DISC symbol x into IPA, for language L'''
    # Dutch and English have language-specific DISC tables; everything
    # else falls back to the generic one.
    if L=='nld':
        table = phodic_tables._disc2ipa_dutch
    elif L=='eng':
        table = phodic_tables._disc2ipa_english
    else:
        table = phodic_tables._disc2ipa
    symbols, _ = translate_string(x, table)
    return ''.join(symbols)
def ipa2disc(x,L):
    '''Convert IPA symbol x into DISC'''
    symbols, _ = translate_string(x, phodic_tables._ipa2disc)
    return ''.join(symbols)
def ipa2disc_old(x,L):
    '''Convert IPA symbol x into DISC, for language L'''
    # Convert whole thing if possible; otherwise try prefix+vowel; else quit
    table = phodic_tables._ipa2disc
    if x in table:
        return table[x]
    head, tail = x[0], x[1:]
    if head in table and tail in table:
        return table[head] + table[tail]
    raise KeyError('Unknown IPA symbol %s for language %s'%(x,L))
#######################################################################
# Callhome phone codes
def callhome2ipa(x,L):
    '''Convert callhome phone symbol x into IPA for language L'''
    (il,ttf)=translate_string(x, phodic_tables._callhome2ipa[L])
    # Re-attach stress marks / tones to the appropriate vowel per language.
    if L=='arz':
        ol = attach_tones_to_vowels(il, phodic_tables._ipa_stressmarkers,
                                    phodic_tables._ipa_vowels, -1, -1)
    elif L=='cmn':
        ol=attach_tones_to_vowels(il, phodic_tables._ipa_tones,
                                  phodic_tables._ipa_vowels, -1, 1)
    elif L=='spa':
        ol=attach_tones_to_vowels(il, phodic_tables._ipa_stressmarkers,
                                  phodic_tables._ipa_vowels, -1, -1)
    else:
        # Bug fix: 'ol' was previously unbound for any other language,
        # raising UnboundLocalError at the return below; languages without
        # tone/stress handling now pass through unchanged.
        ol = il
    return(''.join(ol))
def ipa2callhome(x,L):
    '''Convert IPA symbol x into callhome notation, for language L'''
    (il,ttf)=translate_string(x, phodic_tables._ipa2callhome[L])
    # Re-attach stress/tone digits to the appropriate vowel per language.
    if L=='arz':
        ol=attach_tones_to_vowels(il,'012', phodic_tables._callhome_vowels['arz'], 1, 1)
    elif L=='cmn':
        ol=attach_tones_to_vowels(il,'012345', phodic_tables._callhome_vowels['cmn'], -1, 1)
    elif L=='spa':
        ol=attach_tones_to_vowels(il,'012', phodic_tables._callhome_vowels['spa'], 1, 1)
    else:
        # Bug fix: 'ol' was previously unbound for any other language,
        # raising UnboundLocalError at the return below; pass through
        # unchanged instead.
        ol = il
    return(''.join(ol))
#########################################################################
# ARPABET and TIMIT
def arpabet2ipa(x,language):
    '''Convert ARPABET symbol X to IPA'''
    translated, _ = translate_string(x, phodic_tables._arpabet2ipa)
    # Attach stress marks to the preceding vowel, concatenated before it.
    merged = attach_tones_to_vowels(translated, phodic_tables._ipa_stressmarkers,
                                    phodic_tables._ipa_vowels, -1, -1)
    return ''.join(merged)
def ipa2arpabet(x,language):
    '''Convert IPA symbols to ARPABET'''
    translated, _ = translate_string(x, phodic_tables._ipa2arpabet)
    # Attach stress digits 0/1/2 after the following vowel.
    merged = attach_tones_to_vowels(translated, '012', phodic_tables._arpabet_vowels, 1, 1)
    return ''.join(merged)
def timit2ipa(x,L):
    '''Convert TIMIT phone codes to IPA'''
    # The TIMIT table is keyed by upper-case codes.
    upper = x.upper()
    translated, _ = translate_string(upper, phodic_tables._timit2ipa)
    merged = attach_tones_to_vowels(translated, phodic_tables._ipa_stressmarkers,
                                    phodic_tables._ipa_vowels, -1, -1)
    return ''.join(merged)
#######################################################################
# phondler.convert and phondler.convertlist
# are used to convert symbols and lists of symbols, respectively,
# to or from IPA, by calling appropriate other functions.
#
# Maps each non-IPA code to its converter pair: (to_ipa, from_ipa).
_convertfuncs = {
    'arpabet': (arpabet2ipa, ipa2arpabet),
    'xsampa': (xsampa2ipa, ipa2xsampa),
    'disc': (disc2ipa, ipa2disc),
    'callhome': (callhome2ipa,ipa2callhome)
}
def convert(s0, c0, c1, language):
    '''Convert symbol s0 from code c0 to code c1; exactly one side must be 'ipa'.'''
    TO_IPA, FROM_IPA = 0, 1
    if c0 == 'ipa' and c1 != 'ipa':
        return _convertfuncs[c1][FROM_IPA](s0, language)
    if c0 != 'ipa' and c1 == 'ipa':
        return _convertfuncs[c0][TO_IPA](s0, language)
    raise RuntimeError('must convert to/from ipa, not %s to %s'%(c0,c1))
def convertlist(l0, c0, c1, language):
    '''Convert every symbol in list l0 from code c0 to code c1.'''
    converted = []
    for sym in l0:
        converted.append(convert(sym, c0, c1, language))
    return converted
|
from PyQt5.QtCore import QPoint
from PyQt5.QtCore import Qt, pyqtSlot, pyqtSignal
from PyQt5.QtGui import QContextMenuEvent
from PyQt5.QtGui import QIcon
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import QAction
from PyQt5.QtWidgets import QActionGroup
from PyQt5.QtWidgets import QMenu
from PyQt5.QtWidgets import QUndoStack
from urh.plugins.InsertSine.InsertSinePlugin import InsertSinePlugin
from urh.plugins.PluginManager import PluginManager
from urh.signalprocessing.ProtocolAnalyzer import ProtocolAnalyzer
from urh.signalprocessing.Signal import Signal
from urh.ui.ROI import ROI
from urh.ui.actions.EditSignalAction import EditSignalAction, EditAction
from urh.ui.views.ZoomableGraphicView import ZoomableGraphicView
class EditableGraphicView(ZoomableGraphicView):
    """Zoomable signal view with editing support.

    Adds clipboard (copy/paste), delete/crop/mute of selections, undo/redo,
    sine-wave insertion and participant assignment, all exposed through a
    context menu that is rebuilt on every right click.
    """
    save_as_clicked = pyqtSignal()          # user chose "Save Signal as..."
    create_clicked = pyqtSignal(int, int)   # (start, end) of selection for a new signal
    set_noise_clicked = pyqtSignal()        # noise level should be taken from selection
    participant_changed = pyqtSignal()      # participant assignment of messages changed
    def __init__(self, parent=None):
        super().__init__(parent)
        self.participants = []
        self.__sample_rate = None  # For default sample rate in insert sine dialog
        self.autoRangeY = True
        self.save_enabled = False  # Whether the displayed signal can be saved
        self.create_new_signal_enabled = False
        self.participants_assign_enabled = False
        self.cache_qad = False  # cache qad demod after edit operations?
        self.__signal = None  # type: Signal
        self.stored_item = None  # For copy/paste
        self.paste_position = 0  # Where to paste? Set in contextmenuevent
        self.context_menu_position = None  # type: QPoint
        self._init_undo_stack(QUndoStack())
        self.addAction(self.undo_action)
        self.addAction(self.redo_action)
        # Clipboard/editing actions: shortcuts active while this widget
        # (or a child) has focus.
        self.copy_action = QAction(self.tr("Copy selection"), self)  # type: QAction
        self.copy_action.setShortcut(QKeySequence.Copy)
        self.copy_action.triggered.connect(self.on_copy_action_triggered)
        self.copy_action.setShortcutContext(Qt.WidgetWithChildrenShortcut)
        self.copy_action.setIcon(QIcon.fromTheme("edit-copy"))
        self.addAction(self.copy_action)
        self.paste_action = QAction(self.tr("Paste"), self)  # type: QAction
        self.paste_action.setShortcut(QKeySequence.Paste)
        self.paste_action.triggered.connect(self.on_paste_action_triggered)
        self.paste_action.setShortcutContext(Qt.WidgetWithChildrenShortcut)
        self.paste_action.setIcon(QIcon.fromTheme("edit-paste"))
        self.addAction(self.paste_action)
        self.delete_action = QAction(self.tr("Delete selection"), self)
        self.delete_action.setShortcut(QKeySequence.Delete)
        self.delete_action.triggered.connect(self.on_delete_action_triggered)
        self.delete_action.setShortcutContext(Qt.WidgetWithChildrenShortcut)
        self.delete_action.setIcon(QIcon.fromTheme("edit-delete"))
        self.addAction(self.delete_action)
        self.save_as_action = QAction(self.tr("Save Signal as..."), self)  # type: QAction
        self.save_as_action.setIcon(QIcon.fromTheme("document-save-as"))
        self.save_as_action.setShortcut(QKeySequence.SaveAs)
        self.save_as_action.triggered.connect(self.save_as_clicked.emit)
        self.save_as_action.setShortcutContext(Qt.WidgetWithChildrenShortcut)
        self.addAction(self.save_as_action)
        # Sine insertion is provided by a plugin; bold font highlights it
        # in the context menu.
        self.insert_sine_action = QAction(self.tr("Insert sine wave..."), self)
        font = self.insert_sine_action.font()
        font.setBold(True)
        self.insert_sine_action.setFont(font)
        self.insert_sine_action.triggered.connect(self.on_insert_sine_action_triggered)
        self.insert_sine_plugin = InsertSinePlugin()
        self.insert_sine_plugin.insert_sine_wave_clicked.connect(self.on_insert_sine_wave_clicked)
    def _init_undo_stack(self, undo_stack):
        """Adopt undo_stack and (re)create the undo/redo actions bound to it."""
        self.undo_stack = undo_stack
        self.undo_action = self.undo_stack.createUndoAction(self)
        self.undo_action.setIcon(QIcon.fromTheme("edit-undo"))
        self.undo_action.setShortcut(QKeySequence.Undo)
        self.undo_action.setShortcutContext(Qt.WidgetWithChildrenShortcut)
        self.redo_action = self.undo_stack.createRedoAction(self)
        self.redo_action.setIcon(QIcon.fromTheme("edit-redo"))
        self.redo_action.setShortcut(QKeySequence.Redo)
        self.redo_action.setShortcutContext(Qt.WidgetWithChildrenShortcut)
    @property
    def sample_rate(self) -> float:
        return self.__sample_rate
    @sample_rate.setter
    def sample_rate(self, value):
        self.__sample_rate = value
    @property
    def signal(self) -> Signal:
        return self.__signal
    @property
    def protocol(self) -> ProtocolAnalyzer:
        return None  # Gets overwritten in EpicGraphicView
    @property
    def selection_area(self) -> ROI:
        return self.scene().selection_area
    @selection_area.setter
    def selection_area(self, value):
        self.scene().selection_area = value
    def set_signal(self, signal: Signal):
        self.__signal = signal
    def create_context_menu(self):
        """Build the context menu for the current selection/feature flags."""
        # Remember where a paste would go, in scene coordinates.
        self.paste_position = int(self.mapToScene(self.context_menu_position).x())
        menu = QMenu(self)
        if self.save_enabled:
            # NOTE(review): self.save_action is not created in this class —
            # presumably defined by a subclass or the base view; confirm.
            menu.addAction(self.save_action)
            menu.addAction(self.save_as_action)
            menu.addSeparator()
        menu.addAction(self.copy_action)
        self.copy_action.setEnabled(not self.selection_area.is_empty)
        menu.addAction(self.paste_action)
        self.paste_action.setEnabled(self.stored_item is not None)
        menu.addSeparator()
        if PluginManager().is_plugin_enabled("InsertSine"):
            menu.addAction(self.insert_sine_action)
        if not self.selection_area.is_empty:
            menu.addSeparator()
        menu.addAction(self.zoom_in_action)
        menu.addAction(self.zoom_out_action)
        if not self.selection_area.is_empty:
            zoom_action = menu.addAction(self.tr("Zoom selection"))
            zoom_action.setIcon(QIcon.fromTheme("zoom-fit-best"))
            zoom_action.triggered.connect(self.on_zoom_action_triggered)
            menu.addSeparator()
            menu.addAction(self.delete_action)
            crop_action = menu.addAction(self.tr("Crop to selection"))
            crop_action.triggered.connect(self.on_crop_action_triggered)
            mute_action = menu.addAction(self.tr("Mute selection"))
            mute_action.triggered.connect(self.on_mute_action_triggered)
            menu.addSeparator()
            if self.create_new_signal_enabled:
                create_action = menu.addAction(self.tr("Create signal from selection"))
                create_action.setIcon(QIcon.fromTheme("document-new"))
                create_action.triggered.connect(self.on_create_action_triggered)
        # Participant submenu is only offered when messages are selected
        # (subclasses provide selected_messages).
        if hasattr(self, "selected_messages"):
            selected_messages = self.selected_messages
        else:
            selected_messages = []
        if len(selected_messages) == 1:
            selected_msg = selected_messages[0]
        else:
            selected_msg = None
        self.participant_actions = {}
        if len(selected_messages) > 0 and self.participants_assign_enabled:
            participant_group = QActionGroup(self)
            participant_menu = menu.addMenu("Participant")
            none_participant_action = participant_menu.addAction("None")
            none_participant_action.setCheckable(True)
            none_participant_action.setActionGroup(participant_group)
            none_participant_action.triggered.connect(self.on_none_participant_action_triggered)
            if selected_msg and selected_msg.participant is None:
                none_participant_action.setChecked(True)
            for participant in self.participants:
                pa = participant_menu.addAction(participant.name + " (" + participant.shortname + ")")
                pa.setCheckable(True)
                pa.setActionGroup(participant_group)
                if selected_msg and selected_msg.participant == participant:
                    pa.setChecked(True)
                self.participant_actions[pa] = participant
                pa.triggered.connect(self.on_participant_action_triggered)
        # scene_type == 0 presumably means an analog signal scene — confirm.
        if hasattr(self, "scene_type") and self.scene_type == 0:
            if not self.selection_area.is_empty:
                menu.addSeparator()
                noise_action = menu.addAction(self.tr("Set noise level from Selection"))
                noise_action.triggered.connect(self.on_noise_action_triggered)
        menu.addSeparator()
        menu.addAction(self.undo_action)
        menu.addAction(self.redo_action)
        return menu
    def contextMenuEvent(self, event: QContextMenuEvent):
        """Build and show the context menu at the click position."""
        self.context_menu_position = event.pos()
        menu = self.create_context_menu()
        menu.exec_(self.mapToGlobal(event.pos()))
        self.context_menu_position = None
    def clear_selection(self):
        self.set_selection_area(0, 0)
    @pyqtSlot()
    def on_insert_sine_action_triggered(self):
        """Open the insert-sine dialog, pre-sized to the current selection."""
        if not self.selection_area.is_empty:
            num_samples = self.selection_area.width
        else:
            num_samples = None
        original_data = self.signal.data if self.signal is not None else None
        dialog = self.insert_sine_plugin.get_insert_sine_dialog(original_data=original_data,
                                                                position=self.paste_position,
                                                                sample_rate=self.sample_rate,
                                                                num_samples=num_samples)
        dialog.show()
    @pyqtSlot()
    def on_insert_sine_wave_clicked(self):
        """Push an undoable insert of the generated sine wave at paste_position."""
        if self.insert_sine_plugin.complex_wave is not None:
            self.clear_selection()
            insert_action = EditSignalAction(signal=self.signal, protocol=self.protocol,
                                             data_to_insert=self.insert_sine_plugin.complex_wave,
                                             position=self.paste_position,
                                             mode=EditAction.insert, cache_qad=self.cache_qad)
            self.undo_stack.push(insert_action)
    @pyqtSlot()
    def on_copy_action_triggered(self):
        """Copy the selected sample range into stored_item."""
        if not self.selection_area.is_empty:
            self.stored_item = self.signal._fulldata[int(self.selection_area.start):int(self.selection_area.end)]
    @pyqtSlot()
    def on_paste_action_triggered(self):
        """Push an undoable paste of stored_item at paste_position."""
        if self.stored_item is not None:
            # paste_position is set in ContextMenuEvent
            self.clear_selection()
            paste_action = EditSignalAction(signal=self.signal, protocol=self.protocol,
                                            start=self.selection_area.start, end=self.selection_area.end,
                                            data_to_insert=self.stored_item, position=self.paste_position,
                                            mode=EditAction.paste, cache_qad=self.cache_qad)
            self.undo_stack.push(paste_action)
    @pyqtSlot()
    def on_delete_action_triggered(self):
        """Push an undoable delete of the selected range, then recenter."""
        if not self.selection_area.is_empty:
            start, end = self.selection_area.start, self.selection_area.end
            self.clear_selection()
            del_action = EditSignalAction(signal=self.signal, protocol=self.protocol,
                                          start=start, end=end,
                                          mode=EditAction.delete, cache_qad=self.cache_qad)
            self.undo_stack.push(del_action)
            self.centerOn(start, self.y_center)
    @pyqtSlot()
    def on_crop_action_triggered(self):
        """Push an undoable crop of the signal to the selected range."""
        if not self.selection_area.is_empty:
            start, end = self.selection_area.start, self.selection_area.end
            self.clear_selection()
            crop_action = EditSignalAction(signal=self.signal, protocol=self.protocol,
                                           start=start, end=end,
                                           mode=EditAction.crop, cache_qad=self.cache_qad)
            self.undo_stack.push(crop_action)
    @pyqtSlot()
    def on_mute_action_triggered(self):
        """Push an undoable mute (zeroing) of the selected range."""
        mute_action = EditSignalAction(signal=self.signal, protocol=self.protocol,
                                       start=self.selection_area.start, end=self.selection_area.end,
                                       mode=EditAction.mute, cache_qad=self.cache_qad)
        self.undo_stack.push(mute_action)
    @pyqtSlot()
    def on_zoom_action_triggered(self):
        self.zoom_to_selection(self.selection_area.x, self.selection_area.end)
    @pyqtSlot()
    def on_create_action_triggered(self):
        self.create_clicked.emit(self.selection_area.x, self.selection_area.end)
    @pyqtSlot()
    def on_none_participant_action_triggered(self):
        """Detach the participant from all selected messages."""
        for msg in self.selected_messages:
            msg.participant = None
        self.participant_changed.emit()
    @pyqtSlot()
    def on_participant_action_triggered(self):
        """Assign the participant of the triggering menu action to all selected messages."""
        for msg in self.selected_messages:
            msg.participant = self.participant_actions[self.sender()]
        self.participant_changed.emit()
    @pyqtSlot()
    def on_noise_action_triggered(self):
        self.set_noise_clicked.emit()
|
import matplotlib
matplotlib.use("TKAgg")
import matplotlib.pyplot as plt
from matplotlib import animation
import networkx as nx
from networkx.drawing.nx_agraph import graphviz_layout
import random
def move2str(move):
    """Render a move as text, stripping any 'player ' prefix noise."""
    text = str(move)
    return text.replace('player ', '')
class Tree(object):
    """Records snapshots of an MCTS-style search tree (visit counts N and
    value sums Q) and renders them as an animated graphviz layout."""
    def __init__(self):
        self.trees = []  # one tree snapshot per recorded step
        self.Ns = []     # visit-count dicts, parallel to self.trees
        self.Qs = []     # value-sum dicts, parallel to self.trees
    def update_tree_animation(self,tree,N,Q):
        """Store shallow copies so later in-place updates of the search
        do not corrupt earlier animation frames."""
        self.trees.append(tree.copy())
        self.Ns.append(N.copy())
        self.Qs.append(Q.copy())
    def create_tree_animation(self):
        """Animate the recorded snapshots with matplotlib; blocks until
        the window is closed and then waits for an input() keypress."""
        plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'
        fig, ax = plt.subplots(figsize=(14, 12))
        def tree_animation_frame(i):
            # Redraw frame i from scratch on the shared axes.
            ax.clear()
            develop_tree(self.Ns[i], self.Qs[i])
            ax.plot()
        def develop_tree(N, Q):
            """Pass in list of node dicts with path as key"""
            size_min = 100
            size_max = 400
            node_labels = {}
            node_labels['root'] = round(max(Q.values()) / max(N.values()), 1)
            edge_labels = {}
            node_N = [max(N.values())]
            node_Q = [max(Q.values())]
            edges = []
            # NOTE(review): assumes each key of N has a `.moves` sequence
            # describing its path from the root — confirm against caller.
            for node, ns in N.items():
                # Take care around the root
                if len(node.moves) == 0:
                    continue
                elif len(node.moves) == 1:
                    parent = "root"
                    child = node.moves
                else:
                    # Else, we're interested in the end two of the path
                    parent = node.moves[:-1]
                    child = node.moves
                edges.append((move2str(parent), move2str(child)))
                # Add to label mapping dict, only the last move in the path
                node_labels[move2str(child)] = round(Q[node] / N[node], 2)
                edge_labels[(move2str(parent), move2str(child))] = move2str(child[-1])
                node_N.append(N[node])
                node_Q.append(Q[node])
            # Perform math on the summary stats
            node_avg = [q / n for q, n in zip(node_Q, node_N)]
            # If all values are the same, set node_color all to 0 to avoid div by 0
            min_node_avg = min(node_avg)
            max_node_avg = max(node_avg)
            if max_node_avg - min_node_avg == 0:
                node_color = [0 for val in node_avg]
            else:
                node_color = [(val - min_node_avg) / (max_node_avg - min_node_avg) for val in node_avg]
            # NOTE(review): node sizes scale with the Q/N averages, not with
            # raw visit counts — confirm this is intended.
            node_size = [n * (size_max - size_min) + size_min for n in node_avg]
            # Cycled through all the nodes so print
            G = nx.Graph()
            #print(edges)
            G.add_edges_from(edges)
            pos = graphviz_layout(G, prog='dot')
            nx.draw_networkx_nodes(G, pos, ax=ax, node_size=node_size, node_color=node_color, cmap=plt.cm.Blues_r)
            nx.draw_networkx_edges(G, pos)
            nx.draw_networkx_labels(G, pos, labels=node_labels, ax=ax, font_size=8)
            nx.draw_networkx_edge_labels(G, pos, ax=ax, edge_labels=edge_labels, label_pos=0.5, font_size=6)
        # Make and save animation; keep a reference so it is not garbage
        # collected before plt.show() returns.
        ani = animation.FuncAnimation(fig, tree_animation_frame,frames=len(self.trees), interval=20,repeat_delay=5000)
        # Finally, show live animation
        plt.show()
        waiting = input()
    def _get_tree_string(self, tree, N, Q):
        """Return a one-line '[node: visits, value]' summary of the tree."""
        tree_string = ""
        for node, children in tree.items():
            tree_string += f"[{node}: {N[node]}, {Q[node]}] "
        return tree_string
#!/usr/bin/env python
"""
Copyright (C) 2018 Intel Corporation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions
and limitations under the License.

SPDX-License-Identifier: Apache-2.0
"""
import time
# import os.path
import traceback
from testlib.base.base_step import step as base_step
from testlib.scripts.wireless.bluetooth.bt_step import Step as BtStep
from testlib.scripts.android.ui import ui_steps
from testlib.scripts.android.ui import ui_utils
# from testlib.scripts.wireless.bluetooth import bluetooth_utils
class GetAndroidVersion(BtStep):
    """ Description:
            Gets Android version via adb command (float type)
        Usage:
            bluetooth_steps.GetAndroidVersion(serial=serial)()
    """
    def __init__(self, **kwargs):
        """
        :param kwargs: serial, no_log and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.step_data = None
        self.set_errorm("Get version", "Could not obtain Android version")
    def do(self):
        try:
            # Read the release property over adb; the version string
            # (e.g. "8.1.0") is kept in step_data.
            self.step_data = self.adb_connection.cmd('shell getprop ro.build.version.release').communicate()[
                0].decode("utf-8").strip()
        except Exception, e:  # NOTE: Python 2 exception syntax; module targets Python 2
            self.set_errorm("Get version", e.message)
    def check_condition(self):
        """
        :return: True if android version is successfully obtained, False if not. Version is saved in step_data as string
        """
        if self.step_data:
            self.set_passm("Android version " + str(self.step_data))
            return True
        else:
            return False
class ClickBluetoothSwitch(BtStep):
    """ Description:
            Only makes sure that the Bluetooth switch has the required state.
            Furthermore, if you call this function with check_if_already=True,
            if BT switch already has the required state, it returns failure.
            Call this from the Bluetooth Settings activity
        Usage:
            bluetooth_steps.ClickBluetoothSwitch(
                        state = "ON", check_if_already=False)()
    """
    def __init__(self, state="ON", check_if_already=False, **kwargs):
        """
        :param state: "ON" for on state required, OFF for off state required
        :param check_if_already: True to fail if already has the required state, False otherwise
        :param kwargs: serial, timeout, no_log and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.state = state
        # In some platform, state text is 'On' & 'Off', so covert to 'checked'
        self.checked = True if state == "ON" else False
        self.check_if_already = check_if_already
        # UI selector for the (single) enabled switch in BT settings.
        self.switch = self.uidevice(className="android.widget.Switch", enabled=True)
        self.step_data = True
        self.set_passm("BT set to " + self.state)
    def do(self):
        try:
            # check if switch is present
            if not self.switch.wait.exists(timeout=self.timeout):
                raise Exception("No BT switch found")
            if not self.switch.info["checked"] == self.checked:
                self.switch.click()
            else:
                # check if already has required state
                if self.check_if_already:
                    raise Exception("BT already has " + self.state + " state")
                self.set_passm("BT already set to " + self.state)
        except Exception, e:  # NOTE: Python 2 exception syntax
            self.set_errorm("Set BT to " + self.state, e.message)
            self.step_data = False
    def check_condition(self):
        """
        :return: True if required state was set, False if not
        """
        if self.step_data:
            # wait for switch transition
            if not self.switch.wait.exists(timeout=self.timeout):
                self.set_errorm("Set BT to " + self.state, "BT state not set to " + self.state + " correctly")
                self.step_data = False
            else:
                # check if it has required state
                if not self.uidevice(className="android.widget.Switch",
                                     enabled=True,
                                     checked=self.checked).wait.exists(
                        timeout=self.timeout):
                    self.set_errorm("Set BT to " + self.state, "BT state not set to " + self.state)
                    self.step_data = False
        return self.step_data
class OpenBluetoothSettings(BtStep):
    """ Description:
            Opens the Bluetooth activity from settings, either from all
            apps menu, or by sending an intent. Call this from the Home
            screen if use_intent=False
        Usage:
            bluetooth_steps.OpenBluetoothSettings(serial=serial, use_intent=False, version=version)()
    """
    def __init__(self, use_intent=False, **kwargs):
        """
        :param use_intent: True to open from the home screen, False to use BT settings launch intent
        :param kwargs: serial, version, timeout, no_log and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.use_intent = use_intent
        self.step_data = True
        # part of logging message
        if self.use_intent:
            self.message_str = "with intent"
        else:
            self.message_str = "from menu"
        self.set_passm("BT settings opened " + self.message_str)
    def do(self):
        try:
            if self.use_intent:
                # execute start BT command if use_intent=True
                cmd_launch_bt_settings = "shell am start -a android.settings.BLUETOOTH_SETTINGS -p com.android.settings"
                self.adb_connection.cmd(cmd_launch_bt_settings).wait()
            else:
                # Navigate through the Settings UI instead of using the intent.
                ui_steps.open_settings(serial=self.serial)()
                ui_steps.click_button_if_exists(serial=self.serial,
                                                wait_time=5000, view_to_find={"text": "Connected devices"})()
                ui_steps.click_button_with_scroll(serial=self.serial,
                                                  view_to_find={"text": "Bluetooth"})()
        except Exception, e:  # NOTE: Python 2 exception syntax
            self.set_errorm("Open " + self.message_str, e.message)
            self.step_data = False
    def check_condition(self):
        """
        :return: True if BT settings list was launched, False if not
        """
        if self.step_data:
            self.set_errorm("Open " + self.message_str, "BT settings was not opened")
            # wait for the BT activity to open
            self.step_data = self.uidevice(text="Bluetooth").wait.exists(timeout=self.timeout)
        return self.step_data
class CheckBtVisibility(BtStep):
    """ Description:
            Checks if the device is visible. Call this from the BT settings list,
            with BT ON
        Usage:
            bluetooth_steps.CheckBtVisibility(serial=serial, version=version)()
    """
    def __init__(self, **kwargs):
        """
        :param kwargs: serial, version, timeout, no_log and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.step_data = True
        self.set_passm("DUT is visible")
    def do(self):
        # First try the literal "is visible" text; fall back to a regex
        # match because wording differs between Android versions.
        if not ui_steps.wait_for_view_common(serial=self.serial,
                                             view_to_find={"textContains": "is visible"}, optional=True)():
            self.step_data = ui_steps.wait_for_view_common(serial=self.serial,
                                                           view_to_find={"textMatches": ".*?(v|V)isible.*?"})()
    def check_condition(self):
        """
        :return: True if BT is visible message was found on the screen, False if not
        """
        if not self.step_data:
            self.set_errorm("Check if visible",
                            "Check condition for Visibility has failed, 'visible' text can not" +
                            " be found on the screen")
        # self.step_data = self.uidevice(textContains=" is visible").wait.exists(timeout=self.timeout)
        return self.step_data
class WaitBtScanning(BtStep):
    """ Description:
            Makes sure that the BT scanning progress is finished, by waiting
            for progress bar to be gone. Call this from BT settings list,
            with BT on
        Usage:
            bluetooth_steps.WaitBtScanning(serial=serial,
                            timeout_appear=5000, time_to_wait=60000, version=version)()
    """
    def __init__(self, timeout_appear=5000, time_to_wait=60000, **kwargs):
        """
        :param timeout_appear: time to wait till the scanning progress bar appears
        :param time_to_wait: time to wait till the scanning progress bar is gone
        :param kwargs: serial, version, no_log and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.timeout_appear = timeout_appear
        self.time_to_wait = time_to_wait
        # Resource id of the devices list differs between Android releases.
        if self.version.startswith("5.") or self.version.startswith("6.0"):
            # LLP, M versions
            self.bt_list = self.uidevice(resourceId="android:id/list")
        else:
            # N version
            self.bt_list = self.uidevice(resourceId="com.android.settings:id/list")
        self.step_data = True
    def do(self):
        if self.device_info.dessert < "O":
            try:
                if not self.bt_list.wait.exists(timeout=self.timeout_appear):
                    raise Exception("BT devices list was not found")
                # scroll here to reveal scanning progressbar
                if not self.bt_list.scroll.to(text="Available devices"):
                    raise Exception("Available devices title was not found in BT list")
            except Exception, e:  # NOTE: Python 2 exception syntax
                self.set_errorm("Wait scan finish", e.message)
                self.step_data = False
        else:
            # O and later: no reliable progress bar; just wait a fixed time.
            time.sleep(15)
    def check_condition(self):
        """
        :return: True if BT scanning progress was finished after timeout reached, False if not
        """
        if self.device_info.dessert < "O" and self.step_data:
            progress_bar = self.uidevice(resourceId="com.android.settings:id/scanning_progress")
            if progress_bar.wait.exists(timeout=self.timeout_appear):
                # NOTE(review): pass message is set before the wait below has
                # actually confirmed the bar is gone — confirm intended.
                self.set_passm("Scanning progress finished")
                self.set_errorm("Wait scan finish", "Timeout reached, still scanning")
                self.step_data = progress_bar.wait.gone(timeout=self.time_to_wait)
            else:
                self.set_passm("Scanning progress already finished")
                self.step_data = True
        return self.step_data
class GetBtMac(BtStep):
    """ Description:
            Get BT Address Mac via adb command
        Usage:
            bluetooth_steps.GetBtMac(serial=serial)()
    """
    def __init__(self, **kwargs):
        """
        :param kwargs: serial, no_log and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.step_data = None
        self.set_errorm("Obtain BT MAC", "Could not obtain BT MAC address")
    def do(self):
        try:
            # Read the adapter MAC from secure settings; stored in step_data.
            self.step_data = self.adb_connection.cmd('shell settings get secure bluetooth_address').communicate()[
                0].decode("utf-8").strip()
        except Exception, e:  # NOTE: Python 2 exception syntax
            self.set_errorm("Obtain BT MAC", e.message)
    def check_condition(self):
        """
        :return: True if bt mac was found, False if not. Note that the mac is saved in step_data
        """
        if self.step_data:
            self.set_passm("BT MAC address " + str(self.step_data))
            return True
        else:
            return False
class BtChangeDeviceName(BtStep):
    """ Description:
            Replaces the name of the devices with the given name, if not
            already named with the given name. If there is not any character
            given in the name, it validates that Rename button from the
            pop-up is disabled. Call this from the BT settings list, with
            BT ON
        Usage:
            bluetooth_steps.BtChangeDeviceName(serial=serial, name = "", version=version)()
    """

    def __init__(self, name="", **kwargs):
        """
        :param name: name to be set; if empty, it checks if the Rename button is disabled
        :param kwargs: serial, version, timeout, no_log and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.name = name
        # assume success; do() flips this to False on any failure
        self.step_data = True
        if self.version.startswith("5.") or self.version.startswith("6.0"):
            # LLP, M versions
            self.bt_list = self.uidevice(resourceId="android:id/list")
        else:
            # N version
            self.bt_list = self.uidevice(resourceId="com.android.settings:id/list")

    def do(self):
        # Three version-specific flows (LLP/M, N, O+): each first checks whether the
        # device already carries self.name, then drives the Rename pop-up if not.
        try:
            if self.version.startswith("5.") or self.version.startswith("6.0"):
                # LLP, M versions
                if not self.bt_list.wait.exists(timeout=self.timeout):
                    raise Exception("BT devices list was not found")
                if not self.name == '':
                    # check if BT does not already have the given name
                    if not self.bt_list.scroll.to(textContains="is visible to nearby devices"):
                        raise Exception("BT name was not found down of the list")
                    bt_check_object = self.uidevice(textContains="is visible to nearby devices")
                    condition = bt_check_object.info["text"].startswith(self.name + " is visible to nearby devices")
                else:
                    # if empty name is given, do not need to check if not already
                    condition = False
                # if BT does not already have given name, rename it
                if not condition:
                    # open Rename pop-up
                    menu_button = self.uidevice(description="More options")
                    if not menu_button.wait.exists(timeout=self.timeout):
                        raise Exception("More options button in BT settings not found")
                    menu_button.click()
                    rename_button = self.uidevice(textContains="Rename ")
                    if not rename_button.wait.exists(timeout=self.timeout):
                        raise Exception("Rename option from the menu not found")
                    rename_button.click()
                    if not self.uidevice(resourceId="android:id/alertTitle", text="Rename this device").wait.exists(
                            timeout=self.timeout):
                        raise Exception("Rename DUT alert title not opened")
                    # replace name
                    rename_edit_text = self.uidevice(className="android.widget.EditText")
                    # NOTE(review): the triple-quoted block below is dead code left from
                    # an older implementation (it is an unused string literal, not a comment)
                    '''if not rename_edit_text.wait.exists(timeout=self.timeout):
                        raise Exception("Rename Edit text not found")
                    rename_edit_text.set_text(self.name)
                    # force a small delay due to window transition
                    time.sleep(1)
                    rename_button = self.uidevice(text="Rename")
                    if not rename_button.wait.exists(timeout=self.timeout):
                        raise Exception("Rename button from pop-up not found")
                    '''
                    ui_steps.edit_text(view_to_find={"className": "android.widget.EditText"}, value=self.name,
                                       serial=self.serial)()
                    rename_button = self.uidevice(text="Rename")
                    # if given name is empty, check the status of Rename button and return to the BT list
                    if self.name == '':
                        if rename_edit_text.text:
                            raise Exception("Error when clearing old BT name, not empty")
                        if rename_button.enabled:
                            raise Exception("Rename button in popup not disabled when empty name")
                        cancel_button = self.uidevice(text="Cancel")
                        if not cancel_button.wait.exists(timeout=self.timeout):
                            raise Exception("Cancel button not found in Rename popup when empty name")
                        cancel_button.click()
                        if not self.bt_list.wait.exists(timeout=self.timeout):
                            raise Exception("BT devices list not reached after cancel rename BT with empty name")
                        self.set_passm("Rename button disabled when empty name")
                    # if given name is not empty, rename it
                    else:
                        rename_button.click()
                        if not self.bt_list.wait.exists(timeout=self.timeout):
                            raise Exception("BT devices list not reached after renaming BT")
                        if not self.bt_list.scroll.to(textContains="is visible to nearby devices"):
                            raise Exception("BT name was not found down of the list after rename")
                        bt_check_object = self.uidevice(textContains="is visible to nearby devices")
                        if not bt_check_object.info["text"].startswith(self.name + " is visible to nearby devices"):
                            raise Exception("Found: " + bt_check_object.info["text"] + " instead of " + self.name)
                        self.set_passm("Device renamed: " + self.name)
                # else pass, and write in the logs that device is already renamed
                else:
                    self.set_passm("Device already named: " + self.name)
            elif self.version.startswith("7."):
                # N version
                if not self.bt_list.wait.exists(timeout=self.timeout):
                    raise Exception("BT devices list was not found")
                if not self.name == '':
                    # check if BT does not already have the given name
                    if not self.bt_list.scroll.to(textContains="is visible to nearby devices"):
                        raise Exception("BT name was not found down of the list")
                    bt_check_object = self.uidevice(textContains="is visible to nearby devices")
                    condition = bt_check_object.info["text"].startswith(self.name + " is visible to nearby devices")
                else:
                    # if empty name is given, do not need to check if not already
                    condition = False
                # if BT does not already have given name, rename it
                if not condition:
                    # open Rename pop-up
                    menu_button = self.uidevice(description="More options")
                    if not menu_button.wait.exists(timeout=self.timeout):
                        raise Exception("More options button in BT settings not found")
                    menu_button.click()
                    rename_button = self.uidevice(textContains="Rename ")
                    if not rename_button.wait.exists(timeout=self.timeout):
                        raise Exception("Rename option from the menu not found")
                    rename_button.click()
                    if not self.uidevice(resourceId="android:id/alertTitle", text="Rename this device").wait.exists(
                            timeout=self.timeout):
                        raise Exception("Rename DUT alert title not opened")
                    # force a small delay due to window transition and close keyboard
                    # (dumpsys input_method reports mInputShown=true while the IME is up)
                    time.sleep(1)
                    if "mInputShown=true" in self.adb_connection.cmd("shell dumpsys input_method").communicate()[
                            0].decode("utf-8"):
                        self.uidevice.press.back()
                        time.sleep(1)
                    # replace name
                    rename_edit_text = self.uidevice(className="android.widget.EditText")
                    # NOTE(review): dead code kept from an older implementation
                    '''if not rename_edit_text.wait.exists(timeout=self.timeout):
                        raise Exception("Rename Edit text not found")
                    rename_edit_text.set_text(self.name)
                    # force a small delay due to window transition and close keyboard
                    time.sleep(1)
                    if "mInputShown=true" in self.adb_connection.cmd("shell dumpsys input_method").communicate()[
                        0].decode("utf-8"):
                        self.uidevice.press.back()
                        time.sleep(1)
                    '''
                    ui_steps.edit_text(view_to_find={"className": "android.widget.EditText"}, value=self.name,
                                       serial=self.serial)()
                    # N uses an all-caps button label
                    rename_button = self.uidevice(text="RENAME")
                    if not rename_button.wait.exists(timeout=self.timeout):
                        raise Exception("Rename button from pop-up not found")
                    # if given name is empty, check the status of Rename button and return to the BT list
                    if self.name == '':
                        if rename_edit_text.text:
                            raise Exception("Error when clearing old BT name, not empty")
                        if rename_button.enabled:
                            raise Exception("Rename button in popup not disabled when empty name")
                        cancel_button = self.uidevice(text="CANCEL")
                        if not cancel_button.wait.exists(timeout=self.timeout):
                            raise Exception("Cancel button not found in Rename popup when empty name")
                        cancel_button.click()
                        if not self.bt_list.wait.exists(timeout=self.timeout):
                            raise Exception("BT devices list not reached after cancel rename BT with empty name")
                        self.set_passm("Rename button disabled when empty name")
                    # if given name is not empty, rename it
                    else:
                        rename_button.click()
                        if not self.bt_list.wait.exists(timeout=self.timeout):
                            raise Exception("BT devices list not reached after renaming BT")
                        if not self.bt_list.scroll.to(textContains="is visible to nearby devices"):
                            raise Exception("BT name was not found down of the list after rename")
                        bt_check_object = self.uidevice(textContains="is visible to nearby devices")
                        if not bt_check_object.info["text"].startswith(self.name + " is visible to nearby devices"):
                            raise Exception("Found: " + bt_check_object.info["text"] + " instead of " + self.name)
                        self.set_passm("Device renamed: " + self.name)
                # else pass, and write in the logs that device is already renamed
                else:
                    self.set_passm("Device already named: " + self.name)
            else:
                # O-dessert version
                if not self.name == '':
                    # check if BT does not already have the given name
                    # (O phrases the footer as "Visible as <name> ...")
                    if not self.bt_list.scroll.to(textContains="visible"):
                        raise Exception("BT name was not found down of the list")
                    bt_check_object = self.uidevice(textContains="Visible as")
                    condition = self.name in bt_check_object.info["text"]
                else:
                    # if empty name is given, do not need to check if not already
                    condition = False
                # if BT does not already have given name, rename it
                if not condition:
                    ui_steps.click_button_common(serial=self.serial,
                                                 view_to_find={"textContains": "Device name"})()
                    if not self.uidevice(resourceId="android:id/alertTitle", text="Rename this device").wait.exists(
                            timeout=self.timeout):
                        raise Exception("Rename DUT alert title not opened")
                    # force a small delay due to window transition and close keyboard
                    time.sleep(1)
                    if "mInputShown=true" in self.adb_connection.cmd("shell dumpsys input_method").communicate()[
                            0].decode("utf-8"):
                        self.uidevice.press.back()
                        time.sleep(1)
                    # replace name
                    rename_edit_text = self.uidevice(className="android.widget.EditText")
                    ui_steps.edit_text(view_to_find={"className": "android.widget.EditText"}, value=self.name,
                                       serial=self.serial)()
                    rename_button = self.uidevice(text="RENAME")
                    if not rename_button.wait.exists(timeout=self.timeout):
                        raise Exception("Rename button from pop-up not found")
                    # if given name is empty, check the status of Rename button and return to the BT list
                    if self.name == '':
                        if rename_edit_text.text:
                            raise Exception("Error when clearing old BT name, not empty")
                        if rename_button.enabled:
                            raise Exception("Rename button in popup not disabled when empty name")
                        cancel_button = self.uidevice(text="CANCEL")
                        if not cancel_button.wait.exists(timeout=self.timeout):
                            raise Exception("Cancel button not found in Rename popup when empty name")
                        cancel_button.click()
                        if not self.bt_list.wait.exists(timeout=self.timeout):
                            raise Exception("BT devices list not reached after cancel rename BT with empty name")
                        self.set_passm("Rename button disabled when empty name")
                    # if given name is not empty, rename it
                    else:
                        rename_button.click()
                        if not self.bt_list.wait.exists(timeout=self.timeout):
                            raise Exception("BT devices list not reached after renaming BT")
                        if not self.bt_list.scroll.to(textContains="visible"):
                            raise Exception("BT name was not found down of the list after rename")
                        self.set_passm("Device renamed: " + self.name)
                # else pass, and write in the logs that device is already renamed
                else:
                    self.set_passm("Device already named: " + self.name)
                    # self.set_passm("Device already named: " + self.name)
        except Exception, e:
            message = e.message
            if message is None or message == "":
                # NOTE(review): traceback.print_exc() prints to stderr and returns None,
                # so `message` stays None here; format_exc() was probably intended
                message = traceback.print_exc()
            self.set_errorm("Rename BT to " + self.name, message)
            self.step_data = False

    def check_condition(self):
        """
        :return: True if device was renamed(or Rename button is grayed out when empty name), False if not.
        """
        return self.step_data
class BtSearchDevices(BtStep):
    """ Description:
            Refreshes the BT available list until a certain device has
            appeared(for a max_attempt tries). Note that this let the BT
            list scrolled to the required device in the list. Call this in
            BT settings list, with BT ON and not any scanning in progress
        Usage:
            bluetooth_steps.BtSearchDevices(serial=serial,
                        dev_to_find="BT_test", scan_timeout=60000,
                        max_attempts=1, version=version)()
    """

    def __init__(self, dev_to_find, scan_timeout=60000, max_attempts=1, **kwargs):
        """
        :param dev_to_find: name of the device to be found
        :param scan_timeout: maximum timeout for scanning progress
        :param max_attempts: maximum no. of tries
        :param kwargs: serial, version, timeout, no_log and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.dev_to_find = dev_to_find
        self.scan_timeout = scan_timeout
        self.max_attempts = max_attempts
        # assume success; do() flips this to False on error
        self.step_data = True
        # if self.version.startswith("5.") or self.version.startswith("6.0"):
        #     self.bt_list = self.uidevice(resourceId="android:id/list")
        # self.bt_list = self.uidevice(resourceId="com.android.settings:id/list")

    def do(self):
        # Rescan-and-look loop: trigger a fresh scan (Refresh menu pre-O,
        # "Pair new device" screen on O+), wait for it to finish, then check
        # whether the target device showed up; repeat up to max_attempts times.
        try:
            # if not self.uidevice(text="Available devices").wait.exists(timeout=self.timeout):
            #     raise Exception("BT devices list was not found")
            counter = 1
            # condition = True means that the device was found
            condition = False
            while not condition:
                # break if max_attempts reached
                if counter > self.max_attempts:
                    break
                if self.device_info.dessert < 'O':
                    # open More options menu and click Refresh
                    menu_button = self.uidevice(description="More options")
                    if not menu_button.wait.exists(timeout=self.timeout):
                        raise Exception("Try " + str(counter) + ": More options button in BT settings not found")
                    menu_button.click()
                    refresh_button = self.uidevice(text="Refresh")
                    if not refresh_button.wait.exists(timeout=self.scan_timeout):
                        raise Exception("Try " + str(counter) + ": Refresh button was not found")
                    refresh_button.click()
                else:
                    # O+: re-enter the "Pair new device" screen to restart scanning;
                    # back out first on retries so the click lands on the list screen
                    if counter != 1:
                        self.uidevice.press.back()
                    ui_steps.click_button_common(serial=self.serial,
                                                 view_to_find={"text": "Pair new device"}, optional=True)()
                    # if not self.bt_list.wait.exists(timeout=self.timeout):
                    #     raise Exception("Try " + str(counter) + ": BT devices list was not found")
                # wait until scanning process is finished
                if not WaitBtScanning(serial=self.serial,
                                      time_to_wait=self.scan_timeout, critical=False,
                                      version=self.version)():
                    raise Exception("Wait for scanning to finish failed")
                counter += 1
                # check if device was found, if not, perform again the while loop
                condition = ui_steps.wait_for_view_common(serial=self.serial,
                                                          view_to_find={"text": self.dev_to_find}, optional=True)()
            # NOTE(review): this pass message is set even when the loop exits via
            # max_attempts without finding the device; check_condition() re-verifies
            # actual presence on screen, so the final verdict is still correct
            self.set_passm("Device " + self.dev_to_find + " found after " + str(
                counter - 1) + " attempt(s)")
        except Exception, e:
            self.set_errorm("Scan after " + self.dev_to_find, e.message)
            self.step_data = False

    def check_condition(self):
        """
        :return: True if device was found, False if not.
        """
        if self.step_data:
            # returns if the device is on the screen, or not
            self.set_errorm("Scan after " + self.dev_to_find,
                            "Search failed, device " + self.dev_to_find + " was not found in Available devices" +
                            " list after " + str(self.max_attempts) + " attempt(s)")
            self.step_data = self.uidevice(text=self.dev_to_find).exists
        return self.step_data
class GetPasskey(BtStep):
""" Description:
Get the pairing code from the pair request window. Call this in
the Pairing request window
Usage:
bluetooth_steps.GetPasskey(serial=serial)()
"""
def __init__(self, **kwargs):
"""
:param kwargs: serial, no_log and standard kwargs for base_step
"""
BtStep.__init__(self, **kwargs)
self.step_data = None
self.set_errorm("Get passkey from Pair request", "Could not obtain passkey")
def do(self):
try:
passkey_object = self.uidevice(resourceId="com.android.settings:id/pairing_subhead")
if not passkey_object.exists:
raise Exception("Pairing code not displayed")
# save the passkey in step_data
self.step_data = passkey_object.text
except Exception, e:
self.set_errorm("Get passkey from Pair request", e.message)
self.step_data = False
def check_condition(self):
"""
:return: True if passkey was found, False if not. Note that the passkey is saved as str in step_data
"""
if self.step_data:
self.set_passm("Passkey " + str(self.step_data))
return True
else:
return False
class PasskeyCheck(BtStep):
    """ Description:
            This method checks if the pairing request passkeys are both
            on the initiator and on the receiver
        Usage:
            bluetooth_steps.PasskeyCheck(serial=serial, passkey_initiator=passkey1,
                            passkey_receiver=passkey2)()
    """

    def __init__(self, passkey_initiator, passkey_receiver, **kwargs):
        """
        :param passkey_initiator: passkey of the initiator device
        :param passkey_receiver: passkey of the receiver device
        :param kwargs: standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.passkey_initiator = passkey_initiator
        self.passkey_receiver = passkey_receiver
        self.set_passm("Pairing code matches; " + str(self.passkey_initiator) + " = " + str(self.passkey_receiver))
        # bug fix: the error message used to print the initiator's passkey for
        # both sides, hiding the actual (mismatched) receiver value
        self.set_errorm("Pairing code does not match",
                        "Initiator: " + str(self.passkey_initiator) + " Receiver: " + str(self.passkey_receiver))

    def do(self):
        # nothing to execute; this step only compares the two stored passkeys
        pass

    def check_condition(self):
        """
        :return: True if passkeys match, False if not.
        """
        return self.passkey_initiator == self.passkey_receiver
class CheckIfPaired(BtStep):
""" Description:
Checks if the device is paired, or not(depending on the paired parameter)
with another device. Call this with the BT list opened
Usage:
bluetooth_steps.CheckIfPaired(serial=serial,
dev_paired_with = DEVNAME, paired=True, version=version)()
"""
def __init__(self, dev_paired_with, paired=True, **kwargs):
"""
:param dev_paired_with: name of the device to check if DUT is(not) paired with
:param paired: True, to check if DUT is paired with, False, to check if DUT is not paired with
:param kwargs: serial, version, timeout, no_log and standard kwargs for base_step
"""
BtStep.__init__(self, **kwargs)
self.dev_paired_with = dev_paired_with
self.paired = paired
self.step_data = True
if self.version.startswith("5.") or self.version.startswith("6.0"):
# LLP, M versions
self.bt_list = self.uidevice(resourceId="android:id/list")
else:
# N version
self.bt_list = self.uidevice(resourceId="com.android.settings:id/list")
if self.paired:
self.message_str = "Check if paired"
self.set_passm("Paired with " + str(dev_paired_with))
self.set_errorm(self.message_str, "Not paired with " + str(dev_paired_with))
else:
self.message_str = "Check if not paired"
self.set_passm("Not paired with " + str(dev_paired_with))
self.set_errorm(self.message_str, "Paired with " + str(dev_paired_with))
def do(self):
try:
if self.device_info.dessert < "O":
if self.uidevice(text="YES").wait.exists(timeout=1000):
self.uidevice(text="YES").click()
if not self.bt_list.wait.exists(timeout=self.timeout):
raise Exception("BT list not found")
if self.bt_list.scroll.to(text=self.dev_paired_with):
device_layout = self.bt_list.child_by_text(self.dev_paired_with, allow_scroll_search=False,
className="android.widget.LinearLayout")
if self.paired:
if not device_layout.child(resourceId="com.android.settings:id/deviceDetails").wait.exists(
timeout=self.timeout):
self.step_data = False
else:
if not device_layout.child(resourceId="com.android.settings:id/deviceDetails").wait.gone(
timeout=self.timeout):
self.step_data = False
else:
if self.paired:
self.step_data = False
else:
condition = False
ui_steps.click_button_common(serial=self.serial, view_to_find={"description": "Navigate up"})()
ui_steps.click_button_common(serial=self.serial, view_to_find={"textContains": "Connected devices"})()
time.sleep(1)
condition = ui_steps.wait_for_view_common(serial=self.serial,
view_to_find={"textContains": self.dev_paired_with},
second_view_to_find={
"resourceId": "com.android.settings:id/settings_button"},
position='right', optional=True)()
if self.paired:
if condition:
self.step_data = True
else:
self.step_data = False
else:
if condition:
self.step_data = False
else:
self.step_data = True
except Exception, e:
self.set_errorm(self.message_str, e.message)
self.step_data = False
def check_condition(self):
"""
:return: True if DUT is paired (or not, depending on Paired=True/False) with required device, False otherwise
"""
return self.step_data
class WaitPairRequest(BtStep):
    """ Description:
            Waits for the pair request alert to appear or to be gone,
            as defined by parameter appear=True/False.
        Usage:
            bluetooth_steps.WaitPairRequest(serial=serial,
                        appear=True, time_to_wait=10000, version=version)()
    """

    def __init__(self, appear=True, time_to_wait=10000, **kwargs):
        """
        :param appear: True, to check if appears, False, to check if gone
        :param time_to_wait: maximum time to wait for pairing request window
        :param kwargs: serial, version, no_log and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.appear = appear
        self.time_to_wait = time_to_wait
        if self.appear:
            self.set_passm("Pair request appeared")
            self.set_errorm("Wait pair request window",
                            "Pair request not appeared after " + str(self.time_to_wait) + " milliseconds")
        else:
            self.set_passm("Pair request gone")
            self.set_errorm("Wait pair request window gone",
                            "Pair request not gone after " + str(self.time_to_wait) + " milliseconds")

    def do(self):
        # nothing to execute; the wait itself happens in check_condition
        pass

    def check_condition(self):
        """
        :return: True if pairing request window appears(or is gone), False otherwise
        """
        if self.version.startswith("5."):
            # LLP titles the dialog with the full "Bluetooth pairing request" text
            dialog = self.uidevice(resourceId="android:id/alertTitle",
                                   text="Bluetooth pairing request")
        else:
            # M and newer title the dialog "Pair with <device>"
            dialog = self.uidevice(resourceId="android:id/alertTitle",
                                   textContains="Pair with")
        if self.appear:
            self.step_data = dialog.wait.exists(timeout=self.time_to_wait)
        else:
            self.step_data = dialog.wait.gone(timeout=self.time_to_wait)
        return self.step_data
class InitiatePairRequest(BtStep):
    """ Description:
            Initiate a pair request. It searches for the device name, clicks on it and assures
            that the initiator device is in the pairing request window (i.e. if pair request window
            is not displayed on the screen, it checks if the "Cannot communicate" message is displayed,
            and if not, it searches the request in the notifications menu)
        Usage:
            bluetooth_steps.InitiatePairRequest(serial=serial, dev_to_pair_name="Name",
                        scan_timeout=60000, scan_max_attempts=1, version=version)()
    """

    def __init__(self, dev_to_pair_name, scan_timeout=60000, scan_max_attempts=1, **kwargs):
        """
        :param dev_to_pair_name: name of device to pair with
        :param scan_timeout: maximum timeout for scanning progress
        :param scan_max_attempts: maximum no. of scan tries till the device is found
        :param kwargs: serial, version, timeout, no_log and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.dev_to_pair_name = dev_to_pair_name
        self.scan_timeout = scan_timeout
        self.scan_max_attempts = scan_max_attempts
        # assume success; do() flips this to False on any failure
        self.step_data = True
        self.set_passm("Pair request initiated to " + str(dev_to_pair_name))

    def do(self):
        try:
            # search for required device
            if not BtSearchDevices(serial=self.serial, dev_to_find=self.dev_to_pair_name,
                                   scan_timeout=self.scan_timeout,
                                   timeout=self.timeout, max_attempts=self.scan_max_attempts, version=self.version,
                                   critical=False)():
                raise Exception("Search for device failed")
            # click on the device name (already scrolled in the view)
            # self.uidevice(text=self.dev_to_pair_name).click()
            # the click is retried once if the alert dialog did not come up the first time
            if not ui_steps.click_button_common(serial=self.serial,
                                                view_to_find={"textContains": self.dev_to_pair_name},
                                                view_to_check={"resourceId": "android:id/alertTitle"})():
                time.sleep(1)
                ui_steps.click_button_common(serial=self.serial, view_to_find={"textContains": self.dev_to_pair_name},
                                             view_to_check={"resourceId": "android:id/alertTitle"})()
            # allow the pairing dialog / error toast time to settle
            time.sleep(5)
            if self.version.startswith("5."):
                # LLP version
                # if pair request window not appear on the device, open notification and check
                # if there is not even there the pairing request
                if not self.uidevice(resourceId="android:id/alertTitle",
                                     text="Bluetooth pairing request").wait.exists(timeout=5000):
                    if self.uidevice(textContains="Can't communicate with").exists:
                        raise Exception(
                            "Pair request not initiated from DUT because can't communicate with other one device")
                    if not SearchPairRequestNotification(serial=self.serial, timeout=self.timeout, version=self.version,
                                                         critical=False, no_log=True)():
                        raise Exception(
                            "Pair request not appeared on the screen, also failed" +
                            " searching it in notifications menu")
                    if not WaitPairRequest(serial=self.serial, appear=True, time_to_wait=self.timeout,
                                           version=self.version, critical=False, no_log=True)():
                        raise Exception("Pair request not initiated")
                # verify the dialog belongs to the expected peer device
                if not self.uidevice(resourceId="com.android.settings:id/message_subhead",
                                     text=self.dev_to_pair_name).wait.exists(timeout=self.timeout):
                    raise Exception("Pair request not initiated to the expected device")
            else:
                # M, N version
                # if pair request window not appear on the device, open notification and check
                # if there is not even there the pairing request
                pair_request_title_obj = self.uidevice(resourceId="android:id/alertTitle", textContains="Pair with")
                if not pair_request_title_obj.wait.exists(timeout=5000):
                    if self.uidevice(textContains="Can't communicate with").exists:
                        raise Exception(
                            "Pair request not initiated from DUT because can't communicate with other one device")
                    # pre-O only: the request may land in the notification shade
                    if self.device_info.dessert < "O":
                        if not SearchPairRequestNotification(serial=self.serial, timeout=self.timeout,
                                                             version=self.version, critical=False, no_log=True)():
                            raise Exception(
                                "Pair request not appeared on the screen, also failed" +
                                " searching it in notifications menu")
                    if not WaitPairRequest(serial=self.serial, appear=True, time_to_wait=self.timeout,
                                           version=self.version, critical=False, no_log=True)():
                        raise Exception("Pair request not initiated")
                # the dialog title embeds the peer name: "Pair with <name>?"
                pair_request_title_str = pair_request_title_obj.text
                if not pair_request_title_str == "Pair with " + str(self.dev_to_pair_name) + "?":
                    raise Exception(
                        "Pair request not initiated to the expected device, found " + str(pair_request_title_str))
        except Exception, e:
            self.set_errorm("Pair request to " + str(self.dev_to_pair_name), e.message)
            self.step_data = False

    def check_condition(self):
        """
        :return: True if Both devices are in the pair request window, False otherwise
        """
        return self.step_data
class PairDevice(BtStep):
""" Description:
Initiate a pair request. It searches for the device name, clicks on it and assures
that the initiator device is in the pairing request window (i.e. if pair request window
is not displayed on the screen, it checks if the "Cannot communicate" message is displayed,
and checks device name paired or not to DUT, If paired returns true)
Usage:
bluetooth_steps.PairDevice(serial=serial, dev_to_pair_name="Name",
scan_timeout=60000, scan_max_attempts=1, version=version)()
"""
def __init__(self, dev_to_pair_name, scan_timeout=60000, scan_max_attempts=1, **kwargs):
"""
:param dev_to_pair_name: name of device to pair with
:param scan_timeout: maximum timeout for scanning progress
:param scan_max_attempts: maximum no. of scan tries till the device is found
:param kwargs: serial, version, timeout, no_log and standard kwargs for base_step
"""
BtStep.__init__(self, **kwargs)
self.dev_to_pair_name = dev_to_pair_name
self.scan_timeout = scan_timeout
self.scan_max_attempts = scan_max_attempts
self.step_data = True
self.set_passm("Paired with " + str(dev_to_pair_name))
def do(self):
try:
# search for required device
if not BtSearchDevices(serial=self.serial, dev_to_find=self.dev_to_pair_name,
scan_timeout=self.scan_timeout,
timeout=self.timeout, max_attempts=self.scan_max_attempts, version=self.version,
critical=False)():
raise Exception("Search for device failed")
# click on the device name (already scrolled in the view)
self.uidevice(text=self.dev_to_pair_name).click()
if self.version.startswith("5."):
# LLP version
# if pair request window not appear on the device, open notification and check
# if there is not even there the pairing request
if not self.uidevice(resourceId="android:id/alertTitle",
text="Bluetooth pairing request").wait.exists(timeout=5000):
if self.uidevice(textContains="Can't communicate with").exists:
raise Exception(
"Pair request not initiated from DUT because can't communicate with other one device")
else:
# M, N version
# if pair request window not appear on the device, open notification and check
# if there is not even there the pairing request
pair_request_title_obj = self.uidevice(resourceId="android:id/alertTitle", textContains="Pair with")
if not pair_request_title_obj.wait.exists(timeout=5000):
if self.uidevice(textContains="Can't communicate with").exists:
raise Exception(
"Pair request not initiated from DUT because can't communicate with other one device")
except Exception, e:
self.set_errorm("Pair request to " + str(self.dev_to_pair_name), e.message)
self.step_data = False
def check_condition(self):
"""
:return: True if Device was paired, False if not
"""
if self.step_data:
# check if is paired with required device
self.step_data = CheckIfPaired(serial=self.serial, dev_paired_with=self.dev_to_pair_name, paired=True,
timeout=self.timeout, version=self.version, critical=False)()
return self.step_data
class ReceivePairRequest(BtStep):
""" Description:
Receives a pair request. It assures that device is
in the pairing request window (i.e. if pair request window
is not received on the screen, it searches it in the
notifications menu)
Usage:
bluetooth_steps.ReceivePairRequest(serial=serial,
dev_receiving_from_name="Name", version=version)()
"""
def __init__(self, dev_receiving_from_name, **kwargs):
"""
:param dev_receiving_from_name: name of the device receiving pair request from
:param kwargs: serial, version, timeout, no_log and standard kwargs for base_step
"""
BtStep.__init__(self, **kwargs)
self.dev_receiving_from_name = dev_receiving_from_name
self.step_data = True
self.set_passm("Pair request received from " + str(self.dev_receiving_from_name))
def do(self):
try:
if self.version.startswith("5."):
# LLP version
# if pair request window not appear on the receiver device, open notification and check if
# there is not even there the pairing request
if not self.uidevice(resourceId="android:id/alertTitle", text="Bluetooth pairing request").wait.exists(
timeout=5000):
if not SearchPairRequestNotification(serial=self.serial, timeout=self.timeout, version=self.version,
critical=False)():
raise Exception(
"Pair request not received on the screen, also failed" +
" searching it in notifications menu")
if not WaitPairRequest(serial=self.serial, appear=True, time_to_wait=self.timeout,
version=self.version, critical=False)():
raise Exception("Pair request not received")
if not self.uidevice(resourceId="com.android.settings:id/message_subhead",
text=self.dev_receiving_from_name).wait.exists(timeout=self.timeout):
raise Exception("Pair request not received from the expected device")
else:
# M, N version
# if pair request window not appear on the receiver device, open notification and check if
# there is not even there the pairing request
pair_request_title_obj = self.uidevice(resourceId="android:id/alertTitle", textContains="Pair with")
if not pair_request_title_obj.wait.exists(timeout=5000):
if not SearchPairRequestNotification(serial=self.serial, timeout=self.timeout, version=self.version,
critical=False, no_log=True)():
raise Exception(
"Pair request not received on the screen, also failed" +
" searching it in notifications menu")
if not WaitPairRequest(serial=self.serial, appear=True, time_to_wait=self.timeout,
verion=self.version, critical=False, no_log=True)():
raise Exception("Pair request not received on device")
pair_request_title_str = pair_request_title_obj.text
if not pair_request_title_str == "Pair with " + str(self.dev_receiving_from_name) + "?":
raise Exception(
"Pair request not received from the expected device, found " + str(pair_request_title_str))
except Exception, e:
self.set_errorm("Pair request from " + str(self.dev_receiving_from_name), e.message)
self.step_data = False
def check_condition(self):
"""
:return: True if Both devices are in the pair request window, False otherwise
"""
return self.step_data
class SearchPairRequestNotification(BtStep):
""" Description:
Opens a Pairing request from the notification menu. Note that
this does not check if, indeed the pairing request dialog appears,
it only clicks the notification. Call this only if the request
dialog is not displayed and it should be
Usage:
bluetooth_steps.SearchPairRequestNotification(serial=serial)()
"""
def __init__(self, **kwargs):
"""
:param kwargs: serial, no_log and standard kwargs for base_step
"""
BtStep.__init__(self, **kwargs)
self.step_data = True
self.set_passm("Pairing request notification clicked")
def do(self):
try:
# open notification menu
if not OpenNotificationsMenu(serial=self.serial, timeout=self.timeout, version=self.version, critical=False,
no_log=True)():
raise Exception("Notification menu not opened when searching for pairing request")
# click on the pairing request notification
if not BtCheckNotificationAppear(serial=self.serial, text_contains="Pairing request",
click_on_notification=True, time_to_appear=self.timeout,
version=self.version, critical=False, no_log=True)():
raise Exception("Check Pair request notification not successful")
except Exception, e:
self.set_errorm("Search pair request in notifications ", e.message)
self.step_data = False
def check_condition(self):
"""
:return: True if Pair request notification was found and clicked, False otherwise
"""
return self.step_data
class OpenNotificationsMenu(BtStep):
    """ Description:
            Opens the notifications menu in order to operate with Bluetooth notifications.
        Usage:
            bluetooth_steps.OpenNotificationsMenu(serial=serial)()
    """

    def __init__(self, **kwargs):
        """
        :param kwargs: serial, timeout, no_log and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.set_passm("Notifications menu opened")
        self.set_errorm("Open notifications", "Notifications menu not opened")

    def do(self):
        """Swipe open the notification shade."""
        self.uidevice.open.notification()
        # sleep here for transition to be finished
        time.sleep(2)

    def check_condition(self):
        """
        :return: True if Notifications menu was opened, False otherwise
        """
        # the notification stack scroller is only present while the shade is open
        self.step_data = self.uidevice(resourceId="com.android.systemui:id/notification_stack_scroller").wait.exists(
            timeout=self.timeout)
        return self.step_data
class CloseNotificationsMenu(BtStep):
    """ Description:
            Closes the notifications menu.
        Usage:
            bluetooth_steps.CloseNotificationsMenu(serial=serial)()
    """

    def __init__(self, **kwargs):
        """
        :param kwargs: serial, timeout, no_log and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.step_data = True
        self.notifications_menu = self.uidevice(resourceId="com.android.systemui:id/notification_stack_scroller")
        self.set_passm("Notifications menu closed")
        self.set_errorm("Close notifications", "Notifications menu not gone")

    def do(self):
        """Press back to dismiss the notification shade (must already be open)."""
        try:
            if not self.notifications_menu.exists:
                raise Exception("Notifications menu is not already opened")
            self.uidevice.press.back()
        # NOTE: was Py2-only "except Exception, e" — "as" form works on Py2.6+
        except Exception as e:
            self.set_errorm("Close notifications", e.message)
            self.step_data = False

    def check_condition(self):
        """
        :return: True if Notifications menu was closed, False otherwise
        """
        if self.step_data:
            self.step_data = self.notifications_menu.wait.gone(timeout=self.timeout)
        return self.step_data
class PerformActionPairRequest(BtStep):
    """ Description:
            Performs a click on the button with label exact text as defined by
            action parameter and checks if the pair request window is gone. If
            the action is 'Timeout', it only waits for pair request window to be
            gone, the amount of time as defined by timeout parameter. Call this
            only when Pair request window is already shown.
        Usage:
            bluetooth_steps.PerformActionPairRequest(serial=serial,
                                action="Pair", version=version)()
    """

    def __init__(self, action="Pair", **kwargs):
        """
        :param action: "Pair"/"Cancel"/"Timeout" action to be performed
        :param kwargs: serial, timeout, version, no_log, and standard kwargs for base_step
        :raises Exception: if action is not one of the accepted values
        """
        BtStep.__init__(self, **kwargs)
        if action not in ["Cancel", "Pair", "Timeout"]:
            raise Exception("Config error: not any expected value for action")
        if self.version.startswith("5.") or self.version.startswith("6.0"):
            # LLP, M versions use mixed-case button labels
            self.action = action
        else:
            # N version uses upper-case button labels
            self.action = action.upper()
        self.step_data = True
        self.set_passm("Action " + str(self.action) + " successful")
        self.set_errorm("Action " + str(self.action), "Pair request window not gone after action performed")

    def do(self):
        """Click the requested dialog button (unless the action is Timeout)."""
        try:
            # if action is not Timeout, perform click on the button
            if self.action.upper() != "TIMEOUT":
                action_button = self.uidevice(text=self.action)
                if not action_button.wait.exists(timeout=self.timeout + 30000):
                    raise Exception("Button " + str(self.action) + " not found")
                action_button.click()
                # confirm a possible extra "YES" prompt if it shows up
                if self.uidevice(text="YES").wait.exists(timeout=1000):
                    self.uidevice(text="YES").click()
        # NOTE: was Py2-only "except Exception, e" — "as" form works on Py2.6+
        except Exception as e:
            self.set_errorm("Action " + str(self.action), e.message)
            self.step_data = False

    def check_condition(self):
        """
        :return: True if pair request window is gone, False if not
        """
        if self.step_data:
            # check if the pair request window is gone
            self.step_data = WaitPairRequest(serial=self.serial, appear=False, time_to_wait=self.timeout,
                                             version=self.version, critical=False)()
        return self.step_data
class CouldNotPairDialogCheck(BtStep):
    """ Description:
            Checks if the "Couldn't pair" dialog is displayed
            (by waiting for it) and clicks on its OK button.
        Usage:
            bluetooth_steps.CouldNotPairDialogCheck(serial=serial)()
    """

    def __init__(self, **kwargs):
        """
        :param kwargs: serial, timeout, no_log and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.step_data = True
        self.set_passm("Dialog appeared, canceled successful")
        self.set_errorm("Could not pair dialog", "Not canceled successfully")
        self.dialog_window = self.uidevice(resourceId="android:id/message", textContains="incorrect PIN or passkey")

    def do(self):
        """Wait for the failure dialog and dismiss it (pre-O releases only)."""
        try:
            if self.device_info.dessert < "O":
                # wait for dialog to appear
                if not self.dialog_window.wait.exists(timeout=self.timeout + 30000):
                    raise Exception("Dialog not appeared")
                # click on its OK button
                ok_button = self.uidevice(text="OK")
                if not ok_button.wait.exists(timeout=self.timeout + 30000):
                    raise Exception("OK not found in the dialog")
                ok_button.click()
            # in O the dialog box disappears automatically, so there is nothing
            # to do for O and later desserts
        # NOTE: was Py2-only "except Exception, e" — "as" form works on Py2.6+
        except Exception as e:
            self.set_errorm("Could not pair dialog", e.message)
            self.step_data = False

    def check_condition(self):
        """
        :return: True if Could not pair dialog is gone after press on OK, False otherwise
        """
        if self.step_data:
            # check if dialog is gone
            self.step_data = self.dialog_window.wait.gone(timeout=self.timeout)
        return self.step_data
class BtRemoveAllPairedDevices(BtStep):
    """ Description:
            All paired devices will be removed from the list. Call this in BT
            devices list, with no scanning in progress.
        Usage:
            bluetooth_steps.BtRemoveAllPairedDevices(serial = serial,
                                        max_attempts=20, version=version)()
    """

    def __init__(self, max_attempts=20, **kwargs):
        """
        :param max_attempts: maximum no. of tries
        :param kwargs: serial, version, no_log and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.max_attempts = max_attempts
        self.paired_title = self.uidevice(text="Paired devices")
        if self.version.startswith("5.") or self.version.startswith("6.0"):
            # LLP, M versions
            self.bt_list = self.uidevice(resourceId="android:id/list")
        else:
            # N version
            self.bt_list = self.uidevice(resourceId="com.android.settings:id/list")
        self.step_data = True
        self.set_passm("Nothing to unpair")
        # fixed: original concatenation produced e.g. "20attempts" (missing space)
        self.set_errorm(str(max_attempts) + " attempts", "Not removed all paired devices")

    def do(self):
        """Unpair every listed device, up to max_attempts iterations."""
        try:
            if self.version.startswith("5.") or self.version.startswith("6.") or self.version.startswith("7."):
                if not self.bt_list.wait.exists(timeout=self.timeout):
                    raise Exception("BT devices list was not found")
                # execute only if Paired devices title is found
                if self.bt_list.scroll.to(text="Paired devices"):
                    counter = 1
                    # for each existing paired button, click on it and FORGET
                    while self.paired_title.exists:
                        if counter > self.max_attempts:
                            break
                        paired_button = self.uidevice(description="Device settings")
                        paired_button.click.wait()
                        time.sleep(1)
                        if not self.uidevice(resourceId="android:id/alertTitle").wait.exists(timeout=self.timeout):
                            raise Exception(
                                "Alert title not opened when removing " + " (device no. " + str(
                                    counter) + ")")
                        if self.version.startswith("5."):
                            # LLP version
                            forget_button = self.uidevice(resourceId="android:id/button2", text="FORGET")
                        elif self.version.startswith("6.0"):
                            # M version
                            forget_button = self.uidevice(resourceId="android:id/button2", text="Forget")
                            # force a small delay due to window transition
                            time.sleep(1)
                        else:
                            # N version
                            # force a small delay due to window transition and close keyboard
                            time.sleep(1)
                            if "mInputShown=true" in self.adb_connection.cmd(
                                    "shell dumpsys input_method").communicate()[0].decode("utf-8"):
                                self.uidevice.press.back()
                                time.sleep(1)
                            forget_button = self.uidevice(text="FORGET")
                        if not forget_button.wait.exists(timeout=self.timeout):
                            raise Exception(
                                "Forget button not found when unpair " + " (device no. " + str(
                                    counter) + ")")
                        forget_button.click()
                        if not self.bt_list.wait.exists(timeout=self.timeout):
                            raise Exception(
                                "Not returned to BT list after unpair " + " (device no. " + str(
                                    counter) + ")")
                        counter += 1
                    self.set_passm(str(counter - 1) + " device(s) unpaired")
            else:
                # O and later desserts use ui_steps helpers instead of raw selectors
                counter = 1
                # for each existing paired button, click on it and FORGET
                while self.paired_title.exists and self.uidevice(description="Settings"):
                    if counter > self.max_attempts:
                        break
                    ui_steps.click_button_common(serial=self.serial,
                                                 view_to_find={"description": "Settings"},
                                                 view_to_check={"resourceId": "android:id/alertTitle"})()
                    time.sleep(1)
                    if not ui_steps.click_button_common(serial=self.serial,
                                                        view_to_find={"text": "FORGET"})():
                        raise Exception("Forget button not found when unpair " + " (device no. " + str(counter) + ")")
                    counter += 1
                self.set_passm(str(counter - 1) + " device(s) unpaired")
        # NOTE: was Py2-only "except Exception, e" — "as" form works on Py2.6+
        except Exception as e:
            self.set_errorm("Unpair devices", e.message)
            self.step_data = False

    def check_condition(self):
        """
        :return: True if all paired devices were unpaired, False otherwise
        """
        # NOTE(review): when step_data is already False the else branch still
        # overwrites it with the wait result — looks intentional for >= O but
        # may mask an earlier failure; confirm against callers
        if self.step_data and self.device_info.dessert < "O":
            # check if "Paired devices" title is gone
            self.step_data = self.uidevice(text="Paired devices").wait.gone(timeout=self.timeout)
        else:
            self.step_data = self.uidevice(description="Settings").wait.gone(timeout=self.timeout)
        return self.step_data
class OpenPairedDeviceSettings(BtStep):
    """ Description:
            Open the device settings alert title for a certain paired device.
            Call this in BT settings list for a device already paired.
        Usage:
            bluetooth_steps.OpenPairedDeviceSettings(serial = serial,
                                device_name="DEV_name", version=version)()
    """

    def __init__(self, device_name, **kwargs):
        """
        :param device_name: name of device in the list for which Settings should be opened
        :param kwargs: serial, version, timeout, no_log and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.device_name = device_name
        self.step_data = True
        if self.version.startswith("5.") or self.version.startswith("6.0"):
            # LLP, M versions
            self.bt_list = self.uidevice(resourceId="android:id/list")
        else:
            # N version
            self.bt_list = self.uidevice(resourceId="com.android.settings:id/list")
        self.set_passm("Device settings opened for device " + str(self.device_name))
        self.set_errorm("Paired device settings for " + str(self.device_name), "Device settings not opened")

    def do(self):
        """Scroll to the device entry and open its settings dialog."""
        try:
            if self.device_info.dessert < "O":
                if not self.bt_list.wait.exists(timeout=self.timeout):
                    raise Exception("BT list not found")
                if not self.bt_list.scroll.to(text=self.device_name):
                    raise Exception("Device " + str(self.device_name) + " not found in BT list")
                # get linear layout corresponding to required device
                device_layout = self.bt_list.child_by_text(self.device_name, allow_scroll_search=False,
                                                           className="android.widget.LinearLayout")
                # get the device settings button corresponding to required device by searching the child of the
                # linear layout
                device_settings_button = device_layout.child(resourceId="com.android.settings:id/deviceDetails")
                if not device_settings_button.wait.exists(timeout=self.timeout):
                    raise Exception("Device settings button not found")
                # click on device settings
                device_settings_button.click()
                if self.version.startswith("5.") or self.version.startswith("6.0"):
                    # LLP, M versions: no workaround needed
                    pass
                else:
                    # N version workaround
                    if not self.uidevice(resourceId="android:id/alertTitle", text="Paired devices").wait.exists(
                            timeout=self.timeout + 30000):
                        raise Exception("Device settings not opened")
                    # force a small delay due to window transition and close keyboard
                    time.sleep(1)
                    if "mInputShown=true" in self.adb_connection.cmd("shell dumpsys input_method").communicate()[
                            0].decode("utf-8"):
                        self.uidevice.press.back()
                        time.sleep(1)
            else:
                # O version
                paired_button = self.uidevice(description="Settings")
                paired_button.click.wait()
                time.sleep(1)
                if not self.uidevice(resourceId="android:id/alertTitle").wait.exists(timeout=self.timeout):
                    raise Exception("Alert title not opened when removing")
                # force a small delay due to window transition and close keyboard
                time.sleep(1)
                if "mInputShown=true" in self.adb_connection.cmd("shell dumpsys input_method").communicate()[0].decode(
                        "utf-8"):
                    self.uidevice.press.back()
        # NOTE: was Py2-only "except Exception, e" — "as" form works on Py2.6+
        except Exception as e:
            self.set_errorm("Paired device settings for " + str(self.device_name), e.message)
            self.step_data = False

    def check_condition(self):
        """
        :return: True if Device settings was opened, False otherwise
        """
        if self.step_data:
            # check if Device settings window is opened
            self.step_data = self.uidevice(resourceId="android:id/alertTitle", text="Paired devices").wait.exists(
                timeout=self.timeout)
        return self.step_data
class UnpairDevice(BtStep):
    """ Description:
            Unpair a certain device from the list. Call this in BT settings
            list for a device already paired.
        Usage:
            bluetooth_steps.UnpairDevice(serial = serial,
                                device_name="DEV_name", version=version)()
    """

    def __init__(self, device_name, **kwargs):
        """
        :param device_name: name of device from the list to be unpaired
        :param kwargs: serial, timeout, version, no_log and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.device_name = device_name
        self.step_data = True
        if self.version.startswith("5.") or self.version.startswith("6.0"):
            # LLP, M versions
            self.bt_list = self.uidevice(resourceId="android:id/list")
        else:
            # N version
            self.bt_list = self.uidevice(resourceId="com.android.settings:id/list")
        self.set_passm("Device " + str(self.device_name) + " unpaired")
        self.set_errorm("Unpair device " + str(self.device_name), "Device is still paired")

    def do(self):
        """Open the paired-device settings dialog and click FORGET."""
        try:
            if not OpenPairedDeviceSettings(serial=self.serial, device_name=self.device_name, timeout=self.timeout,
                                            version=self.version, critical=False)():
                raise Exception("Open paired device settings failed")
            if not self.uidevice(text=self.device_name):
                raise Exception("Name of the device not found in the unpair alert window")
            # click on forget; button label/resource differs per release
            if self.version.startswith("5."):
                # LLP version
                forget_button = self.uidevice(resourceId="android:id/button2", text="FORGET")
            elif self.version.startswith("6.0"):
                # M version
                forget_button = self.uidevice(resourceId="android:id/button2", text="Forget")
                # force a small delay due to window transition
                time.sleep(1)
            else:
                # N version
                forget_button = self.uidevice(text="FORGET")
            if not forget_button.wait.exists(timeout=self.timeout):
                raise Exception("Forget button not found when unpairing device")
            forget_button.click()
            if not self.bt_list.wait.exists(timeout=self.timeout):
                raise Exception("Not returned to BT list after unpairing device")
        # NOTE: was Py2-only "except Exception, e" — "as" form works on Py2.6+
        except Exception as e:
            self.set_errorm("Unpair device " + str(self.device_name), e.message)
            self.step_data = False

    def check_condition(self):
        """
        :return: True if Device was unpaired, False if not
        """
        if self.step_data:
            # check if is not paired with required device
            self.step_data = CheckIfPaired(serial=self.serial, dev_paired_with=self.device_name, paired=False,
                                           timeout=self.timeout, version=self.version, critical=False)()
        return self.step_data
class DisconnectDevice(BtStep):
    """ Description:
            Disconnect a certain device from the list; it will remain paired.
        Usage:
            bluetooth_steps.DisconnectDevice(serial = serial,
                                device_name="DEV_name", version=version)()
    """

    def __init__(self, device_name, **kwargs):
        """
        :param device_name: name of device from the list to be disconnected
        :param kwargs: serial, timeout, version, no_log and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.device_name = device_name
        self.step_data = True
        self.set_passm("Device " + str(self.device_name) + " Disconnected and still will be paired")
        self.set_errorm("DisconnectDevice" + str(self.device_name), "Device is still connected")

    def do(self):
        """Tap the device entry and confirm the disconnect dialog."""
        try:
            if ui_steps.click_button_common(serial=self.serial, view_to_find={"textContains": self.device_name},
                                            view_to_check={"resourceId": "android:id/alertTitle"})():
                # NOTE(review): lower-case "ok" relies on textContains matching;
                # confirm the actual dialog button label on target devices
                if not ui_steps.click_button_common(serial=self.serial, view_to_find={"textContains": "ok"})():
                    self.step_data = False
            else:
                self.step_data = False
        # NOTE: was Py2-only "except Exception, e" — "as" form works on Py2.6+
        except Exception as e:
            self.set_errorm("Disconnect device " + str(self.device_name), e.message)
            self.step_data = False

    def check_condition(self):
        """
        :return: True if the device is still listed as paired after the
            disconnect, False if not
        """
        if self.step_data:
            # the device must remain paired even though it was disconnected
            self.step_data = CheckIfPaired(serial=self.serial, dev_paired_with=self.device_name, paired=True,
                                           timeout=self.timeout, version=self.version, critical=False)()
        return self.step_data
class BtCheckNotificationAppear(BtStep):
    """ Description:
            Checks if a Bluetooth notification appeared (searching for a
            textContains selector). You have two options: click on
            notification (and validate that the notification menu is gone),
            or only check if it appeared. Call this with notification menu
            already opened.
        Usage:
            bluetooth_steps.BtCheckNotificationAppear(serial=serial,
                        text_contains="text_contained_into_notification_title",
                        click_on_notification=False, time_to_appear=60000)()
    """

    def __init__(self, text_contains, click_on_notification=False, time_to_appear=60000, **kwargs):
        """
        :param text_contains: text contained in the notification to check
        :param click_on_notification: True-click on notification. False-only check
        :param time_to_appear: max time to wait till notification appears
        :param kwargs: serial, timeout, no_log and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.text_contains = text_contains
        self.click_on_notification = click_on_notification
        self.time_to_appear = time_to_appear
        self.step_data = True
        self.notification = self.uidevice(resourceId="android:id/notification_main_column").child(
            textContains=self.text_contains)
        if self.click_on_notification:
            self.set_passm("Notification '" + str(
                self.text_contains) + "' found, clicked on it successful")
        else:
            self.set_passm("Notification '" + str(self.text_contains) + "' found")

    def do(self):
        """Wait for the notification and optionally click it."""
        try:
            # check if notification appeared
            if not self.notification.wait.exists(timeout=self.time_to_appear):
                raise Exception("Notification not appeared")
            # click on the notification if required and validate that the notifications menu is gone
            if self.click_on_notification:
                self.notification.click()
                if not self.uidevice(resourceId="com.android.systemui:id/notification_stack_scroller").wait.gone(
                        timeout=self.timeout):
                    raise Exception("Notification menu not gone after click")
        # NOTE: was Py2-only "except Exception, e" — "as" form works on Py2.6+
        except Exception as e:
            self.set_errorm("Notification " + str(self.text_contains), e.message)
            self.step_data = False

    def check_condition(self):
        """
        :return: True if notification was found (and was clicked if requested), False if not
        """
        return self.step_data
class BtCheckNotificationGone(BtStep):
    """ Description:
            Waits for a Bluetooth notification to be gone (searching for a
            textContains selector). Call this with notification menu
            already opened, with the required notification already displayed.
        Usage:
            bluetooth_steps.BtCheckNotificationGone(serial=serial,
                        text_contains="text_contained_into_notification_title",
                        time_to_wait=60000)()
    """

    def __init__(self, text_contains, time_to_wait=60000, **kwargs):
        """
        :param text_contains: text contained in the desired notification
        :param time_to_wait: max time to wait till notification is gone
        :param kwargs: serial, timeout, no_log and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.text_contains = text_contains
        self.time_to_wait = time_to_wait
        self.step_data = True
        self.notification = self.uidevice(resourceId="android:id/notification_main_column").child(
            textContains=self.text_contains)
        self.set_passm("Notification '" + str(self.text_contains) + "' gone")

    def do(self):
        """Wait until the matching notification disappears."""
        try:
            # wait for the notification to be gone
            if not self.notification.wait.gone(timeout=self.time_to_wait):
                raise Exception("Notification not gone after " + str(self.time_to_wait))
        # NOTE: was Py2-only "except Exception, e" — "as" form works on Py2.6+
        except Exception as e:
            self.set_errorm("Notification " + str(self.text_contains), e.message)
            self.step_data = False

    def check_condition(self):
        """
        :return: True if notification was gone, False if not
        """
        return self.step_data
class PressHome(BtStep):
    """ Description:
            Press the home button as a setup for tests.
        Usage:
            bluetooth_steps.PressHome(serial=serial)()
    """

    def __init__(self, **kwargs):
        """
        :param kwargs: serial, timeout and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.step_data = True
        self.set_passm("Home pressed")

    def do(self):
        """Press the hardware/software home key and let the UI settle."""
        try:
            self.uidevice.press.home()
            time.sleep(1)
        # NOTE: was Py2-only "except Exception, e" — "as" form works on Py2.6+
        except Exception as e:
            self.step_data = False
            self.set_errorm("Press home exception", e.message)

    def check_condition(self):
        """
        :return: True if home pressed, False if not.
        """
        return self.step_data
class StopPackage(BtStep):
    """ Description:
            Executes command 'adb shell am force-stop [package_name]'. By default,
            it stops the Settings app, but you can also clear other apps by passing
            their package name to package_name parameter. This does not check
            anything, to be used for setup/teardown of tests.
        Usage:
            bluetooth_steps.StopPackage(serial=serial,
                        package_name="com.android.settings")()
    """

    def __init__(self, package_name="com.android.settings", **kwargs):
        """
        :param package_name: package name of the app to be stopped
        :param kwargs: serial and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.package_name = package_name

    def do(self):
        """Force-stop the package over adb; failures are only logged."""
        try:
            self.adb_connection.cmd("shell am force-stop " + str(self.package_name)).wait()
        # NOTE: was Py2-only "except Exception, e" — "as" form works on Py2.6+
        except Exception as e:
            info_message = "Exception encountered when stop " + str(self.package_name) + ": " + e.message
            if self.serial:
                info_message = "[ " + str(self.serial) + " ] " + info_message
            self.logger.info(info_message)

    def check(self):
        # prevent test step to display info, not relevant for BT tests
        pass
class LogInfo(base_step):
    """ Description:
            Logs an info message
        Usage:
            bluetooth_steps.LogInfo(info_message=<your_message>)()
    """
    def __init__(self, info_message):
        """
        :param info_message: info message to be logged
        """
        base_step.__init__(self)
        self._info_message = info_message
    def do(self):
        # emit the stored message through the base step's logger
        self.logger.info(self._info_message)
    def check(self):
        # prevent test step to display info, not relevant for BT tests
        pass
class ConnectPairedDevices(BtStep):
    """ Description:
            Do not use in BT tests!
            Connects device with the already paired <dev_to_connect_name>.
        Usage:
            bluetooth_steps.ConnectPairedDevices(dev_to_connect_name=<device name>)()
        Tags:
            ui, android, bluetooth
    """

    def __init__(self, dev_to_connect_name, **kwargs):
        """
        :param dev_to_connect_name: name of the already-paired device to connect
        :param kwargs: serial and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.dev_to_connect_name = dev_to_connect_name
        self.connected = True
        self.set_passm("Connected to device " + str(dev_to_connect_name))

    def do(self):
        """Tap the paired device entry if it is visible."""
        try:
            ui_steps.click_button_if_exists(serial=self.serial,
                                            view_to_find={"text":
                                                          self.dev_to_connect_name})()
        # NOTE: was Py2-only "except Exception, e" — "as" form works on Py2.6+
        except Exception as e:
            self.connected = False
            self.set_errorm("Connect to device " +
                            str(self.dev_to_connect_name), e.message)

    def check_condition(self):
        """
        :return: True if the device name is still visible on screen, False otherwise
        """
        self.connected = bool(ui_utils.is_text_visible(text_to_find=self.dev_to_connect_name,
                                                       serial=self.serial))
        return self.connected
|
from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import dataflow_fft4
expected_verilog = """
module test
(
);
reg CLK;
reg RST;
reg signed [16-1:0] din0re;
reg signed [16-1:0] din0im;
reg signed [16-1:0] din1re;
reg signed [16-1:0] din1im;
reg signed [16-1:0] din2re;
reg signed [16-1:0] din2im;
reg signed [16-1:0] din3re;
reg signed [16-1:0] din3im;
wire signed [16-1:0] dout3re;
wire signed [16-1:0] dout3im;
wire signed [16-1:0] dout0re;
wire signed [16-1:0] dout0im;
wire signed [16-1:0] dout2re;
wire signed [16-1:0] dout2im;
wire signed [16-1:0] dout1re;
wire signed [16-1:0] dout1im;
wire signed [8-1:0] _din0re;
wire signed [8-1:0] _din0im;
wire signed [8-1:0] _din1re;
wire signed [8-1:0] _din1im;
wire signed [8-1:0] _din2re;
wire signed [8-1:0] _din2im;
wire signed [8-1:0] _din3re;
wire signed [8-1:0] _din3im;
wire signed [8-1:0] _dout0re;
wire signed [8-1:0] _dout0im;
wire signed [8-1:0] _dout1re;
wire signed [8-1:0] _dout1im;
wire signed [8-1:0] _dout2re;
wire signed [8-1:0] _dout2im;
wire signed [8-1:0] _dout3re;
wire signed [8-1:0] _dout3im;
assign _din0re = din0re >>> 8;
assign _din0im = din0im >>> 8;
assign _din1re = din1re >>> 8;
assign _din1im = din1im >>> 8;
assign _din2re = din2re >>> 8;
assign _din2im = din2im >>> 8;
assign _din3re = din3re >>> 8;
assign _din3im = din3im >>> 8;
assign _dout0re = dout0re >>> 8;
assign _dout0im = dout0im >>> 8;
assign _dout1re = dout1re >>> 8;
assign _dout1im = dout1im >>> 8;
assign _dout2re = dout2re >>> 8;
assign _dout2im = dout2im >>> 8;
assign _dout3re = dout3re >>> 8;
assign _dout3im = dout3im >>> 8;
fft4
uut
(
.CLK(CLK),
.RST(RST),
.din0re(din0re),
.din0im(din0im),
.din1re(din1re),
.din1im(din1im),
.din2re(din2re),
.din2im(din2im),
.din3re(din3re),
.din3im(din3im),
.dout3re(dout3re),
.dout3im(dout3im),
.dout0re(dout0re),
.dout0im(dout0im),
.dout2re(dout2re),
.dout2im(dout2im),
.dout1re(dout1re),
.dout1im(dout1im)
);
reg reset_done;
initial begin
$dumpfile("uut.vcd");
$dumpvars(0, uut, _din0re, _din0im, _din1re, _din1im, _din2re, _din2im, _din3re, _din3im, _dout0re, _dout0im, _dout1re, _dout1im, _dout2re, _dout2im, _dout3re, _dout3im);
end
initial begin
CLK = 0;
forever begin
#5 CLK = !CLK;
end
end
initial begin
RST = 0;
reset_done = 0;
din0re = 0;
din0im = 0;
din1re = 256;
din1im = 256;
din2re = 512;
din2im = 512;
din3re = 768;
din3im = 768;
#100;
RST = 1;
#100;
RST = 0;
#1000;
reset_done = 1;
@(posedge CLK);
#1;
#10000;
$finish;
end
reg [32-1:0] send_fsm;
localparam send_fsm_init = 0;
localparam send_fsm_1 = 1;
localparam send_fsm_2 = 2;
localparam send_fsm_3 = 3;
localparam send_fsm_4 = 4;
localparam send_fsm_5 = 5;
localparam send_fsm_6 = 6;
localparam send_fsm_7 = 7;
localparam send_fsm_8 = 8;
localparam send_fsm_9 = 9;
localparam send_fsm_10 = 10;
localparam send_fsm_11 = 11;
localparam send_fsm_12 = 12;
localparam send_fsm_13 = 13;
localparam send_fsm_14 = 14;
localparam send_fsm_15 = 15;
localparam send_fsm_16 = 16;
localparam send_fsm_17 = 17;
localparam send_fsm_18 = 18;
localparam send_fsm_19 = 19;
localparam send_fsm_20 = 20;
localparam send_fsm_21 = 21;
localparam send_fsm_22 = 22;
localparam send_fsm_23 = 23;
localparam send_fsm_24 = 24;
localparam send_fsm_25 = 25;
localparam send_fsm_26 = 26;
localparam send_fsm_27 = 27;
localparam send_fsm_28 = 28;
localparam send_fsm_29 = 29;
localparam send_fsm_30 = 30;
localparam send_fsm_31 = 31;
localparam send_fsm_32 = 32;
localparam send_fsm_33 = 33;
localparam send_fsm_34 = 34;
localparam send_fsm_35 = 35;
localparam send_fsm_36 = 36;
localparam send_fsm_37 = 37;
localparam send_fsm_38 = 38;
localparam send_fsm_39 = 39;
localparam send_fsm_40 = 40;
localparam send_fsm_41 = 41;
localparam send_fsm_42 = 42;
localparam send_fsm_43 = 43;
localparam send_fsm_44 = 44;
localparam send_fsm_45 = 45;
localparam send_fsm_46 = 46;
localparam send_fsm_47 = 47;
localparam send_fsm_48 = 48;
localparam send_fsm_49 = 49;
localparam send_fsm_50 = 50;
localparam send_fsm_51 = 51;
localparam send_fsm_52 = 52;
localparam send_fsm_53 = 53;
localparam send_fsm_54 = 54;
localparam send_fsm_55 = 55;
localparam send_fsm_56 = 56;
localparam send_fsm_57 = 57;
localparam send_fsm_58 = 58;
localparam send_fsm_59 = 59;
localparam send_fsm_60 = 60;
localparam send_fsm_61 = 61;
localparam send_fsm_62 = 62;
localparam send_fsm_63 = 63;
localparam send_fsm_64 = 64;
localparam send_fsm_65 = 65;
localparam send_fsm_66 = 66;
localparam send_fsm_67 = 67;
localparam send_fsm_68 = 68;
localparam send_fsm_69 = 69;
localparam send_fsm_70 = 70;
localparam send_fsm_71 = 71;
localparam send_fsm_72 = 72;
localparam send_fsm_73 = 73;
localparam send_fsm_74 = 74;
localparam send_fsm_75 = 75;
localparam send_fsm_76 = 76;
localparam send_fsm_77 = 77;
localparam send_fsm_78 = 78;
localparam send_fsm_79 = 79;
localparam send_fsm_80 = 80;
localparam send_fsm_81 = 81;
localparam send_fsm_82 = 82;
localparam send_fsm_83 = 83;
localparam send_fsm_84 = 84;
localparam send_fsm_85 = 85;
localparam send_fsm_86 = 86;
localparam send_fsm_87 = 87;
localparam send_fsm_88 = 88;
localparam send_fsm_89 = 89;
localparam send_fsm_90 = 90;
localparam send_fsm_91 = 91;
localparam send_fsm_92 = 92;
localparam send_fsm_93 = 93;
localparam send_fsm_94 = 94;
localparam send_fsm_95 = 95;
localparam send_fsm_96 = 96;
localparam send_fsm_97 = 97;
localparam send_fsm_98 = 98;
localparam send_fsm_99 = 99;
localparam send_fsm_100 = 100;
localparam send_fsm_101 = 101;
localparam send_fsm_102 = 102;
localparam send_fsm_103 = 103;
always @(posedge CLK) begin
if(RST) begin
send_fsm <= send_fsm_init;
end else begin
case(send_fsm)
send_fsm_init: begin
if(reset_done) begin
send_fsm <= send_fsm_1;
end
end
send_fsm_1: begin
din0re <= 0;
din0im <= 0;
din1re <= 256;
din1im <= 256;
din2re <= 512;
din2im <= 512;
din3re <= 768;
din3im <= 768;
send_fsm <= send_fsm_2;
end
send_fsm_2: begin
din0re <= 0;
din0im <= 0;
din1re <= 0;
din1im <= 0;
din2re <= 0;
din2im <= 0;
din3re <= 0;
din3im <= 0;
send_fsm <= send_fsm_3;
end
send_fsm_3: begin
send_fsm <= send_fsm_4;
end
send_fsm_4: begin
send_fsm <= send_fsm_5;
end
send_fsm_5: begin
send_fsm <= send_fsm_6;
end
send_fsm_6: begin
send_fsm <= send_fsm_7;
end
send_fsm_7: begin
send_fsm <= send_fsm_8;
end
send_fsm_8: begin
send_fsm <= send_fsm_9;
end
send_fsm_9: begin
send_fsm <= send_fsm_10;
end
send_fsm_10: begin
send_fsm <= send_fsm_11;
end
send_fsm_11: begin
send_fsm <= send_fsm_12;
end
send_fsm_12: begin
send_fsm <= send_fsm_13;
end
send_fsm_13: begin
send_fsm <= send_fsm_14;
end
send_fsm_14: begin
send_fsm <= send_fsm_15;
end
send_fsm_15: begin
send_fsm <= send_fsm_16;
end
send_fsm_16: begin
send_fsm <= send_fsm_17;
end
send_fsm_17: begin
send_fsm <= send_fsm_18;
end
send_fsm_18: begin
send_fsm <= send_fsm_19;
end
send_fsm_19: begin
send_fsm <= send_fsm_20;
end
send_fsm_20: begin
send_fsm <= send_fsm_21;
end
send_fsm_21: begin
send_fsm <= send_fsm_22;
end
send_fsm_22: begin
send_fsm <= send_fsm_23;
end
send_fsm_23: begin
send_fsm <= send_fsm_24;
end
send_fsm_24: begin
send_fsm <= send_fsm_25;
end
send_fsm_25: begin
send_fsm <= send_fsm_26;
end
send_fsm_26: begin
send_fsm <= send_fsm_27;
end
send_fsm_27: begin
send_fsm <= send_fsm_28;
end
send_fsm_28: begin
send_fsm <= send_fsm_29;
end
send_fsm_29: begin
send_fsm <= send_fsm_30;
end
send_fsm_30: begin
send_fsm <= send_fsm_31;
end
send_fsm_31: begin
send_fsm <= send_fsm_32;
end
send_fsm_32: begin
send_fsm <= send_fsm_33;
end
send_fsm_33: begin
send_fsm <= send_fsm_34;
end
send_fsm_34: begin
send_fsm <= send_fsm_35;
end
send_fsm_35: begin
send_fsm <= send_fsm_36;
end
send_fsm_36: begin
send_fsm <= send_fsm_37;
end
send_fsm_37: begin
send_fsm <= send_fsm_38;
end
send_fsm_38: begin
send_fsm <= send_fsm_39;
end
send_fsm_39: begin
send_fsm <= send_fsm_40;
end
send_fsm_40: begin
send_fsm <= send_fsm_41;
end
send_fsm_41: begin
send_fsm <= send_fsm_42;
end
send_fsm_42: begin
send_fsm <= send_fsm_43;
end
send_fsm_43: begin
send_fsm <= send_fsm_44;
end
send_fsm_44: begin
send_fsm <= send_fsm_45;
end
send_fsm_45: begin
send_fsm <= send_fsm_46;
end
send_fsm_46: begin
send_fsm <= send_fsm_47;
end
send_fsm_47: begin
send_fsm <= send_fsm_48;
end
send_fsm_48: begin
send_fsm <= send_fsm_49;
end
send_fsm_49: begin
send_fsm <= send_fsm_50;
end
send_fsm_50: begin
send_fsm <= send_fsm_51;
end
send_fsm_51: begin
send_fsm <= send_fsm_52;
end
send_fsm_52: begin
send_fsm <= send_fsm_53;
end
send_fsm_53: begin
send_fsm <= send_fsm_54;
end
send_fsm_54: begin
send_fsm <= send_fsm_55;
end
send_fsm_55: begin
send_fsm <= send_fsm_56;
end
send_fsm_56: begin
send_fsm <= send_fsm_57;
end
send_fsm_57: begin
send_fsm <= send_fsm_58;
end
send_fsm_58: begin
send_fsm <= send_fsm_59;
end
send_fsm_59: begin
send_fsm <= send_fsm_60;
end
send_fsm_60: begin
send_fsm <= send_fsm_61;
end
send_fsm_61: begin
send_fsm <= send_fsm_62;
end
send_fsm_62: begin
send_fsm <= send_fsm_63;
end
send_fsm_63: begin
send_fsm <= send_fsm_64;
end
send_fsm_64: begin
send_fsm <= send_fsm_65;
end
send_fsm_65: begin
send_fsm <= send_fsm_66;
end
send_fsm_66: begin
send_fsm <= send_fsm_67;
end
send_fsm_67: begin
send_fsm <= send_fsm_68;
end
send_fsm_68: begin
send_fsm <= send_fsm_69;
end
send_fsm_69: begin
send_fsm <= send_fsm_70;
end
send_fsm_70: begin
send_fsm <= send_fsm_71;
end
send_fsm_71: begin
send_fsm <= send_fsm_72;
end
send_fsm_72: begin
send_fsm <= send_fsm_73;
end
send_fsm_73: begin
send_fsm <= send_fsm_74;
end
send_fsm_74: begin
send_fsm <= send_fsm_75;
end
send_fsm_75: begin
send_fsm <= send_fsm_76;
end
send_fsm_76: begin
send_fsm <= send_fsm_77;
end
send_fsm_77: begin
send_fsm <= send_fsm_78;
end
send_fsm_78: begin
send_fsm <= send_fsm_79;
end
send_fsm_79: begin
send_fsm <= send_fsm_80;
end
send_fsm_80: begin
send_fsm <= send_fsm_81;
end
send_fsm_81: begin
send_fsm <= send_fsm_82;
end
send_fsm_82: begin
send_fsm <= send_fsm_83;
end
send_fsm_83: begin
send_fsm <= send_fsm_84;
end
send_fsm_84: begin
send_fsm <= send_fsm_85;
end
send_fsm_85: begin
send_fsm <= send_fsm_86;
end
send_fsm_86: begin
send_fsm <= send_fsm_87;
end
send_fsm_87: begin
send_fsm <= send_fsm_88;
end
send_fsm_88: begin
send_fsm <= send_fsm_89;
end
send_fsm_89: begin
send_fsm <= send_fsm_90;
end
send_fsm_90: begin
send_fsm <= send_fsm_91;
end
send_fsm_91: begin
send_fsm <= send_fsm_92;
end
send_fsm_92: begin
send_fsm <= send_fsm_93;
end
send_fsm_93: begin
send_fsm <= send_fsm_94;
end
send_fsm_94: begin
send_fsm <= send_fsm_95;
end
send_fsm_95: begin
send_fsm <= send_fsm_96;
end
send_fsm_96: begin
send_fsm <= send_fsm_97;
end
send_fsm_97: begin
send_fsm <= send_fsm_98;
end
send_fsm_98: begin
send_fsm <= send_fsm_99;
end
send_fsm_99: begin
send_fsm <= send_fsm_100;
end
send_fsm_100: begin
send_fsm <= send_fsm_101;
end
send_fsm_101: begin
send_fsm <= send_fsm_102;
end
send_fsm_102: begin
send_fsm <= send_fsm_103;
end
send_fsm_103: begin
$finish;
end
endcase
end
end
endmodule
module fft4
(
input CLK,
input RST,
input signed [16-1:0] din0re,
input signed [16-1:0] din0im,
input signed [16-1:0] din1re,
input signed [16-1:0] din1im,
input signed [16-1:0] din2re,
input signed [16-1:0] din2im,
input signed [16-1:0] din3re,
input signed [16-1:0] din3im,
output signed [16-1:0] dout3re,
output signed [16-1:0] dout3im,
output signed [16-1:0] dout0re,
output signed [16-1:0] dout0im,
output signed [16-1:0] dout2re,
output signed [16-1:0] dout2im,
output signed [16-1:0] dout1re,
output signed [16-1:0] dout1im
);
reg signed [16-1:0] _dataflow_plus_data_8;
reg _dataflow_plus_valid_8;
wire _dataflow_plus_ready_8;
reg signed [16-1:0] _dataflow_plus_data_9;
reg _dataflow_plus_valid_9;
wire _dataflow_plus_ready_9;
reg signed [16-1:0] _dataflow_minus_data_10;
reg _dataflow_minus_valid_10;
wire _dataflow_minus_ready_10;
reg signed [16-1:0] _dataflow_minus_data_11;
reg _dataflow_minus_valid_11;
wire _dataflow_minus_ready_11;
reg signed [16-1:0] _dataflow_plus_data_22;
reg _dataflow_plus_valid_22;
wire _dataflow_plus_ready_22;
reg signed [16-1:0] _dataflow_plus_data_23;
reg _dataflow_plus_valid_23;
wire _dataflow_plus_ready_23;
reg signed [16-1:0] _dataflow_minus_data_24;
reg _dataflow_minus_valid_24;
wire _dataflow_minus_ready_24;
reg signed [16-1:0] _dataflow_minus_data_25;
reg _dataflow_minus_valid_25;
wire _dataflow_minus_ready_25;
wire signed [16-1:0] _dataflow_times_data_12;
wire _dataflow_times_valid_12;
wire _dataflow_times_ready_12;
wire signed [18-1:0] _dataflow_times_mul_odata_12;
reg signed [18-1:0] _dataflow_times_mul_odata_reg_12;
assign _dataflow_times_data_12 = _dataflow_times_mul_odata_reg_12;
wire _dataflow_times_mul_ovalid_12;
reg _dataflow_times_mul_valid_reg_12;
assign _dataflow_times_valid_12 = _dataflow_times_mul_valid_reg_12;
wire _dataflow_times_mul_enable_12;
wire _dataflow_times_mul_update_12;
assign _dataflow_times_mul_enable_12 = (_dataflow_times_ready_12 || !_dataflow_times_valid_12) && _dataflow_minus_ready_10 && _dataflow_minus_valid_10;
assign _dataflow_times_mul_update_12 = _dataflow_times_ready_12 || !_dataflow_times_valid_12;
multiplier_0
_dataflow_times_mul_12
(
.CLK(CLK),
.RST(RST),
.update(_dataflow_times_mul_update_12),
.enable(_dataflow_times_mul_enable_12),
.valid(_dataflow_times_mul_ovalid_12),
.a(_dataflow_minus_data_10),
.b(2'sd1),
.c(_dataflow_times_mul_odata_12)
);
wire signed [16-1:0] _dataflow_times_data_14;
wire _dataflow_times_valid_14;
wire _dataflow_times_ready_14;
wire signed [17-1:0] _dataflow_times_mul_odata_14;
reg signed [17-1:0] _dataflow_times_mul_odata_reg_14;
assign _dataflow_times_data_14 = _dataflow_times_mul_odata_reg_14;
wire _dataflow_times_mul_ovalid_14;
reg _dataflow_times_mul_valid_reg_14;
assign _dataflow_times_valid_14 = _dataflow_times_mul_valid_reg_14;
wire _dataflow_times_mul_enable_14;
wire _dataflow_times_mul_update_14;
assign _dataflow_times_mul_enable_14 = (_dataflow_times_ready_14 || !_dataflow_times_valid_14) && _dataflow_minus_ready_11 && _dataflow_minus_valid_11;
assign _dataflow_times_mul_update_14 = _dataflow_times_ready_14 || !_dataflow_times_valid_14;
multiplier_1
_dataflow_times_mul_14
(
.CLK(CLK),
.RST(RST),
.update(_dataflow_times_mul_update_14),
.enable(_dataflow_times_mul_enable_14),
.valid(_dataflow_times_mul_ovalid_14),
.a(_dataflow_minus_data_11),
.b(1'sd0),
.c(_dataflow_times_mul_odata_14)
);
wire signed [16-1:0] _dataflow_times_data_16;
wire _dataflow_times_valid_16;
wire _dataflow_times_ready_16;
wire signed [17-1:0] _dataflow_times_mul_odata_16;
reg signed [17-1:0] _dataflow_times_mul_odata_reg_16;
assign _dataflow_times_data_16 = _dataflow_times_mul_odata_reg_16;
wire _dataflow_times_mul_ovalid_16;
reg _dataflow_times_mul_valid_reg_16;
assign _dataflow_times_valid_16 = _dataflow_times_mul_valid_reg_16;
wire _dataflow_times_mul_enable_16;
wire _dataflow_times_mul_update_16;
assign _dataflow_times_mul_enable_16 = (_dataflow_times_ready_16 || !_dataflow_times_valid_16) && _dataflow_minus_ready_10 && _dataflow_minus_valid_10;
assign _dataflow_times_mul_update_16 = _dataflow_times_ready_16 || !_dataflow_times_valid_16;
multiplier_2
_dataflow_times_mul_16
(
.CLK(CLK),
.RST(RST),
.update(_dataflow_times_mul_update_16),
.enable(_dataflow_times_mul_enable_16),
.valid(_dataflow_times_mul_ovalid_16),
.a(_dataflow_minus_data_10),
.b(1'sd0),
.c(_dataflow_times_mul_odata_16)
);
assign _dataflow_minus_ready_10 = (_dataflow_times_ready_12 || !_dataflow_times_valid_12) && _dataflow_minus_valid_10 && ((_dataflow_times_ready_16 || !_dataflow_times_valid_16) && _dataflow_minus_valid_10);
wire signed [16-1:0] _dataflow_times_data_18;
wire _dataflow_times_valid_18;
wire _dataflow_times_ready_18;
wire signed [18-1:0] _dataflow_times_mul_odata_18;
reg signed [18-1:0] _dataflow_times_mul_odata_reg_18;
assign _dataflow_times_data_18 = _dataflow_times_mul_odata_reg_18;
wire _dataflow_times_mul_ovalid_18;
reg _dataflow_times_mul_valid_reg_18;
assign _dataflow_times_valid_18 = _dataflow_times_mul_valid_reg_18;
wire _dataflow_times_mul_enable_18;
wire _dataflow_times_mul_update_18;
assign _dataflow_times_mul_enable_18 = (_dataflow_times_ready_18 || !_dataflow_times_valid_18) && _dataflow_minus_ready_11 && _dataflow_minus_valid_11;
assign _dataflow_times_mul_update_18 = _dataflow_times_ready_18 || !_dataflow_times_valid_18;
multiplier_3
_dataflow_times_mul_18
(
.CLK(CLK),
.RST(RST),
.update(_dataflow_times_mul_update_18),
.enable(_dataflow_times_mul_enable_18),
.valid(_dataflow_times_mul_ovalid_18),
.a(_dataflow_minus_data_11),
.b(2'sd1),
.c(_dataflow_times_mul_odata_18)
);
assign _dataflow_minus_ready_11 = (_dataflow_times_ready_14 || !_dataflow_times_valid_14) && _dataflow_minus_valid_11 && ((_dataflow_times_ready_18 || !_dataflow_times_valid_18) && _dataflow_minus_valid_11);
wire signed [16-1:0] _dataflow_times_data_26;
wire _dataflow_times_valid_26;
wire _dataflow_times_ready_26;
wire signed [17-1:0] _dataflow_times_mul_odata_26;
reg signed [17-1:0] _dataflow_times_mul_odata_reg_26;
assign _dataflow_times_data_26 = _dataflow_times_mul_odata_reg_26;
wire _dataflow_times_mul_ovalid_26;
reg _dataflow_times_mul_valid_reg_26;
assign _dataflow_times_valid_26 = _dataflow_times_mul_valid_reg_26;
wire _dataflow_times_mul_enable_26;
wire _dataflow_times_mul_update_26;
assign _dataflow_times_mul_enable_26 = (_dataflow_times_ready_26 || !_dataflow_times_valid_26) && _dataflow_minus_ready_24 && _dataflow_minus_valid_24;
assign _dataflow_times_mul_update_26 = _dataflow_times_ready_26 || !_dataflow_times_valid_26;
multiplier_4
_dataflow_times_mul_26
(
.CLK(CLK),
.RST(RST),
.update(_dataflow_times_mul_update_26),
.enable(_dataflow_times_mul_enable_26),
.valid(_dataflow_times_mul_ovalid_26),
.a(_dataflow_minus_data_24),
.b(1'sd0),
.c(_dataflow_times_mul_odata_26)
);
wire signed [16-1:0] _dataflow_times_data_28;
wire _dataflow_times_valid_28;
wire _dataflow_times_ready_28;
wire signed [18-1:0] _dataflow_times_mul_odata_28;
reg signed [18-1:0] _dataflow_times_mul_odata_reg_28;
assign _dataflow_times_data_28 = _dataflow_times_mul_odata_reg_28;
wire _dataflow_times_mul_ovalid_28;
reg _dataflow_times_mul_valid_reg_28;
assign _dataflow_times_valid_28 = _dataflow_times_mul_valid_reg_28;
wire _dataflow_times_mul_enable_28;
wire _dataflow_times_mul_update_28;
assign _dataflow_times_mul_enable_28 = (_dataflow_times_ready_28 || !_dataflow_times_valid_28) && _dataflow_minus_ready_25 && _dataflow_minus_valid_25;
assign _dataflow_times_mul_update_28 = _dataflow_times_ready_28 || !_dataflow_times_valid_28;
multiplier_5
_dataflow_times_mul_28
(
.CLK(CLK),
.RST(RST),
.update(_dataflow_times_mul_update_28),
.enable(_dataflow_times_mul_enable_28),
.valid(_dataflow_times_mul_ovalid_28),
.a(_dataflow_minus_data_25),
.b(-2'sd1),
.c(_dataflow_times_mul_odata_28)
);
wire signed [16-1:0] _dataflow_times_data_30;
wire _dataflow_times_valid_30;
wire _dataflow_times_ready_30;
wire signed [18-1:0] _dataflow_times_mul_odata_30;
reg signed [18-1:0] _dataflow_times_mul_odata_reg_30;
assign _dataflow_times_data_30 = _dataflow_times_mul_odata_reg_30;
wire _dataflow_times_mul_ovalid_30;
reg _dataflow_times_mul_valid_reg_30;
assign _dataflow_times_valid_30 = _dataflow_times_mul_valid_reg_30;
wire _dataflow_times_mul_enable_30;
wire _dataflow_times_mul_update_30;
assign _dataflow_times_mul_enable_30 = (_dataflow_times_ready_30 || !_dataflow_times_valid_30) && _dataflow_minus_ready_24 && _dataflow_minus_valid_24;
assign _dataflow_times_mul_update_30 = _dataflow_times_ready_30 || !_dataflow_times_valid_30;
multiplier_6
_dataflow_times_mul_30
(
.CLK(CLK),
.RST(RST),
.update(_dataflow_times_mul_update_30),
.enable(_dataflow_times_mul_enable_30),
.valid(_dataflow_times_mul_ovalid_30),
.a(_dataflow_minus_data_24),
.b(-2'sd1),
.c(_dataflow_times_mul_odata_30)
);
assign _dataflow_minus_ready_24 = (_dataflow_times_ready_26 || !_dataflow_times_valid_26) && _dataflow_minus_valid_24 && ((_dataflow_times_ready_30 || !_dataflow_times_valid_30) && _dataflow_minus_valid_24);
wire signed [16-1:0] _dataflow_times_data_32;
wire _dataflow_times_valid_32;
wire _dataflow_times_ready_32;
wire signed [17-1:0] _dataflow_times_mul_odata_32;
reg signed [17-1:0] _dataflow_times_mul_odata_reg_32;
assign _dataflow_times_data_32 = _dataflow_times_mul_odata_reg_32;
wire _dataflow_times_mul_ovalid_32;
reg _dataflow_times_mul_valid_reg_32;
assign _dataflow_times_valid_32 = _dataflow_times_mul_valid_reg_32;
wire _dataflow_times_mul_enable_32;
wire _dataflow_times_mul_update_32;
assign _dataflow_times_mul_enable_32 = (_dataflow_times_ready_32 || !_dataflow_times_valid_32) && _dataflow_minus_ready_25 && _dataflow_minus_valid_25;
assign _dataflow_times_mul_update_32 = _dataflow_times_ready_32 || !_dataflow_times_valid_32;
multiplier_7
_dataflow_times_mul_32
(
.CLK(CLK),
.RST(RST),
.update(_dataflow_times_mul_update_32),
.enable(_dataflow_times_mul_enable_32),
.valid(_dataflow_times_mul_ovalid_32),
.a(_dataflow_minus_data_25),
.b(1'sd0),
.c(_dataflow_times_mul_odata_32)
);
assign _dataflow_minus_ready_25 = (_dataflow_times_ready_28 || !_dataflow_times_valid_28) && _dataflow_minus_valid_25 && ((_dataflow_times_ready_32 || !_dataflow_times_valid_32) && _dataflow_minus_valid_25);
reg signed [16-1:0] _dataflow_plus_data_36;
reg _dataflow_plus_valid_36;
wire _dataflow_plus_ready_36;
reg signed [16-1:0] _dataflow_plus_data_37;
reg _dataflow_plus_valid_37;
wire _dataflow_plus_ready_37;
reg signed [16-1:0] _dataflow_minus_data_38;
reg _dataflow_minus_valid_38;
wire _dataflow_minus_ready_38;
assign _dataflow_plus_ready_8 = (_dataflow_plus_ready_36 || !_dataflow_plus_valid_36) && (_dataflow_plus_valid_8 && _dataflow_plus_valid_22) && ((_dataflow_minus_ready_38 || !_dataflow_minus_valid_38) && (_dataflow_plus_valid_8 && _dataflow_plus_valid_22));
assign _dataflow_plus_ready_22 = (_dataflow_plus_ready_36 || !_dataflow_plus_valid_36) && (_dataflow_plus_valid_8 && _dataflow_plus_valid_22) && ((_dataflow_minus_ready_38 || !_dataflow_minus_valid_38) && (_dataflow_plus_valid_8 && _dataflow_plus_valid_22));
reg signed [16-1:0] _dataflow_minus_data_39;
reg _dataflow_minus_valid_39;
wire _dataflow_minus_ready_39;
assign _dataflow_plus_ready_9 = (_dataflow_plus_ready_37 || !_dataflow_plus_valid_37) && (_dataflow_plus_valid_9 && _dataflow_plus_valid_23) && ((_dataflow_minus_ready_39 || !_dataflow_minus_valid_39) && (_dataflow_plus_valid_9 && _dataflow_plus_valid_23));
assign _dataflow_plus_ready_23 = (_dataflow_plus_ready_37 || !_dataflow_plus_valid_37) && (_dataflow_plus_valid_9 && _dataflow_plus_valid_23) && ((_dataflow_minus_ready_39 || !_dataflow_minus_valid_39) && (_dataflow_plus_valid_9 && _dataflow_plus_valid_23));
wire signed [16-1:0] _dataflow_times_data_40;
wire _dataflow_times_valid_40;
wire _dataflow_times_ready_40;
wire signed [18-1:0] _dataflow_times_mul_odata_40;
reg signed [18-1:0] _dataflow_times_mul_odata_reg_40;
assign _dataflow_times_data_40 = _dataflow_times_mul_odata_reg_40;
wire _dataflow_times_mul_ovalid_40;
reg _dataflow_times_mul_valid_reg_40;
assign _dataflow_times_valid_40 = _dataflow_times_mul_valid_reg_40;
wire _dataflow_times_mul_enable_40;
wire _dataflow_times_mul_update_40;
assign _dataflow_times_mul_enable_40 = (_dataflow_times_ready_40 || !_dataflow_times_valid_40) && _dataflow_minus_ready_38 && _dataflow_minus_valid_38;
assign _dataflow_times_mul_update_40 = _dataflow_times_ready_40 || !_dataflow_times_valid_40;
multiplier_8
_dataflow_times_mul_40
(
.CLK(CLK),
.RST(RST),
.update(_dataflow_times_mul_update_40),
.enable(_dataflow_times_mul_enable_40),
.valid(_dataflow_times_mul_ovalid_40),
.a(_dataflow_minus_data_38),
.b(2'sd1),
.c(_dataflow_times_mul_odata_40)
);
wire signed [16-1:0] _dataflow_times_data_42;
wire _dataflow_times_valid_42;
wire _dataflow_times_ready_42;
wire signed [17-1:0] _dataflow_times_mul_odata_42;
reg signed [17-1:0] _dataflow_times_mul_odata_reg_42;
assign _dataflow_times_data_42 = _dataflow_times_mul_odata_reg_42;
wire _dataflow_times_mul_ovalid_42;
reg _dataflow_times_mul_valid_reg_42;
assign _dataflow_times_valid_42 = _dataflow_times_mul_valid_reg_42;
wire _dataflow_times_mul_enable_42;
wire _dataflow_times_mul_update_42;
assign _dataflow_times_mul_enable_42 = (_dataflow_times_ready_42 || !_dataflow_times_valid_42) && _dataflow_minus_ready_39 && _dataflow_minus_valid_39;
assign _dataflow_times_mul_update_42 = _dataflow_times_ready_42 || !_dataflow_times_valid_42;
multiplier_9
_dataflow_times_mul_42
(
.CLK(CLK),
.RST(RST),
.update(_dataflow_times_mul_update_42),
.enable(_dataflow_times_mul_enable_42),
.valid(_dataflow_times_mul_ovalid_42),
.a(_dataflow_minus_data_39),
.b(1'sd0),
.c(_dataflow_times_mul_odata_42)
);
wire signed [16-1:0] _dataflow_times_data_44;
wire _dataflow_times_valid_44;
wire _dataflow_times_ready_44;
wire signed [17-1:0] _dataflow_times_mul_odata_44;
reg signed [17-1:0] _dataflow_times_mul_odata_reg_44;
assign _dataflow_times_data_44 = _dataflow_times_mul_odata_reg_44;
wire _dataflow_times_mul_ovalid_44;
reg _dataflow_times_mul_valid_reg_44;
assign _dataflow_times_valid_44 = _dataflow_times_mul_valid_reg_44;
wire _dataflow_times_mul_enable_44;
wire _dataflow_times_mul_update_44;
assign _dataflow_times_mul_enable_44 = (_dataflow_times_ready_44 || !_dataflow_times_valid_44) && _dataflow_minus_ready_38 && _dataflow_minus_valid_38;
assign _dataflow_times_mul_update_44 = _dataflow_times_ready_44 || !_dataflow_times_valid_44;
multiplier_10
_dataflow_times_mul_44
(
.CLK(CLK),
.RST(RST),
.update(_dataflow_times_mul_update_44),
.enable(_dataflow_times_mul_enable_44),
.valid(_dataflow_times_mul_ovalid_44),
.a(_dataflow_minus_data_38),
.b(1'sd0),
.c(_dataflow_times_mul_odata_44)
);
assign _dataflow_minus_ready_38 = (_dataflow_times_ready_40 || !_dataflow_times_valid_40) && _dataflow_minus_valid_38 && ((_dataflow_times_ready_44 || !_dataflow_times_valid_44) && _dataflow_minus_valid_38);
wire signed [16-1:0] _dataflow_times_data_46;
wire _dataflow_times_valid_46;
wire _dataflow_times_ready_46;
wire signed [18-1:0] _dataflow_times_mul_odata_46;
reg signed [18-1:0] _dataflow_times_mul_odata_reg_46;
assign _dataflow_times_data_46 = _dataflow_times_mul_odata_reg_46;
wire _dataflow_times_mul_ovalid_46;
reg _dataflow_times_mul_valid_reg_46;
assign _dataflow_times_valid_46 = _dataflow_times_mul_valid_reg_46;
wire _dataflow_times_mul_enable_46;
wire _dataflow_times_mul_update_46;
assign _dataflow_times_mul_enable_46 = (_dataflow_times_ready_46 || !_dataflow_times_valid_46) && _dataflow_minus_ready_39 && _dataflow_minus_valid_39;
assign _dataflow_times_mul_update_46 = _dataflow_times_ready_46 || !_dataflow_times_valid_46;
multiplier_11
_dataflow_times_mul_46
(
.CLK(CLK),
.RST(RST),
.update(_dataflow_times_mul_update_46),
.enable(_dataflow_times_mul_enable_46),
.valid(_dataflow_times_mul_ovalid_46),
.a(_dataflow_minus_data_39),
.b(2'sd1),
.c(_dataflow_times_mul_odata_46)
);
assign _dataflow_minus_ready_39 = (_dataflow_times_ready_42 || !_dataflow_times_valid_42) && _dataflow_minus_valid_39 && ((_dataflow_times_ready_46 || !_dataflow_times_valid_46) && _dataflow_minus_valid_39);
reg signed [16-1:0] _dataflow__delay_data_64;
reg _dataflow__delay_valid_64;
wire _dataflow__delay_ready_64;
assign _dataflow_plus_ready_36 = (_dataflow__delay_ready_64 || !_dataflow__delay_valid_64) && _dataflow_plus_valid_36;
reg signed [16-1:0] _dataflow__delay_data_80;
reg _dataflow__delay_valid_80;
wire _dataflow__delay_ready_80;
assign _dataflow_plus_ready_37 = (_dataflow__delay_ready_80 || !_dataflow__delay_valid_80) && _dataflow_plus_valid_37;
reg signed [16-1:0] _dataflow__delay_data_65;
reg _dataflow__delay_valid_65;
wire _dataflow__delay_ready_65;
assign _dataflow__delay_ready_64 = (_dataflow__delay_ready_65 || !_dataflow__delay_valid_65) && _dataflow__delay_valid_64;
reg signed [16-1:0] _dataflow__delay_data_81;
reg _dataflow__delay_valid_81;
wire _dataflow__delay_ready_81;
assign _dataflow__delay_ready_80 = (_dataflow__delay_ready_81 || !_dataflow__delay_valid_81) && _dataflow__delay_valid_80;
reg signed [16-1:0] _dataflow__delay_data_66;
reg _dataflow__delay_valid_66;
wire _dataflow__delay_ready_66;
assign _dataflow__delay_ready_65 = (_dataflow__delay_ready_66 || !_dataflow__delay_valid_66) && _dataflow__delay_valid_65;
reg signed [16-1:0] _dataflow__delay_data_82;
reg _dataflow__delay_valid_82;
wire _dataflow__delay_ready_82;
assign _dataflow__delay_ready_81 = (_dataflow__delay_ready_82 || !_dataflow__delay_valid_82) && _dataflow__delay_valid_81;
reg signed [16-1:0] _dataflow__delay_data_67;
reg _dataflow__delay_valid_67;
wire _dataflow__delay_ready_67;
assign _dataflow__delay_ready_66 = (_dataflow__delay_ready_67 || !_dataflow__delay_valid_67) && _dataflow__delay_valid_66;
reg signed [16-1:0] _dataflow__delay_data_83;
reg _dataflow__delay_valid_83;
wire _dataflow__delay_ready_83;
assign _dataflow__delay_ready_82 = (_dataflow__delay_ready_83 || !_dataflow__delay_valid_83) && _dataflow__delay_valid_82;
reg signed [16-1:0] _dataflow__delay_data_68;
reg _dataflow__delay_valid_68;
wire _dataflow__delay_ready_68;
assign _dataflow__delay_ready_67 = (_dataflow__delay_ready_68 || !_dataflow__delay_valid_68) && _dataflow__delay_valid_67;
reg signed [16-1:0] _dataflow__delay_data_84;
reg _dataflow__delay_valid_84;
wire _dataflow__delay_ready_84;
assign _dataflow__delay_ready_83 = (_dataflow__delay_ready_84 || !_dataflow__delay_valid_84) && _dataflow__delay_valid_83;
reg signed [16-1:0] _dataflow__delay_data_69;
reg _dataflow__delay_valid_69;
wire _dataflow__delay_ready_69;
assign _dataflow__delay_ready_68 = (_dataflow__delay_ready_69 || !_dataflow__delay_valid_69) && _dataflow__delay_valid_68;
reg signed [16-1:0] _dataflow__delay_data_85;
reg _dataflow__delay_valid_85;
wire _dataflow__delay_ready_85;
assign _dataflow__delay_ready_84 = (_dataflow__delay_ready_85 || !_dataflow__delay_valid_85) && _dataflow__delay_valid_84;
reg signed [16-1:0] _dataflow_minus_data_20;
reg _dataflow_minus_valid_20;
wire _dataflow_minus_ready_20;
assign _dataflow_times_ready_12 = (_dataflow_minus_ready_20 || !_dataflow_minus_valid_20) && (_dataflow_times_valid_12 && _dataflow_times_valid_14);
assign _dataflow_times_ready_14 = (_dataflow_minus_ready_20 || !_dataflow_minus_valid_20) && (_dataflow_times_valid_12 && _dataflow_times_valid_14);
reg signed [16-1:0] _dataflow_plus_data_21;
reg _dataflow_plus_valid_21;
wire _dataflow_plus_ready_21;
assign _dataflow_times_ready_16 = (_dataflow_plus_ready_21 || !_dataflow_plus_valid_21) && (_dataflow_times_valid_16 && _dataflow_times_valid_18);
assign _dataflow_times_ready_18 = (_dataflow_plus_ready_21 || !_dataflow_plus_valid_21) && (_dataflow_times_valid_16 && _dataflow_times_valid_18);
reg signed [16-1:0] _dataflow_minus_data_34;
reg _dataflow_minus_valid_34;
wire _dataflow_minus_ready_34;
assign _dataflow_times_ready_26 = (_dataflow_minus_ready_34 || !_dataflow_minus_valid_34) && (_dataflow_times_valid_26 && _dataflow_times_valid_28);
assign _dataflow_times_ready_28 = (_dataflow_minus_ready_34 || !_dataflow_minus_valid_34) && (_dataflow_times_valid_26 && _dataflow_times_valid_28);
reg signed [16-1:0] _dataflow_plus_data_35;
reg _dataflow_plus_valid_35;
wire _dataflow_plus_ready_35;
assign _dataflow_times_ready_30 = (_dataflow_plus_ready_35 || !_dataflow_plus_valid_35) && (_dataflow_times_valid_30 && _dataflow_times_valid_32);
assign _dataflow_times_ready_32 = (_dataflow_plus_ready_35 || !_dataflow_plus_valid_35) && (_dataflow_times_valid_30 && _dataflow_times_valid_32);
reg signed [16-1:0] _dataflow__delay_data_70;
reg _dataflow__delay_valid_70;
wire _dataflow__delay_ready_70;
assign _dataflow__delay_ready_69 = (_dataflow__delay_ready_70 || !_dataflow__delay_valid_70) && _dataflow__delay_valid_69;
reg signed [16-1:0] _dataflow__delay_data_86;
reg _dataflow__delay_valid_86;
wire _dataflow__delay_ready_86;
assign _dataflow__delay_ready_85 = (_dataflow__delay_ready_86 || !_dataflow__delay_valid_86) && _dataflow__delay_valid_85;
reg signed [16-1:0] _dataflow_minus_data_48;
reg _dataflow_minus_valid_48;
wire _dataflow_minus_ready_48;
assign _dataflow_times_ready_40 = (_dataflow_minus_ready_48 || !_dataflow_minus_valid_48) && (_dataflow_times_valid_40 && _dataflow_times_valid_42);
assign _dataflow_times_ready_42 = (_dataflow_minus_ready_48 || !_dataflow_minus_valid_48) && (_dataflow_times_valid_40 && _dataflow_times_valid_42);
reg signed [16-1:0] _dataflow_plus_data_49;
reg _dataflow_plus_valid_49;
wire _dataflow_plus_ready_49;
assign _dataflow_times_ready_44 = (_dataflow_plus_ready_49 || !_dataflow_plus_valid_49) && (_dataflow_times_valid_44 && _dataflow_times_valid_46);
assign _dataflow_times_ready_46 = (_dataflow_plus_ready_49 || !_dataflow_plus_valid_49) && (_dataflow_times_valid_44 && _dataflow_times_valid_46);
reg signed [16-1:0] _dataflow_plus_data_50;
reg _dataflow_plus_valid_50;
wire _dataflow_plus_ready_50;
reg signed [16-1:0] _dataflow_plus_data_51;
reg _dataflow_plus_valid_51;
wire _dataflow_plus_ready_51;
reg signed [16-1:0] _dataflow_minus_data_52;
reg _dataflow_minus_valid_52;
wire _dataflow_minus_ready_52;
assign _dataflow_minus_ready_20 = (_dataflow_plus_ready_50 || !_dataflow_plus_valid_50) && (_dataflow_minus_valid_20 && _dataflow_minus_valid_34) && ((_dataflow_minus_ready_52 || !_dataflow_minus_valid_52) && (_dataflow_minus_valid_20 && _dataflow_minus_valid_34));
assign _dataflow_minus_ready_34 = (_dataflow_plus_ready_50 || !_dataflow_plus_valid_50) && (_dataflow_minus_valid_20 && _dataflow_minus_valid_34) && ((_dataflow_minus_ready_52 || !_dataflow_minus_valid_52) && (_dataflow_minus_valid_20 && _dataflow_minus_valid_34));
reg signed [16-1:0] _dataflow_minus_data_53;
reg _dataflow_minus_valid_53;
wire _dataflow_minus_ready_53;
assign _dataflow_plus_ready_21 = (_dataflow_plus_ready_51 || !_dataflow_plus_valid_51) && (_dataflow_plus_valid_21 && _dataflow_plus_valid_35) && ((_dataflow_minus_ready_53 || !_dataflow_minus_valid_53) && (_dataflow_plus_valid_21 && _dataflow_plus_valid_35));
assign _dataflow_plus_ready_35 = (_dataflow_plus_ready_51 || !_dataflow_plus_valid_51) && (_dataflow_plus_valid_21 && _dataflow_plus_valid_35) && ((_dataflow_minus_ready_53 || !_dataflow_minus_valid_53) && (_dataflow_plus_valid_21 && _dataflow_plus_valid_35));
reg signed [16-1:0] _dataflow__delay_data_71;
reg _dataflow__delay_valid_71;
wire _dataflow__delay_ready_71;
assign _dataflow__delay_ready_70 = (_dataflow__delay_ready_71 || !_dataflow__delay_valid_71) && _dataflow__delay_valid_70;
reg signed [16-1:0] _dataflow__delay_data_87;
reg _dataflow__delay_valid_87;
wire _dataflow__delay_ready_87;
assign _dataflow__delay_ready_86 = (_dataflow__delay_ready_87 || !_dataflow__delay_valid_87) && _dataflow__delay_valid_86;
wire signed [16-1:0] _dataflow_times_data_54;
wire _dataflow_times_valid_54;
wire _dataflow_times_ready_54;
wire signed [18-1:0] _dataflow_times_mul_odata_54;
reg signed [18-1:0] _dataflow_times_mul_odata_reg_54;
assign _dataflow_times_data_54 = _dataflow_times_mul_odata_reg_54;
wire _dataflow_times_mul_ovalid_54;
reg _dataflow_times_mul_valid_reg_54;
assign _dataflow_times_valid_54 = _dataflow_times_mul_valid_reg_54;
wire _dataflow_times_mul_enable_54;
wire _dataflow_times_mul_update_54;
assign _dataflow_times_mul_enable_54 = (_dataflow_times_ready_54 || !_dataflow_times_valid_54) && _dataflow_minus_ready_52 && _dataflow_minus_valid_52;
assign _dataflow_times_mul_update_54 = _dataflow_times_ready_54 || !_dataflow_times_valid_54;
multiplier_12
_dataflow_times_mul_54
(
.CLK(CLK),
.RST(RST),
.update(_dataflow_times_mul_update_54),
.enable(_dataflow_times_mul_enable_54),
.valid(_dataflow_times_mul_ovalid_54),
.a(_dataflow_minus_data_52),
.b(2'sd1),
.c(_dataflow_times_mul_odata_54)
);
wire signed [16-1:0] _dataflow_times_data_56;
wire _dataflow_times_valid_56;
wire _dataflow_times_ready_56;
wire signed [17-1:0] _dataflow_times_mul_odata_56;
reg signed [17-1:0] _dataflow_times_mul_odata_reg_56;
assign _dataflow_times_data_56 = _dataflow_times_mul_odata_reg_56;
wire _dataflow_times_mul_ovalid_56;
reg _dataflow_times_mul_valid_reg_56;
assign _dataflow_times_valid_56 = _dataflow_times_mul_valid_reg_56;
wire _dataflow_times_mul_enable_56;
wire _dataflow_times_mul_update_56;
assign _dataflow_times_mul_enable_56 = (_dataflow_times_ready_56 || !_dataflow_times_valid_56) && _dataflow_minus_ready_53 && _dataflow_minus_valid_53;
assign _dataflow_times_mul_update_56 = _dataflow_times_ready_56 || !_dataflow_times_valid_56;
multiplier_13
_dataflow_times_mul_56
(
.CLK(CLK),
.RST(RST),
.update(_dataflow_times_mul_update_56),
.enable(_dataflow_times_mul_enable_56),
.valid(_dataflow_times_mul_ovalid_56),
.a(_dataflow_minus_data_53),
.b(1'sd0),
.c(_dataflow_times_mul_odata_56)
);
wire signed [16-1:0] _dataflow_times_data_58;
wire _dataflow_times_valid_58;
wire _dataflow_times_ready_58;
wire signed [17-1:0] _dataflow_times_mul_odata_58;
reg signed [17-1:0] _dataflow_times_mul_odata_reg_58;
assign _dataflow_times_data_58 = _dataflow_times_mul_odata_reg_58;
wire _dataflow_times_mul_ovalid_58;
reg _dataflow_times_mul_valid_reg_58;
assign _dataflow_times_valid_58 = _dataflow_times_mul_valid_reg_58;
wire _dataflow_times_mul_enable_58;
wire _dataflow_times_mul_update_58;
assign _dataflow_times_mul_enable_58 = (_dataflow_times_ready_58 || !_dataflow_times_valid_58) && _dataflow_minus_ready_52 && _dataflow_minus_valid_52;
assign _dataflow_times_mul_update_58 = _dataflow_times_ready_58 || !_dataflow_times_valid_58;
multiplier_14
_dataflow_times_mul_58
(
.CLK(CLK),
.RST(RST),
.update(_dataflow_times_mul_update_58),
.enable(_dataflow_times_mul_enable_58),
.valid(_dataflow_times_mul_ovalid_58),
.a(_dataflow_minus_data_52),
.b(1'sd0),
.c(_dataflow_times_mul_odata_58)
);
assign _dataflow_minus_ready_52 = (_dataflow_times_ready_54 || !_dataflow_times_valid_54) && _dataflow_minus_valid_52 && ((_dataflow_times_ready_58 || !_dataflow_times_valid_58) && _dataflow_minus_valid_52);
wire signed [16-1:0] _dataflow_times_data_60;
wire _dataflow_times_valid_60;
wire _dataflow_times_ready_60;
wire signed [18-1:0] _dataflow_times_mul_odata_60;
reg signed [18-1:0] _dataflow_times_mul_odata_reg_60;
assign _dataflow_times_data_60 = _dataflow_times_mul_odata_reg_60;
wire _dataflow_times_mul_ovalid_60;
reg _dataflow_times_mul_valid_reg_60;
assign _dataflow_times_valid_60 = _dataflow_times_mul_valid_reg_60;
wire _dataflow_times_mul_enable_60;
wire _dataflow_times_mul_update_60;
assign _dataflow_times_mul_enable_60 = (_dataflow_times_ready_60 || !_dataflow_times_valid_60) && _dataflow_minus_ready_53 && _dataflow_minus_valid_53;
assign _dataflow_times_mul_update_60 = _dataflow_times_ready_60 || !_dataflow_times_valid_60;
multiplier_15
_dataflow_times_mul_60
(
.CLK(CLK),
.RST(RST),
.update(_dataflow_times_mul_update_60),
.enable(_dataflow_times_mul_enable_60),
.valid(_dataflow_times_mul_ovalid_60),
.a(_dataflow_minus_data_53),
.b(2'sd1),
.c(_dataflow_times_mul_odata_60)
);
assign _dataflow_minus_ready_53 = (_dataflow_times_ready_56 || !_dataflow_times_valid_56) && _dataflow_minus_valid_53 && ((_dataflow_times_ready_60 || !_dataflow_times_valid_60) && _dataflow_minus_valid_53);
reg signed [16-1:0] _dataflow__delay_data_72;
reg _dataflow__delay_valid_72;
wire _dataflow__delay_ready_72;
assign _dataflow__delay_ready_71 = (_dataflow__delay_ready_72 || !_dataflow__delay_valid_72) && _dataflow__delay_valid_71;
reg signed [16-1:0] _dataflow__delay_data_88;
reg _dataflow__delay_valid_88;
wire _dataflow__delay_ready_88;
assign _dataflow__delay_ready_87 = (_dataflow__delay_ready_88 || !_dataflow__delay_valid_88) && _dataflow__delay_valid_87;
reg signed [16-1:0] _dataflow__delay_data_96;
reg _dataflow__delay_valid_96;
wire _dataflow__delay_ready_96;
assign _dataflow_minus_ready_48 = (_dataflow__delay_ready_96 || !_dataflow__delay_valid_96) && _dataflow_minus_valid_48;
reg signed [16-1:0] _dataflow__delay_data_104;
reg _dataflow__delay_valid_104;
wire _dataflow__delay_ready_104;
assign _dataflow_plus_ready_49 = (_dataflow__delay_ready_104 || !_dataflow__delay_valid_104) && _dataflow_plus_valid_49;
reg signed [16-1:0] _dataflow__delay_data_112;
reg _dataflow__delay_valid_112;
wire _dataflow__delay_ready_112;
assign _dataflow_plus_ready_50 = (_dataflow__delay_ready_112 || !_dataflow__delay_valid_112) && _dataflow_plus_valid_50;
reg signed [16-1:0] _dataflow__delay_data_120;
reg _dataflow__delay_valid_120;
wire _dataflow__delay_ready_120;
assign _dataflow_plus_ready_51 = (_dataflow__delay_ready_120 || !_dataflow__delay_valid_120) && _dataflow_plus_valid_51;
reg signed [16-1:0] _dataflow__delay_data_73;
reg _dataflow__delay_valid_73;
wire _dataflow__delay_ready_73;
assign _dataflow__delay_ready_72 = (_dataflow__delay_ready_73 || !_dataflow__delay_valid_73) && _dataflow__delay_valid_72;
reg signed [16-1:0] _dataflow__delay_data_89;
reg _dataflow__delay_valid_89;
wire _dataflow__delay_ready_89;
assign _dataflow__delay_ready_88 = (_dataflow__delay_ready_89 || !_dataflow__delay_valid_89) && _dataflow__delay_valid_88;
reg signed [16-1:0] _dataflow__delay_data_97;
reg _dataflow__delay_valid_97;
wire _dataflow__delay_ready_97;
assign _dataflow__delay_ready_96 = (_dataflow__delay_ready_97 || !_dataflow__delay_valid_97) && _dataflow__delay_valid_96;
reg signed [16-1:0] _dataflow__delay_data_105;
reg _dataflow__delay_valid_105;
wire _dataflow__delay_ready_105;
assign _dataflow__delay_ready_104 = (_dataflow__delay_ready_105 || !_dataflow__delay_valid_105) && _dataflow__delay_valid_104;
reg signed [16-1:0] _dataflow__delay_data_113;
reg _dataflow__delay_valid_113;
wire _dataflow__delay_ready_113;
assign _dataflow__delay_ready_112 = (_dataflow__delay_ready_113 || !_dataflow__delay_valid_113) && _dataflow__delay_valid_112;
reg signed [16-1:0] _dataflow__delay_data_121;
reg _dataflow__delay_valid_121;
wire _dataflow__delay_ready_121;
assign _dataflow__delay_ready_120 = (_dataflow__delay_ready_121 || !_dataflow__delay_valid_121) && _dataflow__delay_valid_120;
reg signed [16-1:0] _dataflow__delay_data_74;
reg _dataflow__delay_valid_74;
wire _dataflow__delay_ready_74;
assign _dataflow__delay_ready_73 = (_dataflow__delay_ready_74 || !_dataflow__delay_valid_74) && _dataflow__delay_valid_73;
reg signed [16-1:0] _dataflow__delay_data_90;
reg _dataflow__delay_valid_90;
wire _dataflow__delay_ready_90;
assign _dataflow__delay_ready_89 = (_dataflow__delay_ready_90 || !_dataflow__delay_valid_90) && _dataflow__delay_valid_89;
reg signed [16-1:0] _dataflow__delay_data_98;
reg _dataflow__delay_valid_98;
wire _dataflow__delay_ready_98;
assign _dataflow__delay_ready_97 = (_dataflow__delay_ready_98 || !_dataflow__delay_valid_98) && _dataflow__delay_valid_97;
reg signed [16-1:0] _dataflow__delay_data_106;
reg _dataflow__delay_valid_106;
wire _dataflow__delay_ready_106;
assign _dataflow__delay_ready_105 = (_dataflow__delay_ready_106 || !_dataflow__delay_valid_106) && _dataflow__delay_valid_105;
reg signed [16-1:0] _dataflow__delay_data_114;
reg _dataflow__delay_valid_114;
wire _dataflow__delay_ready_114;
assign _dataflow__delay_ready_113 = (_dataflow__delay_ready_114 || !_dataflow__delay_valid_114) && _dataflow__delay_valid_113;
reg signed [16-1:0] _dataflow__delay_data_122;
reg _dataflow__delay_valid_122;
wire _dataflow__delay_ready_122;
assign _dataflow__delay_ready_121 = (_dataflow__delay_ready_122 || !_dataflow__delay_valid_122) && _dataflow__delay_valid_121;
reg signed [16-1:0] _dataflow__delay_data_75;
reg _dataflow__delay_valid_75;
wire _dataflow__delay_ready_75;
assign _dataflow__delay_ready_74 = (_dataflow__delay_ready_75 || !_dataflow__delay_valid_75) && _dataflow__delay_valid_74;
reg signed [16-1:0] _dataflow__delay_data_91;
reg _dataflow__delay_valid_91;
wire _dataflow__delay_ready_91;
assign _dataflow__delay_ready_90 = (_dataflow__delay_ready_91 || !_dataflow__delay_valid_91) && _dataflow__delay_valid_90;
reg signed [16-1:0] _dataflow__delay_data_99;
reg _dataflow__delay_valid_99;
wire _dataflow__delay_ready_99;
assign _dataflow__delay_ready_98 = (_dataflow__delay_ready_99 || !_dataflow__delay_valid_99) && _dataflow__delay_valid_98;
reg signed [16-1:0] _dataflow__delay_data_107;
reg _dataflow__delay_valid_107;
wire _dataflow__delay_ready_107;
assign _dataflow__delay_ready_106 = (_dataflow__delay_ready_107 || !_dataflow__delay_valid_107) && _dataflow__delay_valid_106;
reg signed [16-1:0] _dataflow__delay_data_115;
reg _dataflow__delay_valid_115;
wire _dataflow__delay_ready_115;
assign _dataflow__delay_ready_114 = (_dataflow__delay_ready_115 || !_dataflow__delay_valid_115) && _dataflow__delay_valid_114;
reg signed [16-1:0] _dataflow__delay_data_123;
reg _dataflow__delay_valid_123;
wire _dataflow__delay_ready_123;
assign _dataflow__delay_ready_122 = (_dataflow__delay_ready_123 || !_dataflow__delay_valid_123) && _dataflow__delay_valid_122;
reg signed [16-1:0] _dataflow__delay_data_76;
reg _dataflow__delay_valid_76;
wire _dataflow__delay_ready_76;
assign _dataflow__delay_ready_75 = (_dataflow__delay_ready_76 || !_dataflow__delay_valid_76) && _dataflow__delay_valid_75;
reg signed [16-1:0] _dataflow__delay_data_92;
reg _dataflow__delay_valid_92;
wire _dataflow__delay_ready_92;
assign _dataflow__delay_ready_91 = (_dataflow__delay_ready_92 || !_dataflow__delay_valid_92) && _dataflow__delay_valid_91;
reg signed [16-1:0] _dataflow__delay_data_100;
reg _dataflow__delay_valid_100;
wire _dataflow__delay_ready_100;
assign _dataflow__delay_ready_99 = (_dataflow__delay_ready_100 || !_dataflow__delay_valid_100) && _dataflow__delay_valid_99;
reg signed [16-1:0] _dataflow__delay_data_108;
reg _dataflow__delay_valid_108;
wire _dataflow__delay_ready_108;
assign _dataflow__delay_ready_107 = (_dataflow__delay_ready_108 || !_dataflow__delay_valid_108) && _dataflow__delay_valid_107;
reg signed [16-1:0] _dataflow__delay_data_116;
reg _dataflow__delay_valid_116;
wire _dataflow__delay_ready_116;
assign _dataflow__delay_ready_115 = (_dataflow__delay_ready_116 || !_dataflow__delay_valid_116) && _dataflow__delay_valid_115;
reg signed [16-1:0] _dataflow__delay_data_124;
reg _dataflow__delay_valid_124;
wire _dataflow__delay_ready_124;
assign _dataflow__delay_ready_123 = (_dataflow__delay_ready_124 || !_dataflow__delay_valid_124) && _dataflow__delay_valid_123;
reg signed [16-1:0] _dataflow__delay_data_77;
reg _dataflow__delay_valid_77;
wire _dataflow__delay_ready_77;
assign _dataflow__delay_ready_76 = (_dataflow__delay_ready_77 || !_dataflow__delay_valid_77) && _dataflow__delay_valid_76;
reg signed [16-1:0] _dataflow__delay_data_93;
reg _dataflow__delay_valid_93;
wire _dataflow__delay_ready_93;
assign _dataflow__delay_ready_92 = (_dataflow__delay_ready_93 || !_dataflow__delay_valid_93) && _dataflow__delay_valid_92;
reg signed [16-1:0] _dataflow__delay_data_101;
reg _dataflow__delay_valid_101;
wire _dataflow__delay_ready_101;
assign _dataflow__delay_ready_100 = (_dataflow__delay_ready_101 || !_dataflow__delay_valid_101) && _dataflow__delay_valid_100;
reg signed [16-1:0] _dataflow__delay_data_109;
reg _dataflow__delay_valid_109;
wire _dataflow__delay_ready_109;
assign _dataflow__delay_ready_108 = (_dataflow__delay_ready_109 || !_dataflow__delay_valid_109) && _dataflow__delay_valid_108;
reg signed [16-1:0] _dataflow__delay_data_117;
reg _dataflow__delay_valid_117;
wire _dataflow__delay_ready_117;
assign _dataflow__delay_ready_116 = (_dataflow__delay_ready_117 || !_dataflow__delay_valid_117) && _dataflow__delay_valid_116;
reg signed [16-1:0] _dataflow__delay_data_125;
reg _dataflow__delay_valid_125;
wire _dataflow__delay_ready_125;
assign _dataflow__delay_ready_124 = (_dataflow__delay_ready_125 || !_dataflow__delay_valid_125) && _dataflow__delay_valid_124;
reg signed [16-1:0] _dataflow__delay_data_78;
reg _dataflow__delay_valid_78;
wire _dataflow__delay_ready_78;
assign _dataflow__delay_ready_77 = (_dataflow__delay_ready_78 || !_dataflow__delay_valid_78) && _dataflow__delay_valid_77;
reg signed [16-1:0] _dataflow__delay_data_94;
reg _dataflow__delay_valid_94;
wire _dataflow__delay_ready_94;
assign _dataflow__delay_ready_93 = (_dataflow__delay_ready_94 || !_dataflow__delay_valid_94) && _dataflow__delay_valid_93;
reg signed [16-1:0] _dataflow__delay_data_102;
reg _dataflow__delay_valid_102;
wire _dataflow__delay_ready_102;
assign _dataflow__delay_ready_101 = (_dataflow__delay_ready_102 || !_dataflow__delay_valid_102) && _dataflow__delay_valid_101;
reg signed [16-1:0] _dataflow__delay_data_110;
reg _dataflow__delay_valid_110;
wire _dataflow__delay_ready_110;
assign _dataflow__delay_ready_109 = (_dataflow__delay_ready_110 || !_dataflow__delay_valid_110) && _dataflow__delay_valid_109;
reg signed [16-1:0] _dataflow__delay_data_118;
reg _dataflow__delay_valid_118;
wire _dataflow__delay_ready_118;
assign _dataflow__delay_ready_117 = (_dataflow__delay_ready_118 || !_dataflow__delay_valid_118) && _dataflow__delay_valid_117;
reg signed [16-1:0] _dataflow__delay_data_126;
reg _dataflow__delay_valid_126;
wire _dataflow__delay_ready_126;
assign _dataflow__delay_ready_125 = (_dataflow__delay_ready_126 || !_dataflow__delay_valid_126) && _dataflow__delay_valid_125;
// Final butterfly arithmetic results.
// minus_62 consumes the pair of multiplier outputs 54 and 56; plus_63
// consumes multiplier outputs 58 and 60.  Two-input join handshake: both
// multiplier ready signals are asserted together, and only when BOTH
// operand valids are high — so neither operand is consumed without the
// other, keeping the streams in lockstep.
reg signed [16-1:0] _dataflow_minus_data_62;
reg _dataflow_minus_valid_62;
wire _dataflow_minus_ready_62;
assign _dataflow_times_ready_54 = (_dataflow_minus_ready_62 || !_dataflow_minus_valid_62) && (_dataflow_times_valid_54 && _dataflow_times_valid_56);
assign _dataflow_times_ready_56 = (_dataflow_minus_ready_62 || !_dataflow_minus_valid_62) && (_dataflow_times_valid_54 && _dataflow_times_valid_56);
reg signed [16-1:0] _dataflow_plus_data_63;
reg _dataflow_plus_valid_63;
wire _dataflow_plus_ready_63;
assign _dataflow_times_ready_58 = (_dataflow_plus_ready_63 || !_dataflow_plus_valid_63) && (_dataflow_times_valid_58 && _dataflow_times_valid_60);
assign _dataflow_times_ready_60 = (_dataflow_plus_ready_63 || !_dataflow_plus_valid_63) && (_dataflow_times_valid_58 && _dataflow_times_valid_60);
// Last delay stage of each latency-matching chain (79, 95, 103, 111, 119,
// 127).  Same single-producer handshake as the stages above; these six
// registers feed the dout* output assigns directly (see below in the file).
reg signed [16-1:0] _dataflow__delay_data_79;
reg _dataflow__delay_valid_79;
wire _dataflow__delay_ready_79;
assign _dataflow__delay_ready_78 = (_dataflow__delay_ready_79 || !_dataflow__delay_valid_79) && _dataflow__delay_valid_78;
reg signed [16-1:0] _dataflow__delay_data_95;
reg _dataflow__delay_valid_95;
wire _dataflow__delay_ready_95;
assign _dataflow__delay_ready_94 = (_dataflow__delay_ready_95 || !_dataflow__delay_valid_95) && _dataflow__delay_valid_94;
reg signed [16-1:0] _dataflow__delay_data_103;
reg _dataflow__delay_valid_103;
wire _dataflow__delay_ready_103;
assign _dataflow__delay_ready_102 = (_dataflow__delay_ready_103 || !_dataflow__delay_valid_103) && _dataflow__delay_valid_102;
reg signed [16-1:0] _dataflow__delay_data_111;
reg _dataflow__delay_valid_111;
wire _dataflow__delay_ready_111;
assign _dataflow__delay_ready_110 = (_dataflow__delay_ready_111 || !_dataflow__delay_valid_111) && _dataflow__delay_valid_110;
reg signed [16-1:0] _dataflow__delay_data_119;
reg _dataflow__delay_valid_119;
wire _dataflow__delay_ready_119;
assign _dataflow__delay_ready_118 = (_dataflow__delay_ready_119 || !_dataflow__delay_valid_119) && _dataflow__delay_valid_118;
reg signed [16-1:0] _dataflow__delay_data_127;
reg _dataflow__delay_valid_127;
wire _dataflow__delay_ready_127;
assign _dataflow__delay_ready_126 = (_dataflow__delay_ready_127 || !_dataflow__delay_valid_127) && _dataflow__delay_valid_126;
// Module output wiring.  Each dout* port is driven straight from the data
// register of its terminal dataflow node, and that node's ready is tied to
// constant 1: the external consumer is assumed always ready, so the
// pipelines never stall at the output boundary.
// dout3 = (minus_62, plus_63); dout0 = (delay_79, delay_95);
// dout2 = (delay_103, delay_111); dout1 = (delay_119, delay_127).
assign dout3re = _dataflow_minus_data_62;
assign _dataflow_minus_ready_62 = 1;
assign dout3im = _dataflow_plus_data_63;
assign _dataflow_plus_ready_63 = 1;
assign dout0re = _dataflow__delay_data_79;
assign _dataflow__delay_ready_79 = 1;
assign dout0im = _dataflow__delay_data_95;
assign _dataflow__delay_ready_95 = 1;
assign dout2re = _dataflow__delay_data_103;
assign _dataflow__delay_ready_103 = 1;
assign dout2im = _dataflow__delay_data_111;
assign _dataflow__delay_ready_111 = 1;
assign dout1re = _dataflow__delay_data_119;
assign _dataflow__delay_ready_119 = 1;
assign dout1im = _dataflow__delay_data_127;
assign _dataflow__delay_ready_127 = 1;
always @(posedge CLK) begin
if(RST) begin
_dataflow_plus_data_8 <= 0;
_dataflow_plus_valid_8 <= 0;
_dataflow_plus_data_9 <= 0;
_dataflow_plus_valid_9 <= 0;
_dataflow_minus_data_10 <= 0;
_dataflow_minus_valid_10 <= 0;
_dataflow_minus_data_11 <= 0;
_dataflow_minus_valid_11 <= 0;
_dataflow_plus_data_22 <= 0;
_dataflow_plus_valid_22 <= 0;
_dataflow_plus_data_23 <= 0;
_dataflow_plus_valid_23 <= 0;
_dataflow_minus_data_24 <= 0;
_dataflow_minus_valid_24 <= 0;
_dataflow_minus_data_25 <= 0;
_dataflow_minus_valid_25 <= 0;
_dataflow_times_mul_odata_reg_12 <= 0;
_dataflow_times_mul_valid_reg_12 <= 0;
_dataflow_times_mul_odata_reg_14 <= 0;
_dataflow_times_mul_valid_reg_14 <= 0;
_dataflow_times_mul_odata_reg_16 <= 0;
_dataflow_times_mul_valid_reg_16 <= 0;
_dataflow_times_mul_odata_reg_18 <= 0;
_dataflow_times_mul_valid_reg_18 <= 0;
_dataflow_times_mul_odata_reg_26 <= 0;
_dataflow_times_mul_valid_reg_26 <= 0;
_dataflow_times_mul_odata_reg_28 <= 0;
_dataflow_times_mul_valid_reg_28 <= 0;
_dataflow_times_mul_odata_reg_30 <= 0;
_dataflow_times_mul_valid_reg_30 <= 0;
_dataflow_times_mul_odata_reg_32 <= 0;
_dataflow_times_mul_valid_reg_32 <= 0;
_dataflow_plus_data_36 <= 0;
_dataflow_plus_valid_36 <= 0;
_dataflow_plus_data_37 <= 0;
_dataflow_plus_valid_37 <= 0;
_dataflow_minus_data_38 <= 0;
_dataflow_minus_valid_38 <= 0;
_dataflow_minus_data_39 <= 0;
_dataflow_minus_valid_39 <= 0;
_dataflow_times_mul_odata_reg_40 <= 0;
_dataflow_times_mul_valid_reg_40 <= 0;
_dataflow_times_mul_odata_reg_42 <= 0;
_dataflow_times_mul_valid_reg_42 <= 0;
_dataflow_times_mul_odata_reg_44 <= 0;
_dataflow_times_mul_valid_reg_44 <= 0;
_dataflow_times_mul_odata_reg_46 <= 0;
_dataflow_times_mul_valid_reg_46 <= 0;
_dataflow__delay_data_64 <= 0;
_dataflow__delay_valid_64 <= 0;
_dataflow__delay_data_80 <= 0;
_dataflow__delay_valid_80 <= 0;
_dataflow__delay_data_65 <= 0;
_dataflow__delay_valid_65 <= 0;
_dataflow__delay_data_81 <= 0;
_dataflow__delay_valid_81 <= 0;
_dataflow__delay_data_66 <= 0;
_dataflow__delay_valid_66 <= 0;
_dataflow__delay_data_82 <= 0;
_dataflow__delay_valid_82 <= 0;
_dataflow__delay_data_67 <= 0;
_dataflow__delay_valid_67 <= 0;
_dataflow__delay_data_83 <= 0;
_dataflow__delay_valid_83 <= 0;
_dataflow__delay_data_68 <= 0;
_dataflow__delay_valid_68 <= 0;
_dataflow__delay_data_84 <= 0;
_dataflow__delay_valid_84 <= 0;
_dataflow__delay_data_69 <= 0;
_dataflow__delay_valid_69 <= 0;
_dataflow__delay_data_85 <= 0;
_dataflow__delay_valid_85 <= 0;
_dataflow_minus_data_20 <= 0;
_dataflow_minus_valid_20 <= 0;
_dataflow_plus_data_21 <= 0;
_dataflow_plus_valid_21 <= 0;
_dataflow_minus_data_34 <= 0;
_dataflow_minus_valid_34 <= 0;
_dataflow_plus_data_35 <= 0;
_dataflow_plus_valid_35 <= 0;
_dataflow__delay_data_70 <= 0;
_dataflow__delay_valid_70 <= 0;
_dataflow__delay_data_86 <= 0;
_dataflow__delay_valid_86 <= 0;
_dataflow_minus_data_48 <= 0;
_dataflow_minus_valid_48 <= 0;
_dataflow_plus_data_49 <= 0;
_dataflow_plus_valid_49 <= 0;
_dataflow_plus_data_50 <= 0;
_dataflow_plus_valid_50 <= 0;
_dataflow_plus_data_51 <= 0;
_dataflow_plus_valid_51 <= 0;
_dataflow_minus_data_52 <= 0;
_dataflow_minus_valid_52 <= 0;
_dataflow_minus_data_53 <= 0;
_dataflow_minus_valid_53 <= 0;
_dataflow__delay_data_71 <= 0;
_dataflow__delay_valid_71 <= 0;
_dataflow__delay_data_87 <= 0;
_dataflow__delay_valid_87 <= 0;
_dataflow_times_mul_odata_reg_54 <= 0;
_dataflow_times_mul_valid_reg_54 <= 0;
_dataflow_times_mul_odata_reg_56 <= 0;
_dataflow_times_mul_valid_reg_56 <= 0;
_dataflow_times_mul_odata_reg_58 <= 0;
_dataflow_times_mul_valid_reg_58 <= 0;
_dataflow_times_mul_odata_reg_60 <= 0;
_dataflow_times_mul_valid_reg_60 <= 0;
_dataflow__delay_data_72 <= 0;
_dataflow__delay_valid_72 <= 0;
_dataflow__delay_data_88 <= 0;
_dataflow__delay_valid_88 <= 0;
_dataflow__delay_data_96 <= 0;
_dataflow__delay_valid_96 <= 0;
_dataflow__delay_data_104 <= 0;
_dataflow__delay_valid_104 <= 0;
_dataflow__delay_data_112 <= 0;
_dataflow__delay_valid_112 <= 0;
_dataflow__delay_data_120 <= 0;
_dataflow__delay_valid_120 <= 0;
_dataflow__delay_data_73 <= 0;
_dataflow__delay_valid_73 <= 0;
_dataflow__delay_data_89 <= 0;
_dataflow__delay_valid_89 <= 0;
_dataflow__delay_data_97 <= 0;
_dataflow__delay_valid_97 <= 0;
_dataflow__delay_data_105 <= 0;
_dataflow__delay_valid_105 <= 0;
_dataflow__delay_data_113 <= 0;
_dataflow__delay_valid_113 <= 0;
_dataflow__delay_data_121 <= 0;
_dataflow__delay_valid_121 <= 0;
_dataflow__delay_data_74 <= 0;
_dataflow__delay_valid_74 <= 0;
_dataflow__delay_data_90 <= 0;
_dataflow__delay_valid_90 <= 0;
_dataflow__delay_data_98 <= 0;
_dataflow__delay_valid_98 <= 0;
_dataflow__delay_data_106 <= 0;
_dataflow__delay_valid_106 <= 0;
_dataflow__delay_data_114 <= 0;
_dataflow__delay_valid_114 <= 0;
_dataflow__delay_data_122 <= 0;
_dataflow__delay_valid_122 <= 0;
_dataflow__delay_data_75 <= 0;
_dataflow__delay_valid_75 <= 0;
_dataflow__delay_data_91 <= 0;
_dataflow__delay_valid_91 <= 0;
_dataflow__delay_data_99 <= 0;
_dataflow__delay_valid_99 <= 0;
_dataflow__delay_data_107 <= 0;
_dataflow__delay_valid_107 <= 0;
_dataflow__delay_data_115 <= 0;
_dataflow__delay_valid_115 <= 0;
_dataflow__delay_data_123 <= 0;
_dataflow__delay_valid_123 <= 0;
_dataflow__delay_data_76 <= 0;
_dataflow__delay_valid_76 <= 0;
_dataflow__delay_data_92 <= 0;
_dataflow__delay_valid_92 <= 0;
_dataflow__delay_data_100 <= 0;
_dataflow__delay_valid_100 <= 0;
_dataflow__delay_data_108 <= 0;
_dataflow__delay_valid_108 <= 0;
_dataflow__delay_data_116 <= 0;
_dataflow__delay_valid_116 <= 0;
_dataflow__delay_data_124 <= 0;
_dataflow__delay_valid_124 <= 0;
_dataflow__delay_data_77 <= 0;
_dataflow__delay_valid_77 <= 0;
_dataflow__delay_data_93 <= 0;
_dataflow__delay_valid_93 <= 0;
_dataflow__delay_data_101 <= 0;
_dataflow__delay_valid_101 <= 0;
_dataflow__delay_data_109 <= 0;
_dataflow__delay_valid_109 <= 0;
_dataflow__delay_data_117 <= 0;
_dataflow__delay_valid_117 <= 0;
_dataflow__delay_data_125 <= 0;
_dataflow__delay_valid_125 <= 0;
_dataflow__delay_data_78 <= 0;
_dataflow__delay_valid_78 <= 0;
_dataflow__delay_data_94 <= 0;
_dataflow__delay_valid_94 <= 0;
_dataflow__delay_data_102 <= 0;
_dataflow__delay_valid_102 <= 0;
_dataflow__delay_data_110 <= 0;
_dataflow__delay_valid_110 <= 0;
_dataflow__delay_data_118 <= 0;
_dataflow__delay_valid_118 <= 0;
_dataflow__delay_data_126 <= 0;
_dataflow__delay_valid_126 <= 0;
_dataflow_minus_data_62 <= 0;
_dataflow_minus_valid_62 <= 0;
_dataflow_plus_data_63 <= 0;
_dataflow_plus_valid_63 <= 0;
_dataflow__delay_data_79 <= 0;
_dataflow__delay_valid_79 <= 0;
_dataflow__delay_data_95 <= 0;
_dataflow__delay_valid_95 <= 0;
_dataflow__delay_data_103 <= 0;
_dataflow__delay_valid_103 <= 0;
_dataflow__delay_data_111 <= 0;
_dataflow__delay_valid_111 <= 0;
_dataflow__delay_data_119 <= 0;
_dataflow__delay_valid_119 <= 0;
_dataflow__delay_data_127 <= 0;
_dataflow__delay_valid_127 <= 0;
end else begin
if((_dataflow_plus_ready_8 || !_dataflow_plus_valid_8) && 1 && 1) begin
_dataflow_plus_data_8 <= din0re + din2re;
end
if(_dataflow_plus_valid_8 && _dataflow_plus_ready_8) begin
_dataflow_plus_valid_8 <= 0;
end
if((_dataflow_plus_ready_8 || !_dataflow_plus_valid_8) && 1) begin
_dataflow_plus_valid_8 <= 1;
end
if((_dataflow_plus_ready_9 || !_dataflow_plus_valid_9) && 1 && 1) begin
_dataflow_plus_data_9 <= din0im + din2im;
end
if(_dataflow_plus_valid_9 && _dataflow_plus_ready_9) begin
_dataflow_plus_valid_9 <= 0;
end
if((_dataflow_plus_ready_9 || !_dataflow_plus_valid_9) && 1) begin
_dataflow_plus_valid_9 <= 1;
end
if((_dataflow_minus_ready_10 || !_dataflow_minus_valid_10) && 1 && 1) begin
_dataflow_minus_data_10 <= din0re - din2re;
end
if(_dataflow_minus_valid_10 && _dataflow_minus_ready_10) begin
_dataflow_minus_valid_10 <= 0;
end
if((_dataflow_minus_ready_10 || !_dataflow_minus_valid_10) && 1) begin
_dataflow_minus_valid_10 <= 1;
end
if((_dataflow_minus_ready_11 || !_dataflow_minus_valid_11) && 1 && 1) begin
_dataflow_minus_data_11 <= din0im - din2im;
end
if(_dataflow_minus_valid_11 && _dataflow_minus_ready_11) begin
_dataflow_minus_valid_11 <= 0;
end
if((_dataflow_minus_ready_11 || !_dataflow_minus_valid_11) && 1) begin
_dataflow_minus_valid_11 <= 1;
end
if((_dataflow_plus_ready_22 || !_dataflow_plus_valid_22) && 1 && 1) begin
_dataflow_plus_data_22 <= din1re + din3re;
end
if(_dataflow_plus_valid_22 && _dataflow_plus_ready_22) begin
_dataflow_plus_valid_22 <= 0;
end
if((_dataflow_plus_ready_22 || !_dataflow_plus_valid_22) && 1) begin
_dataflow_plus_valid_22 <= 1;
end
if((_dataflow_plus_ready_23 || !_dataflow_plus_valid_23) && 1 && 1) begin
_dataflow_plus_data_23 <= din1im + din3im;
end
if(_dataflow_plus_valid_23 && _dataflow_plus_ready_23) begin
_dataflow_plus_valid_23 <= 0;
end
if((_dataflow_plus_ready_23 || !_dataflow_plus_valid_23) && 1) begin
_dataflow_plus_valid_23 <= 1;
end
if((_dataflow_minus_ready_24 || !_dataflow_minus_valid_24) && 1 && 1) begin
_dataflow_minus_data_24 <= din1re - din3re;
end
if(_dataflow_minus_valid_24 && _dataflow_minus_ready_24) begin
_dataflow_minus_valid_24 <= 0;
end
if((_dataflow_minus_ready_24 || !_dataflow_minus_valid_24) && 1) begin
_dataflow_minus_valid_24 <= 1;
end
if((_dataflow_minus_ready_25 || !_dataflow_minus_valid_25) && 1 && 1) begin
_dataflow_minus_data_25 <= din1im - din3im;
end
if(_dataflow_minus_valid_25 && _dataflow_minus_ready_25) begin
_dataflow_minus_valid_25 <= 0;
end
if((_dataflow_minus_ready_25 || !_dataflow_minus_valid_25) && 1) begin
_dataflow_minus_valid_25 <= 1;
end
if(_dataflow_times_ready_12 || !_dataflow_times_valid_12) begin
_dataflow_times_mul_odata_reg_12 <= _dataflow_times_mul_odata_12;
end
if(_dataflow_times_ready_12 || !_dataflow_times_valid_12) begin
_dataflow_times_mul_valid_reg_12 <= _dataflow_times_mul_ovalid_12;
end
if(_dataflow_times_ready_14 || !_dataflow_times_valid_14) begin
_dataflow_times_mul_odata_reg_14 <= _dataflow_times_mul_odata_14;
end
if(_dataflow_times_ready_14 || !_dataflow_times_valid_14) begin
_dataflow_times_mul_valid_reg_14 <= _dataflow_times_mul_ovalid_14;
end
if(_dataflow_times_ready_16 || !_dataflow_times_valid_16) begin
_dataflow_times_mul_odata_reg_16 <= _dataflow_times_mul_odata_16;
end
if(_dataflow_times_ready_16 || !_dataflow_times_valid_16) begin
_dataflow_times_mul_valid_reg_16 <= _dataflow_times_mul_ovalid_16;
end
if(_dataflow_times_ready_18 || !_dataflow_times_valid_18) begin
_dataflow_times_mul_odata_reg_18 <= _dataflow_times_mul_odata_18;
end
if(_dataflow_times_ready_18 || !_dataflow_times_valid_18) begin
_dataflow_times_mul_valid_reg_18 <= _dataflow_times_mul_ovalid_18;
end
if(_dataflow_times_ready_26 || !_dataflow_times_valid_26) begin
_dataflow_times_mul_odata_reg_26 <= _dataflow_times_mul_odata_26;
end
if(_dataflow_times_ready_26 || !_dataflow_times_valid_26) begin
_dataflow_times_mul_valid_reg_26 <= _dataflow_times_mul_ovalid_26;
end
if(_dataflow_times_ready_28 || !_dataflow_times_valid_28) begin
_dataflow_times_mul_odata_reg_28 <= _dataflow_times_mul_odata_28;
end
if(_dataflow_times_ready_28 || !_dataflow_times_valid_28) begin
_dataflow_times_mul_valid_reg_28 <= _dataflow_times_mul_ovalid_28;
end
if(_dataflow_times_ready_30 || !_dataflow_times_valid_30) begin
_dataflow_times_mul_odata_reg_30 <= _dataflow_times_mul_odata_30;
end
if(_dataflow_times_ready_30 || !_dataflow_times_valid_30) begin
_dataflow_times_mul_valid_reg_30 <= _dataflow_times_mul_ovalid_30;
end
if(_dataflow_times_ready_32 || !_dataflow_times_valid_32) begin
_dataflow_times_mul_odata_reg_32 <= _dataflow_times_mul_odata_32;
end
if(_dataflow_times_ready_32 || !_dataflow_times_valid_32) begin
_dataflow_times_mul_valid_reg_32 <= _dataflow_times_mul_ovalid_32;
end
if((_dataflow_plus_ready_36 || !_dataflow_plus_valid_36) && (_dataflow_plus_ready_8 && _dataflow_plus_ready_22) && (_dataflow_plus_valid_8 && _dataflow_plus_valid_22)) begin
_dataflow_plus_data_36 <= _dataflow_plus_data_8 + _dataflow_plus_data_22;
end
if(_dataflow_plus_valid_36 && _dataflow_plus_ready_36) begin
_dataflow_plus_valid_36 <= 0;
end
if((_dataflow_plus_ready_36 || !_dataflow_plus_valid_36) && (_dataflow_plus_ready_8 && _dataflow_plus_ready_22)) begin
_dataflow_plus_valid_36 <= _dataflow_plus_valid_8 && _dataflow_plus_valid_22;
end
if((_dataflow_plus_ready_37 || !_dataflow_plus_valid_37) && (_dataflow_plus_ready_9 && _dataflow_plus_ready_23) && (_dataflow_plus_valid_9 && _dataflow_plus_valid_23)) begin
_dataflow_plus_data_37 <= _dataflow_plus_data_9 + _dataflow_plus_data_23;
end
if(_dataflow_plus_valid_37 && _dataflow_plus_ready_37) begin
_dataflow_plus_valid_37 <= 0;
end
if((_dataflow_plus_ready_37 || !_dataflow_plus_valid_37) && (_dataflow_plus_ready_9 && _dataflow_plus_ready_23)) begin
_dataflow_plus_valid_37 <= _dataflow_plus_valid_9 && _dataflow_plus_valid_23;
end
if((_dataflow_minus_ready_38 || !_dataflow_minus_valid_38) && (_dataflow_plus_ready_8 && _dataflow_plus_ready_22) && (_dataflow_plus_valid_8 && _dataflow_plus_valid_22)) begin
_dataflow_minus_data_38 <= _dataflow_plus_data_8 - _dataflow_plus_data_22;
end
if(_dataflow_minus_valid_38 && _dataflow_minus_ready_38) begin
_dataflow_minus_valid_38 <= 0;
end
if((_dataflow_minus_ready_38 || !_dataflow_minus_valid_38) && (_dataflow_plus_ready_8 && _dataflow_plus_ready_22)) begin
_dataflow_minus_valid_38 <= _dataflow_plus_valid_8 && _dataflow_plus_valid_22;
end
if((_dataflow_minus_ready_39 || !_dataflow_minus_valid_39) && (_dataflow_plus_ready_9 && _dataflow_plus_ready_23) && (_dataflow_plus_valid_9 && _dataflow_plus_valid_23)) begin
_dataflow_minus_data_39 <= _dataflow_plus_data_9 - _dataflow_plus_data_23;
end
if(_dataflow_minus_valid_39 && _dataflow_minus_ready_39) begin
_dataflow_minus_valid_39 <= 0;
end
if((_dataflow_minus_ready_39 || !_dataflow_minus_valid_39) && (_dataflow_plus_ready_9 && _dataflow_plus_ready_23)) begin
_dataflow_minus_valid_39 <= _dataflow_plus_valid_9 && _dataflow_plus_valid_23;
end
if(_dataflow_times_ready_40 || !_dataflow_times_valid_40) begin
_dataflow_times_mul_odata_reg_40 <= _dataflow_times_mul_odata_40;
end
if(_dataflow_times_ready_40 || !_dataflow_times_valid_40) begin
_dataflow_times_mul_valid_reg_40 <= _dataflow_times_mul_ovalid_40;
end
if(_dataflow_times_ready_42 || !_dataflow_times_valid_42) begin
_dataflow_times_mul_odata_reg_42 <= _dataflow_times_mul_odata_42;
end
if(_dataflow_times_ready_42 || !_dataflow_times_valid_42) begin
_dataflow_times_mul_valid_reg_42 <= _dataflow_times_mul_ovalid_42;
end
if(_dataflow_times_ready_44 || !_dataflow_times_valid_44) begin
_dataflow_times_mul_odata_reg_44 <= _dataflow_times_mul_odata_44;
end
if(_dataflow_times_ready_44 || !_dataflow_times_valid_44) begin
_dataflow_times_mul_valid_reg_44 <= _dataflow_times_mul_ovalid_44;
end
if(_dataflow_times_ready_46 || !_dataflow_times_valid_46) begin
_dataflow_times_mul_odata_reg_46 <= _dataflow_times_mul_odata_46;
end
if(_dataflow_times_ready_46 || !_dataflow_times_valid_46) begin
_dataflow_times_mul_valid_reg_46 <= _dataflow_times_mul_ovalid_46;
end
if((_dataflow__delay_ready_64 || !_dataflow__delay_valid_64) && _dataflow_plus_ready_36 && _dataflow_plus_valid_36) begin
_dataflow__delay_data_64 <= _dataflow_plus_data_36;
end
if(_dataflow__delay_valid_64 && _dataflow__delay_ready_64) begin
_dataflow__delay_valid_64 <= 0;
end
if((_dataflow__delay_ready_64 || !_dataflow__delay_valid_64) && _dataflow_plus_ready_36) begin
_dataflow__delay_valid_64 <= _dataflow_plus_valid_36;
end
if((_dataflow__delay_ready_80 || !_dataflow__delay_valid_80) && _dataflow_plus_ready_37 && _dataflow_plus_valid_37) begin
_dataflow__delay_data_80 <= _dataflow_plus_data_37;
end
if(_dataflow__delay_valid_80 && _dataflow__delay_ready_80) begin
_dataflow__delay_valid_80 <= 0;
end
if((_dataflow__delay_ready_80 || !_dataflow__delay_valid_80) && _dataflow_plus_ready_37) begin
_dataflow__delay_valid_80 <= _dataflow_plus_valid_37;
end
if((_dataflow__delay_ready_65 || !_dataflow__delay_valid_65) && _dataflow__delay_ready_64 && _dataflow__delay_valid_64) begin
_dataflow__delay_data_65 <= _dataflow__delay_data_64;
end
if(_dataflow__delay_valid_65 && _dataflow__delay_ready_65) begin
_dataflow__delay_valid_65 <= 0;
end
if((_dataflow__delay_ready_65 || !_dataflow__delay_valid_65) && _dataflow__delay_ready_64) begin
_dataflow__delay_valid_65 <= _dataflow__delay_valid_64;
end
if((_dataflow__delay_ready_81 || !_dataflow__delay_valid_81) && _dataflow__delay_ready_80 && _dataflow__delay_valid_80) begin
_dataflow__delay_data_81 <= _dataflow__delay_data_80;
end
if(_dataflow__delay_valid_81 && _dataflow__delay_ready_81) begin
_dataflow__delay_valid_81 <= 0;
end
if((_dataflow__delay_ready_81 || !_dataflow__delay_valid_81) && _dataflow__delay_ready_80) begin
_dataflow__delay_valid_81 <= _dataflow__delay_valid_80;
end
if((_dataflow__delay_ready_66 || !_dataflow__delay_valid_66) && _dataflow__delay_ready_65 && _dataflow__delay_valid_65) begin
_dataflow__delay_data_66 <= _dataflow__delay_data_65;
end
if(_dataflow__delay_valid_66 && _dataflow__delay_ready_66) begin
_dataflow__delay_valid_66 <= 0;
end
if((_dataflow__delay_ready_66 || !_dataflow__delay_valid_66) && _dataflow__delay_ready_65) begin
_dataflow__delay_valid_66 <= _dataflow__delay_valid_65;
end
if((_dataflow__delay_ready_82 || !_dataflow__delay_valid_82) && _dataflow__delay_ready_81 && _dataflow__delay_valid_81) begin
_dataflow__delay_data_82 <= _dataflow__delay_data_81;
end
if(_dataflow__delay_valid_82 && _dataflow__delay_ready_82) begin
_dataflow__delay_valid_82 <= 0;
end
if((_dataflow__delay_ready_82 || !_dataflow__delay_valid_82) && _dataflow__delay_ready_81) begin
_dataflow__delay_valid_82 <= _dataflow__delay_valid_81;
end
if((_dataflow__delay_ready_67 || !_dataflow__delay_valid_67) && _dataflow__delay_ready_66 && _dataflow__delay_valid_66) begin
_dataflow__delay_data_67 <= _dataflow__delay_data_66;
end
if(_dataflow__delay_valid_67 && _dataflow__delay_ready_67) begin
_dataflow__delay_valid_67 <= 0;
end
if((_dataflow__delay_ready_67 || !_dataflow__delay_valid_67) && _dataflow__delay_ready_66) begin
_dataflow__delay_valid_67 <= _dataflow__delay_valid_66;
end
if((_dataflow__delay_ready_83 || !_dataflow__delay_valid_83) && _dataflow__delay_ready_82 && _dataflow__delay_valid_82) begin
_dataflow__delay_data_83 <= _dataflow__delay_data_82;
end
if(_dataflow__delay_valid_83 && _dataflow__delay_ready_83) begin
_dataflow__delay_valid_83 <= 0;
end
if((_dataflow__delay_ready_83 || !_dataflow__delay_valid_83) && _dataflow__delay_ready_82) begin
_dataflow__delay_valid_83 <= _dataflow__delay_valid_82;
end
if((_dataflow__delay_ready_68 || !_dataflow__delay_valid_68) && _dataflow__delay_ready_67 && _dataflow__delay_valid_67) begin
_dataflow__delay_data_68 <= _dataflow__delay_data_67;
end
if(_dataflow__delay_valid_68 && _dataflow__delay_ready_68) begin
_dataflow__delay_valid_68 <= 0;
end
if((_dataflow__delay_ready_68 || !_dataflow__delay_valid_68) && _dataflow__delay_ready_67) begin
_dataflow__delay_valid_68 <= _dataflow__delay_valid_67;
end
if((_dataflow__delay_ready_84 || !_dataflow__delay_valid_84) && _dataflow__delay_ready_83 && _dataflow__delay_valid_83) begin
_dataflow__delay_data_84 <= _dataflow__delay_data_83;
end
if(_dataflow__delay_valid_84 && _dataflow__delay_ready_84) begin
_dataflow__delay_valid_84 <= 0;
end
if((_dataflow__delay_ready_84 || !_dataflow__delay_valid_84) && _dataflow__delay_ready_83) begin
_dataflow__delay_valid_84 <= _dataflow__delay_valid_83;
end
if((_dataflow__delay_ready_69 || !_dataflow__delay_valid_69) && _dataflow__delay_ready_68 && _dataflow__delay_valid_68) begin
_dataflow__delay_data_69 <= _dataflow__delay_data_68;
end
if(_dataflow__delay_valid_69 && _dataflow__delay_ready_69) begin
_dataflow__delay_valid_69 <= 0;
end
if((_dataflow__delay_ready_69 || !_dataflow__delay_valid_69) && _dataflow__delay_ready_68) begin
_dataflow__delay_valid_69 <= _dataflow__delay_valid_68;
end
if((_dataflow__delay_ready_85 || !_dataflow__delay_valid_85) && _dataflow__delay_ready_84 && _dataflow__delay_valid_84) begin
_dataflow__delay_data_85 <= _dataflow__delay_data_84;
end
if(_dataflow__delay_valid_85 && _dataflow__delay_ready_85) begin
_dataflow__delay_valid_85 <= 0;
end
if((_dataflow__delay_ready_85 || !_dataflow__delay_valid_85) && _dataflow__delay_ready_84) begin
_dataflow__delay_valid_85 <= _dataflow__delay_valid_84;
end
if((_dataflow_minus_ready_20 || !_dataflow_minus_valid_20) && (_dataflow_times_ready_12 && _dataflow_times_ready_14) && (_dataflow_times_valid_12 && _dataflow_times_valid_14)) begin
_dataflow_minus_data_20 <= _dataflow_times_data_12 - _dataflow_times_data_14;
end
if(_dataflow_minus_valid_20 && _dataflow_minus_ready_20) begin
_dataflow_minus_valid_20 <= 0;
end
if((_dataflow_minus_ready_20 || !_dataflow_minus_valid_20) && (_dataflow_times_ready_12 && _dataflow_times_ready_14)) begin
_dataflow_minus_valid_20 <= _dataflow_times_valid_12 && _dataflow_times_valid_14;
end
if((_dataflow_plus_ready_21 || !_dataflow_plus_valid_21) && (_dataflow_times_ready_16 && _dataflow_times_ready_18) && (_dataflow_times_valid_16 && _dataflow_times_valid_18)) begin
_dataflow_plus_data_21 <= _dataflow_times_data_16 + _dataflow_times_data_18;
end
if(_dataflow_plus_valid_21 && _dataflow_plus_ready_21) begin
_dataflow_plus_valid_21 <= 0;
end
if((_dataflow_plus_ready_21 || !_dataflow_plus_valid_21) && (_dataflow_times_ready_16 && _dataflow_times_ready_18)) begin
_dataflow_plus_valid_21 <= _dataflow_times_valid_16 && _dataflow_times_valid_18;
end
if((_dataflow_minus_ready_34 || !_dataflow_minus_valid_34) && (_dataflow_times_ready_26 && _dataflow_times_ready_28) && (_dataflow_times_valid_26 && _dataflow_times_valid_28)) begin
_dataflow_minus_data_34 <= _dataflow_times_data_26 - _dataflow_times_data_28;
end
if(_dataflow_minus_valid_34 && _dataflow_minus_ready_34) begin
_dataflow_minus_valid_34 <= 0;
end
if((_dataflow_minus_ready_34 || !_dataflow_minus_valid_34) && (_dataflow_times_ready_26 && _dataflow_times_ready_28)) begin
_dataflow_minus_valid_34 <= _dataflow_times_valid_26 && _dataflow_times_valid_28;
end
if((_dataflow_plus_ready_35 || !_dataflow_plus_valid_35) && (_dataflow_times_ready_30 && _dataflow_times_ready_32) && (_dataflow_times_valid_30 && _dataflow_times_valid_32)) begin
_dataflow_plus_data_35 <= _dataflow_times_data_30 + _dataflow_times_data_32;
end
if(_dataflow_plus_valid_35 && _dataflow_plus_ready_35) begin
_dataflow_plus_valid_35 <= 0;
end
if((_dataflow_plus_ready_35 || !_dataflow_plus_valid_35) && (_dataflow_times_ready_30 && _dataflow_times_ready_32)) begin
_dataflow_plus_valid_35 <= _dataflow_times_valid_30 && _dataflow_times_valid_32;
end
if((_dataflow__delay_ready_70 || !_dataflow__delay_valid_70) && _dataflow__delay_ready_69 && _dataflow__delay_valid_69) begin
_dataflow__delay_data_70 <= _dataflow__delay_data_69;
end
if(_dataflow__delay_valid_70 && _dataflow__delay_ready_70) begin
_dataflow__delay_valid_70 <= 0;
end
if((_dataflow__delay_ready_70 || !_dataflow__delay_valid_70) && _dataflow__delay_ready_69) begin
_dataflow__delay_valid_70 <= _dataflow__delay_valid_69;
end
if((_dataflow__delay_ready_86 || !_dataflow__delay_valid_86) && _dataflow__delay_ready_85 && _dataflow__delay_valid_85) begin
_dataflow__delay_data_86 <= _dataflow__delay_data_85;
end
if(_dataflow__delay_valid_86 && _dataflow__delay_ready_86) begin
_dataflow__delay_valid_86 <= 0;
end
if((_dataflow__delay_ready_86 || !_dataflow__delay_valid_86) && _dataflow__delay_ready_85) begin
_dataflow__delay_valid_86 <= _dataflow__delay_valid_85;
end
if((_dataflow_minus_ready_48 || !_dataflow_minus_valid_48) && (_dataflow_times_ready_40 && _dataflow_times_ready_42) && (_dataflow_times_valid_40 && _dataflow_times_valid_42)) begin
_dataflow_minus_data_48 <= _dataflow_times_data_40 - _dataflow_times_data_42;
end
if(_dataflow_minus_valid_48 && _dataflow_minus_ready_48) begin
_dataflow_minus_valid_48 <= 0;
end
if((_dataflow_minus_ready_48 || !_dataflow_minus_valid_48) && (_dataflow_times_ready_40 && _dataflow_times_ready_42)) begin
_dataflow_minus_valid_48 <= _dataflow_times_valid_40 && _dataflow_times_valid_42;
end
if((_dataflow_plus_ready_49 || !_dataflow_plus_valid_49) && (_dataflow_times_ready_44 && _dataflow_times_ready_46) && (_dataflow_times_valid_44 && _dataflow_times_valid_46)) begin
_dataflow_plus_data_49 <= _dataflow_times_data_44 + _dataflow_times_data_46;
end
if(_dataflow_plus_valid_49 && _dataflow_plus_ready_49) begin
_dataflow_plus_valid_49 <= 0;
end
if((_dataflow_plus_ready_49 || !_dataflow_plus_valid_49) && (_dataflow_times_ready_44 && _dataflow_times_ready_46)) begin
_dataflow_plus_valid_49 <= _dataflow_times_valid_44 && _dataflow_times_valid_46;
end
if((_dataflow_plus_ready_50 || !_dataflow_plus_valid_50) && (_dataflow_minus_ready_20 && _dataflow_minus_ready_34) && (_dataflow_minus_valid_20 && _dataflow_minus_valid_34)) begin
_dataflow_plus_data_50 <= _dataflow_minus_data_20 + _dataflow_minus_data_34;
end
if(_dataflow_plus_valid_50 && _dataflow_plus_ready_50) begin
_dataflow_plus_valid_50 <= 0;
end
if((_dataflow_plus_ready_50 || !_dataflow_plus_valid_50) && (_dataflow_minus_ready_20 && _dataflow_minus_ready_34)) begin
_dataflow_plus_valid_50 <= _dataflow_minus_valid_20 && _dataflow_minus_valid_34;
end
if((_dataflow_plus_ready_51 || !_dataflow_plus_valid_51) && (_dataflow_plus_ready_21 && _dataflow_plus_ready_35) && (_dataflow_plus_valid_21 && _dataflow_plus_valid_35)) begin
_dataflow_plus_data_51 <= _dataflow_plus_data_21 + _dataflow_plus_data_35;
end
if(_dataflow_plus_valid_51 && _dataflow_plus_ready_51) begin
_dataflow_plus_valid_51 <= 0;
end
if((_dataflow_plus_ready_51 || !_dataflow_plus_valid_51) && (_dataflow_plus_ready_21 && _dataflow_plus_ready_35)) begin
_dataflow_plus_valid_51 <= _dataflow_plus_valid_21 && _dataflow_plus_valid_35;
end
if((_dataflow_minus_ready_52 || !_dataflow_minus_valid_52) && (_dataflow_minus_ready_20 && _dataflow_minus_ready_34) && (_dataflow_minus_valid_20 && _dataflow_minus_valid_34)) begin
_dataflow_minus_data_52 <= _dataflow_minus_data_20 - _dataflow_minus_data_34;
end
if(_dataflow_minus_valid_52 && _dataflow_minus_ready_52) begin
_dataflow_minus_valid_52 <= 0;
end
if((_dataflow_minus_ready_52 || !_dataflow_minus_valid_52) && (_dataflow_minus_ready_20 && _dataflow_minus_ready_34)) begin
_dataflow_minus_valid_52 <= _dataflow_minus_valid_20 && _dataflow_minus_valid_34;
end
if((_dataflow_minus_ready_53 || !_dataflow_minus_valid_53) && (_dataflow_plus_ready_21 && _dataflow_plus_ready_35) && (_dataflow_plus_valid_21 && _dataflow_plus_valid_35)) begin
_dataflow_minus_data_53 <= _dataflow_plus_data_21 - _dataflow_plus_data_35;
end
if(_dataflow_minus_valid_53 && _dataflow_minus_ready_53) begin
_dataflow_minus_valid_53 <= 0;
end
if((_dataflow_minus_ready_53 || !_dataflow_minus_valid_53) && (_dataflow_plus_ready_21 && _dataflow_plus_ready_35)) begin
_dataflow_minus_valid_53 <= _dataflow_plus_valid_21 && _dataflow_plus_valid_35;
end
if((_dataflow__delay_ready_71 || !_dataflow__delay_valid_71) && _dataflow__delay_ready_70 && _dataflow__delay_valid_70) begin
_dataflow__delay_data_71 <= _dataflow__delay_data_70;
end
if(_dataflow__delay_valid_71 && _dataflow__delay_ready_71) begin
_dataflow__delay_valid_71 <= 0;
end
if((_dataflow__delay_ready_71 || !_dataflow__delay_valid_71) && _dataflow__delay_ready_70) begin
_dataflow__delay_valid_71 <= _dataflow__delay_valid_70;
end
if((_dataflow__delay_ready_87 || !_dataflow__delay_valid_87) && _dataflow__delay_ready_86 && _dataflow__delay_valid_86) begin
_dataflow__delay_data_87 <= _dataflow__delay_data_86;
end
if(_dataflow__delay_valid_87 && _dataflow__delay_ready_87) begin
_dataflow__delay_valid_87 <= 0;
end
if((_dataflow__delay_ready_87 || !_dataflow__delay_valid_87) && _dataflow__delay_ready_86) begin
_dataflow__delay_valid_87 <= _dataflow__delay_valid_86;
end
if(_dataflow_times_ready_54 || !_dataflow_times_valid_54) begin
_dataflow_times_mul_odata_reg_54 <= _dataflow_times_mul_odata_54;
end
if(_dataflow_times_ready_54 || !_dataflow_times_valid_54) begin
_dataflow_times_mul_valid_reg_54 <= _dataflow_times_mul_ovalid_54;
end
if(_dataflow_times_ready_56 || !_dataflow_times_valid_56) begin
_dataflow_times_mul_odata_reg_56 <= _dataflow_times_mul_odata_56;
end
if(_dataflow_times_ready_56 || !_dataflow_times_valid_56) begin
_dataflow_times_mul_valid_reg_56 <= _dataflow_times_mul_ovalid_56;
end
if(_dataflow_times_ready_58 || !_dataflow_times_valid_58) begin
_dataflow_times_mul_odata_reg_58 <= _dataflow_times_mul_odata_58;
end
if(_dataflow_times_ready_58 || !_dataflow_times_valid_58) begin
_dataflow_times_mul_valid_reg_58 <= _dataflow_times_mul_ovalid_58;
end
if(_dataflow_times_ready_60 || !_dataflow_times_valid_60) begin
_dataflow_times_mul_odata_reg_60 <= _dataflow_times_mul_odata_60;
end
if(_dataflow_times_ready_60 || !_dataflow_times_valid_60) begin
_dataflow_times_mul_valid_reg_60 <= _dataflow_times_mul_ovalid_60;
end
if((_dataflow__delay_ready_72 || !_dataflow__delay_valid_72) && _dataflow__delay_ready_71 && _dataflow__delay_valid_71) begin
_dataflow__delay_data_72 <= _dataflow__delay_data_71;
end
if(_dataflow__delay_valid_72 && _dataflow__delay_ready_72) begin
_dataflow__delay_valid_72 <= 0;
end
if((_dataflow__delay_ready_72 || !_dataflow__delay_valid_72) && _dataflow__delay_ready_71) begin
_dataflow__delay_valid_72 <= _dataflow__delay_valid_71;
end
if((_dataflow__delay_ready_88 || !_dataflow__delay_valid_88) && _dataflow__delay_ready_87 && _dataflow__delay_valid_87) begin
_dataflow__delay_data_88 <= _dataflow__delay_data_87;
end
if(_dataflow__delay_valid_88 && _dataflow__delay_ready_88) begin
_dataflow__delay_valid_88 <= 0;
end
if((_dataflow__delay_ready_88 || !_dataflow__delay_valid_88) && _dataflow__delay_ready_87) begin
_dataflow__delay_valid_88 <= _dataflow__delay_valid_87;
end
if((_dataflow__delay_ready_96 || !_dataflow__delay_valid_96) && _dataflow_minus_ready_48 && _dataflow_minus_valid_48) begin
_dataflow__delay_data_96 <= _dataflow_minus_data_48;
end
if(_dataflow__delay_valid_96 && _dataflow__delay_ready_96) begin
_dataflow__delay_valid_96 <= 0;
end
if((_dataflow__delay_ready_96 || !_dataflow__delay_valid_96) && _dataflow_minus_ready_48) begin
_dataflow__delay_valid_96 <= _dataflow_minus_valid_48;
end
if((_dataflow__delay_ready_104 || !_dataflow__delay_valid_104) && _dataflow_plus_ready_49 && _dataflow_plus_valid_49) begin
_dataflow__delay_data_104 <= _dataflow_plus_data_49;
end
if(_dataflow__delay_valid_104 && _dataflow__delay_ready_104) begin
_dataflow__delay_valid_104 <= 0;
end
if((_dataflow__delay_ready_104 || !_dataflow__delay_valid_104) && _dataflow_plus_ready_49) begin
_dataflow__delay_valid_104 <= _dataflow_plus_valid_49;
end
if((_dataflow__delay_ready_112 || !_dataflow__delay_valid_112) && _dataflow_plus_ready_50 && _dataflow_plus_valid_50) begin
_dataflow__delay_data_112 <= _dataflow_plus_data_50;
end
if(_dataflow__delay_valid_112 && _dataflow__delay_ready_112) begin
_dataflow__delay_valid_112 <= 0;
end
if((_dataflow__delay_ready_112 || !_dataflow__delay_valid_112) && _dataflow_plus_ready_50) begin
_dataflow__delay_valid_112 <= _dataflow_plus_valid_50;
end
if((_dataflow__delay_ready_120 || !_dataflow__delay_valid_120) && _dataflow_plus_ready_51 && _dataflow_plus_valid_51) begin
_dataflow__delay_data_120 <= _dataflow_plus_data_51;
end
if(_dataflow__delay_valid_120 && _dataflow__delay_ready_120) begin
_dataflow__delay_valid_120 <= 0;
end
if((_dataflow__delay_ready_120 || !_dataflow__delay_valid_120) && _dataflow_plus_ready_51) begin
_dataflow__delay_valid_120 <= _dataflow_plus_valid_51;
end
if((_dataflow__delay_ready_73 || !_dataflow__delay_valid_73) && _dataflow__delay_ready_72 && _dataflow__delay_valid_72) begin
_dataflow__delay_data_73 <= _dataflow__delay_data_72;
end
if(_dataflow__delay_valid_73 && _dataflow__delay_ready_73) begin
_dataflow__delay_valid_73 <= 0;
end
if((_dataflow__delay_ready_73 || !_dataflow__delay_valid_73) && _dataflow__delay_ready_72) begin
_dataflow__delay_valid_73 <= _dataflow__delay_valid_72;
end
if((_dataflow__delay_ready_89 || !_dataflow__delay_valid_89) && _dataflow__delay_ready_88 && _dataflow__delay_valid_88) begin
_dataflow__delay_data_89 <= _dataflow__delay_data_88;
end
if(_dataflow__delay_valid_89 && _dataflow__delay_ready_89) begin
_dataflow__delay_valid_89 <= 0;
end
if((_dataflow__delay_ready_89 || !_dataflow__delay_valid_89) && _dataflow__delay_ready_88) begin
_dataflow__delay_valid_89 <= _dataflow__delay_valid_88;
end
if((_dataflow__delay_ready_97 || !_dataflow__delay_valid_97) && _dataflow__delay_ready_96 && _dataflow__delay_valid_96) begin
_dataflow__delay_data_97 <= _dataflow__delay_data_96;
end
if(_dataflow__delay_valid_97 && _dataflow__delay_ready_97) begin
_dataflow__delay_valid_97 <= 0;
end
if((_dataflow__delay_ready_97 || !_dataflow__delay_valid_97) && _dataflow__delay_ready_96) begin
_dataflow__delay_valid_97 <= _dataflow__delay_valid_96;
end
if((_dataflow__delay_ready_105 || !_dataflow__delay_valid_105) && _dataflow__delay_ready_104 && _dataflow__delay_valid_104) begin
_dataflow__delay_data_105 <= _dataflow__delay_data_104;
end
if(_dataflow__delay_valid_105 && _dataflow__delay_ready_105) begin
_dataflow__delay_valid_105 <= 0;
end
if((_dataflow__delay_ready_105 || !_dataflow__delay_valid_105) && _dataflow__delay_ready_104) begin
_dataflow__delay_valid_105 <= _dataflow__delay_valid_104;
end
if((_dataflow__delay_ready_113 || !_dataflow__delay_valid_113) && _dataflow__delay_ready_112 && _dataflow__delay_valid_112) begin
_dataflow__delay_data_113 <= _dataflow__delay_data_112;
end
if(_dataflow__delay_valid_113 && _dataflow__delay_ready_113) begin
_dataflow__delay_valid_113 <= 0;
end
if((_dataflow__delay_ready_113 || !_dataflow__delay_valid_113) && _dataflow__delay_ready_112) begin
_dataflow__delay_valid_113 <= _dataflow__delay_valid_112;
end
if((_dataflow__delay_ready_121 || !_dataflow__delay_valid_121) && _dataflow__delay_ready_120 && _dataflow__delay_valid_120) begin
_dataflow__delay_data_121 <= _dataflow__delay_data_120;
end
if(_dataflow__delay_valid_121 && _dataflow__delay_ready_121) begin
_dataflow__delay_valid_121 <= 0;
end
if((_dataflow__delay_ready_121 || !_dataflow__delay_valid_121) && _dataflow__delay_ready_120) begin
_dataflow__delay_valid_121 <= _dataflow__delay_valid_120;
end
if((_dataflow__delay_ready_74 || !_dataflow__delay_valid_74) && _dataflow__delay_ready_73 && _dataflow__delay_valid_73) begin
_dataflow__delay_data_74 <= _dataflow__delay_data_73;
end
if(_dataflow__delay_valid_74 && _dataflow__delay_ready_74) begin
_dataflow__delay_valid_74 <= 0;
end
if((_dataflow__delay_ready_74 || !_dataflow__delay_valid_74) && _dataflow__delay_ready_73) begin
_dataflow__delay_valid_74 <= _dataflow__delay_valid_73;
end
if((_dataflow__delay_ready_90 || !_dataflow__delay_valid_90) && _dataflow__delay_ready_89 && _dataflow__delay_valid_89) begin
_dataflow__delay_data_90 <= _dataflow__delay_data_89;
end
if(_dataflow__delay_valid_90 && _dataflow__delay_ready_90) begin
_dataflow__delay_valid_90 <= 0;
end
if((_dataflow__delay_ready_90 || !_dataflow__delay_valid_90) && _dataflow__delay_ready_89) begin
_dataflow__delay_valid_90 <= _dataflow__delay_valid_89;
end
if((_dataflow__delay_ready_98 || !_dataflow__delay_valid_98) && _dataflow__delay_ready_97 && _dataflow__delay_valid_97) begin
_dataflow__delay_data_98 <= _dataflow__delay_data_97;
end
if(_dataflow__delay_valid_98 && _dataflow__delay_ready_98) begin
_dataflow__delay_valid_98 <= 0;
end
if((_dataflow__delay_ready_98 || !_dataflow__delay_valid_98) && _dataflow__delay_ready_97) begin
_dataflow__delay_valid_98 <= _dataflow__delay_valid_97;
end
if((_dataflow__delay_ready_106 || !_dataflow__delay_valid_106) && _dataflow__delay_ready_105 && _dataflow__delay_valid_105) begin
_dataflow__delay_data_106 <= _dataflow__delay_data_105;
end
if(_dataflow__delay_valid_106 && _dataflow__delay_ready_106) begin
_dataflow__delay_valid_106 <= 0;
end
if((_dataflow__delay_ready_106 || !_dataflow__delay_valid_106) && _dataflow__delay_ready_105) begin
_dataflow__delay_valid_106 <= _dataflow__delay_valid_105;
end
if((_dataflow__delay_ready_114 || !_dataflow__delay_valid_114) && _dataflow__delay_ready_113 && _dataflow__delay_valid_113) begin
_dataflow__delay_data_114 <= _dataflow__delay_data_113;
end
if(_dataflow__delay_valid_114 && _dataflow__delay_ready_114) begin
_dataflow__delay_valid_114 <= 0;
end
if((_dataflow__delay_ready_114 || !_dataflow__delay_valid_114) && _dataflow__delay_ready_113) begin
_dataflow__delay_valid_114 <= _dataflow__delay_valid_113;
end
if((_dataflow__delay_ready_122 || !_dataflow__delay_valid_122) && _dataflow__delay_ready_121 && _dataflow__delay_valid_121) begin
_dataflow__delay_data_122 <= _dataflow__delay_data_121;
end
if(_dataflow__delay_valid_122 && _dataflow__delay_ready_122) begin
_dataflow__delay_valid_122 <= 0;
end
if((_dataflow__delay_ready_122 || !_dataflow__delay_valid_122) && _dataflow__delay_ready_121) begin
_dataflow__delay_valid_122 <= _dataflow__delay_valid_121;
end
if((_dataflow__delay_ready_75 || !_dataflow__delay_valid_75) && _dataflow__delay_ready_74 && _dataflow__delay_valid_74) begin
_dataflow__delay_data_75 <= _dataflow__delay_data_74;
end
if(_dataflow__delay_valid_75 && _dataflow__delay_ready_75) begin
_dataflow__delay_valid_75 <= 0;
end
if((_dataflow__delay_ready_75 || !_dataflow__delay_valid_75) && _dataflow__delay_ready_74) begin
_dataflow__delay_valid_75 <= _dataflow__delay_valid_74;
end
if((_dataflow__delay_ready_91 || !_dataflow__delay_valid_91) && _dataflow__delay_ready_90 && _dataflow__delay_valid_90) begin
_dataflow__delay_data_91 <= _dataflow__delay_data_90;
end
if(_dataflow__delay_valid_91 && _dataflow__delay_ready_91) begin
_dataflow__delay_valid_91 <= 0;
end
if((_dataflow__delay_ready_91 || !_dataflow__delay_valid_91) && _dataflow__delay_ready_90) begin
_dataflow__delay_valid_91 <= _dataflow__delay_valid_90;
end
if((_dataflow__delay_ready_99 || !_dataflow__delay_valid_99) && _dataflow__delay_ready_98 && _dataflow__delay_valid_98) begin
_dataflow__delay_data_99 <= _dataflow__delay_data_98;
end
if(_dataflow__delay_valid_99 && _dataflow__delay_ready_99) begin
_dataflow__delay_valid_99 <= 0;
end
if((_dataflow__delay_ready_99 || !_dataflow__delay_valid_99) && _dataflow__delay_ready_98) begin
_dataflow__delay_valid_99 <= _dataflow__delay_valid_98;
end
if((_dataflow__delay_ready_107 || !_dataflow__delay_valid_107) && _dataflow__delay_ready_106 && _dataflow__delay_valid_106) begin
_dataflow__delay_data_107 <= _dataflow__delay_data_106;
end
if(_dataflow__delay_valid_107 && _dataflow__delay_ready_107) begin
_dataflow__delay_valid_107 <= 0;
end
if((_dataflow__delay_ready_107 || !_dataflow__delay_valid_107) && _dataflow__delay_ready_106) begin
_dataflow__delay_valid_107 <= _dataflow__delay_valid_106;
end
if((_dataflow__delay_ready_115 || !_dataflow__delay_valid_115) && _dataflow__delay_ready_114 && _dataflow__delay_valid_114) begin
_dataflow__delay_data_115 <= _dataflow__delay_data_114;
end
if(_dataflow__delay_valid_115 && _dataflow__delay_ready_115) begin
_dataflow__delay_valid_115 <= 0;
end
if((_dataflow__delay_ready_115 || !_dataflow__delay_valid_115) && _dataflow__delay_ready_114) begin
_dataflow__delay_valid_115 <= _dataflow__delay_valid_114;
end
if((_dataflow__delay_ready_123 || !_dataflow__delay_valid_123) && _dataflow__delay_ready_122 && _dataflow__delay_valid_122) begin
_dataflow__delay_data_123 <= _dataflow__delay_data_122;
end
if(_dataflow__delay_valid_123 && _dataflow__delay_ready_123) begin
_dataflow__delay_valid_123 <= 0;
end
if((_dataflow__delay_ready_123 || !_dataflow__delay_valid_123) && _dataflow__delay_ready_122) begin
_dataflow__delay_valid_123 <= _dataflow__delay_valid_122;
end
if((_dataflow__delay_ready_76 || !_dataflow__delay_valid_76) && _dataflow__delay_ready_75 && _dataflow__delay_valid_75) begin
_dataflow__delay_data_76 <= _dataflow__delay_data_75;
end
if(_dataflow__delay_valid_76 && _dataflow__delay_ready_76) begin
_dataflow__delay_valid_76 <= 0;
end
if((_dataflow__delay_ready_76 || !_dataflow__delay_valid_76) && _dataflow__delay_ready_75) begin
_dataflow__delay_valid_76 <= _dataflow__delay_valid_75;
end
if((_dataflow__delay_ready_92 || !_dataflow__delay_valid_92) && _dataflow__delay_ready_91 && _dataflow__delay_valid_91) begin
_dataflow__delay_data_92 <= _dataflow__delay_data_91;
end
if(_dataflow__delay_valid_92 && _dataflow__delay_ready_92) begin
_dataflow__delay_valid_92 <= 0;
end
if((_dataflow__delay_ready_92 || !_dataflow__delay_valid_92) && _dataflow__delay_ready_91) begin
_dataflow__delay_valid_92 <= _dataflow__delay_valid_91;
end
if((_dataflow__delay_ready_100 || !_dataflow__delay_valid_100) && _dataflow__delay_ready_99 && _dataflow__delay_valid_99) begin
_dataflow__delay_data_100 <= _dataflow__delay_data_99;
end
if(_dataflow__delay_valid_100 && _dataflow__delay_ready_100) begin
_dataflow__delay_valid_100 <= 0;
end
if((_dataflow__delay_ready_100 || !_dataflow__delay_valid_100) && _dataflow__delay_ready_99) begin
_dataflow__delay_valid_100 <= _dataflow__delay_valid_99;
end
if((_dataflow__delay_ready_108 || !_dataflow__delay_valid_108) && _dataflow__delay_ready_107 && _dataflow__delay_valid_107) begin
_dataflow__delay_data_108 <= _dataflow__delay_data_107;
end
if(_dataflow__delay_valid_108 && _dataflow__delay_ready_108) begin
_dataflow__delay_valid_108 <= 0;
end
if((_dataflow__delay_ready_108 || !_dataflow__delay_valid_108) && _dataflow__delay_ready_107) begin
_dataflow__delay_valid_108 <= _dataflow__delay_valid_107;
end
if((_dataflow__delay_ready_116 || !_dataflow__delay_valid_116) && _dataflow__delay_ready_115 && _dataflow__delay_valid_115) begin
_dataflow__delay_data_116 <= _dataflow__delay_data_115;
end
if(_dataflow__delay_valid_116 && _dataflow__delay_ready_116) begin
_dataflow__delay_valid_116 <= 0;
end
if((_dataflow__delay_ready_116 || !_dataflow__delay_valid_116) && _dataflow__delay_ready_115) begin
_dataflow__delay_valid_116 <= _dataflow__delay_valid_115;
end
if((_dataflow__delay_ready_124 || !_dataflow__delay_valid_124) && _dataflow__delay_ready_123 && _dataflow__delay_valid_123) begin
_dataflow__delay_data_124 <= _dataflow__delay_data_123;
end
if(_dataflow__delay_valid_124 && _dataflow__delay_ready_124) begin
_dataflow__delay_valid_124 <= 0;
end
if((_dataflow__delay_ready_124 || !_dataflow__delay_valid_124) && _dataflow__delay_ready_123) begin
_dataflow__delay_valid_124 <= _dataflow__delay_valid_123;
end
if((_dataflow__delay_ready_77 || !_dataflow__delay_valid_77) && _dataflow__delay_ready_76 && _dataflow__delay_valid_76) begin
_dataflow__delay_data_77 <= _dataflow__delay_data_76;
end
if(_dataflow__delay_valid_77 && _dataflow__delay_ready_77) begin
_dataflow__delay_valid_77 <= 0;
end
if((_dataflow__delay_ready_77 || !_dataflow__delay_valid_77) && _dataflow__delay_ready_76) begin
_dataflow__delay_valid_77 <= _dataflow__delay_valid_76;
end
if((_dataflow__delay_ready_93 || !_dataflow__delay_valid_93) && _dataflow__delay_ready_92 && _dataflow__delay_valid_92) begin
_dataflow__delay_data_93 <= _dataflow__delay_data_92;
end
if(_dataflow__delay_valid_93 && _dataflow__delay_ready_93) begin
_dataflow__delay_valid_93 <= 0;
end
if((_dataflow__delay_ready_93 || !_dataflow__delay_valid_93) && _dataflow__delay_ready_92) begin
_dataflow__delay_valid_93 <= _dataflow__delay_valid_92;
end
if((_dataflow__delay_ready_101 || !_dataflow__delay_valid_101) && _dataflow__delay_ready_100 && _dataflow__delay_valid_100) begin
_dataflow__delay_data_101 <= _dataflow__delay_data_100;
end
if(_dataflow__delay_valid_101 && _dataflow__delay_ready_101) begin
_dataflow__delay_valid_101 <= 0;
end
if((_dataflow__delay_ready_101 || !_dataflow__delay_valid_101) && _dataflow__delay_ready_100) begin
_dataflow__delay_valid_101 <= _dataflow__delay_valid_100;
end
if((_dataflow__delay_ready_109 || !_dataflow__delay_valid_109) && _dataflow__delay_ready_108 && _dataflow__delay_valid_108) begin
_dataflow__delay_data_109 <= _dataflow__delay_data_108;
end
if(_dataflow__delay_valid_109 && _dataflow__delay_ready_109) begin
_dataflow__delay_valid_109 <= 0;
end
if((_dataflow__delay_ready_109 || !_dataflow__delay_valid_109) && _dataflow__delay_ready_108) begin
_dataflow__delay_valid_109 <= _dataflow__delay_valid_108;
end
if((_dataflow__delay_ready_117 || !_dataflow__delay_valid_117) && _dataflow__delay_ready_116 && _dataflow__delay_valid_116) begin
_dataflow__delay_data_117 <= _dataflow__delay_data_116;
end
if(_dataflow__delay_valid_117 && _dataflow__delay_ready_117) begin
_dataflow__delay_valid_117 <= 0;
end
if((_dataflow__delay_ready_117 || !_dataflow__delay_valid_117) && _dataflow__delay_ready_116) begin
_dataflow__delay_valid_117 <= _dataflow__delay_valid_116;
end
if((_dataflow__delay_ready_125 || !_dataflow__delay_valid_125) && _dataflow__delay_ready_124 && _dataflow__delay_valid_124) begin
_dataflow__delay_data_125 <= _dataflow__delay_data_124;
end
if(_dataflow__delay_valid_125 && _dataflow__delay_ready_125) begin
_dataflow__delay_valid_125 <= 0;
end
if((_dataflow__delay_ready_125 || !_dataflow__delay_valid_125) && _dataflow__delay_ready_124) begin
_dataflow__delay_valid_125 <= _dataflow__delay_valid_124;
end
if((_dataflow__delay_ready_78 || !_dataflow__delay_valid_78) && _dataflow__delay_ready_77 && _dataflow__delay_valid_77) begin
_dataflow__delay_data_78 <= _dataflow__delay_data_77;
end
if(_dataflow__delay_valid_78 && _dataflow__delay_ready_78) begin
_dataflow__delay_valid_78 <= 0;
end
if((_dataflow__delay_ready_78 || !_dataflow__delay_valid_78) && _dataflow__delay_ready_77) begin
_dataflow__delay_valid_78 <= _dataflow__delay_valid_77;
end
if((_dataflow__delay_ready_94 || !_dataflow__delay_valid_94) && _dataflow__delay_ready_93 && _dataflow__delay_valid_93) begin
_dataflow__delay_data_94 <= _dataflow__delay_data_93;
end
if(_dataflow__delay_valid_94 && _dataflow__delay_ready_94) begin
_dataflow__delay_valid_94 <= 0;
end
if((_dataflow__delay_ready_94 || !_dataflow__delay_valid_94) && _dataflow__delay_ready_93) begin
_dataflow__delay_valid_94 <= _dataflow__delay_valid_93;
end
if((_dataflow__delay_ready_102 || !_dataflow__delay_valid_102) && _dataflow__delay_ready_101 && _dataflow__delay_valid_101) begin
_dataflow__delay_data_102 <= _dataflow__delay_data_101;
end
if(_dataflow__delay_valid_102 && _dataflow__delay_ready_102) begin
_dataflow__delay_valid_102 <= 0;
end
if((_dataflow__delay_ready_102 || !_dataflow__delay_valid_102) && _dataflow__delay_ready_101) begin
_dataflow__delay_valid_102 <= _dataflow__delay_valid_101;
end
if((_dataflow__delay_ready_110 || !_dataflow__delay_valid_110) && _dataflow__delay_ready_109 && _dataflow__delay_valid_109) begin
_dataflow__delay_data_110 <= _dataflow__delay_data_109;
end
if(_dataflow__delay_valid_110 && _dataflow__delay_ready_110) begin
_dataflow__delay_valid_110 <= 0;
end
if((_dataflow__delay_ready_110 || !_dataflow__delay_valid_110) && _dataflow__delay_ready_109) begin
_dataflow__delay_valid_110 <= _dataflow__delay_valid_109;
end
if((_dataflow__delay_ready_118 || !_dataflow__delay_valid_118) && _dataflow__delay_ready_117 && _dataflow__delay_valid_117) begin
_dataflow__delay_data_118 <= _dataflow__delay_data_117;
end
if(_dataflow__delay_valid_118 && _dataflow__delay_ready_118) begin
_dataflow__delay_valid_118 <= 0;
end
if((_dataflow__delay_ready_118 || !_dataflow__delay_valid_118) && _dataflow__delay_ready_117) begin
_dataflow__delay_valid_118 <= _dataflow__delay_valid_117;
end
if((_dataflow__delay_ready_126 || !_dataflow__delay_valid_126) && _dataflow__delay_ready_125 && _dataflow__delay_valid_125) begin
_dataflow__delay_data_126 <= _dataflow__delay_data_125;
end
if(_dataflow__delay_valid_126 && _dataflow__delay_ready_126) begin
_dataflow__delay_valid_126 <= 0;
end
if((_dataflow__delay_ready_126 || !_dataflow__delay_valid_126) && _dataflow__delay_ready_125) begin
_dataflow__delay_valid_126 <= _dataflow__delay_valid_125;
end
if((_dataflow_minus_ready_62 || !_dataflow_minus_valid_62) && (_dataflow_times_ready_54 && _dataflow_times_ready_56) && (_dataflow_times_valid_54 && _dataflow_times_valid_56)) begin
_dataflow_minus_data_62 <= _dataflow_times_data_54 - _dataflow_times_data_56;
end
if(_dataflow_minus_valid_62 && _dataflow_minus_ready_62) begin
_dataflow_minus_valid_62 <= 0;
end
if((_dataflow_minus_ready_62 || !_dataflow_minus_valid_62) && (_dataflow_times_ready_54 && _dataflow_times_ready_56)) begin
_dataflow_minus_valid_62 <= _dataflow_times_valid_54 && _dataflow_times_valid_56;
end
if((_dataflow_plus_ready_63 || !_dataflow_plus_valid_63) && (_dataflow_times_ready_58 && _dataflow_times_ready_60) && (_dataflow_times_valid_58 && _dataflow_times_valid_60)) begin
_dataflow_plus_data_63 <= _dataflow_times_data_58 + _dataflow_times_data_60;
end
if(_dataflow_plus_valid_63 && _dataflow_plus_ready_63) begin
_dataflow_plus_valid_63 <= 0;
end
if((_dataflow_plus_ready_63 || !_dataflow_plus_valid_63) && (_dataflow_times_ready_58 && _dataflow_times_ready_60)) begin
_dataflow_plus_valid_63 <= _dataflow_times_valid_58 && _dataflow_times_valid_60;
end
if((_dataflow__delay_ready_79 || !_dataflow__delay_valid_79) && _dataflow__delay_ready_78 && _dataflow__delay_valid_78) begin
_dataflow__delay_data_79 <= _dataflow__delay_data_78;
end
if(_dataflow__delay_valid_79 && _dataflow__delay_ready_79) begin
_dataflow__delay_valid_79 <= 0;
end
if((_dataflow__delay_ready_79 || !_dataflow__delay_valid_79) && _dataflow__delay_ready_78) begin
_dataflow__delay_valid_79 <= _dataflow__delay_valid_78;
end
if((_dataflow__delay_ready_95 || !_dataflow__delay_valid_95) && _dataflow__delay_ready_94 && _dataflow__delay_valid_94) begin
_dataflow__delay_data_95 <= _dataflow__delay_data_94;
end
if(_dataflow__delay_valid_95 && _dataflow__delay_ready_95) begin
_dataflow__delay_valid_95 <= 0;
end
if((_dataflow__delay_ready_95 || !_dataflow__delay_valid_95) && _dataflow__delay_ready_94) begin
_dataflow__delay_valid_95 <= _dataflow__delay_valid_94;
end
if((_dataflow__delay_ready_103 || !_dataflow__delay_valid_103) && _dataflow__delay_ready_102 && _dataflow__delay_valid_102) begin
_dataflow__delay_data_103 <= _dataflow__delay_data_102;
end
if(_dataflow__delay_valid_103 && _dataflow__delay_ready_103) begin
_dataflow__delay_valid_103 <= 0;
end
if((_dataflow__delay_ready_103 || !_dataflow__delay_valid_103) && _dataflow__delay_ready_102) begin
_dataflow__delay_valid_103 <= _dataflow__delay_valid_102;
end
if((_dataflow__delay_ready_111 || !_dataflow__delay_valid_111) && _dataflow__delay_ready_110 && _dataflow__delay_valid_110) begin
_dataflow__delay_data_111 <= _dataflow__delay_data_110;
end
if(_dataflow__delay_valid_111 && _dataflow__delay_ready_111) begin
_dataflow__delay_valid_111 <= 0;
end
if((_dataflow__delay_ready_111 || !_dataflow__delay_valid_111) && _dataflow__delay_ready_110) begin
_dataflow__delay_valid_111 <= _dataflow__delay_valid_110;
end
if((_dataflow__delay_ready_119 || !_dataflow__delay_valid_119) && _dataflow__delay_ready_118 && _dataflow__delay_valid_118) begin
_dataflow__delay_data_119 <= _dataflow__delay_data_118;
end
if(_dataflow__delay_valid_119 && _dataflow__delay_ready_119) begin
_dataflow__delay_valid_119 <= 0;
end
if((_dataflow__delay_ready_119 || !_dataflow__delay_valid_119) && _dataflow__delay_ready_118) begin
_dataflow__delay_valid_119 <= _dataflow__delay_valid_118;
end
if((_dataflow__delay_ready_127 || !_dataflow__delay_valid_127) && _dataflow__delay_ready_126 && _dataflow__delay_valid_126) begin
_dataflow__delay_data_127 <= _dataflow__delay_data_126;
end
if(_dataflow__delay_valid_127 && _dataflow__delay_ready_127) begin
_dataflow__delay_valid_127 <= 0;
end
if((_dataflow__delay_ready_127 || !_dataflow__delay_valid_127) && _dataflow__delay_ready_126) begin
_dataflow__delay_valid_127 <= _dataflow__delay_valid_126;
end
end
end
endmodule
module multiplier_0
(
  input CLK,
  input RST,
  input update,
  input enable,
  output valid,
  input [16-1:0] a,
  input [2-1:0] b,
  output [18-1:0] c
);

  // 6-stage valid shift register: tracks 'enable' through the core's
  // latency (1 input-register stage + 5 product pipeline stages).
  reg [5:0] valid_pipe;
  assign valid = valid_pipe[5];

  always @(posedge CLK) begin
    if(RST) begin
      valid_pipe <= 6'b0;
    end else if(update) begin
      // Advance the pipeline only when the datapath accepts a new sample.
      valid_pipe <= {valid_pipe[4:0], enable};
    end
  end

  multiplier_core_0
  mult
  (
    .CLK(CLK),
    .update(update),
    .a(a),
    .b(b),
    .c(c)
  );

endmodule
module multiplier_core_0
(
  input CLK,
  input update,
  input [16-1:0] a,
  input [2-1:0] b,
  output [18-1:0] c
);

  // Stage 0: registered operands (multiply is performed on the
  // previously captured values).
  reg signed [16-1:0] a_reg;
  reg signed [2-1:0] b_reg;
  // Stages 1..5: product pipeline; total latency is 6 cycles.
  reg signed [18-1:0] pipe [0:4];
  wire signed [18-1:0] product;
  integer i;

  assign product = a_reg * b_reg;
  assign c = pipe[4];

  always @(posedge CLK) begin
    if(update) begin
      a_reg <= a;
      b_reg <= b;
      pipe[0] <= product;
      for(i = 1; i < 5; i = i + 1) begin
        pipe[i] <= pipe[i - 1];
      end
    end
  end

endmodule
module multiplier_1
(
  input CLK,
  input RST,
  input update,
  input enable,
  output valid,
  input [16-1:0] a,
  input [1-1:0] b,
  output [17-1:0] c
);

  // 6-stage valid shift register: tracks 'enable' through the core's
  // latency (1 input-register stage + 5 product pipeline stages).
  reg [5:0] valid_pipe;
  assign valid = valid_pipe[5];

  always @(posedge CLK) begin
    if(RST) begin
      valid_pipe <= 6'b0;
    end else if(update) begin
      // Advance the pipeline only when the datapath accepts a new sample.
      valid_pipe <= {valid_pipe[4:0], enable};
    end
  end

  multiplier_core_1
  mult
  (
    .CLK(CLK),
    .update(update),
    .a(a),
    .b(b),
    .c(c)
  );

endmodule
module multiplier_core_1
(
  input CLK,
  input update,
  input [16-1:0] a,
  input [1-1:0] b,
  output [17-1:0] c
);

  // Stage 0: registered operands (multiply is performed on the
  // previously captured values).
  // NOTE(review): b is a 1-bit *signed* operand, so it can only encode
  // 0 or -1 — this module is generated code; confirm intent upstream.
  reg signed [16-1:0] a_reg;
  reg signed [1-1:0] b_reg;
  // Stages 1..5: product pipeline; total latency is 6 cycles.
  reg signed [17-1:0] pipe [0:4];
  wire signed [17-1:0] product;
  integer i;

  assign product = a_reg * b_reg;
  assign c = pipe[4];

  always @(posedge CLK) begin
    if(update) begin
      a_reg <= a;
      b_reg <= b;
      pipe[0] <= product;
      for(i = 1; i < 5; i = i + 1) begin
        pipe[i] <= pipe[i - 1];
      end
    end
  end

endmodule
module multiplier_2
(
  input CLK,
  input RST,
  input update,
  input enable,
  output valid,
  input [16-1:0] a,
  input [1-1:0] b,
  output [17-1:0] c
);

  // 6-stage valid shift register: tracks 'enable' through the core's
  // latency (1 input-register stage + 5 product pipeline stages).
  reg [5:0] valid_pipe;
  assign valid = valid_pipe[5];

  always @(posedge CLK) begin
    if(RST) begin
      valid_pipe <= 6'b0;
    end else if(update) begin
      // Advance the pipeline only when the datapath accepts a new sample.
      valid_pipe <= {valid_pipe[4:0], enable};
    end
  end

  multiplier_core_2
  mult
  (
    .CLK(CLK),
    .update(update),
    .a(a),
    .b(b),
    .c(c)
  );

endmodule
module multiplier_core_2
(
  input CLK,
  input update,
  input [16-1:0] a,
  input [1-1:0] b,
  output [17-1:0] c
);

  // Stage 0: registered operands (multiply is performed on the
  // previously captured values).
  // NOTE(review): b is a 1-bit *signed* operand, so it can only encode
  // 0 or -1 — this module is generated code; confirm intent upstream.
  reg signed [16-1:0] a_reg;
  reg signed [1-1:0] b_reg;
  // Stages 1..5: product pipeline; total latency is 6 cycles.
  reg signed [17-1:0] pipe [0:4];
  wire signed [17-1:0] product;
  integer i;

  assign product = a_reg * b_reg;
  assign c = pipe[4];

  always @(posedge CLK) begin
    if(update) begin
      a_reg <= a;
      b_reg <= b;
      pipe[0] <= product;
      for(i = 1; i < 5; i = i + 1) begin
        pipe[i] <= pipe[i - 1];
      end
    end
  end

endmodule
module multiplier_3
(
input CLK,
input RST,
input update,
input enable,
output valid,
input [16-1:0] a,
input [2-1:0] b,
output [18-1:0] c
);
reg valid_reg0;
reg valid_reg1;
reg valid_reg2;
reg valid_reg3;
reg valid_reg4;
reg valid_reg5;
assign valid = valid_reg5;
always @(posedge CLK) begin
if(RST) begin
valid_reg0 <= 0;
valid_reg1 <= 0;
valid_reg2 <= 0;
valid_reg3 <= 0;
valid_reg4 <= 0;
valid_reg5 <= 0;
end else begin
if(update) begin
valid_reg0 <= enable;
valid_reg1 <= valid_reg0;
valid_reg2 <= valid_reg1;
valid_reg3 <= valid_reg2;
valid_reg4 <= valid_reg3;
valid_reg5 <= valid_reg4;
end
end
end
multiplier_core_3
mult
(
.CLK(CLK),
.update(update),
.a(a),
.b(b),
.c(c)
);
endmodule
module multiplier_core_3
(
input CLK,
input update,
input [16-1:0] a,
input [2-1:0] b,
output [18-1:0] c
);
reg signed [16-1:0] _a;
reg signed [2-1:0] _b;
wire signed [18-1:0] _mul;
reg signed [18-1:0] _pipe_mul0;
reg signed [18-1:0] _pipe_mul1;
reg signed [18-1:0] _pipe_mul2;
reg signed [18-1:0] _pipe_mul3;
reg signed [18-1:0] _pipe_mul4;
assign _mul = _a * _b;
assign c = _pipe_mul4;
always @(posedge CLK) begin
if(update) begin
_a <= a;
_b <= b;
_pipe_mul0 <= _mul;
_pipe_mul1 <= _pipe_mul0;
_pipe_mul2 <= _pipe_mul1;
_pipe_mul3 <= _pipe_mul2;
_pipe_mul4 <= _pipe_mul3;
end
end
endmodule
module multiplier_4
(
input CLK,
input RST,
input update,
input enable,
output valid,
input [16-1:0] a,
input [1-1:0] b,
output [17-1:0] c
);
reg valid_reg0;
reg valid_reg1;
reg valid_reg2;
reg valid_reg3;
reg valid_reg4;
reg valid_reg5;
assign valid = valid_reg5;
always @(posedge CLK) begin
if(RST) begin
valid_reg0 <= 0;
valid_reg1 <= 0;
valid_reg2 <= 0;
valid_reg3 <= 0;
valid_reg4 <= 0;
valid_reg5 <= 0;
end else begin
if(update) begin
valid_reg0 <= enable;
valid_reg1 <= valid_reg0;
valid_reg2 <= valid_reg1;
valid_reg3 <= valid_reg2;
valid_reg4 <= valid_reg3;
valid_reg5 <= valid_reg4;
end
end
end
multiplier_core_4
mult
(
.CLK(CLK),
.update(update),
.a(a),
.b(b),
.c(c)
);
endmodule
module multiplier_core_4
(
input CLK,
input update,
input [16-1:0] a,
input [1-1:0] b,
output [17-1:0] c
);
reg signed [16-1:0] _a;
reg signed [1-1:0] _b;
wire signed [17-1:0] _mul;
reg signed [17-1:0] _pipe_mul0;
reg signed [17-1:0] _pipe_mul1;
reg signed [17-1:0] _pipe_mul2;
reg signed [17-1:0] _pipe_mul3;
reg signed [17-1:0] _pipe_mul4;
assign _mul = _a * _b;
assign c = _pipe_mul4;
always @(posedge CLK) begin
if(update) begin
_a <= a;
_b <= b;
_pipe_mul0 <= _mul;
_pipe_mul1 <= _pipe_mul0;
_pipe_mul2 <= _pipe_mul1;
_pipe_mul3 <= _pipe_mul2;
_pipe_mul4 <= _pipe_mul3;
end
end
endmodule
module multiplier_5
(
input CLK,
input RST,
input update,
input enable,
output valid,
input [16-1:0] a,
input [2-1:0] b,
output [18-1:0] c
);
reg valid_reg0;
reg valid_reg1;
reg valid_reg2;
reg valid_reg3;
reg valid_reg4;
reg valid_reg5;
assign valid = valid_reg5;
always @(posedge CLK) begin
if(RST) begin
valid_reg0 <= 0;
valid_reg1 <= 0;
valid_reg2 <= 0;
valid_reg3 <= 0;
valid_reg4 <= 0;
valid_reg5 <= 0;
end else begin
if(update) begin
valid_reg0 <= enable;
valid_reg1 <= valid_reg0;
valid_reg2 <= valid_reg1;
valid_reg3 <= valid_reg2;
valid_reg4 <= valid_reg3;
valid_reg5 <= valid_reg4;
end
end
end
multiplier_core_5
mult
(
.CLK(CLK),
.update(update),
.a(a),
.b(b),
.c(c)
);
endmodule
module multiplier_core_5
(
input CLK,
input update,
input [16-1:0] a,
input [2-1:0] b,
output [18-1:0] c
);
reg signed [16-1:0] _a;
reg signed [2-1:0] _b;
wire signed [18-1:0] _mul;
reg signed [18-1:0] _pipe_mul0;
reg signed [18-1:0] _pipe_mul1;
reg signed [18-1:0] _pipe_mul2;
reg signed [18-1:0] _pipe_mul3;
reg signed [18-1:0] _pipe_mul4;
assign _mul = _a * _b;
assign c = _pipe_mul4;
always @(posedge CLK) begin
if(update) begin
_a <= a;
_b <= b;
_pipe_mul0 <= _mul;
_pipe_mul1 <= _pipe_mul0;
_pipe_mul2 <= _pipe_mul1;
_pipe_mul3 <= _pipe_mul2;
_pipe_mul4 <= _pipe_mul3;
end
end
endmodule
module multiplier_6
(
input CLK,
input RST,
input update,
input enable,
output valid,
input [16-1:0] a,
input [2-1:0] b,
output [18-1:0] c
);
reg valid_reg0;
reg valid_reg1;
reg valid_reg2;
reg valid_reg3;
reg valid_reg4;
reg valid_reg5;
assign valid = valid_reg5;
always @(posedge CLK) begin
if(RST) begin
valid_reg0 <= 0;
valid_reg1 <= 0;
valid_reg2 <= 0;
valid_reg3 <= 0;
valid_reg4 <= 0;
valid_reg5 <= 0;
end else begin
if(update) begin
valid_reg0 <= enable;
valid_reg1 <= valid_reg0;
valid_reg2 <= valid_reg1;
valid_reg3 <= valid_reg2;
valid_reg4 <= valid_reg3;
valid_reg5 <= valid_reg4;
end
end
end
multiplier_core_6
mult
(
.CLK(CLK),
.update(update),
.a(a),
.b(b),
.c(c)
);
endmodule
module multiplier_core_6
(
input CLK,
input update,
input [16-1:0] a,
input [2-1:0] b,
output [18-1:0] c
);
reg signed [16-1:0] _a;
reg signed [2-1:0] _b;
wire signed [18-1:0] _mul;
reg signed [18-1:0] _pipe_mul0;
reg signed [18-1:0] _pipe_mul1;
reg signed [18-1:0] _pipe_mul2;
reg signed [18-1:0] _pipe_mul3;
reg signed [18-1:0] _pipe_mul4;
assign _mul = _a * _b;
assign c = _pipe_mul4;
always @(posedge CLK) begin
if(update) begin
_a <= a;
_b <= b;
_pipe_mul0 <= _mul;
_pipe_mul1 <= _pipe_mul0;
_pipe_mul2 <= _pipe_mul1;
_pipe_mul3 <= _pipe_mul2;
_pipe_mul4 <= _pipe_mul3;
end
end
endmodule
module multiplier_7
(
input CLK,
input RST,
input update,
input enable,
output valid,
input [16-1:0] a,
input [1-1:0] b,
output [17-1:0] c
);
reg valid_reg0;
reg valid_reg1;
reg valid_reg2;
reg valid_reg3;
reg valid_reg4;
reg valid_reg5;
assign valid = valid_reg5;
always @(posedge CLK) begin
if(RST) begin
valid_reg0 <= 0;
valid_reg1 <= 0;
valid_reg2 <= 0;
valid_reg3 <= 0;
valid_reg4 <= 0;
valid_reg5 <= 0;
end else begin
if(update) begin
valid_reg0 <= enable;
valid_reg1 <= valid_reg0;
valid_reg2 <= valid_reg1;
valid_reg3 <= valid_reg2;
valid_reg4 <= valid_reg3;
valid_reg5 <= valid_reg4;
end
end
end
multiplier_core_7
mult
(
.CLK(CLK),
.update(update),
.a(a),
.b(b),
.c(c)
);
endmodule
module multiplier_core_7
(
input CLK,
input update,
input [16-1:0] a,
input [1-1:0] b,
output [17-1:0] c
);
reg signed [16-1:0] _a;
reg signed [1-1:0] _b;
wire signed [17-1:0] _mul;
reg signed [17-1:0] _pipe_mul0;
reg signed [17-1:0] _pipe_mul1;
reg signed [17-1:0] _pipe_mul2;
reg signed [17-1:0] _pipe_mul3;
reg signed [17-1:0] _pipe_mul4;
assign _mul = _a * _b;
assign c = _pipe_mul4;
always @(posedge CLK) begin
if(update) begin
_a <= a;
_b <= b;
_pipe_mul0 <= _mul;
_pipe_mul1 <= _pipe_mul0;
_pipe_mul2 <= _pipe_mul1;
_pipe_mul3 <= _pipe_mul2;
_pipe_mul4 <= _pipe_mul3;
end
end
endmodule
module multiplier_8
(
input CLK,
input RST,
input update,
input enable,
output valid,
input [16-1:0] a,
input [2-1:0] b,
output [18-1:0] c
);
reg valid_reg0;
reg valid_reg1;
reg valid_reg2;
reg valid_reg3;
reg valid_reg4;
reg valid_reg5;
assign valid = valid_reg5;
always @(posedge CLK) begin
if(RST) begin
valid_reg0 <= 0;
valid_reg1 <= 0;
valid_reg2 <= 0;
valid_reg3 <= 0;
valid_reg4 <= 0;
valid_reg5 <= 0;
end else begin
if(update) begin
valid_reg0 <= enable;
valid_reg1 <= valid_reg0;
valid_reg2 <= valid_reg1;
valid_reg3 <= valid_reg2;
valid_reg4 <= valid_reg3;
valid_reg5 <= valid_reg4;
end
end
end
multiplier_core_8
mult
(
.CLK(CLK),
.update(update),
.a(a),
.b(b),
.c(c)
);
endmodule
module multiplier_core_8
(
input CLK,
input update,
input [16-1:0] a,
input [2-1:0] b,
output [18-1:0] c
);
reg signed [16-1:0] _a;
reg signed [2-1:0] _b;
wire signed [18-1:0] _mul;
reg signed [18-1:0] _pipe_mul0;
reg signed [18-1:0] _pipe_mul1;
reg signed [18-1:0] _pipe_mul2;
reg signed [18-1:0] _pipe_mul3;
reg signed [18-1:0] _pipe_mul4;
assign _mul = _a * _b;
assign c = _pipe_mul4;
always @(posedge CLK) begin
if(update) begin
_a <= a;
_b <= b;
_pipe_mul0 <= _mul;
_pipe_mul1 <= _pipe_mul0;
_pipe_mul2 <= _pipe_mul1;
_pipe_mul3 <= _pipe_mul2;
_pipe_mul4 <= _pipe_mul3;
end
end
endmodule
module multiplier_9
(
input CLK,
input RST,
input update,
input enable,
output valid,
input [16-1:0] a,
input [1-1:0] b,
output [17-1:0] c
);
reg valid_reg0;
reg valid_reg1;
reg valid_reg2;
reg valid_reg3;
reg valid_reg4;
reg valid_reg5;
assign valid = valid_reg5;
always @(posedge CLK) begin
if(RST) begin
valid_reg0 <= 0;
valid_reg1 <= 0;
valid_reg2 <= 0;
valid_reg3 <= 0;
valid_reg4 <= 0;
valid_reg5 <= 0;
end else begin
if(update) begin
valid_reg0 <= enable;
valid_reg1 <= valid_reg0;
valid_reg2 <= valid_reg1;
valid_reg3 <= valid_reg2;
valid_reg4 <= valid_reg3;
valid_reg5 <= valid_reg4;
end
end
end
multiplier_core_9
mult
(
.CLK(CLK),
.update(update),
.a(a),
.b(b),
.c(c)
);
endmodule
module multiplier_core_9
(
input CLK,
input update,
input [16-1:0] a,
input [1-1:0] b,
output [17-1:0] c
);
reg signed [16-1:0] _a;
reg signed [1-1:0] _b;
wire signed [17-1:0] _mul;
reg signed [17-1:0] _pipe_mul0;
reg signed [17-1:0] _pipe_mul1;
reg signed [17-1:0] _pipe_mul2;
reg signed [17-1:0] _pipe_mul3;
reg signed [17-1:0] _pipe_mul4;
assign _mul = _a * _b;
assign c = _pipe_mul4;
always @(posedge CLK) begin
if(update) begin
_a <= a;
_b <= b;
_pipe_mul0 <= _mul;
_pipe_mul1 <= _pipe_mul0;
_pipe_mul2 <= _pipe_mul1;
_pipe_mul3 <= _pipe_mul2;
_pipe_mul4 <= _pipe_mul3;
end
end
endmodule
module multiplier_10
(
input CLK,
input RST,
input update,
input enable,
output valid,
input [16-1:0] a,
input [1-1:0] b,
output [17-1:0] c
);
reg valid_reg0;
reg valid_reg1;
reg valid_reg2;
reg valid_reg3;
reg valid_reg4;
reg valid_reg5;
assign valid = valid_reg5;
always @(posedge CLK) begin
if(RST) begin
valid_reg0 <= 0;
valid_reg1 <= 0;
valid_reg2 <= 0;
valid_reg3 <= 0;
valid_reg4 <= 0;
valid_reg5 <= 0;
end else begin
if(update) begin
valid_reg0 <= enable;
valid_reg1 <= valid_reg0;
valid_reg2 <= valid_reg1;
valid_reg3 <= valid_reg2;
valid_reg4 <= valid_reg3;
valid_reg5 <= valid_reg4;
end
end
end
multiplier_core_10
mult
(
.CLK(CLK),
.update(update),
.a(a),
.b(b),
.c(c)
);
endmodule
module multiplier_core_10
(
input CLK,
input update,
input [16-1:0] a,
input [1-1:0] b,
output [17-1:0] c
);
reg signed [16-1:0] _a;
reg signed [1-1:0] _b;
wire signed [17-1:0] _mul;
reg signed [17-1:0] _pipe_mul0;
reg signed [17-1:0] _pipe_mul1;
reg signed [17-1:0] _pipe_mul2;
reg signed [17-1:0] _pipe_mul3;
reg signed [17-1:0] _pipe_mul4;
assign _mul = _a * _b;
assign c = _pipe_mul4;
always @(posedge CLK) begin
if(update) begin
_a <= a;
_b <= b;
_pipe_mul0 <= _mul;
_pipe_mul1 <= _pipe_mul0;
_pipe_mul2 <= _pipe_mul1;
_pipe_mul3 <= _pipe_mul2;
_pipe_mul4 <= _pipe_mul3;
end
end
endmodule
module multiplier_11
(
input CLK,
input RST,
input update,
input enable,
output valid,
input [16-1:0] a,
input [2-1:0] b,
output [18-1:0] c
);
reg valid_reg0;
reg valid_reg1;
reg valid_reg2;
reg valid_reg3;
reg valid_reg4;
reg valid_reg5;
assign valid = valid_reg5;
always @(posedge CLK) begin
if(RST) begin
valid_reg0 <= 0;
valid_reg1 <= 0;
valid_reg2 <= 0;
valid_reg3 <= 0;
valid_reg4 <= 0;
valid_reg5 <= 0;
end else begin
if(update) begin
valid_reg0 <= enable;
valid_reg1 <= valid_reg0;
valid_reg2 <= valid_reg1;
valid_reg3 <= valid_reg2;
valid_reg4 <= valid_reg3;
valid_reg5 <= valid_reg4;
end
end
end
multiplier_core_11
mult
(
.CLK(CLK),
.update(update),
.a(a),
.b(b),
.c(c)
);
endmodule
module multiplier_core_11
(
input CLK,
input update,
input [16-1:0] a,
input [2-1:0] b,
output [18-1:0] c
);
reg signed [16-1:0] _a;
reg signed [2-1:0] _b;
wire signed [18-1:0] _mul;
reg signed [18-1:0] _pipe_mul0;
reg signed [18-1:0] _pipe_mul1;
reg signed [18-1:0] _pipe_mul2;
reg signed [18-1:0] _pipe_mul3;
reg signed [18-1:0] _pipe_mul4;
assign _mul = _a * _b;
assign c = _pipe_mul4;
always @(posedge CLK) begin
if(update) begin
_a <= a;
_b <= b;
_pipe_mul0 <= _mul;
_pipe_mul1 <= _pipe_mul0;
_pipe_mul2 <= _pipe_mul1;
_pipe_mul3 <= _pipe_mul2;
_pipe_mul4 <= _pipe_mul3;
end
end
endmodule
module multiplier_12
(
input CLK,
input RST,
input update,
input enable,
output valid,
input [16-1:0] a,
input [2-1:0] b,
output [18-1:0] c
);
reg valid_reg0;
reg valid_reg1;
reg valid_reg2;
reg valid_reg3;
reg valid_reg4;
reg valid_reg5;
assign valid = valid_reg5;
always @(posedge CLK) begin
if(RST) begin
valid_reg0 <= 0;
valid_reg1 <= 0;
valid_reg2 <= 0;
valid_reg3 <= 0;
valid_reg4 <= 0;
valid_reg5 <= 0;
end else begin
if(update) begin
valid_reg0 <= enable;
valid_reg1 <= valid_reg0;
valid_reg2 <= valid_reg1;
valid_reg3 <= valid_reg2;
valid_reg4 <= valid_reg3;
valid_reg5 <= valid_reg4;
end
end
end
multiplier_core_12
mult
(
.CLK(CLK),
.update(update),
.a(a),
.b(b),
.c(c)
);
endmodule
module multiplier_core_12
(
input CLK,
input update,
input [16-1:0] a,
input [2-1:0] b,
output [18-1:0] c
);
reg signed [16-1:0] _a;
reg signed [2-1:0] _b;
wire signed [18-1:0] _mul;
reg signed [18-1:0] _pipe_mul0;
reg signed [18-1:0] _pipe_mul1;
reg signed [18-1:0] _pipe_mul2;
reg signed [18-1:0] _pipe_mul3;
reg signed [18-1:0] _pipe_mul4;
assign _mul = _a * _b;
assign c = _pipe_mul4;
always @(posedge CLK) begin
if(update) begin
_a <= a;
_b <= b;
_pipe_mul0 <= _mul;
_pipe_mul1 <= _pipe_mul0;
_pipe_mul2 <= _pipe_mul1;
_pipe_mul3 <= _pipe_mul2;
_pipe_mul4 <= _pipe_mul3;
end
end
endmodule
module multiplier_13
(
input CLK,
input RST,
input update,
input enable,
output valid,
input [16-1:0] a,
input [1-1:0] b,
output [17-1:0] c
);
reg valid_reg0;
reg valid_reg1;
reg valid_reg2;
reg valid_reg3;
reg valid_reg4;
reg valid_reg5;
assign valid = valid_reg5;
always @(posedge CLK) begin
if(RST) begin
valid_reg0 <= 0;
valid_reg1 <= 0;
valid_reg2 <= 0;
valid_reg3 <= 0;
valid_reg4 <= 0;
valid_reg5 <= 0;
end else begin
if(update) begin
valid_reg0 <= enable;
valid_reg1 <= valid_reg0;
valid_reg2 <= valid_reg1;
valid_reg3 <= valid_reg2;
valid_reg4 <= valid_reg3;
valid_reg5 <= valid_reg4;
end
end
end
multiplier_core_13
mult
(
.CLK(CLK),
.update(update),
.a(a),
.b(b),
.c(c)
);
endmodule
module multiplier_core_13
(
input CLK,
input update,
input [16-1:0] a,
input [1-1:0] b,
output [17-1:0] c
);
reg signed [16-1:0] _a;
reg signed [1-1:0] _b;
wire signed [17-1:0] _mul;
reg signed [17-1:0] _pipe_mul0;
reg signed [17-1:0] _pipe_mul1;
reg signed [17-1:0] _pipe_mul2;
reg signed [17-1:0] _pipe_mul3;
reg signed [17-1:0] _pipe_mul4;
assign _mul = _a * _b;
assign c = _pipe_mul4;
always @(posedge CLK) begin
if(update) begin
_a <= a;
_b <= b;
_pipe_mul0 <= _mul;
_pipe_mul1 <= _pipe_mul0;
_pipe_mul2 <= _pipe_mul1;
_pipe_mul3 <= _pipe_mul2;
_pipe_mul4 <= _pipe_mul3;
end
end
endmodule
module multiplier_14
(
input CLK,
input RST,
input update,
input enable,
output valid,
input [16-1:0] a,
input [1-1:0] b,
output [17-1:0] c
);
reg valid_reg0;
reg valid_reg1;
reg valid_reg2;
reg valid_reg3;
reg valid_reg4;
reg valid_reg5;
assign valid = valid_reg5;
always @(posedge CLK) begin
if(RST) begin
valid_reg0 <= 0;
valid_reg1 <= 0;
valid_reg2 <= 0;
valid_reg3 <= 0;
valid_reg4 <= 0;
valid_reg5 <= 0;
end else begin
if(update) begin
valid_reg0 <= enable;
valid_reg1 <= valid_reg0;
valid_reg2 <= valid_reg1;
valid_reg3 <= valid_reg2;
valid_reg4 <= valid_reg3;
valid_reg5 <= valid_reg4;
end
end
end
multiplier_core_14
mult
(
.CLK(CLK),
.update(update),
.a(a),
.b(b),
.c(c)
);
endmodule
module multiplier_core_14
(
input CLK,
input update,
input [16-1:0] a,
input [1-1:0] b,
output [17-1:0] c
);
reg signed [16-1:0] _a;
reg signed [1-1:0] _b;
wire signed [17-1:0] _mul;
reg signed [17-1:0] _pipe_mul0;
reg signed [17-1:0] _pipe_mul1;
reg signed [17-1:0] _pipe_mul2;
reg signed [17-1:0] _pipe_mul3;
reg signed [17-1:0] _pipe_mul4;
assign _mul = _a * _b;
assign c = _pipe_mul4;
always @(posedge CLK) begin
if(update) begin
_a <= a;
_b <= b;
_pipe_mul0 <= _mul;
_pipe_mul1 <= _pipe_mul0;
_pipe_mul2 <= _pipe_mul1;
_pipe_mul3 <= _pipe_mul2;
_pipe_mul4 <= _pipe_mul3;
end
end
endmodule
module multiplier_15
(
input CLK,
input RST,
input update,
input enable,
output valid,
input [16-1:0] a,
input [2-1:0] b,
output [18-1:0] c
);
reg valid_reg0;
reg valid_reg1;
reg valid_reg2;
reg valid_reg3;
reg valid_reg4;
reg valid_reg5;
assign valid = valid_reg5;
always @(posedge CLK) begin
if(RST) begin
valid_reg0 <= 0;
valid_reg1 <= 0;
valid_reg2 <= 0;
valid_reg3 <= 0;
valid_reg4 <= 0;
valid_reg5 <= 0;
end else begin
if(update) begin
valid_reg0 <= enable;
valid_reg1 <= valid_reg0;
valid_reg2 <= valid_reg1;
valid_reg3 <= valid_reg2;
valid_reg4 <= valid_reg3;
valid_reg5 <= valid_reg4;
end
end
end
multiplier_core_15
mult
(
.CLK(CLK),
.update(update),
.a(a),
.b(b),
.c(c)
);
endmodule
module multiplier_core_15
(
input CLK,
input update,
input [16-1:0] a,
input [2-1:0] b,
output [18-1:0] c
);
reg signed [16-1:0] _a;
reg signed [2-1:0] _b;
wire signed [18-1:0] _mul;
reg signed [18-1:0] _pipe_mul0;
reg signed [18-1:0] _pipe_mul1;
reg signed [18-1:0] _pipe_mul2;
reg signed [18-1:0] _pipe_mul3;
reg signed [18-1:0] _pipe_mul4;
assign _mul = _a * _b;
assign c = _pipe_mul4;
always @(posedge CLK) begin
if(update) begin
_a <= a;
_b <= b;
_pipe_mul0 <= _mul;
_pipe_mul1 <= _pipe_mul0;
_pipe_mul2 <= _pipe_mul1;
_pipe_mul3 <= _pipe_mul2;
_pipe_mul4 <= _pipe_mul3;
end
end
endmodule
"""
def test():
    """Check that mkTest() emits Verilog equivalent to ``expected_verilog``.

    Both sides are normalized by round-tripping the reference text through
    the pyverilog parser and code generator before the comparison, so
    formatting differences do not cause spurious failures.
    """
    from pyverilog.vparser.parser import VerilogParser
    from pyverilog.ast_code_generator.codegen import ASTCodeGenerator

    veriloggen.reset()
    generated = dataflow_fft4.mkTest().to_verilog()
    reference_ast = VerilogParser().parse(expected_verilog)
    reference = ASTCodeGenerator().visit(reference_ast)
    assert reference == generated
|
"""AIGER circuit class based on
https://github.com/mvcisback/py-aiger/blob/main/aiger/parser.py"""
import re
class Header:
    """The ``aag M I L O A`` header line of an ASCII AIGER file.

    Stores the five header fields: maximum variable index, and the
    counts of inputs, latches, outputs and and-gates.
    """

    def __init__(self, max_var_id: int, num_inputs: int, num_latches: int,
                 num_outputs: int, num_ands: int):
        self.max_var_id = max_var_id
        self.num_inputs = num_inputs
        self.num_latches = num_latches
        self.num_outputs = num_outputs
        self.num_ands = num_ands

    def __str__(self):
        fields = (self.max_var_id, self.num_inputs, self.num_latches,
                  self.num_outputs, self.num_ands)
        return 'aag ' + ' '.join(str(field) for field in fields)
class Latch:
    """One latch line of an AIGER file: current literal and next-state literal."""

    def __init__(self, lit: int, next_lit: int):
        self.lit = lit
        self.next_lit = next_lit

    def __str__(self):
        return ' '.join((str(self.lit), str(self.next_lit)))
class And:
    """One and-gate line of an AIGER file: output literal and two operand literals."""

    def __init__(self, lit: int, arg1: int, arg2: int):
        self.lit = lit
        self.arg1 = arg1
        self.arg2 = arg2

    def __str__(self):
        return ' '.join((str(self.lit), str(self.arg1), str(self.arg2)))
class Symbol:
    """A symbol-table entry naming an input (``i``), latch (``l``) or output (``o``)."""

    def __init__(self, kind: str, idx: int, name: str):
        self.kind = kind
        self.idx = idx
        self.name = name

    def __str__(self):
        return '{}{} {}'.format(self.kind, self.idx, self.name)
class Circuit:
    """An AIGER circuit: header plus input, latch, output and and-gate
    literals, a symbol table, and trailing comments.

    The ``num_*`` and ``*_var_ids`` properties are derived from the
    stored component lists rather than read from the header, so they
    stay correct while the circuit is being built incrementally.
    """

    def __init__(self,
                 header=None,
                 inputs=None,
                 latches=None,
                 outputs=None,
                 ands=None,
                 symbols=None,
                 comments=None):
        self.header = header
        # Falsy arguments (None or empty) are replaced with fresh lists.
        self.inputs = inputs or []
        self.latches = latches or []
        self.outputs = outputs or []
        self.ands = ands or []
        self.symbols = symbols or []
        self.comments = comments or []

    @property
    def max_var_id(self):
        """Largest variable index used by any input, latch or and gate."""
        top = max(self.inputs) if self.inputs else 0
        gate_lits = [item.lit for item in self.latches + self.ands]
        if gate_lits:
            top = max(top, max(gate_lits))
        # Literals encode variable index * 2 (+1 for negation).
        return top // 2

    @property
    def num_inputs(self):
        return len(self.inputs)

    @property
    def num_latches(self):
        return len(self.latches)

    @property
    def num_outputs(self):
        return len(self.outputs)

    @property
    def num_ands(self):
        return len(self.ands)

    @property
    def input_var_ids(self):
        return [lit // 2 for lit in self.inputs]

    @property
    def latch_var_ids(self):
        return [latch.lit // 2 for latch in self.latches]

    @property
    def output_var_ids(self):
        return [lit // 2 for lit in self.outputs]

    @property
    def and_var_ids(self):
        return [gate.lit // 2 for gate in self.ands]

    def get_latch_by_idx(self, idx):
        """Return the latch whose variable index equals *idx*, or None."""
        return next(
            (latch for latch in self.latches if latch.lit // 2 == idx), None)

    def __str__(self):
        parts = [self.header]
        parts.extend(self.inputs)
        parts.extend(self.latches)
        parts.extend(self.outputs)
        parts.extend(self.ands)
        parts.extend(self.symbols)
        parts.extend(self.comments)
        return '\n'.join(str(part) for part in parts)
HEADER_PATTERN = re.compile(r"aag (\d+) (\d+) (\d+) (\d+) (\d+)")


def parse_header(line, state):
    """Parse the ``aag M I L O A`` header line into ``state.header``.

    Returns False when a header has already been parsed (so the caller
    advances to the next component parser). Raises ValueError when the
    line does not match the header grammar or violates the AIGER
    invariant M >= I + L + A.

    Fix: the error message typo "Indicies" is corrected to "Indices".
    """
    if state.header:
        return False
    match = HEADER_PATTERN.fullmatch(line)
    if not match:
        raise ValueError(f"Failed to parse aag header: {line}")
    try:
        ids = [int(idx) for idx in match.groups()]
        if any(x < 0 for x in ids):
            raise ValueError("Indices must be positive")
        max_var_id, num_inputs, num_latches, num_outputs, num_ands = ids
        # AIGER requires the max variable index to cover every component.
        if num_inputs + num_latches + num_ands > max_var_id:
            raise ValueError(
                "Sum of number of inputs, latches and ands is greater than max variable index"
            )
        state.header = Header(max_var_id, num_inputs, num_latches, num_outputs,
                              num_ands)
    except ValueError as exc:
        raise ValueError('Failed to parse aag header') from exc
    return True
IO_PATTERN = re.compile(r"(\d+)")


def parse_input(line, state):
    """Consume one input-literal line; False once all declared inputs are read."""
    if not IO_PATTERN.fullmatch(line):
        return False
    if state.num_inputs >= state.header.num_inputs:
        return False
    state.inputs.append(int(line))
    return True


def parse_output(line, state):
    """Consume one output-literal line; False once all declared outputs are read."""
    if not IO_PATTERN.fullmatch(line):
        return False
    if state.num_outputs >= state.header.num_outputs:
        return False
    state.outputs.append(int(line))
    return True
LATCH_PATTERN = re.compile(r"(\d+) (\d+)")


def parse_latch(line, state):
    """Consume one latch line (``lit next_lit``).

    Returns False when the declared latch count is already satisfied.
    Raises ValueError when latches are expected but the line is malformed.
    """
    expected = state.header.num_latches
    if expected and state.num_latches >= expected:
        return False
    match = LATCH_PATTERN.fullmatch(line)
    if match is None:
        if expected:
            raise ValueError(f'Expecting a latch: {line}')
        return False
    lit, next_lit = (int(group) for group in match.groups())
    state.latches.append(Latch(lit, next_lit))
    return True
AND_PATTERN = re.compile(r"(\d+) (\d+) (\d+)")


def parse_and(line, state):
    """Consume one and-gate line (``lit arg1 arg2``).

    Returns False when the declared gate count is already satisfied.
    Raises ValueError when gates are expected but the line is malformed.
    """
    expected = state.header.num_ands
    if expected and state.num_ands >= expected:
        return False
    match = AND_PATTERN.fullmatch(line)
    if match is None:
        if expected:
            raise ValueError(f'Expecting an and: {line}')
        return False
    lit, arg1, arg2 = (int(group) for group in match.groups())
    state.ands.append(And(lit, arg1, arg2))
    return True
SYM_PATTERN = re.compile(r"([ilo])(\d+) (.*)")


def parse_symbol(line, state):
    """Consume one symbol-table line (e.g. ``i0 clk``); False otherwise.

    The index is kept as the matched string, exactly as the regex
    captured it.
    """
    match = SYM_PATTERN.fullmatch(line)
    if match is None:
        return False
    state.symbols.append(Symbol(*match.groups()))
    return True
def parse_comment(line, state):
    """Consume comment lines following the ``c`` marker line.

    The first ``c`` line switches comment mode on; each later line is
    stripped of trailing whitespace and appended to ``state.comments``.

    Fixes two defects in the previous version: ``line.restrip()`` raised
    AttributeError for every stored comment (the method is ``rstrip``),
    and the first comment after the ``c`` marker was rejected because an
    empty ``state.comments`` list is falsy and could not signal that the
    marker had been seen. A flag on *state* now records the marker.
    """
    if getattr(state, '_in_comments', False) or state.comments:
        state.comments.append(line.rstrip())
    elif line.rstrip() == 'c':
        state.comments = []
        state._in_comments = True  # remember the marker even while list is empty
    else:
        return False
    return True
DEFAULT_COMPONENTS = [
    'header', 'inputs', 'latches', 'outputs', 'ands', 'symbols', 'comments'
]


def parser_seq(components):
    """Yield the parser function for each named component, in order.

    Unknown component names yield None, matching the previous
    ``dict.get`` behavior.
    """
    table = {
        'header': parse_header,
        'inputs': parse_input,
        'latches': parse_latch,
        'outputs': parse_output,
        'ands': parse_and,
        'symbols': parse_symbol,
        'comments': parse_comment,
    }
    for component in components:
        yield table.get(component)
def parse(circuit: str, components=None, state=None):
    """Parse an ASCII AIGER document into a Circuit.

    Component parsers are tried in order: a line the current parser
    rejects advances to the next parser, and a line that no parser
    accepts raises ValueError. Parsing accumulates into *state* (a
    fresh Circuit unless one is supplied) and returns it.
    """
    components = components or DEFAULT_COMPONENTS
    state = state or Circuit()
    parsers = parser_seq(components)
    current = next(parsers)
    for line in circuit.split('\n'):
        while not current(line, state):
            try:
                current = next(parsers)
            except StopIteration as exc:
                raise ValueError(f'Could not parse line: {line}') from exc
    return state
def parse_no_header(circuit: str,
                    num_inputs: int,
                    num_outputs: int,
                    components=None):
    """Parse an AIGER body that lacks the ``aag`` header line.

    A provisional header carrying only the input/output counts is
    installed first; the remaining header fields are back-filled from
    the parsed components before the state is returned.
    """
    header = Header(None, num_inputs, None, num_outputs, None)
    state = Circuit(header=header)
    parse(circuit, components, state)
    # Back-fill the fields the header line would normally provide.
    for attr in ('max_var_id', 'num_latches', 'num_ands'):
        setattr(header, attr, getattr(state, attr))
    return state
|
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
.. image:: https://user-images.githubusercontent.com/32848391/46815773-dc919500-cd7b-11e8-8e80-8b83f760a303.png
A python module for scientific analysis and visualization of 3D objects and point clouds based on VTK and numpy.
"""
__author__ = "<NAME>"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "dev"
__website__ = "https://github.com/marcomusy/vedo"
######################################################################## imports
import os
import sys
import vtk
import warnings
import logging
from deprecated import deprecated
import numpy as np
from numpy import sin, cos, sqrt, exp, log, dot, cross # just because useful
#################################################
from vedo.version import _version as __version__
from vedo.utils import *
from vedo.settings import _setts as settings
from vedo.colors import *
from vedo.shapes import *
from vedo.io import *
from vedo.base import *
from vedo.ugrid import *
from vedo.assembly import *
from vedo.pointcloud import *
from vedo.mesh import *
from vedo.picture import *
from vedo.volume import *
from vedo.tetmesh import *
from vedo.shapes import *
from vedo.addons import *
from vedo.plotter import *
# HACK: need to uncomment this to generate html documentation
#from vedo.dolfin import _inputsort
#import vedo.base as base
import vedo.docs as docs # needed by spyder console, otherwise complains
##################################################################################
########################################################################## GLOBALS
# Detected VTK version as [major, minor, build].
vtk_version = [
    int(vtk.vtkVersion().GetVTKMajorVersion()),
    int(vtk.vtkVersion().GetVTKMinorVersion()),
    int(vtk.vtkVersion().GetVTKBuildVersion()),
]
try:
    import platform
    sys_platform = platform.system()
except Exception:  # was a bare except: do not swallow SystemExit/KeyboardInterrupt
    sys_platform = ""
# Enable depth peeling on Windows with VTK >= 9 (see original condition).
if vtk_version[0] >= 9 and "Windows" in sys_platform:
    settings.useDepthPeeling = True
######################################################################### logging
class _LoggingCustomFormatter(logging.Formatter):
logformat = "[vedo.%(filename)s:%(lineno)d] %(levelname)s: %(message)s"
white = "\x1b[1m"
grey = "\x1b[2m\x1b[1m\x1b[38;20m"
yellow = "\x1b[1m\x1b[33;20m"
red = "\x1b[1m\x1b[31;20m"
inv_red = "\x1b[7m\x1b[1m\x1b[31;1m"
reset = "\x1b[0m"
FORMATS = {
logging.DEBUG: grey + logformat + reset,
logging.INFO: white + logformat + reset,
logging.WARNING: yellow + logformat + reset,
logging.ERROR: red + logformat + reset,
logging.CRITICAL: inv_red + logformat + reset,
}
def format(self, record):
log_fmt = self.FORMATS.get(record.levelno)
formatter = logging.Formatter(log_fmt)
return formatter.format(record)
# Package-wide logger; the handler below emits colorized records.
logger = logging.getLogger("vedo")
_chsh = logging.StreamHandler()
# HACK: route the handler's flush to stdout so records appear promptly --
# NOTE(review): presumably intended for interactive consoles; confirm
# before removing.
_chsh.flush = sys.stdout.flush
_chsh.setLevel(logging.DEBUG)  # handler passes everything through...
_chsh.setFormatter(_LoggingCustomFormatter())
logger.addHandler(_chsh)
logger.setLevel(logging.INFO)  # ...the logger itself filters at INFO
# silence annoying messages
warnings.simplefilter(action="ignore", category=FutureWarning)
# NOTE(review): np.warnings is a legacy NumPy alias removed in recent
# releases -- verify against the pinned NumPy version.
np.warnings.filterwarnings('ignore', category=np.VisibleDeprecationWarning)
################################################################################
# Package-level globals and bundled asset discovery.
installdir = os.path.dirname(__file__)  # directory this package is installed in
dataurl = "https://vedo.embl.es/examples/data/"  # base URL for example datasets
# Per-session singletons, (re)bound by the plotting machinery at runtime.
plotter_instance = None
notebook_plotter = None
notebookBackend = None
interactorStyle = None # internal use only
## textures
# Collect bundled texture names (file stem only, extension dropped).
textures_path = os.path.join(installdir, "textures/")
textures = []
for f in os.listdir(textures_path):
    tfn = f.split(".")[0]
    # NOTE(review): 'earth' textures are deliberately excluded here --
    # presumably handled specially elsewhere; confirm.
    if 'earth' in tfn: continue
    textures.append(tfn)
## fonts
# Collect bundled font names, skipping .npz auxiliary data files.
fonts_path = os.path.join(installdir, "fonts/")
fonts = []
for f in os.listdir(fonts_path):
    if '.npz' in f: continue
    fonts.append(f.split(".")[0])
fonts = list(sorted(fonts))
################################################################## deprecated
@deprecated(reason="\x1b[7m\x1b[1m\x1b[31;1mPlease use Plotter(backend='...')\x1b[0m")
def embedWindow(backend='ipyvtk', verbose=True):
    """Control whether the rendering window is inside the Jupyter
    notebook or an independent external window.

    Sets the module globals ``notebookBackend`` and ``notebook_plotter``.
    A falsy *backend*, a Spyder environment, or running outside IPython
    disables notebook embedding. Recognized backends: 'k3d', 'ipygany',
    'itkwidgets', '2d', 'panel', 'ipyvtk' (default). Raises RuntimeError
    for an unknown backend name; missing backend packages are reported
    (when *verbose*) instead of raising.
    """
    global notebook_plotter, notebookBackend
    # Explicitly disabled: fall back to an external rendering window.
    if not backend:
        notebookBackend = None
        notebook_plotter = None
        return ####################
    else:
        # Spyder's console is not a notebook: disable embedding.
        if any(['SPYDER' in name for name in os.environ]):
            notebookBackend = None
            notebook_plotter = None
            return
        # get_ipython() only exists inside an IPython/Jupyter session.
        try:
            get_ipython()
        except NameError:
            notebookBackend = None
            notebook_plotter = None
            return
    backend = backend.lower()
    notebookBackend = backend
    if backend=='k3d':
        try:
            import k3d
            # NOTE(review): only this exact k3d release is supported here.
            if k3d._version.version_info != (2, 7, 4):
                print('Warning: only k3d version 2.7.4 is currently supported')
                # print('> pip install k3d==2.7.4')
        except ModuleNotFoundError:
            notebookBackend = None
            if verbose:
                print('embedWindow(verbose=True): could not load k3d module, try:')
                print('> pip install k3d==2.7.4')
    elif 'ipygany' in backend: # ipygany
        try:
            import ipygany
        except ModuleNotFoundError:
            notebookBackend = None
            if verbose:
                print('embedWindow(verbose=True): could not load ipygany module, try:')
                print('> pip install ipygany')
    elif 'itk' in backend: # itkwidgets
        try:
            import itkwidgets
        except ModuleNotFoundError:
            notebookBackend = None
            if verbose:
                print('embedWindow(verbose=True): could not load itkwidgets module, try:')
                print('> pip install itkwidgets')
    elif backend.lower() == '2d':
        # 2d backend needs no extra package.
        pass
    elif backend =='panel':
        try:
            import panel
            panel.extension('vtk')
        except:
            if verbose:
                print('embedWindow(verbose=True): could not load panel try:')
                print('> pip install panel')
    elif 'ipyvtk' in backend:
        # NOTE(review): unlike the branches above, notebookBackend is NOT
        # reset to None when ipyvtklink is missing -- confirm if intended.
        try:
            from ipyvtklink.viewer import ViewInteractiveWidget
        except ModuleNotFoundError:
            if verbose:
                print('embedWindow(verbose=True): could not load ipyvtklink try:')
                print('> pip install ipyvtklink')
    else:
        print("Unknown backend", backend)
        raise RuntimeError()
|
<reponame>sannidhiteredesai/PersonalAccountant<filename>test/pa/test_report15g.py<gh_stars>1-10
from unittest import TestCase
from pa.pa.report15g import *
import pa.pa.report15g as report15g
from datetime import date
import datetime
class MockDateTime(datetime.datetime):
    """datetime.datetime subclass whose now() is pinned to 2019-01-01,
    making date-dependent report logic deterministic in these tests."""
    @classmethod
    def now(cls): return cls(2019, 1, 1)
# Monkeypatch: rebind report15g's module-level `datetime` name so its
# now() calls resolve to the mock -- NOTE(review): assumes report15g
# accesses datetime via that module global; confirm.
report15g.datetime = MockDateTime
class TestInterest(TestCase):
    """Interest calculations across financial-year boundaries.

    The clock is frozen to 2019-01-01 by the MockDateTime patch above.
    Expected values are precomputed reference results -- NOTE(review):
    presumably hand-checked; re-derive if rounding rules change.
    """

    def test_cumulative_interest(self):
        """Cover all start/end positions relative to the FY window."""
        # start_date = fy and end_date = next_fy
        self.assertEqual(103.81, get_cumulative_interest(principal=1000, roi=10,
                                                         start_date=date(2019, 4, 1),
                                                         end_date=date(2020, 4, 1)))
        # start_date < fy and end_date < next_fy
        self.assertEqual(8.27, get_cumulative_interest(principal=1000, roi=10,
                                                       start_date=date(2019, 3, 29),
                                                       end_date=date(2019, 5, 1)))
        # start_date < fy and end_date > next_fy
        self.assertEqual(103.9, get_cumulative_interest(principal=1000, roi=10,
                                                        start_date=date(2019, 3, 29),
                                                        end_date=date(2020, 5, 2)))
        # start_date > fy and end_date < next_fy
        self.assertEqual(21.62, get_cumulative_interest(principal=1000, roi=10,
                                                        start_date=date(2019, 4, 20),
                                                        end_date=date(2019, 7, 8)))
        # start_date > fy and end_date > next_fy
        self.assertEqual(98.06, get_cumulative_interest(principal=1000, roi=10,
                                                        start_date=date(2019, 4, 20),
                                                        end_date=date(2020, 5, 20)))

    def test_quarterly_interest(self):
        """Same window positions as above, for the quarterly scheme."""
        # start_date = fy and end_date = next_fy
        self.assertEqual(100, get_quarterly_interest(principal=1000, roi=10,
                                                     start_date=date(2019, 4, 1),
                                                     end_date=date(2020, 4, 1)))
        # start_date < fy and end_date < next_fy
        self.assertEqual(8.33, get_quarterly_interest(principal=1000, roi=10,
                                                      start_date=date(2019, 3, 29),
                                                      end_date=date(2019, 5, 1)))
        # start_date < fy and end_date > next_fy
        self.assertEqual(100, get_quarterly_interest(principal=1000, roi=10,
                                                     start_date=date(2019, 3, 29),
                                                     end_date=date(2020, 5, 2)))
        # start_date > fy and end_date < next_fy
        self.assertEqual(21.6, get_quarterly_interest(principal=1000, roi=10,
                                                      start_date=date(2019, 4, 20),
                                                      end_date=date(2019, 7, 8)))
        # start_date > fy and end_date > next_fy
        self.assertEqual(94.68, get_quarterly_interest(principal=1000, roi=10,
                                                       start_date=date(2019, 4, 20),
                                                       end_date=date(2020, 5, 20)))
class TestPeriodBetween(TestCase):
    """get_period_between() returns a list of Days/Months period parts."""

    def compare(self, expected_period, actual_period):
        """Assert equality of the two period lists AND of element types.

        The element-wise type check exists because plain ``!=`` may not
        distinguish the period classes -- NOTE(review): presumably
        Days(n) and Months(n) compare equal by value; confirm. Note zip
        truncates, but a length mismatch already fails the ``!=`` check
        (assuming list equality semantics).
        """
        assertion_error = f'{expected_period} != {actual_period}'
        if expected_period != actual_period: raise AssertionError(assertion_error)
        for e, a in zip(expected_period, actual_period):
            if type(e) != type(a): raise AssertionError(assertion_error)
        return True

    def test_end_date_less_or_equal_to_start_date(self):
        # Reversed or equal dates yield an empty period.
        self.compare([], get_period_between(date(2019, 12, 31), date(2018, 3, 1)))
        self.compare([], get_period_between(date(2019, 12, 31), date(2019, 12, 31)))

    def test_dates_in_same_month(self):
        self.compare([Days(9)], get_period_between(date(2019, 1, 1), date(2019, 1, 10)))
        self.compare([Days(1)], get_period_between(date(2019, 1, 12), date(2019, 1, 13)))
        self.compare([Days(6)], get_period_between(date(2019, 1, 25), date(2019, 1, 31)))

    def test_dates_with_start_of_month_in_year(self):
        # Whole months collapse into a single Months part.
        self.compare([Months(1)], get_period_between(date(2019, 1, 1), date(2019, 2, 1)))
        self.compare([Months(3)], get_period_between(date(2019, 1, 1), date(2019, 4, 1)))

    def test_dates_in_same_year(self):
        self.compare([Days(1)], get_period_between(date(2019, 1, 29), date(2019, 1, 30)))
        self.compare([Days(3), Months(1), Days(29)], get_period_between(date(2019, 1, 29), date(2019, 3, 30)))

    def test_dates_in_different_years(self):
        self.compare([Days(3)], get_period_between(date(2018, 12, 29), date(2019, 1, 1)))
        self.compare([Days(7)], get_period_between(date(2018, 12, 29), date(2019, 1, 5)))
        self.compare([Days(3), Months(1), Days(2)], get_period_between(date(2018, 12, 29), date(2019, 2, 3)))
class TestFinancialYear(TestCase):
    # NOTE(review): the expected years are hard-coded to FY 2019-2020.
    # get_financial_year() presumably derives its result from the current
    # date, so this test will start failing once the financial year rolls
    # over unless the clock is frozen -- confirm against the implementation.
    def test_get_financial_year(self):
        """get_financial_year() returns a (start_year, end_year) pair."""
        self.assertEqual(2019, get_financial_year()[0])
        self.assertEqual(2020, get_financial_year()[1])
|
<reponame>zairwolf/pilot
# --------------------------------------------------------------------------- #
# #
# from: https://github.com/jyoung8607/openpilot/tree/vw-community-private-pq #
# verify: <EMAIL>(https://shop442817640.taobao.com) #
# Date: 2020-05-22 #
# #
# --------------------------------------------------------------------------- #
from cereal import car
from selfdrive.car import dbc_dict
class CarControllerParams:
  """Static message-rate and steering-limit constants for the VW controller."""
  # FIXME: testing HCA at 100Hz on PQ to verify timebomb behavior
  # HCA_STEP = 2                  # HCA_01 message frequency 50Hz
  HCA_STEP = 1                  # HCA_01 message frequency 100Hz
  # FIXME: LDW is sent at 20Hz on PQ, need to make this conditional
  # LDW_STEP = 10                 # LDW_02 message frequency 10Hz
  LDW_STEP = 5                 # LDW_02 message frequency 20Hz
  GRA_ACC_STEP = 3              # GRA_ACC_01 message frequency 33Hz
  GRA_VBP_STEP = 100            # Send ACC virtual button presses once a second
  GRA_VBP_COUNT = 16            # Send VBP messages for ~0.5s (GRA_ACC_STEP * 16)
  # Observed documented MQB limits: 3.00 Nm max, rate of change 5.00 Nm/sec.
  # Limiting rate-of-change based on real-world testing and Comma's safety
  # requirements for minimum time to lane departure.
  STEER_MAX = 300                 # Max heading control assist torque 3.00 Nm
  # FIXME: testing steering rate hax for PQ
  # STEER_DELTA_UP = 4              # Max HCA reached in 1.50s (STEER_MAX / (50Hz * 1.50))
  # STEER_DELTA_DOWN = 10           # Min HCA reached in 0.60s (STEER_MAX / (50Hz * 0.60))
  STEER_DELTA_UP = 2              # Max HCA reached in 1.50s (STEER_MAX / (100Hz * 1.50))
  STEER_DELTA_DOWN = 5           # Min HCA reached in 0.60s (STEER_MAX / (100Hz * 0.60))
  STEER_DRIVER_ALLOWANCE = 80     # driver torque threshold before counting as override
  STEER_DRIVER_MULTIPLIER = 3     # weight driver torque heavily
  STEER_DRIVER_FACTOR = 1         # from dbc
class CANBUS:
  """CAN bus index assignments (pt = powertrain, cam = camera)."""
  pt = 0   # powertrain bus
  cam = 2  # camera bus
# Short aliases for the cereal capnp schema enums used by this port.
NWL = car.CarParams.NetworkLocation
TRANS = car.CarParams.TransmissionType
GEAR = car.CarState.GearShifter
# Cruise-control button states; all initialized to released (False).
BUTTON_STATES = {
  "accelCruise": False,
  "decelCruise": False,
  "cancel": False,
  "setCruise": False,
  "resumeCruise": False,
  "gapAdjustCruise": False
}
# Instrument-cluster Lane Assist / Emergency Assist display message IDs (MQB).
MQB_LDW_MESSAGES = {
  "none": 0,                            # Nothing to display
  "laneAssistUnavailChime": 1,          # "Lane Assist currently not available." with chime
  "laneAssistUnavailNoSensorChime": 3,  # "Lane Assist not available. No sensor view." with chime
  "laneAssistTakeOverUrgent": 4,        # "Lane Assist: Please Take Over Steering" with urgent beep
  "emergencyAssistUrgent": 6,           # "Emergency Assist: Please Take Over Steering" with urgent beep
  "laneAssistTakeOverChime": 7,         # "Lane Assist: Please Take Over Steering" with chime
  "laneAssistTakeOverSilent": 8,        # "Lane Assist: Please Take Over Steering" silent
  "emergencyAssistChangingLanes": 9,    # "Emergency Assist: Changing lanes..." with urgent beep
  "laneAssistDeactivated": 10,          # "Lane Assist deactivated." silent with persistent icon afterward
}
class CAR:
  """Supported platform identifiers -- one generic entry per VW platform."""
  GENERICMQB = "Generic Volkswagen MQB Platform Vehicle"
  GENERICPQ = "Generic Volkswagen PQ35/PQ46/NMS Platform Vehicle"
# Mega-fingerprint used to identify any and all MQB platform vehicles. Specific
# make and model characteristics are looked up from the VIN later.
# Note: 1471:8 observed as 1471:4 on a 2019 Jetta, and we can't carry both in one FP, effect TBD
# Maps platform name -> list of {CAN message ID: payload length} dicts used
# for fingerprint matching against the observed powertrain-bus traffic.
FINGERPRINTS = {
  CAR.GENERICMQB: [
    {178: 8, 1600: 8, 1601: 8, 1603: 8, 1605: 8, 695: 8, 1624: 8, 1626: 8, 1629: 8, 1631: 8, 1122: 8, 1123: 8,
     1124: 8, 1646: 8, 1648: 8, 1153: 8, 134: 8, 1162: 8, 1175: 8, 159: 8, 795: 8, 679: 8, 681: 8, 173: 8, 1712: 6,
     1714: 8, 1716: 8, 1717: 8, 1719: 8, 1720: 8, 1721: 8, 1312: 8, 806: 8, 253: 8, 1792: 8, 257: 8, 260: 8, 262: 8,
     897: 8, 264: 8, 779: 8, 780: 8, 783: 8, 278: 8, 279: 8, 792: 8, 283: 8, 285: 8, 286: 8, 901: 8, 288: 8, 289: 8,
     290: 8, 804: 8, 294: 8, 807: 8, 808: 8, 809: 8, 299: 8, 302: 8, 1351: 8, 346: 8, 870: 8, 1385: 8, 896: 8, 64: 8,
     898: 8, 1413: 8, 917: 8, 919: 8, 927: 8, 1440: 5, 929: 8, 930: 8, 427: 8, 949: 8, 958: 8, 960: 4, 418: 8, 981: 8,
     987: 8, 988: 8, 991: 8, 997: 8, 1000: 8, 1514: 8, 1515: 8, 1520: 8, 1019: 8, 385: 8, 668: 8, 1120: 8,
     1438: 8, 1461: 8, 391: 8, 1511: 8, 1516: 8, 568: 8, 569: 8, 826: 8, 827: 8, 1156: 8, 1157: 8, 1158: 8, 1471: 8,
     1635: 8, 376: 8},
    # 2018 skoda kodiaq from @Gold
    {64: 8, 134: 8, 159: 8, 173: 8, 178: 8, 253: 8, 257: 8, 260: 8, 262: 8, 278: 8, 279: 8, 283: 8, 286: 8, 288: 8, 289: 8, 290: 8, 294: 8, 299: 8, 302: 8, 346: 8, 385: 8, 418: 8, 427: 8, 573: 8, 668: 8, 679: 8, 681: 8, 684: 8, 695: 8, 779: 8, 780: 8, 783: 8, 792: 8, 795: 8, 804: 8, 806: 8, 807: 8, 808: 8, 809: 8, 828: 8, 870: 8, 896: 8, 897: 8, 898: 8, 901: 8, 917: 8, 919: 8, 949: 8, 958: 8, 960: 4, 981: 8, 987: 8, 988: 8, 991: 8, 997: 8, 1000: 8, 1019: 8, 1120: 8, 1153: 8, 1162: 8, 1175: 8, 1312: 8, 1385: 8, 1413: 8, 1440: 5, 1514: 8, 1515: 8, 1520: 8, 1529: 8, 1600: 8, 1601: 8, 1603: 8, 1605: 8, 1624: 8, 1626: 8, 1629: 8, 1631: 8, 1646: 8, 1648: 8, 1712: 6, 1714: 8, 1716: 8, 1717: 8, 1719: 8, 1720: 8, 1721: 8 }
  ],
  CAR.GENERICPQ: [
    # kamold, Edgy, austinc3030, Roy_001
    {80: 4, 194: 8, 208: 6, 210: 5, 294: 8, 416: 8, 428: 8, 640: 8, 648: 8, 800: 8, 835: 3, 870: 8, 872: 8, 878: 8,
     896: 8, 906: 4, 912: 8, 914: 8, 919: 8, 928: 8, 978: 7, 1056: 8, 1088: 8, 1152: 8, 1175: 8, 1184: 8, 1192: 8,
     1312: 8, 1386: 8, 1392: 5, 1394: 1, 1408: 8, 1440: 8, 1463: 8, 1470: 5, 1472: 8, 1488: 8, 1490: 8, 1500: 8,
     1550: 2, 1651: 3, 1652: 8, 1654: 2, 1658: 4, 1691: 3, 1736: 2, 1757: 8, 1824: 7, 1845: 7, 2000: 8, 1420: 8},
    # cd (powertrain CAN direct)
    {16: 7, 17: 7, 80: 4, 174: 8, 194: 8, 208: 6, 416: 8, 428: 8, 640: 8, 648: 8, 672: 8, 800: 8, 896: 8, 906: 4,
     912: 8, 914: 8, 915: 8, 919: 8, 928: 8, 946: 8, 976: 6, 978: 7, 1056: 8, 1152: 8, 1160: 8, 1162: 8, 1164: 8,
     1175: 8, 1184: 8, 1192: 8, 1306: 8, 1312: 8, 1344: 8, 1360: 8, 1386: 8, 1392: 5, 1394: 1, 1408: 8, 1416: 8,
     1420: 8, 1423: 8, 1440: 8, 1463: 8, 1488: 8, 1490: 8, 1494: 2, 1500: 8, 1504: 8, 1523: 8, 1527: 4, 1654: 2,
     1658: 2, 1754: 8, 1824: 7, 1827: 7, 2000: 8},
    {80: 4, 194: 8, 208: 6, 210: 5, 416: 8, 428: 8, 640: 8, 648: 8, 695: 8, 800: 8, 835: 3, 870: 8, 872: 8, 878: 8, 896: 8, 906: 4, 912: 8, 914: 8, 928: 8, 954: 8, 978: 7, 1056: 8, 1088: 8, 1152: 8, 1175: 8, 1184: 8, 1192: 8, 1312: 8, 1386: 8, 1392: 5, 1394: 1, 1408: 8, 1440: 8, 1463: 8, 1470: 5, 1472: 8, 1488: 8, 1490: 8, 1500: 8, 1550: 2, 1650: 8, 1651: 4, 1652: 8, 1691: 4, 1757: 3, 1824: 7, 1845: 7, 2000: 8},
    {80: 4, 194: 8, 208: 6, 210: 5, 416: 8, 428: 8, 640: 8, 648: 8, 695: 8, 800: 8, 835: 3, 870: 8, 872: 8, 878: 8, 896: 8, 906: 4, 912: 8, 914: 8, 928: 8, 954: 8, 978: 7, 1056: 8, 1088: 8, 1152: 8, 1175: 8, 1184: 8, 1192: 8, 1312: 8, 1386: 8, 1392: 5, 1394: 1, 1408: 8, 1440: 8, 1463: 8, 1470: 5, 1472: 8, 1488: 8, 1490: 8, 1500: 8, 1550: 2, 1650: 5, 1651: 3, 1652: 8, 1691: 3, 1736: 2, 1757: 8, 1824: 7, 1845: 7, 2000: 8
    },
    {80: 4, 194: 8, 208: 6, 210: 5, 416: 8, 428: 8, 640: 8, 648: 8, 695: 8, 800: 8, 835: 3, 870: 8, 872: 8, 878: 8, 896: 8, 906: 4, 912: 8, 914: 8, 928: 8, 954: 8, 978: 7, 1056: 8, 1088: 8, 1152: 8, 1175: 8, 1184: 8, 1192: 8, 1312: 8, 1386: 8, 1392: 5, 1394: 1, 1408: 8, 1440: 8, 1463: 8, 1470: 5, 1472: 8, 1488: 8, 1490: 8, 1500: 8, 1550: 2, 1650: 4, 1651: 3, 1652: 8, 1691: 2, 1736: 2, 1757: 3, 1824: 7, 1845: 7, 2000: 8
    }
  ],
}
# Convenience groupings of supported cars by platform.
MQB_CARS = [CAR.GENERICMQB]
PQ_CARS = [CAR.GENERICPQ]
# CAN signal database per platform; the second dbc_dict argument (presumably
# a radar DBC -- confirm against selfdrive.car.dbc_dict) is not used here.
DBC = {
  CAR.GENERICMQB: dbc_dict('vw_mqb_2010', None),
  CAR.GENERICPQ: dbc_dict('vw_golf_mk4', None),
}
|
<gh_stars>1-10
from typing import Union, Optional, List, Dict, Any
import datasets
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from transformers import RobertaTokenizer, PreTrainedTokenizerBase
from loguru import logger
from dataclasses import dataclass
from transformers.file_utils import PaddingStrategy
import torch
@dataclass
class DataCollator:
    """Collate premise/hypothesis feature dicts into one padded batch.

    Each incoming feature dict carries tokenizer outputs under the
    ``premise_*`` and ``hypothesis_*`` key prefixes plus a scalar ``label``
    tensor.  Both sides are padded to the longest sequence seen in either
    column so the two encoders receive equal-length inputs.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[str, PaddingStrategy] = "max_length"
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    return_tensors: str = "pt"

    def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
        def select(prefix: str) -> List[Dict[str, Any]]:
            # Keep only the keys for one side, with the prefix stripped.
            return [
                {k[len(prefix):]: v for k, v in feat.items() if k.startswith(prefix)}
                for feat in features
            ]

        premise_features = select("premise_")
        hypothesis_features = select("hypothesis_")

        # Longest input_ids length across BOTH sides of the batch.
        longest = max(feat["input_ids"].shape[-1] for feat in premise_features)
        longest = max(
            longest,
            max(feat["input_ids"].shape[-1] for feat in hypothesis_features),
        )

        labels = torch.LongTensor([feat["label"].item() for feat in features])

        def pad(side_features: List[Dict[str, Any]]) -> Dict[str, Any]:
            return self.tokenizer.pad(
                side_features,
                padding=self.padding,
                max_length=longest,
                pad_to_multiple_of=self.pad_to_multiple_of,
                return_tensors=self.return_tensors,
            )

        premise_batch = pad(premise_features)
        hypothesis_batch = pad(hypothesis_features)

        merged = {"premise_" + key: value for key, value in premise_batch.items()}
        merged.update(
            {"hypothesis_" + key: value for key, value in hypothesis_batch.items()}
        )
        merged["label"] = labels
        return merged
class MNLILightningDataModule(pl.LightningDataModule):
    """Lightning data module for MultiNLI.

    Tokenizes premise and hypothesis separately (for a two-encoder model) and
    exposes the train / matched-validation / mismatched-test splits as
    dataloaders collated by DataCollator.
    """

    def __init__(self, model_name, batch_size, num_workers):
        super().__init__()
        self.model_name = model_name
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.tokenizer = RobertaTokenizer.from_pretrained(self.model_name)
        self.collate_fn = DataCollator(self.tokenizer)
        # Drop examples without a gold label (label == -1 in multi_nli).
        self.filter_fn = lambda item: item["label"] != -1

    def _transform(self, item):
        """Tokenize one example into premise_*/hypothesis_* feature columns."""
        premise, hypothesis, label = (
            item["premise"],
            item["hypothesis"],
            item["label"],
        )
        premise_inputs = self.tokenizer(premise)
        hypothesis_inputs = self.tokenizer(hypothesis)
        output = {
            "premise_input_ids": premise_inputs["input_ids"],
            "premise_attention_mask": premise_inputs["attention_mask"],
            "hypothesis_input_ids": hypothesis_inputs["input_ids"],
            "hypothesis_attention_mask": hypothesis_inputs["attention_mask"],
            "label": label,
        }
        return output

    def _data_processing(self, dataset: datasets.arrow_dataset.Dataset, name: str):
        """Filter unlabeled rows, tokenize, and set the torch output format."""
        logger.info(f"{name} data transformation...")
        dataset = dataset.filter(self.filter_fn)
        dataset = dataset.map(self._transform)
        # NOTE: relies on self.columns having been set in prepare_data().
        dataset.set_format(type="torch", columns=self.columns)
        # Fixed typo in the log message ("complted" -> "completed").
        logger.info(f"{name} data transformation completed.")
        return dataset

    def prepare_data(self) -> None:
        """Download multi_nli and preprocess all three splits."""
        logger.info("Dataset downloading...")
        self.dataset = datasets.load_dataset("multi_nli")
        # validation_matched serves as validation, validation_mismatched as test.
        self.train, self.validation, self.test = (
            self.dataset["train"],
            self.dataset["validation_matched"],
            self.dataset["validation_mismatched"],
        )
        self.columns = [
            "premise_input_ids",
            "premise_attention_mask",
            "hypothesis_input_ids",
            "hypothesis_attention_mask",
            "label",
        ]
        logger.info("Dataset filtering")
        self.train = self._data_processing(self.train, "Training")
        self.validation = self._data_processing(self.validation, "Validation")
        self.test = self._data_processing(self.test, "Testing")

    def train_dataloader(self):
        return DataLoader(
            self.train,
            batch_size=self.batch_size,
            shuffle=True,
            num_workers=self.num_workers,
            collate_fn=self.collate_fn,
        )

    def val_dataloader(self):
        return DataLoader(
            self.validation,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            collate_fn=self.collate_fn,
        )

    def test_dataloader(self):
        return DataLoader(
            self.test,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            collate_fn=self.collate_fn,
        )
|
<reponame>rowedenny/ULTRA_pytorch<filename>ultra/learning_algorithm/pairwise_debias.py
"""Training and testing the Pairwise Debiasing algorithm for unbiased learning to rank.
See the following paper for more information on the Pairwise Debiasing algorithm.
* <NAME>, <NAME>, <NAME>, and <NAME>. "Unbiased LambdaMART: An Unbiased Pairwise Learning-to-Rank Algorithm." In The World Wide Web Conference, pp. 2830-2836. ACM, 2019.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from ultra.learning_algorithm.base_algorithm import BaseAlgorithm
import ultra.utils
def get_bernoulli_sample(probs):
    """Conduct Bernoulli sampling according to a probability distribution.

    Args:
        probs: (torch.Tensor) A tensor in which each element denotes the
            probability of drawing a 1 in a Bernoulli distribution.

    Returns:
        A tensor of binary samples (0.0 or 1.0) with the same shape, dtype
        and device as `probs`.
    """
    # rand_like() draws uniform noise on probs' own device/dtype, so the
    # sampler no longer assumes CUDA is present (the previous code did
    # `.to(torch.device('cuda'))` and crashed on CPU-only hosts).
    return torch.ceil(probs - torch.rand_like(probs))
class PairDebias(BaseAlgorithm):
    """The Pairwise Debiasing algorithm for unbiased learning to rank.

    This class implements the Pairwise Debiasing algorithm based on the input
    layer feed. See the following paper for more information on the algorithm.

    * Hu, Ziniu, et al. "Unbiased LambdaMART: An Unbiased Pairwise
      Learning-to-Rank Algorithm." In The World Wide Web Conference,
      pp. 2830-2836. ACM, 2019.
    """
    def __init__(self, data_set, exp_settings):
        """Create the model.

        Args:
            data_set: (Raw_data) The dataset used to build the input layer.
            exp_settings: (dictionary) The dictionary containing the model settings.
        """
        print('Build Pairwise Debiasing algorithm.')
        self.hparams = ultra.utils.hparams.HParams(
            EM_step_size=0.05,                  # Step size for EM algorithm.
            learning_rate=0.005,                 # Learning rate.
            max_gradient_norm=5.0,            # Clip gradients to this norm.
            # An int specify the regularization term.
            regulation_p=1,
            # Set strength for L2 regularization.
            l2_loss=0.0,
            grad_strategy='ada',            # Select gradient strategy
        )
        print(exp_settings['learning_algorithm_hparams'])
        # NOTE(review): assumes a CUDA device exists; self.cuda is only used
        # when is_cuda_avail is True below, but torch.device('cuda') itself
        # is lazy, so this line is safe on CPU-only machines.
        self.cuda = torch.device('cuda')
        self.writer = SummaryWriter()
        self.train_summary = {}
        self.eval_summary = {}
        self.hparams.parse(exp_settings['learning_algorithm_hparams'])
        self.exp_settings = exp_settings
        if 'selection_bias_cutoff' in self.exp_settings.keys():
            self.rank_list_size = self.exp_settings['selection_bias_cutoff']
        self.feature_size = data_set.feature_size
        self.model = self.create_model(self.feature_size)
        self.max_candidate_num = exp_settings['max_candidate_num']
        self.learning_rate = float(self.hparams.learning_rate)
        self.is_cuda_avail = torch.cuda.is_available()
        # Feeds for inputs.
        self.letor_features_name = "letor_features"
        self.letor_features = None
        self.docid_inputs_name = []  # a list of top documents
        self.labels_name = []  # the labels for the documents (e.g., clicks)
        self.docid_inputs = []  # a list of top documents
        self.labels = []  # the labels for the documents (e.g., clicks)
        for i in range(self.max_candidate_num):
            self.docid_inputs_name.append("docid_input{0}".format(i))
            self.labels_name.append("label{0}".format(i))
        self.global_step = 0
        # t_plus / t_minus are the position-bias propensity estimates for the
        # "clicked above" / "clicked below" directions, updated by EM in train().
        if 'selection_bias_cutoff' in self.exp_settings:
            self.rank_list_size = self.exp_settings['selection_bias_cutoff']
            self.t_plus = torch.ones([1, self.rank_list_size])
            self.t_minus = torch.ones([1, self.rank_list_size])
            if self.is_cuda_avail:
                self.t_plus = torch.ones([1, self.rank_list_size], device=self.cuda)
                self.t_minus = torch.ones([1, self.rank_list_size], device=self.cuda)
            # Propensities are updated in closed form, not by the optimizer.
            self.t_plus.requires_grad = False
            self.t_minus.requires_grad = False
        # Select optimizer
        self.optimizer_func = torch.optim.Adagrad(self.model.parameters(), lr=self.hparams.learning_rate)
        if self.hparams.grad_strategy == 'sgd':
            self.optimizer_func = torch.optim.SGD(self.model.parameters(), lr=self.hparams.learning_rate)
    def train(self, input_feed):
        """Run a step of the model feeding the given inputs for training process.

        Args:
            input_feed: (dictionary) A dictionary containing all the input feed data.

        Returns:
            A triple consisting of the loss, outputs (None if we do backward),
            and a summary dict containing related information about the step.
        """
        self.labels = []
        self.docid_inputs = []
        self.model.train()
        self.letor_features = input_feed["letor_features"]
        for i in range(self.rank_list_size):
            self.docid_inputs.append(input_feed[self.docid_inputs_name[i]])
            self.labels.append(input_feed[self.labels_name[i]])
        self.labels = torch.as_tensor(self.labels)
        if self.is_cuda_avail:
            self.labels = self.labels.to(device=self.cuda)
        self.docid_inputs = torch.as_tensor(data=self.docid_inputs, dtype=torch.int64)
        train_output = self.ranking_model(self.model,
                                          self.rank_list_size)
        # One [1, 1] propensity slice per rank position.
        self.splitted_t_plus = torch.split(
            self.t_plus, 1, dim=1)
        self.splitted_t_minus = torch.split(
            self.t_minus, 1, dim=1)
        split_size = int(train_output.shape[1] / self.rank_list_size)
        output_list = torch.split(train_output, split_size, dim=1)
        t_plus_loss_list = [0.0 for _ in range(self.rank_list_size)]
        t_minus_loss_list = [0.0 for _ in range(self.rank_list_size)]
        self.loss = 0.0
        # Accumulate the inverse-propensity-weighted pairwise loss over all
        # ordered position pairs (i clicked, j not clicked).
        for i in range(self.rank_list_size):
            for j in range(self.rank_list_size):
                if i == j:
                    continue
                # 1 for pairs where label[i] > label[j], 0 otherwise.
                valid_pair_mask = torch.minimum(
                    torch.ones_like(
                        self.labels[i]), F.relu(self.labels[i] - self.labels[j]))
                pair_loss = torch.sum(
                    valid_pair_mask *
                    self.pairwise_cross_entropy_loss(
                        output_list[i], output_list[j])
                )
                t_plus_loss_list[i] += pair_loss / self.splitted_t_minus[j]
                t_minus_loss_list[j] += pair_loss / self.splitted_t_plus[i]
                self.loss += pair_loss / \
                    self.splitted_t_plus[i] / self.splitted_t_minus[j]
        # EM update of the propensities, normalized by position 0.
        # NOTE(review): divides by t_plus_loss_list[0] / t_minus_loss_list[0];
        # presumably position 0 always accumulates a nonzero loss -- confirm
        # there is no degenerate batch where this is 0.
        with torch.no_grad():
            self.t_plus = (1 - self.hparams.EM_step_size) * self.t_plus + self.hparams.EM_step_size * torch.pow(
                torch.cat(t_plus_loss_list, dim=1) / t_plus_loss_list[0], 1 / (self.hparams.regulation_p + 1))
            self.t_minus = (1 - self.hparams.EM_step_size) * self.t_minus + self.hparams.EM_step_size * torch.pow(torch.cat(
                t_minus_loss_list, dim=1) / t_minus_loss_list[0], 1 / (self.hparams.regulation_p + 1))
        # Add l2 loss
        params = self.model.parameters()
        if self.hparams.l2_loss > 0:
            for p in params:
                self.loss += self.hparams.l2_loss * self.l2_loss(p)
        self.opt_step(self.optimizer_func, params)
        print(" Loss %f at Global Step %d" % (self.loss.item(), self.global_step))
        self.global_step+=1
        return self.loss.item(), None, self.train_summary
    def validation(self, input_feed, is_online_simulation= False):
        """Run a step of the model feeding the given inputs for validating process.

        Args:
            input_feed: (dictionary) A dictionary containing all the input feed data.

        Returns:
            A triple consisting of the loss (None), the model outputs,
            and a summary dict containing related information about the step.
        """
        self.model.eval()
        self.create_input_feed(input_feed, self.max_candidate_num)
        with torch.no_grad():
            self.output = self.ranking_model(self.model,
                                             self.max_candidate_num)
        if not is_online_simulation:
            pad_removed_output = self.remove_padding_for_metric_eval(
                self.docid_inputs, self.output)
            for metric in self.exp_settings['metrics']:
                topn = self.exp_settings['metrics_topn']
                metric_values = ultra.utils.make_ranking_metric_fn(
                    metric, topn)(self.labels, pad_removed_output, None)
                for topn, metric_value in zip(topn, metric_values):
                    self.create_summary('%s_%d' % (metric, topn),
                                        '%s_%d' % (metric, topn), metric_value.item(), False)
        return None, self.output, self.eval_summary  # loss, outputs, summary
#
# Copyright (c) 2009-2015 <NAME> <<EMAIL>>
#
# See the file LICENSE.txt for your full rights.
#
"""Driver for sqlite"""
from __future__ import with_statement
import os.path
# Import sqlite3. If it does not support the 'with' statement, then
# import pysqlite2, which might...
import sqlite3
if not hasattr(sqlite3.Connection, "__exit__"): # @UndefinedVariable
del sqlite3
from pysqlite2 import dbapi2 as sqlite3 #@Reimport @UnresolvedImport
sqlite_version = sqlite3.sqlite_version
import weedb
from weeutil.weeutil import to_int, to_bool
def guard(fn):
    """Decorator function that converts sqlite exceptions into weedb exceptions.

    Wraps fn so sqlite3.IntegrityError becomes weedb.IntegrityError and
    sqlite3.OperationalError becomes weedb.ProgrammingError (for missing
    tables) or weedb.OperationalError (everything else).
    """
    def guarded_fn(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        # 'except X as e' replaces the Python-2-only 'except X, e' syntax;
        # it works on Python 2.6+ and Python 3.
        except sqlite3.IntegrityError as e:
            raise weedb.IntegrityError(e)
        except sqlite3.OperationalError as e:
            # Change no such table errors into a ProgrammingError
            # (this is what MySQL does). str(e) replaces the Python-2-only
            # e.message attribute.
            if str(e).lower().startswith("no such table"):
                raise weedb.ProgrammingError(e)
            raise weedb.OperationalError(e)
    return guarded_fn
def connect(database_name='', SQLITE_ROOT='', driver='', **argv):
    """Open and return a Connection; factory function kept for DBAPI compatibility.

    The 'driver' argument is accepted but unused, so generic weedb callers
    can pass a full driver dictionary straight through.
    """
    return Connection(database_name=database_name,
                      SQLITE_ROOT=SQLITE_ROOT,
                      **argv)
def create(database_name='', SQLITE_ROOT='', driver='', **argv):
    """Create the database specified by the db_dict. If it already exists,
    an exception of type DatabaseExists will be thrown."""
    db_path = get_filepath(SQLITE_ROOT, database_name, **argv)
    if os.path.exists(db_path):
        raise weedb.DatabaseExists("Database %s already exists" % (db_path,))
    # Make sure the directory that will hold the database exists.
    parent_dir = os.path.dirname(db_path)
    if not os.path.exists(parent_dir):
        os.makedirs(parent_dir)
    # Touch the database file by opening, then closing, a connection to it.
    timeout = to_int(argv.get('timeout', 5))
    isolation_level = argv.get('isolation_level')
    sqlite3.connect(db_path,
                    timeout=timeout,
                    isolation_level=isolation_level).close()
def get_filepath(SQLITE_ROOT, database_name, **argv):
    """Join the sqlite root directory and the database name into a full path.

    For backwards compatibility, the keyword 'root' is honored when
    SQLITE_ROOT is empty or not given.
    """
    if SQLITE_ROOT:
        root_dir = SQLITE_ROOT
    else:
        root_dir = argv.get('root', '')
    return os.path.join(root_dir, database_name)
def drop(database_name='', SQLITE_ROOT='', driver='', **argv):
    """Delete the database file; raise weedb.NoDatabase if it does not exist."""
    db_path = get_filepath(SQLITE_ROOT, database_name, **argv)
    try:
        os.remove(db_path)
    except OSError:
        raise weedb.NoDatabase("""Attempt to drop non-existent database %s""" % (db_path,))
class Connection(weedb.Connection):
    """A wrapper around a sqlite3 connection object."""
    def __init__(self, database_name='', SQLITE_ROOT='', pragmas=None, **argv):
        """Initialize an instance of Connection.

        Parameters:
            database_name: The name of the Sqlite database. This is generally the file name
            SQLITE_ROOT: The path to the directory holding the database. Joining "SQLITE_ROOT" with
              "database_name" results in the full path to the sqlite file.
            pragmas: Any pragma statements, in the form of a dictionary.
            timeout: The amount of time, in seconds, to wait for a lock to be released.
              Optional. Default is 5.
            isolation_level: The type of isolation level to use. One of None,
              DEFERRED, IMMEDIATE, or EXCLUSIVE. Default is None (autocommit mode).

        If the operation fails, an exception of type weedb.OperationalError will be raised.
        """
        self.file_path = get_filepath(SQLITE_ROOT, database_name, **argv)
        # Unlike create(), opening never creates the file implicitly.
        if not os.path.exists(self.file_path):
            raise weedb.OperationalError("Attempt to open a non-existent database %s" % self.file_path)
        timeout = to_int(argv.get('timeout', 5))
        isolation_level = argv.get('isolation_level')
        try:
            connection = sqlite3.connect(self.file_path, timeout=timeout, isolation_level=isolation_level)
        except sqlite3.OperationalError:
            # The Pysqlite driver does not include the database file path.
            # Include it in case it might be useful.
            raise weedb.OperationalError("Unable to open database '%s'" % (self.file_path,))
        # Apply any caller-supplied PRAGMA settings before handing the
        # connection to the base class.
        if pragmas is not None:
            for pragma in pragmas:
                connection.execute("PRAGMA %s=%s;" % (pragma, pragmas[pragma]))
        weedb.Connection.__init__(self, connection, database_name, 'sqlite')
    @guard
    def cursor(self):
        """Return a cursor object."""
        return Cursor(self.connection)
    @guard
    def execute(self, sql_string, sql_tuple=()):
        """Execute a sql statement. This specialized version takes advantage
        of sqlite's ability to do an execute without a cursor."""
        # The 'with' block commits on success, rolls back on exception.
        with self.connection:
            self.connection.execute(sql_string, sql_tuple)
    @guard
    def tables(self):
        """Returns a list of tables in the database."""
        table_list = list()
        for row in self.connection.execute("""SELECT tbl_name FROM sqlite_master WHERE type='table';"""):
            # Extract the table name. Sqlite returns unicode, so always
            # convert to a regular string:
            table_list.append(str(row[0]))
        return table_list
    @guard
    def genSchemaOf(self, table):
        """Return a summary of the schema of the specified table.

        Yields tuples of (index, column_name, type, may_be_null, default, is_primary).
        If the table does not exist, an exception of type weedb.OperationalError is raised."""
        for row in self.connection.execute("""PRAGMA table_info(%s);""" % table):
            # Normalize CHAR-ish sqlite types to 'STR'; uppercase the rest.
            if row[2].upper().startswith('CHAR'):
                coltype = 'STR'
            else:
                coltype = str(row[2]).upper()
            yield (row[0], str(row[1]), coltype, not to_bool(row[3]), row[4], to_bool(row[5]))
    def columnsOf(self, table):
        """Return a list of columns in the specified table. If the table does not exist,
        None is returned."""
        column_list = [row[1] for row in self.genSchemaOf(table)]
        # If there are no columns (which means the table did not exist) raise an exception
        if not column_list:
            raise weedb.ProgrammingError("No such table %s" % table)
        return column_list
    @guard
    def get_variable(self, var_name):
        """Return (var_name, value) for the given PRAGMA, or None if unknown."""
        cursor = self.connection.cursor()
        try:
            cursor.execute("PRAGMA %s;" % var_name)
            row = cursor.fetchone()
            return None if row is None else (var_name, row[0])
        finally:
            cursor.close()
    @guard
    def begin(self):
        """Explicitly start a transaction."""
        self.connection.execute("BEGIN TRANSACTION")
    @guard
    def commit(self):
        """Commit the current transaction."""
        self.connection.commit()
    @guard
    def rollback(self):
        """Roll back the current transaction."""
        self.connection.rollback()
    @guard
    def close(self):
        """Close the underlying sqlite connection."""
        self.connection.close()
class Cursor(sqlite3.Cursor):
    """A wrapper around the sqlite cursor object."""

    # The underlying sqlite3 cursor is already full featured; all this
    # subclass does is route each call through @guard so that sqlite
    # exceptions surface as weedb exceptions.

    def __init__(self, *args, **kwargs):
        sqlite3.Cursor.__init__(self, *args, **kwargs)

    @guard
    def execute(self, *args, **kwargs):
        return sqlite3.Cursor.execute(self, *args, **kwargs)

    @guard
    def fetchone(self):
        return sqlite3.Cursor.fetchone(self)

    @guard
    def fetchall(self):
        return sqlite3.Cursor.fetchall(self)

    @guard
    def fetchmany(self, size=None):
        # Default to the DBAPI-standard arraysize when no size is given.
        return sqlite3.Cursor.fetchmany(self, self.arraysize if size is None else size)
|
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2015 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME> - http://herve.niderb.fr
"""
MediaEval Person Discovery Task evaluation -- EwMAP
Usage:
evaluation [options] <reference.shot> <reference.ref> <reference.eviref> <hypothesis.label> <hypothesis.evidence>
Options:
-h --help Show this screen.
--version Show version.
--queries=<queries.lst> Query list.
--levenshtein=<threshold> Levenshtein ratio threshold [default: 0.95]
--consensus=<consensus.shot> Label-annotated subset of <reference.shot>
"""
from __future__ import print_function
from docopt import docopt
from Levenshtein import ratio
import numpy as np
from common import loadShot, loadLabel, loadEvidence
from common import loadLabelReference, loadEvidenceReference
def loadFiles(shot, reference, evireference, label, evidence, consensus=None):
    """Load the reference and hypothesis files and cross-validate them.

    Raises ValueError when the label/evidence submissions are not consistent
    with the provided shot list.
    """
    shot = loadShot(shot)
    label = loadLabel(label)
    evidence = loadEvidence(evidence)

    providedShots = set(shot.index)

    # labels may only be given for shots in the provided shot list
    labelShots = set(
        tuple(s) for _, s in label[['videoID', 'shotNumber']].iterrows())
    if not labelShots.issubset(providedShots):
        raise ValueError('Labels should only be computed for provided shots.')

    # every unique label name needs exactly one evidence (and vice versa)
    labelNames = set(label['personName'].unique())
    evidenceNames = set(evidence['personName'].unique())
    if labelNames != evidenceNames or len(evidenceNames) != len(evidence):
        raise ValueError('There must be exactly one evidence '
                         'per unique name in label submission.')

    # evidences must also be chosen among the provided shots
    evidenceShots = set(
        tuple(s) for _, s in evidence[['videoID', 'shotNumber']].iterrows())
    if not evidenceShots.issubset(providedShots):
        raise ValueError('Evidences should only be chosen among provided shots.')

    # with --consensus, keep only labels on the consensus-annotated subset
    if consensus:
        consensus = loadShot(consensus)
        consensusShots = set(consensus.index)
        keep = label.apply(
            lambda x: (x['videoID'], x['shotNumber']) in consensusShots,
            axis=1)
        label = label[keep]

    reference = loadLabelReference(reference)
    evireference = loadEvidenceReference(evireference)

    return reference, evireference, label, evidence
def closeEnough(personName, query, threshold):
    """True when personName matches query within the Levenshtein ratio threshold."""
    similarity = ratio(query, personName)
    return similarity >= threshold
def computeAveragePrecision(vReturned, vRelevant):
nReturned = len(vReturned)
nRelevant = len(vRelevant)
if nRelevant == 0:
return 1.
if nReturned == 0:
return 0.
returnedIsRelevant = np.array([item in vRelevant for item in vReturned])
precision = np.cumsum(returnedIsRelevant) / (1. + np.arange(nReturned))
return np.sum(precision * returnedIsRelevant) / nRelevant
if __name__ == '__main__':
    arguments = docopt(__doc__, version='0.1')
    shot = arguments['<reference.shot>']
    reference = arguments['<reference.ref>']
    evireference = arguments['<reference.eviref>']
    label = arguments['<hypothesis.label>']
    evidence = arguments['<hypothesis.evidence>']
    threshold = float(arguments['--levenshtein'])
    consensus = arguments['--consensus']
    reference, evireference, label, evidence = loadFiles(
        shot, reference, evireference, label, evidence, consensus=consensus)
    if arguments['--queries']:
        with open(arguments['--queries'], 'r') as f:
            queries = [line.strip() for line in f]
    else:
        # build list of queries from evireference
        queries = sorted(set(evireference['personName'].unique()))
    # query --> averagePrecision dictionary
    averagePrecision = {}
    correctness = {}
    for query in queries:
        # find most similar personName among the submitted evidences
        ratios = [(personName, ratio(query, personName))
                  for personName in evidence.personName.unique()]
        best = sorted(ratios, key=lambda x: x[1], reverse=True)[0]
        # personName is None when no submitted name is close enough
        personName = best[0] if best[1] > threshold else None
        # =====================================================================
        # Evaluation of LABELS
        # =====================================================================
        # get relevant shots for this query, according to reference
        qRelevant = reference[reference.personName == query]
        qRelevant = qRelevant[['videoID', 'shotNumber']]
        qRelevant = set((videoID, shotNumber)
                        for _, videoID, shotNumber in qRelevant.itertuples())
        # get returned shots for this query
        # (i.e. shots containing closest personName)
        qReturned = label[label.personName == personName]
        # this can only happen with --consensus option
        # when hypothesis contains shot in the out of consensus part
        # hack to solve this corner case
        if len(qReturned) == 0:
            averagePrecision[query] = 0. if len(qRelevant) > 0. else 1.
        else:
            # sort shots by decreasing confidence
            # (in case of shots returned twice for this query, keep maximum)
            qReturned = (qReturned.groupby(['videoID', 'shotNumber'])
                                  .aggregate(np.max)
                                  .sort_values(by=['confidence'], ascending=False))
            # get list of returned shots in decreasing confidence
            qReturned = list(qReturned.index)
            # compute average precision for this query
            averagePrecision[query] = computeAveragePrecision(qReturned,
                                                              qRelevant)
        # =====================================================================
        # Evaluation of EVIDENCES
        # =====================================================================
        if personName is None:
            correctness[query] = 0. if len(qRelevant) > 0. else 1.
            continue
        # get evidence shots for this query, according to reference
        qRelevant = evireference[evireference.personName == query]
        qRelevant = qRelevant[['videoID', 'shotNumber', 'source']]
        # expand 'both' into separate audio and image evidence entries
        _qRelevant = set([])
        for _, videoID, shotNumber, source in qRelevant.itertuples():
            if source == 'both':
                _qRelevant.add((videoID, shotNumber, 'audio'))
                _qRelevant.add((videoID, shotNumber, 'image'))
            else:
                _qRelevant.add((videoID, shotNumber, source))
        qRelevant = _qRelevant
        qReturned = evidence[evidence.personName == personName][[
            'videoID', 'shotNumber', 'source']]
        # NOTE(review): this loop only grabs the first evidence row (it
        # breaks immediately). loadFiles() enforces exactly one evidence per
        # personName, which presumably makes this safe -- but if qReturned
        # were empty, videoID/shotNumber/source would be stale or undefined.
        for _, videoID, shotNumber, source in qReturned.itertuples():
            break
        if (videoID, shotNumber, source) in qRelevant:
            # NOTE(review): personName is only non-None when best[1] > threshold,
            # so this conditional always assigns 1. on this branch.
            correctness[query] = 1. if best[1] > threshold else 0.
        else:
            correctness[query] = 0.
    # NOTE(review): MAP and mCorrectness are computed but never printed;
    # only EwMAP is reported below.
    MAP = np.mean([averagePrecision[query] for query in queries])
    mCorrectness = np.mean([correctness[query] for query in queries])
    EwMAP = np.mean([correctness[query] * averagePrecision[query]
                     for query in queries])
    print('%.2f %%' % (100 * EwMAP))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Raspiled - HTTP Listener
Listens on HTTP port 9090 for commands. Passes them on to any classes
running.
@requires: twisted
"""
from __future__ import unicode_literals
import os
import sys
# Add some gymnastics so we can use imports relative to the parent dir.
my_dir = os.path.dirname(os.path.realpath(__file__)) # The directory we're running in
sys.path.append(os.path.dirname(my_dir)) # Parent dir
from src.config import CONFIG, get_setting, DEBUG
from utils import *
from ledstrip import LEDStrip
from subprocess import check_output, CalledProcessError
from twisted.internet import reactor, endpoints
from twisted.web.server import Site, Request
from named_colours import NAMED_COLOURS
import copy
import logging
import configparser
import six
from six.moves.urllib.parse import urlencode
APP_NAME = "python ./raspiled_listener.py"  # Command string matched by pidof to detect an already-running listener
RASPILED_DIR = os.path.dirname(os.path.realpath(__file__))  # The directory we're running in
RESOLVED_USER_SETTINGS = CONFIG  # Alias for clarity
@six.python_2_unicode_compatible
class Preset(object):
    """
    Represents a preset button for the web UI for the user to click on.

    Positional args and keyword kwargs supplied at construction become the
    querystring the button sends when clicked.
    """
    args = None
    kwargs = None
    label = None
    display_colour = None
    display_gradient = None

    def __init__(self, label="??", display_colour=None, display_gradient=None, is_sequence=False, is_sun=False, *args, **kwargs):
        """
        Sets up this preset.

        @param label: <str> Button text (may contain HTML)
        @param display_colour: Single colour (name, hex or colour temperature) shown on the button
        @param display_gradient: Iterable of colours shown as a gradient on the button
        @param is_sequence: <bool> Whether clicking this starts a colour sequence
        @param is_sun: <bool> Whether this is a sunrise/sunset preset
        """
        self.label = label
        self.display_colour = display_colour
        self.display_gradient = display_gradient or []
        self.is_sequence = is_sequence
        self.is_sun = is_sun
        self.args = args
        self.kwargs = kwargs

    def __repr__(self):
        """
        Says what this is
        """
        return "Preset '{label}': {colour} - {querystring} - {sunquery}".format(
            label=self.label,
            colour=self.colour,
            querystring=self.querystring,
            sunquery=self.sunquery)

    def __str__(self):
        return self.render()

    @property
    def colours(self):
        """
        Returns a faithful hex value for the given colour(s)
        """
        # A gradient takes precedence; otherwise treat the single colour as a one-item list
        palette = self.display_gradient if self.display_gradient else [self.display_colour]
        # Resolve named colours via the lookup table; unknown terms pass through untouched
        return [NAMED_COLOURS.get(str(term).lower(), term) for term in palette]

    @property
    def colour(self):
        """
        Returns a string value for the colours in the form of faithful hex
        """
        return ", ".join(self.colours)

    def colours_for_css_background(self):
        """
        Renders the colours as a CSS background!

            linear-gradient(40deg, col1, col2, col3)
        """
        css_colours = self.colours
        if not css_colours:  # No colours, go with trans
            return "transparent"
        if len(css_colours) == 1:  # One colour means one single coloured bg
            return css_colours[0]
        return """linear-gradient(40deg, {colour_list})""".format(colour_list=", ".join(css_colours))

    @property
    def querystring(self):
        """
        Converts args and kwargs into a querystring
        """
        params = copy.copy(self.kwargs)
        for positional in self.args:  # Positional args become empty-valued keys
            params[positional] = ""
        return urlencode(params, doseq=True)  # doseq flattens lists

    def render_css(self):
        """
        Generates a CSS gradient from the self.display_gradient list
        """
        if self.display_gradient:
            return "background: linear-gradient(-40deg, {colour_values}); color: white; text-shadow: 2px 2px 2px #000000".format(colour_values=self.colour)
        if self.display_colour:
            contrast_colour = LEDStrip.contrast_from_bg(col=self.colour, dark_default="202020")
            return "background: {display_colour}; color: {contrast_colour}".format(
                display_colour=self.colours_for_css_background(),
                contrast_colour=contrast_colour
            )
        return ""

    def render_is_sequence(self):
        """
        Returns Javascript boolean for whether this is a sequence or not
        """
        return "true" if self.is_sequence else ""

    @property
    def sunquery(self):
        """
        Returns sunset or sunrise temperature values
        """
        if not self.is_sun:
            return ""
        return urlencode({'temp': list(self.display_gradient)}, doseq=True)

    def render(self):
        """
        Renders this preset as an HTML button
        """
        return """
            <a href="javascript:void(0);" class="select_preset preset_button" data-qs="{querystring}" data-sequence="{is_sequence}" data-color="{sun_temp}" style="{css_style}">
                {label}
            </a>
        """.format(
            querystring=self.querystring,
            css_style=self.render_css(),
            label=self.label,
            is_sequence=self.render_is_sequence(),
            sun_temp=self.sunquery
        )
class PresetSpace(object):
    """
    Simply spaces presets apart!
    """
    def render(self):
        # Spacer emitted between adjacent preset buttons
        return " "
class PresetRow(object):
    """
    Forces the presets that follow onto a new row.
    """
    def render(self):
        """Emit a line break between preset buttons."""
        return "<br>"
class RaspiledControlResource(RaspberryPiWebResource):
    """
    Our web page for controlling the LED strips
    """
    led_strip = None  # Populated at init

    # State what params should automatically trigger actions. If none supplied will show a default page. Specified in order of hierarchy
    PARAM_TO_ACTION_MAPPING = (
        # Stat actions
        ("off", "off"),
        ("stop", "stop"),
        ("set", "set"),
        ("fade", "fade"),
        ("color", "fade"),
        ("colour", "fade"),
        # Sequences
        ("sunrise", "sunrise"),
        ("morning", "sunrise"),
        ("dawn", "sunrise"),
        ("sunset", "sunset"),
        ("evening", "sunset"),
        ("dusk", "sunset"),
        ("night", "sunset"),
        ("jump", "jump"),
        ("rotate", "rotate"),
        ("rot", "rotate"),
        ("huerot", "rotate"),
        ("colors", "rotate"),
        ("colours", "rotate"),
        # Docs:
        ("capabilities", "capabilities"),
        ("capability", "capabilities"),
        ("status", "status"),
    )

    # State what presets to render:
    OFF_PRESET = Preset(label="""<img src="/static/figs/power-button-off.svg" class="icon_power_off"> Off""", display_colour="black", off="")
    PRESETS = {
        "Whites": (  # I've had to change the displayed colours from the strip colours for a closer apparent match
            Preset(label="Candle", display_colour="1500K", fade="1000K"),
            Preset(label="Tungsten", display_colour="3200K", fade="2000K"),
            Preset(label="Bulb match", display_colour="3900K", fade="ff821c"),
            Preset(label="Warm white", display_colour="4800K", fade="2600k"),  # Bulb match
            Preset(label="Strip white", display_colour="6000K", fade="3200K"),
            Preset(label="Daylight", display_colour="6900K", fade="5800K"),
            Preset(label="Cool white", display_colour="9500K", fade="10500K"),
        ),
        "Sunrise / Sunset": (
            Preset(label="↑ 2hr", display_gradient=("2000K", "5000K"), sunrise=60 * 60 * 2, is_sequence=True, is_sun=True),
            Preset(label="↑ 1hr", display_gradient=("2000K", "5000K"), sunrise=60 * 60 * 1, is_sequence=True, is_sun=True),
            Preset(label="↑ 30m", display_gradient=("2000K", "5000K"), sunrise=60 * 30, is_sequence=True, is_sun=True),
            Preset(label="↑ 1m", display_gradient=("2000K", "5000K"), sunrise=60 * 1, is_sequence=True, is_sun=True),
            PresetSpace(),
            Preset(label="↓ 1m", display_gradient=("5000K", "2000K"), sunset=60 * 1, is_sequence=True, is_sun=True),
            Preset(label="↓ 30m", display_gradient=("5000K", "2000K"), sunset=60 * 30, is_sequence=True, is_sun=True),
            Preset(label="↓ 1hr", display_gradient=("5000K", "2000K"), sunset=60 * 60 * 1, is_sequence=True, is_sun=True),
            Preset(label="↓ 2hr", display_gradient=("5000K", "2000K"), sunset=60 * 60 * 2, is_sequence=True, is_sun=True),
        ),
        "Colours": (
            Preset(label="Red", display_colour="#FF0000", fade="#FF0000"),
            Preset(label="Orange", display_colour="#FF8800", fade="#FF8800"),
            Preset(label="Yellow", display_colour="#FFFF00", fade="#FFFF00"),
            Preset(label="Lime", display_colour="#88FF00", fade="#88FF00"),
            Preset(label="Green", display_colour="#00BB00", fade="#00FF00"),
            Preset(label="Aqua", display_colour="#00FF88", fade="#00FF88"),
            Preset(label="Cyan", display_colour="#00FFFF", fade="#00FFFF"),
            Preset(label="Blue", display_colour="#0088FF", fade="#0088FF"),
            Preset(label="Indigo", display_colour="#0000FF", fade="#0000FF"),
            Preset(label="Purple", display_colour="#8800FF", fade="#7A00FF"),  # There's a difference!
            Preset(label="Magenta", display_colour="#FF00FF", fade="#FF00FF"),
            Preset(label="Crimson", display_colour="#FF0088", fade="#FF0088"),
            PresetRow(),
            Preset(label="Tasty Teal", display_colour="#009882", fade="#00FF4D"),
            Preset(label="Super Crimson", display_colour="#FF0077", fade="#FF0033"),
        ),
        "Sequences": (
            Preset(label="🔥 Campfire", display_gradient=("600K", "400K", "1000K", "400K"), rotate="700K,500K,1100K,600K,800K,1000K,500K,1200K",
                   milliseconds="1800", is_sequence=True),
            Preset(label="🐟 Fish tank", display_gradient=("#00FF88", "#0088FF", "#007ACC", "#00FFFF"), rotate="00FF88,0088FF,007ACC,00FFFF",
                   milliseconds="2500", is_sequence=True),
            Preset(label="🎉 Party", display_gradient=("cyan", "yellow", "magenta"), rotate="cyan,yellow,magenta", milliseconds="1250",
                   is_sequence=True),
            Preset(label="🌻 Flamboyant", display_gradient=("yellow", "magenta"), jump="yellow,magenta", milliseconds="150", is_sequence=True),
            Preset(label="🎄 Christmas", display_gradient=("green", "red"), rotate="green,red", milliseconds="300", is_sequence=True),
            Preset(label="🚨 NeeNaw", display_gradient=("cyan", "blue"), jump="cyan,blue", milliseconds="100", is_sequence=True),
            Preset(label="🚨 NeeNaw USA", display_gradient=("red", "blue"), jump="red,blue", milliseconds="100", is_sequence=True),
            Preset(label="🌈 Full circle", display_gradient=(
                "#FF0000", "#FF8800", "#FFFF00", "#88FF00", "#00FF00", "#00FF88", "#00FFFF", "#0088FF", "#0000FF", "#8800FF", "#FF00FF", "#FF0088"),
                   milliseconds=500, rotate="#FF0000,FF8800,FFFF00,88FF00,00FF00,00FF88,00FFFF,0088FF,0000FF,8800FF,FF00FF,FF0088", is_sequence=True),
        )
    }
    PRESETS_COPY = copy.deepcopy(PRESETS)  # Modifiable dictionary. Used in alarms and music.

    def __init__(self, *args, **kwargs):
        """
        @TODO: perform LAN discovery, interrogate the resources, generate controls for all of them
        """
        self.led_strip = LEDStrip(RESOLVED_USER_SETTINGS)
        RaspberryPiWebResource.__init__(self, *args, **kwargs)  # Super, deals with generating the static directory etc

    def render_controls(self, request):
        """
        Show the main controls screen
        """
        context = {
            "off_preset_html": self.OFF_PRESET.render(),
            "light_html": self.render_light_presets(request),
            "alarm_html": self.render_alarm_presets(request),
            # Music and controls tabs are placeholders while under development:
            "music_html": self.render_udevelop_presets(request),
            "controls_html": self.render_udevelop_presets(request),
        }
        return RaspberryPiWebResource.render_controls(self, request, context)

    #### Additional pages available via the menu ####
    def render_light_presets(self, request):
        """
        Renders the light presets as options

        @param request: The http request object
        """
        out_html_list = []
        for group_name, presets in self.PRESETS.items():
            preset_list = []
            # Inner for
            for preset in presets:
                preset_html = preset.render()
                preset_list.append(preset_html)
            group_html = """
                <div class="preset_group">
                    <h2>{group_name}</h2>
                    <div class="presets_row">
                        {preset_html}
                    </div>
                </div>
            """.format(
                group_name=group_name,
                preset_html="\n".join(preset_list)
            )
            out_html_list.append(group_html)
        out_html = "\n".join(out_html_list)
        return out_html

    def render_alarm_presets(self, request):
        """
        Renders the alarm presets as options. Same sunrise or sunset routine except for 100k.
        """
        out_html_list = []
        preset_list = []
        # Inner for
        group_name = "Sunrise / Sunset"
        presets = self.PRESETS_COPY[group_name]
        for preset in presets:
            try:
                # Darken the "night" end of the gradient for alarm use
                if preset.display_gradient[0] == '5000K':
                    preset.display_gradient = ('5000K', '50K')
                else:
                    preset.display_gradient = ('50K', '5000K')
            except (AttributeError, IndexError, TypeError):
                # Spacer entries have no (indexable) display_gradient; leave them alone.
                # Narrowed from a bare except so we don't swallow KeyboardInterrupt etc.
                pass
            preset_html = preset.render()
            preset_list.append(preset_html)
        group_html = """
            <p id="clock" class="current-colour"></p>
            <h2>{group_name}</h2>
            <div class="sun-alarm" data-latitude="{users_latitude}" data-longitude="{users_longitude}"></div>
            <div class="preset_group">
                <div class="presets_row">
                    {preset_html}
                </div>
            </div>
        """.format(
            group_name=group_name,
            preset_html="\n".join(preset_list),
            users_latitude=get_setting("latitude", 52.2053),
            users_longitude=get_setting("longitude", 0.1218)
        )
        out_html_list.append(group_html)
        out_html = "\n".join(out_html_list)
        return out_html

    def render_udevelop_presets(self, request):
        """
        Renders the Under Development text.
        """
        out_html = """
            <div class="underdevelop">
                <h1> Under Development, please refer to the Github repository.</h1>
            </div>
        """
        return out_html

    # Actions: These are the actions our web server can initiate. Triggered by hitting the url with ?action_name=value ####
    def before_action(self, *args, **kwargs):
        """
        Called just before an action takes place. We stop whatever current sequence is running
        """
        self.led_strip.stop_current_sequence()  # Stop current sequence

    def action__set(self, request):
        """
        Run when user wants to set a colour to a specified value
        """
        # six.text_type is unicode on py2 and str on py3 (the bare name
        # `unicode` does not exist on Python 3)
        set_colour = request.get_param("set", force=six.text_type)
        logging.info("Set to: %s" % set_colour)  # logging used for consistency with the other actions
        return self.led_strip.set(set_colour)
    action__set.capability = {
        "param": "set",
        "description": "Sets the RGB strip to a single colour.",
        "value": "<unicode> A named colour (e.g. 'pink') or colour hex value (e.g. '#19BECA').",
        "validity": "<unicode> A known named colour, or valid colour hex in the range #000000-#FFFFFF.",
        "widget": "colourpicker",
        "returns": "<unicode> The hex value of the colour the RGB strip has been set to."
    }

    def action__fade(self, request):
        """
        Run when user wants to fade the strip to a specified colour
        """
        fade_colour = request.get_param("fade", force=six.text_type)
        logging.info("Fade to: %s" % fade_colour)
        return self.led_strip.fade(fade_colour)
    action__fade.capability = {
        "param": "fade",
        "description": "Fades the RGB strip from its current colour to a specified colour.",
        "value": "<unicode> A named colour (e.g. 'pink') or colour hex value (e.g. '#19BECA').",
        "validity": "<unicode> A known named colour, or valid colour hex in the range #000000-#FFFFFF",
        "returns": "<unicode> The hex value of the colour the RGB strip has been set to."
    }

    def action__sunrise(self, request):
        """
        Performs a sunrise over the specified period of time
        """
        seconds = request.get_param(["seconds", "s", "sunrise"], default=10.0, force=float)
        milliseconds = request.get_param(["milliseconds", "ms"], default=0.0, force=float)
        temp_start = request.get_param(['temp_start', 'K'], default=None, force=six.text_type)
        temp_end = request.get_param('temp_end', default=None, force=six.text_type)
        logging.info("Sunrise: %s seconds" % (seconds + (milliseconds / 1000.0)))
        return self.led_strip.sunrise(seconds=seconds, milliseconds=milliseconds, temp_start=temp_start, temp_end=temp_end)
    action__sunrise.capability = {
        "param": "sunrise",
        "description": "Gently fades-in the RGB strip from deep red to daylight.",
        "value": "The number of seconds you would like the sunrise to take.",
        "validity": "<float> > 0",
        "optional_concurrent_parameters": [
            {
                "param": "milliseconds",
                "value": "The number of milliseconds the sunrise should take. Will be added to seconds (if specified) to give a total time.",
                "validity": "<int> > 0",
                "default": "1000",
            },
            {
                "param": "temp_start",
                "value": "The colour temperature you wish to start from (e.g. 500K).",
                "validity": "<unicode> Matches a named colour temperature (50K - 15000K in 100 Kelvin steps)",
                "default": "6500K"
            },
            {
                "param": "temp_end",
                "value": "The colour temperature you wish to finish at (e.g. 4500K).",
                "validity": "<unicode> Matches a named colour temperature (50K - 15000K in 100 Kelvin steps)",
                "default": "500K"
            }
        ],
        "returns": "<unicode> The hex value of the colour the RGB strip has been set to."
    }

    def action__sunset(self, request):
        """
        Performs a sunset over the specified period of time
        """
        seconds = request.get_param(["seconds", "s", "sunset"], default=10.0, force=float)
        milliseconds = request.get_param(["milliseconds", "ms"], default=0.0, force=float)
        temp_start = request.get_param(['temp_start', 'K'], default=None, force=six.text_type)
        temp_end = request.get_param('temp_end', default=None, force=six.text_type)
        logging.info("Sunset: %s seconds" % (seconds + (milliseconds / 1000.0)))
        return self.led_strip.sunset(seconds=seconds, milliseconds=milliseconds, temp_start=temp_start, temp_end=temp_end)
    action__sunset.capability = {
        "param": "sunset",
        "description": "Gently fades-out the RGB strip from daylight to deep-red.",
        "value": "The number of seconds you would like the sunrise to take.",
        "validity": "<float> > 0",
        "optional_concurrent_parameters": [
            {
                "param": "milliseconds",
                "value": "The number of milliseconds the sunset should take. Will be added to seconds (if specified) to give a total time.",
                "validity": "<int> > 0",
                "default": "1000",
            },
            {
                "param": "temp_start",
                "value": "The colour temperature you wish to start from (e.g. 500K).",
                "validity": "<unicode> Matches a named colour temperature (50K - 15000K in 100 Kelvin steps)",
                "default": "500K"
            },
            {
                "param": "temp_end",
                "value": "The colour temperature you wish to finish at (e.g. 4500K).",
                "validity": "<unicode> Matches a named colour temperature (50K - 15000K in 100 Kelvin steps)",
                "default": "6500K"
            }
        ],
        "returns": ""
    }

    def action__jump(self, request):
        """
        Jump from one specified colour to the next
        """
        jump_colours = request.get_param_values("jump")
        seconds = request.get_param(["seconds", "s"], default=0.0, force=float)
        milliseconds = request.get_param(["milliseconds", "ms"], default=0.0, force=float)
        self.led_strip.stop_current_sequence()  # Terminate any crap that's going on
        total_seconds = (seconds + (milliseconds / 1000.0))
        logging.info("Jump: %s, %s seconds" % (jump_colours, total_seconds))
        return self.led_strip.jump(jump_colours, seconds=seconds, milliseconds=milliseconds)  # Has its own colour sanitisation routine
    action__jump.capability = {
        "param": "jump",
        "description": "Hops from one colour to the next over an even period of time.",
        "value": "A comma delimited list of colours you wish to jump between.",
        "validity": "<unicode> valid colour names or hex values separated by commas (e.g. red,blue,green,cyan,#FF00FF)",
        "optional_concurrent_parameters": [
            {
                "param": "milliseconds",
                "value": "The number of milliseconds the each colour should be displayed for. Will be added to seconds (if specified) to give a total time.",
                "validity": "<int> > 0",
                "default": "200",
            },
            {
                "param": "seconds",
                "value": "The number of seconds each colour should be displayed for. Will be added to milliseconds (if specified) to give a total time.",
                "validity": "<int> > 0",
                "default": "0",
            },
        ],
        "returns": "<unicode> The first hex value of sequence."
    }

    def action__rotate(self, request):
        """
        Rotates (fades) from one specified colour to the next
        """
        rotate_colours = request.get_param_values("rotate")
        seconds = request.get_param(["seconds", "s"], default=0.0, force=float)
        milliseconds = request.get_param(["milliseconds", "ms"], default=0.0, force=float)
        self.led_strip.stop_current_sequence()  # Terminate any crap that's going on
        total_seconds = (seconds + (milliseconds / 1000.0))
        logging.info("Rotate: %s, %s seconds" % (rotate_colours, total_seconds))
        return self.led_strip.rotate(rotate_colours, seconds=seconds, milliseconds=milliseconds)  # Has its own colour sanitisation routine
    action__rotate.capability = {
        "param": "rotate",
        "description": "Fades from one colour to the next over an even period of time.",
        "value": "A comma delimited list of colours you wish to cross-fade between.",
        "validity": "<unicode> valid colour names or hex values separated by commas (e.g. red,blue,green,cyan,#FF00FF)",
        "optional_concurrent_parameters": [
            {
                "param": "milliseconds",
                "value": "The number of milliseconds the each colour fade should take. Will be added to seconds (if specified) to give a total time.",
                "validity": "<int> > 0",
                "default": "200",
            },
            {
                "param": "seconds",
                "value": "The number of seconds each colour fade should take. Will be added to milliseconds (if specified) to give a total time.",
                "validity": "<int> > 0",
                "default": "0",
            },
        ],
        "returns": "<unicode> The first hex value of sequence."
    }

    def action__stop(self, request):
        """
        Stops the current sequence
        """
        return self.led_strip.stop()
    action__stop.capability = {
        "param": "stop",
        "description": "Halts the current sequence or fade.",
        "value": "",
        "returns": "<unicode> The hex value of colour the RGB strip got halted on."
    }

    def action__off(self, request):
        """
        Turns the strip off
        """
        logging.info("Off!")
        return self.led_strip.off()
    action__off.capability = {
        "param": "off",
        "description": "Stops any fades or sequences. Quickly Fades the RGB strip to black (no light)",
        "value": "",
        "returns": "<unicode> The hex value of colour the RGB strip ends up at (#000000)."
    }

    def information__status(self, request, *args, **kwargs):
        """
        Reports the status of the RGB LED strip
        """
        current_rgb = "({})".format(self.led_strip)
        current_hex = self.led_strip.hex
        contrast_colour = self.led_strip.contrast_from_bg(current_hex, dark_default="202020")
        return {
            "sequence": self.led_strip.sequence_colours,
            "current_hex": current_hex,
            "current": current_rgb,
            "current_colour": current_rgb,
            "current_rgb": current_rgb,
            "contrast": contrast_colour,
            "contrast_colour": contrast_colour
        }

    def teardown(self):
        """
        Called automatically when exiting the parent reactor
        """
        self.led_strip.teardown()
class NotSet:
    """Sentinel type used to distinguish 'no value supplied' from None."""
    pass

# Shared sentinel instance (used by SmartRequest.get_param)
NOT_SET = NotSet()
class SmartRequest(Request, object):
    """
    The class for request objects returned by our web server.

    This child version has methods for easily grabbing params safely.

    Usage:
        #If you just want the first value
        sunset = request["sunset"]
        sunset = request.get_param("sunset")

        #You can even test the water with multiple values, it will stop at the first valid one
        sunset = request.get_param(["sunset","ss","twilight"])

        #If you want a whole list of values
        jump = request.get_list("jump")

    See docs: https://twistedmatrix.com/documents/8.0.0/api/twisted.web.server.Request.html
    """
    def __init__(self, *args, **kwargs):
        super(SmartRequest, self).__init__(*args, **kwargs)

    def get_param_values(self, name, default=None):
        """
        Failsafe way of getting querystring get and post params from the Request object
        If not provided, will return default

        @return: ["val1","val2"] LIST of arguments, or the default
        """
        return self.args.get(name, default)
    get_params = get_param_values  # Alias
    get_list = get_param_values  # Alias
    get_params_list = get_param_values  # Alias

    def get_param(self, names, default=None, force=None):
        """
        Failsafe way of getting a single querystring value. Will only return one (the first) value if found

        @param names: <str> The name of the param to fetch, or a list of candidate names to try
        @keyword default: The default value to return if we cannot get a valid value
        @keyword force: <type> A class / type to force the output into. Default is returned if we cannot force the value into this type
        """
        # six.string_types covers both py2 (basestring) and py3 (str); the
        # bare name `unicode` does not exist on Python 3 and raised NameError here.
        if isinstance(names, six.string_types):
            names = [names]
        val = NOT_SET
        for name in names:
            val = self.get_param_values(name=name, default=NOT_SET)
            if val is not NOT_SET:  # Stop at the first name that yields a value
                break
        # If we have no valid value, then bail
        if val is NOT_SET:
            return default
        try:
            if len(val) == 1:
                # Single-valued param: unwrap it from its list
                single_val = val[0]
                if force is not None:
                    return force(single_val)
                return single_val
            else:
                # Multi-valued param: return the whole (optionally coerced) list
                mult_val = val
                if force is not None:
                    mult_val = [force(ii) for ii in val]
                return mult_val
        except (IndexError, ValueError, TypeError):
            pass  # Coercion or unwrapping failed; fall through to the default
        return default
    get_value = get_param
    param = get_param

    def has_params(self, *param_names):
        """
        Returns True or the value if any of the param names given by args exist
        """
        for param_name in param_names:
            try:
                return self.args[param_name] or True
            except KeyError:
                pass
        return False
    has_param = has_params
    has_key = has_params

    def __getitem__(self, name):
        """
        Lazy way of getting a param list, with the fallback default being None
        """
        return self.get_param(name)
class RaspiledControlSite(Site, object):
    """
    Site thread which initialises the RaspiledControlResource properly
    """
    ip_address = None  # Address of the most recent client, set in buildProtocol

    def __init__(self, *args, **kwargs):
        # Allow the caller to inject a pre-built resource via kwargs; otherwise build a fresh one
        resource = kwargs.pop("resource", RaspiledControlResource())
        super(RaspiledControlSite, self).__init__(resource=resource, requestFactory=SmartRequest, *args, **kwargs)

    def buildProtocol(self, addr):
        # Record the connecting client's address on both the site and its resource
        self.ip_address = addr
        self.resource.ip_address = addr
        return super(RaspiledControlSite, self).buildProtocol(addr)

    def setup_broadcasting(self, reactor):
        # Delegate network broadcast setup to the resource
        self.resource.setup_broadcasting(reactor)

    def stopFactory(self):
        """
        Called automatically when exiting the reactor. Here we tell the LEDstrip to tear down its resources
        """
        self.resource.teardown()
def get_matching_pids(name, exclude_self=True):
    """
    Checks the process IDs of the specified processes matching name, having excluded itself

    check_output(["pidof", str]) will return a space delimited list of all process ids

    @param name: <str> The process name to search for
    @keyword exclude_self: <Bool> Whether to remove own ID from returned list (e.g. if searching for a python script!)

    @return: <list [<str>,]> List of PIDs (empty list when there are no matches)
    """
    # Get all matching PIDs
    try:
        pids_output = check_output(["pidof", name])
    except CalledProcessError:  # pidof exits non-zero when nothing matches
        pids_output = b""
    # check_output returns bytes on Python 3; the old str-based split raised TypeError there
    if isinstance(pids_output, bytes):
        pids_output = pids_output.decode("ascii", "replace")
    # split() with no separator also discards the empty-string entry the old
    # code produced when there was no output at all
    pids = pids_output.split()
    # Remove self if required:
    if exclude_self:
        my_pid = str(os.getpid())  # Own PID - getpid() returns integer
        try:
            pids.remove(my_pid)  # Remove my PID string:
        except ValueError:  # We weren't in the list
            pass
    return pids
def checkClientAgainstWhitelist(ip, user, token):
    """
    Check whether a client IP appears in the .whitelist config file.

    On first run the file is created, seeded with 127.0.0.1, so admins have
    something to edit.  The user and token arguments are currently unused.

    @return: <bool> True when the IP is whitelisted, False otherwise
    """
    default_ips = {
        'IP1': '127.0.0.1',
    }
    config_path = os.path.expanduser(RASPILED_DIR + '/.whitelist')
    parser = configparser.ConfigParser(defaults=default_ips)
    if os.path.exists(config_path):
        parser.read(config_path)
    else:
        # No whitelist yet: persist the defaults for future editing
        with open(config_path, 'w') as f:
            parser.write(f)
    for whitelisted_ip in parser.defaults().values():
        if ip == whitelisted_ip:
            logging.info('Client registered')
            return True
    return False
def start_if_not_running():
    """
    Checks if the listener process is already running; if not, starts it!
    """
    pids = get_matching_pids(APP_NAME, exclude_self=True)  # Will remove own PID
    # On Python 3, filter() returns a lazy object which is ALWAYS truthy, so the
    # old `pids = filter(bool, pids)` made `not pids` permanently False and the
    # listener never started. Materialise into a real list instead.
    pids = [pid for pid in pids if pid]
    if not pids:  # No match! Implies we need to fire up the listener
        logging.info("[STARTING] Raspiled Listener with PID %s" % str(os.getpid()))
        # First the web
        factory = RaspiledControlSite(timeout=8)  # 8s timeout
        try:
            pi_port = int(RESOLVED_USER_SETTINGS.get('pi_port', 9090))
        except (TypeError, ValueError):
            raise ConfigurationError("You have an invalid value for 'pi_port' in your settings. This needs to be a valid port number (integer).")
        endpoint = endpoints.TCP4ServerEndpoint(reactor, pi_port)
        endpoint.listen(factory)
        # factory.setup_broadcasting(reactor)  # Uncomment to broadcast stuff over network!
        reactor.run()
    else:
        logging.info("Raspiled Listener already running with PID %s" % ", ".join(pids))
if __name__ == "__main__":
    # Script entry point: only launch the listener when run directly
    start_if_not_running()
|
<reponame>sraaphorst/gem_adapt_queue
# <NAME> 17 July 2018
# This module contains the individual weight function components.
# obsweight is the main function.
import numpy as np
import astropy.units as u
def radist(ra, tot_time, obs_time, verbose=False):
    """
    Weighting factors from the RA distribution of remaining observation time.

    The celestial sphere is divided into twelve 30-degree right-ascension
    bins.  The remaining time (tot_time - obs_time) is totalled per bin and
    normalised by the mean over all bins; each observation then receives the
    weight of the bin its RA falls in.

    Parameters
    ----------
    ra : array of 'astropy.units' degrees
        Right ascensions of all remaining observations in queue
    tot_time : array of 'astropy.units' hours
        Total times of observations
    obs_time : array of 'astropy.units' hours
        Observed times of observations

    Returns
    -------
    array of floats
    """
    bin_edges = [0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330., 360.] * u.deg
    if verbose:
        print('target ra distribution...')
        print('ra', ra)
        print('tot_time', tot_time)
        print('obs_time', obs_time)
        print('bins edges', bin_edges)
    # 0-based RA bin index for every target
    bin_nums = np.digitize(ra, bins=bin_edges) - 1
    if verbose:
        print('histogram bin indices', bin_nums)
    # Total remaining observing hours per 30-degree bin
    nbin = len(bin_edges) - 1
    remaining = tot_time - obs_time
    bin_factors = np.zeros(nbin) * u.h
    for idx in np.arange(0, nbin):
        members = np.where(bin_nums == idx)[0][:]
        bin_factors[idx] = bin_factors[idx] + sum(remaining[members])
    if verbose:
        print('Total tot_time (ra distribution)', bin_factors)
    # Normalise by the mean so the factors become dimensionless weights
    bin_factors = bin_factors / np.mean(bin_factors)
    if verbose:
        print('bin_factors (ra distribution weight)', bin_factors)
    # Map every observation onto the weight of the RA bin that contains it
    wra = np.empty(len(ra))
    for k in np.arange(nbin):
        in_bin = np.where(np.logical_and(ra >= bin_edges[k], ra < bin_edges[k + 1]))[0][:]
        wra[in_bin] = bin_factors[k]
    if verbose:
        print('wra', wra)
    return wra
def cond_match(iq, cc, bg, wv, skyiq, skycc, skywv, skybg, negha, user_prior, verbose=False):
    """
    Weight the match between required and actual observing conditions.

    The weight is 1 wherever the actual conditions satisfy every constraint
    and 0 wherever any of them are worse than required.  When the actual
    image quality (and, independently, cloud cover) is somewhere better than
    required, the target is visible at negative hour angles, and the
    observation is not a Target of Opportunity, the whole weight array is
    additionally scaled by 0.75 per such condition (effectively dropping one
    band).

    Parameters
    ----------
    iq, cc, bg, wv : float
        observation constraint percentiles for image quality, cloud
        condition, sky background and water vapour
    skyiq, skycc, skywv, skybg : np.array of float
        actual sky percentiles along the tot_time grid
    negha : boolean
        True if target is visible at negative hour angles
    user_prior : str
        user priority string (checked for 'Target of Opportunity')

    Returns
    -------
    cmatch : array of floats
        cmatch weights
    """
    cmatch = np.ones(len(skybg))
    # Zero the weight wherever any actual condition is worse than required
    too_poor = np.logical_or(np.logical_or(skyiq > iq, skycc > cc),
                             np.logical_or(skybg > bg, skywv > wv))
    cmatch[np.where(too_poor)[0][:]] = 0.
    # Scale by 0.75 per condition that is better than needed, provided the
    # target does not set soon and this is not a ToO.
    # Bryan - using negha is wrong here, a setting target will have min(HA) > 0
    not_too = 'Target of Opportunity' not in user_prior
    better_iq = np.where(skyiq < iq)[0][:]
    if len(better_iq) != 0 and negha and not_too:
        cmatch = cmatch * 0.75
    better_cc = np.where(skycc < cc)[0][:]
    if len(better_cc) != 0 and negha and not_too:
        cmatch = cmatch * 0.75
    if verbose:
        print(iq, cc, bg, wv)
        print(skyiq, skycc, skybg, skywv)
    return cmatch
def total_cond(iq, cc, bg, wv):
    """
    Overall difficulty weight for the conditions an observation requires.

    Each constraint percentile contributes (1/percentile)**3, so stricter
    (smaller) percentiles yield a larger total weight.

    Parameters
    ----------
    iq : float
        observation image quality constraint percentile
    cc : float
        observation cloud condition constraint percentile
    bg : float
        observation sky background constraint percentile
    wv : float
        observation water vapour constraint percentile

    Returns
    -------
    twcond : float
        total conditions weight
    """
    return sum((1. / constraint) ** 3 for constraint in (iq, cc, bg, wv))
def airmass(am, ha, elev):
    """
    Airmass / elevation-constraint weights.

    A time step gets zero weight when the airmass exceeds 2.1, or when the
    observation's elevation constraint (expressed either as an airmass range
    or an hour-angle range) is violated; all other times keep a weight of 1.

    Parameters
    ----------
    am : array of floats
        target airmass at times throughout observing window.
    ha : array of 'astropy.units' hourangles
        target hour angles along tot_time grid
    elev : dictionary
        observation elevation constraint. Keys 'type', 'min', and 'max'.

    Returns
    -------
    wam : array of floats
        airmass weights
    """
    wam = np.ones(len(am))
    wam[np.where(am > 2.1)[0][:]] = 0.
    if elev['type'] == 'Airmass':
        outside = np.logical_or(am < elev['min'], am > elev['max'])
        wam[np.where(outside)[0][:]] = 0.
    elif elev['type'] == 'Hour Angle':
        outside = np.logical_or(ha < elev['min'], ha > elev['max'])
        wam[np.where(outside)[0][:]] = 0.
    return wam
def windconditions(dir, vel, az, verbose = False):
    """
    Wind-condition weights along the time grid.

    A grid point gets weight 0 when the wind speed exceeds 10 m/s AND the
    telescope azimuth is within 20 deg of the wind direction (wrap-around
    through 360 deg included); otherwise the weight is 1.

    Parameters
    ----------
    dir : np.ndarray of 'astropy.units' degrees
        wind direction along the time grid
    vel : np.ndarray of 'astropy.units' m/s
        wind velocity along the time grid
    az : np.ndarray of 'astropy.units' degrees
        target azimuth angles along the time grid

    Returns
    -------
    wwind : np.ndarray of float
        wind condition weights (0. or 1. per grid point)
    """
    if verbose:
        print('Wind vel:', vel)
        print('Wind dir:', dir)
        print('AZ', az)
    wwind = np.ones(len(az))
    separation = abs(az - dir)
    # Angular separation wraps: either direct or through 360 deg.
    into_wind = np.logical_or(separation <= 20. * u.deg,
                              360. * u.deg - separation <= 20. * u.deg)
    ii = np.where(np.logical_and(vel > 10. * u.m / u.s, into_wind))[0][:]
    if len(ii) != 0:
        wwind[ii] = 0.
    if verbose:
        print('ii ((vel > 10.*u.m/u.s) and (abs(dir - az) < 20.*u.deg))', ii)
        print('wwind', wwind)
    return wwind
def hourangle(latitude, dec, ha, verbose = False):
    """
    Hour-angle / declination weights for target location and visibility.

    A declination-dependent factor (wdec) scales a quadratic in hour angle;
    negative weights are clipped to zero, and the whole curve is multiplied
    by 1.5 when the minimum hour angle is at or after -1 hr from transit.

    Parameters
    ----------
    latitude : '~astropy.coordinates.angles.Latitude' or '~astropy.units'
        observatory latitude
    dec : '~astropy.units' degree
        target declination
    ha : np.ndarray of '~astropy.units' hourangle
        target hour angles along the time grid

    Returns
    -------
    wha : np.ndarray of float
        hour-angle weights
    """
    # Declination offset measured toward the visible pole for this site.
    decdiff = latitude - dec if latitude < 0 else dec - latitude
    declim = [90., -30., -45., -50, -90.] * u.deg
    wval = [1.0, 1.3, 1.6, 2.0]
    wdec = 0.
    for upper, lower, binweight in zip(declim[:-1], declim[1:], wval):
        if np.logical_and(decdiff < upper, decdiff >= lower):
            wdec = binweight
    # Quadratic in HA: biased to slightly positive HA near the zenith band,
    # to HA = 0 otherwise (minimum airmass above ~1.3).
    if abs(decdiff) < 40. * u.deg:
        c = wdec * np.array([3., 0.1, -0.06])
    else:
        c = wdec * np.array([3., 0., -0.08])
    wha = c[0] + c[1] / u.hourangle * ha + c[2] / (u.hourangle ** 2) * ha ** 2
    nonpositive = np.where(wha <= 0)[0][:]
    wha[nonpositive] = 0.
    # Boost targets that are already near/after transit at window start.
    if np.amin(ha) >= -1. * u.hourangle:
        wha = wha * 1.5
        if verbose:
            print('multiplied wha by 1.5')
    if verbose:
        print('wdec', wdec)
        print('lat', latitude)
        print('decdiff', decdiff)
        print('HA/unit^2', ha / (u.hourangle ** 2))
        print('min HA', np.amin(ha))
    return wha
def rankingband(band):
    """
    Ranking-band weighting factor.

    Band 1 (highest priority) maps to 3000, band 4 (lowest) to 0.

    Parameters
    ----------
    band : int
        observation ranking band (1, 2, 3 or 4)

    Returns
    -------
    float
        ranking band weight, (4 - band) * 1000
    """
    # np.int was a deprecated alias for the builtin int and was removed in
    # NumPy 1.24; int() is the equivalent, portable conversion.
    return (4. - int(band)) * 1000
def userpriority(user_prior):
    """
    User-priority weighting factor.

    Parameters
    ----------
    user_prior : str
        observation user priority: 'Low', 'Medium', 'High', or any string
        containing 'Target of Opportunity'

    Returns
    -------
    float
        500. for a Target of Opportunity, 2./1./0. for High/Medium/Low,
        0. for anything unrecognised
    """
    if 'Target of Opportunity' in user_prior:
        return 500.
    # 'Low' and unknown priorities both fall through to 0.
    return {'High': 2., 'Medium': 1.}.get(user_prior, 0.)
def status(prog_comp, obs_comp):
    """
    Weighting factor for observation and program status.

    - 1.0 if the program has not been started
    - 1.5 if the program is partially observed
    - 2.0 if the observation itself is also partially observed

    Parameters
    ----------
    prog_comp : float
        fraction of program completed.
    obs_comp : float
        fraction of observation completed.

    Returns
    -------
    float
        program status weighting factor
    """
    if prog_comp <= 0.0:
        return 1.
    return 2.0 if obs_comp > 0.0 else 1.5
def complete(prog_comp, obs_comp):
    """
    Observation completion weighting factor.

    - 1 if neither the observation nor its program is complete
    - 0 if either has reached 100% completion

    Parameters
    ----------
    prog_comp : float
        fraction of program completed.
    obs_comp : float
        fraction of observation completed.

    Returns
    -------
    int
        completion weighting factor (0 or 1)
    """
    if obs_comp >= 1.:
        return 0
    if prog_comp >= 1.:
        return 0
    return 1
def time_wins(grid_size, i_wins, verbose = False):
    """
    Build 0/1 weights that are non-zero only inside the observation's
    time windows.

    Parameters
    ----------
    grid_size : int
        number of points in the time grid
    i_wins : list of integer pair(s)
        [start, end] indices (inclusive on both ends) of each available
        window along the time grid

    Returns
    -------
    weights : np.ndarray of float
        1. inside a window, 0. elsewhere
    """
    if verbose:
        print('i_wins:')
        for win in i_wins:
            print(win)
    weights = np.zeros(grid_size)
    # Expand every [start, end] pair into the indices it spans.
    indices = np.concatenate([np.arange(start, end + 1) for start, end in i_wins])
    weights[indices] = 1.
    if verbose:
        print(indices)
    return weights
def obsweight(obs_id, ra, dec, iq, cc, bg, wv, elev_const, i_wins, band, user_prior, AM, HA, AZ, latitude, prog_comp,
              obs_comp, skyiq, skycc, skybg, skywv, winddir, windvel, wra, verbose = False, debug = False):
    """
    Calculate observation weights.
    Parameters
    ----------
    obs_id : string
        observation identifier (only needed if printing output)
    ra : 'astropy.units' degrees
        observation right ascension
    dec : 'astropy.units' degrees
        observation declination
    iq : float
        observation image quality constraint percentile
    cc : float
        observation cloud condition constraint percentile
    bg : float
        observation sky background constraint percentile
    wv : float
        observation water vapour constraint percentile
    elev_const : dictionary
        observation elevation constraint (type, min, max).
        Example
        -------
        elev_const = {type='Hour Angle', min='-2.00', max='2.00'}
    i_wins : list of integer pair(s)
        indices of observation tot_time window(s) along tot_time grid.
        Example
        -------
        an observation with two tot_time windows would look something like...
        i_wins = [
            [0,80],
            [110, 130],
        ]
    band : int
        observation ranking band (1, 2, 3, 4)
    user_prior : string
        observation user priority ('Low', 'Medium', 'High', 'Target of Opportunity')
    obs_comp : np.array of float
        fraction of observation completed
    AM : np.array of floats
        target airmasses along tot_time grid
    HA : np.array of 'astropy.units' hourangles
        target hour angles along tot_time grid
    AZ : np.array of 'astropy.units' radians
        target azimuth angles along tot_time grid
    skyiq : np.array of float
        sky image quality percentile along tot_time grid
    skycc : np.array of float
        sky cloud condition percentile along tot_time grid
    skywv : np.array of float
        sky water vapour percentile along tot_time grid
    skybg : array of floats
        target sky background percentiles along tot_time grid
    latitude : '~astropy.coordinates.angles.Latitude' or '~astropy.unit.Quantity'
        observatory latitude
    prog_comp : float
        Completion fraction of program
    winddir : np.array of 'astropy.units' degrees
        wind direction along tot_time grid
    windvel : np.array of 'astropy.units' m/s
        wind velocity along tot_time grid
    wra : np.ndarray of floats
        RA tot_time distribution weighting factor
    verbose : bool
        print diagnostic output for every weighting component
    debug : bool
        print only the observation info and the final weight
    Returns
    -------
    weights : np.ndarray of floats
    """
    verbose2 = debug  # only show obs. info and final weight
    if verbose or verbose2:
        print(obs_id, ra, dec, iq, cc, bg, wv, elev_const, band, user_prior, obs_comp)
    # -- Match tot_time windows --
    wwins = time_wins(grid_size=len(skyiq), i_wins=i_wins)
    if verbose:
        print('wwins', wwins)
    # -- Matching required conditions to actual --
    cmatch = cond_match(iq=iq, cc=cc, bg=bg, wv=wv, skyiq=skyiq, skycc=skycc, skywv=skywv, skybg=skybg,
                        negha=min(HA) < 0. * u.hourangle, user_prior=user_prior, verbose = verbose)
    if verbose:
        print('iq, cc, bg, wv', iq, cc, bg, wv)
        print('skyiq, skycc, skybg, skywv', skyiq, skycc, skybg, skywv)
        print('cmatch', cmatch)
        print('minHA<0', min(HA) < 0. * u.hourangle)
    # -- Total required conditions --
    twcond = total_cond(iq=iq, cc=cc, bg=bg, wv=wv)
    if verbose:
        print('twcond', twcond)
    # -- Airmass/elevation constraints --
    wam = airmass(am=AM, ha=HA, elev=elev_const)
    if verbose:
        print('AM', AM)
        print('HA.hour', HA)
        print('elev', elev_const)
        print('wam', wam)
    # -- Wind --
    # Wind, do not point within 20deg of wind if over limit
    wwind = windconditions(dir=winddir, vel=windvel, az=AZ, verbose=verbose)
    if verbose:
        print('wwind', wwind)
    # -- Hour Angle / Location --
    wha = hourangle(latitude=latitude, dec=dec, ha=HA, verbose=verbose)
    if verbose:
        print('wha', wha)
    # -- Band --
    wband = rankingband(band=band)
    if verbose:
        print('wband', wband)
    # -- User Priority --
    wprior = userpriority(user_prior=user_prior)
    if verbose:
        print('wprior', wprior)
    # -- Program/Observation Status --
    wstatus = status(prog_comp=prog_comp, obs_comp=obs_comp)
    if verbose:
        print('wstatus', wstatus)
    # -- Observation completion --
    wcplt = complete(prog_comp=prog_comp, obs_comp=obs_comp)
    if verbose:
        print('wcplt', wcplt)
    # -- Partner Balance --
    # Placeholder: partner-balance weighting is not implemented (always 0).
    wbal = 0.
    if verbose:
        print('wbal', wbal)
        print('wra', wra)
    # if 'Target of Opportunity' in user_prior: # stop ToOs from dropping a band when sky conditions are good.
    #     cmatch = 1.
    # ****** Final weighting function ******
    # Additive terms set the relative priority; the multiplicative factors
    # scale (cmatch) or veto (wam, wwind, wwins are 0/1 arrays; wcplt is 0/1)
    # individual time-grid slots.
    weight = (twcond + wstatus * wha + wprior + wband + wbal + wra) * cmatch * wam * wwind * wcplt * wwins
    if verbose or verbose2:
        print('Total weight', weight)
    return weight
|
<filename>ckanext/activity/tests/logic/test_action.py
# -*- coding: utf-8 -*-
import copy
import datetime
import time
import pytest
import ckan.plugins.toolkit as tk
import ckan.tests.helpers as helpers
import ckan.tests.factories as factories
from ckanext.activity.model.activity import Activity, package_activity_list
def _clear_activities():
    # Delete every Activity row so a test observes only the activities
    # it creates itself.
    from ckan import model
    model.Session.query(Activity).delete()
    model.Session.flush()
def _seconds_since_timestamp(timestamp, format_):
dt = datetime.datetime.strptime(timestamp, format_)
now = datetime.datetime.utcnow()
assert now > dt # we assume timestamp is not in the future
return (now - dt).total_seconds()
@pytest.mark.ckan_config("ckan.plugins", "activity")
@pytest.mark.usefixtures("with_plugins")
class TestLimits:
    def test_activity_list_actions(self):
        """Every list action rejects non-integer and negative limit/offset."""
        invalid_params = (
            {"limit": "not_an_int", "offset": "not_an_int"},
            {"limit": -1, "offset": -1},
        )
        for action in (
            "user_activity_list",
            "package_activity_list",
            "group_activity_list",
            "organization_activity_list",
            "recently_changed_packages_activity_list",
            "current_package_list_with_resources",
        ):
            for params in invalid_params:
                with pytest.raises(tk.ValidationError):
                    helpers.call_action(action, id="test_user", **params)
@pytest.mark.ckan_config("ckan.plugins", "activity")
@pytest.mark.usefixtures("non_clean_db", "with_plugins")
class TestActivityShow:
    def test_simple_with_data(self, package, user, activity_factory):
        """activity_show returns the stored activity with its data payload intact."""
        created = activity_factory(
            user_id=user["id"],
            object_id=package["id"],
            activity_type="new package",
            data={"package": copy.deepcopy(package), "actor": "Mr Someone"},
        )
        shown = helpers.call_action("activity_show", id=created["id"])
        assert shown["user_id"] == user["id"]
        # The activity was created moments ago, so its timestamp is recent.
        age = _seconds_since_timestamp(
            shown["timestamp"], "%Y-%m-%dT%H:%M:%S.%f"
        )
        assert age < 10
        assert shown["object_id"] == package["id"]
        assert shown["data"] == {"package": package, "actor": "Mr Someone"}
        assert shown["activity_type"] == "new package"
@pytest.mark.ckan_config("ckan.plugins", "activity")
@pytest.mark.usefixtures("clean_db", "with_plugins")
class TestPackageActivityList(object):
    """Tests for the package_activity_list action: which activity types are
    recorded for dataset lifecycle events, type filtering, limits, hidden
    activities, and before/after/offset time-window queries."""
    def test_create_dataset(self):
        user = factories.User()
        dataset = factories.Dataset(user=user)
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "new package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
        assert "extras" in activities[0]["data"]["package"]
    def test_change_dataset(self):
        user = factories.User()
        _clear_activities()
        dataset = factories.Dataset(user=user)
        original_title = dataset["title"]
        dataset["title"] = "Dataset with changed title"
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        # Newest first: the update precedes the creation in the list.
        assert [activity["activity_type"] for activity in activities] == [
            "changed package",
            "new package",
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
        assert (
            activities[0]["data"]["package"]["title"]
            == "Dataset with changed title"
        )
        # the old dataset still has the old title
        assert activities[1]["activity_type"] == "new package"
        assert activities[1]["data"]["package"]["title"] == original_title
    def test_change_dataset_add_extra(self):
        user = factories.User()
        dataset = factories.Dataset(user=user)
        _clear_activities()
        dataset["extras"].append(dict(key="rating", value="great"))
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
        assert "extras" in activities[0]["data"]["package"]
    def test_change_dataset_change_extra(self):
        user = factories.User()
        dataset = factories.Dataset(
            user=user, extras=[dict(key="rating", value="great")]
        )
        _clear_activities()
        dataset["extras"][0] = dict(key="rating", value="ok")
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
        assert "extras" in activities[0]["data"]["package"]
    def test_change_dataset_delete_extra(self):
        user = factories.User()
        dataset = factories.Dataset(
            user=user, extras=[dict(key="rating", value="great")]
        )
        _clear_activities()
        dataset["extras"] = []
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
        assert "extras" in activities[0]["data"]["package"]
    def test_change_dataset_add_resource(self):
        user = factories.User()
        dataset = factories.Dataset(user=user)
        _clear_activities()
        factories.Resource(package_id=dataset["id"], user=user)
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
        # NB the detail is not included - that is only added in by
        # activity_list_to_html()
    def test_change_dataset_change_resource(self):
        user = factories.User()
        dataset = factories.Dataset(
            user=user,
            resources=[dict(url="https://example.com/foo.csv", format="csv")],
        )
        _clear_activities()
        dataset["resources"][0]["format"] = "pdf"
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
    def test_change_dataset_delete_resource(self):
        user = factories.User()
        dataset = factories.Dataset(
            user=user,
            resources=[dict(url="https://example.com/foo.csv", format="csv")],
        )
        _clear_activities()
        dataset["resources"] = []
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
    def test_change_dataset_add_tag(self):
        user = factories.User()
        dataset = factories.Dataset(user=user)
        _clear_activities()
        dataset["tags"].append(dict(name="checked"))
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
    def test_delete_tag_from_dataset(self):
        user = factories.User()
        dataset = factories.Dataset(user=user, tags=[dict(name="checked")])
        _clear_activities()
        dataset["tags"] = []
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
    def test_delete_dataset(self):
        user = factories.User()
        dataset = factories.Dataset(user=user)
        _clear_activities()
        helpers.call_action(
            "package_delete", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "deleted package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
    def test_private_dataset_has_no_activity(self):
        user = factories.User()
        org = factories.Organization(user=user)
        _clear_activities()
        dataset = factories.Dataset(
            private=True, owner_org=org["id"], user=user
        )
        dataset["tags"] = []
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == []
    def test_private_dataset_delete_has_no_activity(self):
        user = factories.User()
        org = factories.Organization(user=user)
        _clear_activities()
        dataset = factories.Dataset(
            private=True, owner_org=org["id"], user=user
        )
        helpers.call_action(
            "package_delete", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == []
    def _create_bulk_types_activities(self, types):
        # Helper: insert one raw Activity row per entry in *types* for a
        # fresh dataset, bypassing the action layer; returns the dataset id.
        dataset = factories.Dataset()
        from ckan import model
        user = factories.User()
        objs = [
            Activity(
                user_id=user["id"],
                object_id=dataset["id"],
                activity_type=activity_type,
                data=None,
            )
            for activity_type in types
        ]
        model.Session.add_all(objs)
        model.repo.commit_and_remove()
        return dataset["id"]
    def test_error_bad_search(self):
        # NOTE(review): "id=id" passes the *builtin* id function, not a
        # dataset id. The call fails validation (activity_types and
        # exclude_activity_types together) before the id is used, but a real
        # dataset id would be clearer — verify intent.
        with pytest.raises(tk.ValidationError):
            helpers.call_action(
                "package_activity_list",
                id=id,
                activity_types=["new package"],
                exclude_activity_types=["deleted package"],
            )
    def test_activity_types_filter(self):
        types = [
            "new package",
            "changed package",
            "deleted package",
            "changed package",
            "new package",
        ]
        id = self._create_bulk_types_activities(types)
        activities_new = helpers.call_action(
            "package_activity_list", id=id, activity_types=["new package"]
        )
        assert len(activities_new) == 2
        activities_not_new = helpers.call_action(
            "package_activity_list",
            id=id,
            exclude_activity_types=["new package"],
        )
        assert len(activities_not_new) == 3
        activities_delete = helpers.call_action(
            "package_activity_list", id=id, activity_types=["deleted package"]
        )
        assert len(activities_delete) == 1
        activities_not_deleted = helpers.call_action(
            "package_activity_list",
            id=id,
            exclude_activity_types=["deleted package"],
        )
        assert len(activities_not_deleted) == 4
    def _create_bulk_package_activities(self, count):
        # Helper: insert *count* bare Activity rows for a fresh dataset;
        # returns the dataset id.
        dataset = factories.Dataset()
        from ckan import model
        user = factories.User()
        objs = [
            Activity(
                user_id=user["id"],
                object_id=dataset["id"],
                activity_type=None,
                data=None,
            )
            for _ in range(count)
        ]
        model.Session.add_all(objs)
        model.repo.commit_and_remove()
        return dataset["id"]
    def test_limit_default(self):
        id = self._create_bulk_package_activities(35)
        results = helpers.call_action("package_activity_list", id=id)
        assert len(results) == 31  # i.e. default value
    @pytest.mark.ckan_config("ckan.activity_list_limit", "5")
    def test_limit_configured(self):
        id = self._create_bulk_package_activities(7)
        results = helpers.call_action("package_activity_list", id=id)
        assert len(results) == 5  # i.e. ckan.activity_list_limit
    @pytest.mark.ckan_config("ckan.activity_list_limit", "5")
    @pytest.mark.ckan_config("ckan.activity_list_limit_max", "7")
    def test_limit_hits_max(self):
        id = self._create_bulk_package_activities(9)
        results = helpers.call_action(
            "package_activity_list", id=id, limit="9"
        )
        assert len(results) == 7  # i.e. ckan.activity_list_limit_max
    def test_normal_user_doesnt_see_hidden_activities(self):
        # activity is 'hidden' because dataset is created by site_user
        dataset = factories.Dataset()
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == []
    def test_sysadmin_user_doesnt_see_hidden_activities_by_default(self):
        # activity is 'hidden' because dataset is created by site_user
        # NOTE(review): identical to the "normal user" test above — no
        # sysadmin context is passed to call_action here; confirm that is
        # intentional (i.e. the default context is what is being tested).
        dataset = factories.Dataset()
        activities = helpers.call_action(
            "package_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == []
    def test_sysadmin_user_can_include_hidden_activities(self):
        # activity is 'hidden' because dataset is created by site_user
        dataset = factories.Dataset()
        activities = helpers.call_action(
            "package_activity_list",
            include_hidden_activity=True,
            id=dataset["id"],
        )
        assert [activity["activity_type"] for activity in activities] == [
            "new package"
        ]
    def _create_dataset_with_activities(self, updates: int = 3):
        # Helper: create a dataset and update its title *updates* times,
        # producing one "new package" plus *updates* "changed package"
        # activities.
        user = factories.User()
        dataset = factories.Dataset(user=user)
        ctx = {"user": user["name"]}
        for c in range(updates):
            dataset["title"] = "Dataset v{}".format(c)
            helpers.call_action("package_update", context=ctx, **dataset)
        return dataset
    def test_activity_after(self):
        """Test activities after timestamp"""
        dataset = self._create_dataset_with_activities()
        db_activities = package_activity_list(dataset["id"], limit=10)
        pkg_activities = helpers.call_action(
            "package_activity_list",
            id=dataset["id"],
            after=db_activities[2].timestamp.timestamp(),
        )
        # we expect just 2 (the first 2)
        assert len(pkg_activities) == 2
        # first activity here is the first one.
        assert pkg_activities[0]["activity_type"] == "changed package"
        pkg_activity_time = datetime.datetime.fromisoformat(
            pkg_activities[0]["timestamp"]
        )
        assert pkg_activity_time == db_activities[0].timestamp
        # last activity here is the 2nd one.
        assert pkg_activities[1]["activity_type"] == "changed package"
        pkg_activity_time = datetime.datetime.fromisoformat(
            pkg_activities[1]["timestamp"]
        )
        assert pkg_activity_time == db_activities[1].timestamp
    def test_activity_offset(self):
        """Test activities with an offset"""
        dataset = self._create_dataset_with_activities()
        db_activities = package_activity_list(dataset["id"], limit=10)
        pkg_activities = helpers.call_action(
            "package_activity_list", id=dataset["id"], offset=2
        )
        # we expect just 2 (the last 2)
        assert len(pkg_activities) == 2
        # first activity here is the first one.
        assert pkg_activities[0]["activity_type"] == "changed package"
        pkg_activity_time = datetime.datetime.fromisoformat(
            pkg_activities[0]["timestamp"]
        )
        assert pkg_activity_time == db_activities[2].timestamp
        # last activity here is the package creation.
        assert pkg_activities[1]["activity_type"] == "new package"
        pkg_activity_time = datetime.datetime.fromisoformat(
            pkg_activities[1]["timestamp"]
        )
        assert pkg_activity_time == db_activities[3].timestamp
    def test_activity_before(self):
        """Test activities before timestamp"""
        dataset = self._create_dataset_with_activities()
        db_activities = package_activity_list(dataset["id"], limit=10)
        pkg_activities = helpers.call_action(
            "package_activity_list",
            id=dataset["id"],
            before=db_activities[1].timestamp.timestamp(),
        )
        # we expect just 2 (the last 2)
        assert len(pkg_activities) == 2
        # first activity here is the first one.
        assert pkg_activities[0]["activity_type"] == "changed package"
        pkg_activity_time = datetime.datetime.fromisoformat(
            pkg_activities[0]["timestamp"]
        )
        assert pkg_activity_time == db_activities[2].timestamp
        # last activity here is the package creation.
        assert pkg_activities[-1]["activity_type"] == "new package"
        pkg_activity_time = datetime.datetime.fromisoformat(
            pkg_activities[-1]["timestamp"]
        )
        assert pkg_activity_time == db_activities[3].timestamp
    def test_activity_after_before(self):
        """Test activities between two timestamps"""
        dataset = self._create_dataset_with_activities()
        db_activities = package_activity_list(dataset["id"], limit=10)
        pkg_activities = helpers.call_action(
            "package_activity_list",
            id=dataset["id"],
            before=db_activities[1].timestamp.timestamp(),
            after=db_activities[3].timestamp.timestamp(),
        )
        # we expect just 1 (db_activities[2])
        assert len(pkg_activities) == 1
        # first activity here is the first one.
        assert pkg_activities[0]["activity_type"] == "changed package"
        pkg_activity_time = datetime.datetime.fromisoformat(
            pkg_activities[0]["timestamp"]
        )
        assert pkg_activity_time == db_activities[2].timestamp
    def test_activity_after_before_offset(self):
        """Test activities between two timestamps with an offset"""
        dataset = self._create_dataset_with_activities(updates=4)
        db_activities = package_activity_list(dataset["id"], limit=10)
        pkg_activities = helpers.call_action(
            "package_activity_list",
            id=dataset["id"],
            before=db_activities[1].timestamp.timestamp(),
            after=db_activities[4].timestamp.timestamp(),
            offset=1,
        )
        # we expect just 1 (db_activities[3])
        assert len(pkg_activities) == 1
        # first activity here is the first one.
        assert pkg_activities[0]["activity_type"] == "changed package"
        pkg_activity_time = datetime.datetime.fromisoformat(
            pkg_activities[0]["timestamp"]
        )
        assert pkg_activity_time == db_activities[3].timestamp
@pytest.mark.ckan_config("ckan.plugins", "activity")
@pytest.mark.usefixtures("clean_db", "with_plugins")
class TestUserActivityList(object):
def test_create_user(self):
    """Creating a user emits a single 'new user' activity about that user."""
    user = factories.User()
    stream = helpers.call_action("user_activity_list", id=user["id"])
    assert [item["activity_type"] for item in stream] == ["new user"]
    newest = stream[0]
    assert newest["user_id"] == user["id"]
    assert newest["object_id"] == user["id"]
def test_user_update_activity_stream(self):
    """Test that the right activity is emitted when updating a user."""
    user = factories.User()
    before = datetime.datetime.utcnow()
    # FIXME we have to pass the email address and password to user_update
    # even though we're not updating those fields, otherwise validation
    # fails.
    helpers.call_action(
        "user_update",
        id=user["id"],
        name=user["name"],
        email=user["email"],
        password=factories.User.stub().password,
        fullname="updated full name",
    )
    stream = helpers.call_action("user_activity_list", id=user["id"])
    newest = stream[0]
    assert newest["activity_type"] == "changed user"
    assert newest["object_id"] == user["id"]
    assert newest["user_id"] == user["id"]
    after = datetime.datetime.utcnow()
    stamp = datetime.datetime.strptime(
        newest["timestamp"], "%Y-%m-%dT%H:%M:%S.%f"
    )
    # The recorded timestamp falls inside the update call's window.
    assert before <= stamp <= after
def test_create_dataset(self):
    """Dataset creation shows up as 'new package' on the creator's stream."""
    user = factories.User()
    _clear_activities()
    dataset = factories.Dataset(user=user)
    stream = helpers.call_action("user_activity_list", id=user["id"])
    assert [item["activity_type"] for item in stream] == ["new package"]
    newest = stream[0]
    assert newest["user_id"] == user["id"]
    assert newest["object_id"] == dataset["id"]
    assert newest["data"]["package"]["title"] == dataset["title"]
def test_dataset_changed_by_another_user(self):
    """Edits made by a different user do not appear on the owner's stream."""
    user = factories.User()
    another_user = factories.Sysadmin()
    dataset = factories.Dataset(user=user)
    _clear_activities()
    dataset["extras"].append(dict(key="rating", value="great"))
    helpers.call_action(
        "package_update", context={"user": another_user["name"]}, **dataset
    )
    # the user might have created the dataset, but a change by another
    # user does not show on the user's activity stream
    stream = helpers.call_action("user_activity_list", id=user["id"])
    assert [item["activity_type"] for item in stream] == []
def test_change_dataset_add_extra(self):
    """Adding an extra records a 'changed package' activity for the user."""
    user = factories.User()
    dataset = factories.Dataset(user=user)
    _clear_activities()
    dataset["extras"].append(dict(key="rating", value="great"))
    helpers.call_action(
        "package_update", context={"user": user["name"]}, **dataset
    )
    stream = helpers.call_action("user_activity_list", id=user["id"])
    assert [item["activity_type"] for item in stream] == ["changed package"]
    newest = stream[0]
    assert newest["user_id"] == user["id"]
    assert newest["object_id"] == dataset["id"]
    assert newest["data"]["package"]["title"] == dataset["title"]
def test_change_dataset_add_tag(self):
    """Adding a tag records a 'changed package' activity for the user."""
    user = factories.User()
    dataset = factories.Dataset(user=user)
    _clear_activities()
    dataset["tags"].append(dict(name="checked"))
    helpers.call_action(
        "package_update", context={"user": user["name"]}, **dataset
    )
    stream = helpers.call_action("user_activity_list", id=user["id"])
    assert [item["activity_type"] for item in stream] == ["changed package"]
    newest = stream[0]
    assert newest["user_id"] == user["id"]
    assert newest["object_id"] == dataset["id"]
    assert newest["data"]["package"]["title"] == dataset["title"]
def test_create_group(self):
    """Group creation shows up as 'new group' on the creator's stream."""
    user = factories.User()
    _clear_activities()
    group = factories.Group(user=user)
    stream = helpers.call_action("user_activity_list", id=user["id"])
    assert [item["activity_type"] for item in stream] == ["new group"]
    newest = stream[0]
    assert newest["user_id"] == user["id"]
    assert newest["object_id"] == group["id"]
    assert newest["data"]["group"]["title"] == group["title"]
def test_delete_group_using_group_delete(self):
    """group_delete records a 'deleted group' activity for the user."""
    user = factories.User()
    group = factories.Group(user=user)
    _clear_activities()
    helpers.call_action(
        "group_delete", context={"user": user["name"]}, **group
    )
    stream = helpers.call_action("user_activity_list", id=user["id"])
    assert [item["activity_type"] for item in stream] == ["deleted group"]
    newest = stream[0]
    assert newest["user_id"] == user["id"]
    assert newest["object_id"] == group["id"]
    assert newest["data"]["group"]["title"] == group["title"]
def test_delete_group_by_updating_state(self):
    """Setting state='deleted' via group_update also records 'deleted group'."""
    user = factories.User()
    group = factories.Group(user=user)
    _clear_activities()
    group["state"] = "deleted"
    helpers.call_action(
        "group_update", context={"user": user["name"]}, **group
    )
    stream = helpers.call_action("user_activity_list", id=user["id"])
    assert [item["activity_type"] for item in stream] == ["deleted group"]
    newest = stream[0]
    assert newest["user_id"] == user["id"]
    assert newest["object_id"] == group["id"]
    assert newest["data"]["group"]["title"] == group["title"]
def test_create_organization(self):
user = factories.User()
_clear_activities()
org = factories.Organization(user=user)
activities = helpers.call_action("user_activity_list", id=user["id"])
assert [activity["activity_type"] for activity in activities] == [
"new organization"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == org["id"]
assert activities[0]["data"]["group"]["title"] == org["title"]
def test_delete_org_using_organization_delete(self):
user = factories.User()
org = factories.Organization(user=user)
_clear_activities()
helpers.call_action(
"organization_delete", context={"user": user["name"]}, **org
)
activities = helpers.call_action("user_activity_list", id=user["id"])
assert [activity["activity_type"] for activity in activities] == [
"deleted organization"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == org["id"]
assert activities[0]["data"]["group"]["title"] == org["title"]
def test_delete_org_by_updating_state(self):
user = factories.User()
org = factories.Organization(user=user)
_clear_activities()
org["state"] = "deleted"
helpers.call_action(
"organization_update", context={"user": user["name"]}, **org
)
activities = helpers.call_action("user_activity_list", id=user["id"])
assert [activity["activity_type"] for activity in activities] == [
"deleted organization"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == org["id"]
assert activities[0]["data"]["group"]["title"] == org["title"]
def _create_bulk_user_activities(self, count):
from ckan import model
user = factories.User()
objs = [
Activity(
user_id=user["id"],
object_id=None,
activity_type=None,
data=None,
)
for _ in range(count)
]
model.Session.add_all(objs)
model.repo.commit_and_remove()
return user["id"]
def test_limit_default(self):
id = self._create_bulk_user_activities(35)
results = helpers.call_action("user_activity_list", id=id)
assert len(results) == 31 # i.e. default value
@pytest.mark.ckan_config("ckan.activity_list_limit", "5")
def test_limit_configured(self):
id = self._create_bulk_user_activities(7)
results = helpers.call_action("user_activity_list", id=id)
assert len(results) == 5 # i.e. ckan.activity_list_limit
@pytest.mark.ckan_config("ckan.activity_list_limit", "5")
@pytest.mark.ckan_config("ckan.activity_list_limit_max", "7")
def test_limit_hits_max(self):
id = self._create_bulk_user_activities(9)
results = helpers.call_action("user_activity_list", id=id, limit="9")
assert len(results) == 7 # i.e. ckan.activity_list_limit_max
@pytest.mark.ckan_config("ckan.plugins", "activity")
@pytest.mark.usefixtures("clean_db", "with_plugins")
class TestGroupActivityList(object):
    """Tests for the ``group_activity_list`` action.

    Each test arranges group/dataset history (usually calling
    ``_clear_activities()`` first so only the activities under test remain)
    and asserts on the resulting activity stream, newest first.
    """
    def test_create_group(self):
        """Creating a group records a single 'new group' activity."""
        user = factories.User()
        group = factories.Group(user=user)
        activities = helpers.call_action("group_activity_list", id=group["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "new group"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == group["id"]
        assert activities[0]["data"]["group"]["title"] == group["title"]
    def test_change_group(self):
        """Updating a group prepends 'changed group'; the older 'new group'
        activity keeps the original title in its payload."""
        user = factories.User()
        _clear_activities()
        group = factories.Group(user=user)
        original_title = group["title"]
        group["title"] = "Group with changed title"
        helpers.call_action(
            "group_update", context={"user": user["name"]}, **group
        )
        activities = helpers.call_action("group_activity_list", id=group["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "changed group",
            "new group",
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == group["id"]
        assert (
            activities[0]["data"]["group"]["title"]
            == "Group with changed title"
        )
        # the old group still has the old title
        assert activities[1]["activity_type"] == "new group"
        assert activities[1]["data"]["group"]["title"] == original_title
    def test_create_dataset(self):
        """Creating a dataset in the group yields a 'new package' activity."""
        user = factories.User()
        group = factories.Group(user=user)
        _clear_activities()
        dataset = factories.Dataset(groups=[{"id": group["id"]}], user=user)
        activities = helpers.call_action("group_activity_list", id=group["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "new package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
    def test_change_dataset(self):
        """Updating a dataset in the group prepends 'changed package'; the
        older 'new package' activity keeps the original title."""
        user = factories.User()
        group = factories.Group(user=user)
        _clear_activities()
        dataset = factories.Dataset(groups=[{"id": group["id"]}], user=user)
        original_title = dataset["title"]
        dataset["title"] = "Dataset with changed title"
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action("group_activity_list", id=group["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "changed package",
            "new package",
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
        # the old dataset still has the old title
        assert activities[1]["activity_type"] == "new package"
        assert activities[1]["data"]["package"]["title"] == original_title
    def test_change_dataset_add_extra(self):
        """Adding an extra to a member dataset counts as 'changed package'."""
        user = factories.User()
        group = factories.Group(user=user)
        dataset = factories.Dataset(groups=[{"id": group["id"]}], user=user)
        _clear_activities()
        dataset["extras"].append(dict(key="rating", value="great"))
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action("group_activity_list", id=group["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
    def test_change_dataset_add_tag(self):
        """Adding a tag to a member dataset counts as 'changed package'."""
        user = factories.User()
        group = factories.Group(user=user)
        dataset = factories.Dataset(groups=[{"id": group["id"]}], user=user)
        _clear_activities()
        dataset["tags"].append(dict(name="checked"))
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action("group_activity_list", id=group["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
    def test_delete_dataset(self):
        """Deleting a member dataset yields a 'deleted package' activity."""
        user = factories.User()
        group = factories.Group(user=user)
        dataset = factories.Dataset(groups=[{"id": group["id"]}], user=user)
        _clear_activities()
        helpers.call_action(
            "package_delete", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action("group_activity_list", id=group["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "deleted package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
    def test_change_dataset_that_used_to_be_in_the_group(self):
        """Edits to a dataset removed from the group stay out of its stream."""
        user = factories.User()
        group = factories.Group(user=user)
        dataset = factories.Dataset(groups=[{"id": group["id"]}], user=user)
        # remove the dataset from the group
        dataset["groups"] = []
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        _clear_activities()
        # edit the dataset
        dataset["title"] = "Dataset with changed title"
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        # dataset change should not show up in its former group
        activities = helpers.call_action("group_activity_list", id=group["id"])
        assert [activity["activity_type"] for activity in activities] == []
    def test_delete_dataset_that_used_to_be_in_the_group(self):
        """Deleting a dataset removed from the group still shows in the
        group's stream -- a known limitation, see the NOTE below."""
        user = factories.User()
        group = factories.Group(user=user)
        dataset = factories.Dataset(groups=[{"id": group["id"]}], user=user)
        # remove the dataset from the group
        dataset["groups"] = []
        dataset["title"] = "Dataset with changed title"
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        _clear_activities()
        helpers.call_action(
            "package_delete", context={"user": user["name"]}, **dataset
        )
        # NOTE:
        # ideally the dataset's deletion would not show up in its old group
        # but it can't be helped without _group_activity_query getting very
        # complicated
        activities = helpers.call_action("group_activity_list", id=group["id"])
        assert [activity["activity_type"] for activity in activities] == [
            "deleted package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
    def _create_bulk_group_activities(self, count):
        """Insert ``count`` bare Activity rows against a new group; return
        the group's id (used by the limit tests below)."""
        group = factories.Group()
        from ckan import model
        user = factories.User()
        objs = [
            Activity(
                user_id=user["id"],
                object_id=group["id"],
                activity_type=None,
                data=None,
            )
            for _ in range(count)
        ]
        model.Session.add_all(objs)
        model.repo.commit_and_remove()
        return group["id"]
    def test_limit_default(self):
        """Without a limit argument the stream is capped at the default (31)."""
        id = self._create_bulk_group_activities(35)
        results = helpers.call_action("group_activity_list", id=id)
        assert len(results) == 31  # i.e. default value
    @pytest.mark.ckan_config("ckan.activity_list_limit", "5")
    def test_limit_configured(self):
        """ckan.activity_list_limit caps the stream length."""
        id = self._create_bulk_group_activities(7)
        results = helpers.call_action("group_activity_list", id=id)
        assert len(results) == 5  # i.e. ckan.activity_list_limit
    @pytest.mark.ckan_config("ckan.activity_list_limit", "5")
    @pytest.mark.ckan_config("ckan.activity_list_limit_max", "7")
    def test_limit_hits_max(self):
        """A caller-supplied limit is clamped to ckan.activity_list_limit_max."""
        id = self._create_bulk_group_activities(9)
        results = helpers.call_action("group_activity_list", id=id, limit="9")
        assert len(results) == 7  # i.e. ckan.activity_list_limit_max
    def test_normal_user_doesnt_see_hidden_activities(self):
        """Site-user activity is hidden from a normal caller by default."""
        # activity is 'hidden' because group is created by site_user
        group = factories.Group()
        activities = helpers.call_action("group_activity_list", id=group["id"])
        assert [activity["activity_type"] for activity in activities] == []
    def test_sysadmin_user_doesnt_see_hidden_activities_by_default(self):
        """Even sysadmins do not see hidden activity without opting in."""
        # activity is 'hidden' because group is created by site_user
        group = factories.Group()
        activities = helpers.call_action("group_activity_list", id=group["id"])
        assert [activity["activity_type"] for activity in activities] == []
    def test_sysadmin_user_can_include_hidden_activities(self):
        """Passing include_hidden_activity=True reveals site_user activity."""
        # activity is 'hidden' because group is created by site_user
        group = factories.Group()
        activities = helpers.call_action(
            "group_activity_list", include_hidden_activity=True, id=group["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "new group"
        ]
@pytest.mark.ckan_config("ckan.plugins", "activity")
@pytest.mark.usefixtures("clean_db", "with_plugins")
class TestOrganizationActivityList(object):
    """Tests for the ``organization_activity_list`` action.

    Note that organizations are stored as groups, so activity payloads use
    the ``"group"`` key for org details.
    """
    def test_bulk_make_public(self):
        """bulk_update_public records 'changed package' activities."""
        org = factories.Organization()
        dataset1 = factories.Dataset(owner_org=org["id"], private=True)
        dataset2 = factories.Dataset(owner_org=org["id"], private=True)
        helpers.call_action(
            "bulk_update_public",
            {},
            datasets=[dataset1["id"], dataset2["id"]],
            org_id=org["id"],
        )
        activities = helpers.call_action(
            "organization_activity_list", id=org["id"]
        )
        assert activities[0]["activity_type"] == "changed package"
    def test_bulk_delete(self):
        """bulk_update_delete records 'deleted package' activities."""
        org = factories.Organization()
        dataset1 = factories.Dataset(owner_org=org["id"])
        dataset2 = factories.Dataset(owner_org=org["id"])
        helpers.call_action(
            "bulk_update_delete",
            {},
            datasets=[dataset1["id"], dataset2["id"]],
            org_id=org["id"],
        )
        activities = helpers.call_action(
            "organization_activity_list", id=org["id"]
        )
        assert activities[0]["activity_type"] == "deleted package"
    def test_create_organization(self):
        """Creating an org records a single 'new organization' activity."""
        user = factories.User()
        org = factories.Organization(user=user)
        activities = helpers.call_action(
            "organization_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "new organization"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == org["id"]
        assert activities[0]["data"]["group"]["title"] == org["title"]
    def test_change_organization(self):
        """Updating an org prepends 'changed organization'; the older
        'new organization' activity keeps the original title."""
        user = factories.User()
        _clear_activities()
        org = factories.Organization(user=user)
        original_title = org["title"]
        org["title"] = "Organization with changed title"
        helpers.call_action(
            "organization_update", context={"user": user["name"]}, **org
        )
        activities = helpers.call_action(
            "organization_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed organization",
            "new organization",
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == org["id"]
        assert (
            activities[0]["data"]["group"]["title"]
            == "Organization with changed title"
        )
        # the old org still has the old title
        assert activities[1]["activity_type"] == "new organization"
        assert activities[1]["data"]["group"]["title"] == original_title
    def test_create_dataset(self):
        """Creating a dataset in the org yields a 'new package' activity."""
        user = factories.User()
        org = factories.Organization(user=user)
        _clear_activities()
        dataset = factories.Dataset(owner_org=org["id"], user=user)
        activities = helpers.call_action(
            "organization_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "new package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
    def test_change_dataset(self):
        """Updating a dataset in the org prepends 'changed package'; the
        older 'new package' activity keeps the original title."""
        user = factories.User()
        org = factories.Organization(user=user)
        _clear_activities()
        dataset = factories.Dataset(owner_org=org["id"], user=user)
        original_title = dataset["title"]
        dataset["title"] = "Dataset with changed title"
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "organization_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package",
            "new package",
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
        # the old dataset still has the old title
        assert activities[1]["activity_type"] == "new package"
        assert activities[1]["data"]["package"]["title"] == original_title
    def test_change_dataset_add_tag(self):
        """Adding a tag to an org dataset counts as 'changed package'."""
        user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(owner_org=org["id"], user=user)
        _clear_activities()
        dataset["tags"].append(dict(name="checked"))
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "organization_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
    def test_delete_dataset(self):
        """Deleting an org dataset yields a 'deleted package' activity."""
        user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(owner_org=org["id"], user=user)
        _clear_activities()
        helpers.call_action(
            "package_delete", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "organization_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "deleted package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
    def test_change_dataset_that_used_to_be_in_the_org(self):
        """Edits to a dataset moved to another org stay out of the old
        org's stream."""
        user = factories.User()
        org = factories.Organization(user=user)
        org2 = factories.Organization(user=user)
        dataset = factories.Dataset(owner_org=org["id"], user=user)
        # remove the dataset from the org
        dataset["owner_org"] = org2["id"]
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        _clear_activities()
        # edit the dataset
        dataset["title"] = "Dataset with changed title"
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        # dataset change should not show up in its former group
        activities = helpers.call_action(
            "organization_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == []
    def test_delete_dataset_that_used_to_be_in_the_org(self):
        """Deleting a dataset moved to another org stays out of the old
        org's stream (unlike the equivalent group case)."""
        user = factories.User()
        org = factories.Organization(user=user)
        org2 = factories.Organization(user=user)
        dataset = factories.Dataset(owner_org=org["id"], user=user)
        # remove the dataset from the group
        dataset["owner_org"] = org2["id"]
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        _clear_activities()
        dataset["title"] = "Dataset with changed title"
        helpers.call_action(
            "package_delete", context={"user": user["name"]}, **dataset
        )
        # dataset deletion should not show up in its former org
        activities = helpers.call_action(
            "organization_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == []
    def _create_bulk_org_activities(self, count):
        """Insert ``count`` bare Activity rows against a new org; return
        the org's id (used by the limit tests below)."""
        org = factories.Organization()
        from ckan import model
        user = factories.User()
        objs = [
            Activity(
                user_id=user["id"],
                object_id=org["id"],
                activity_type=None,
                data=None,
            )
            for _ in range(count)
        ]
        model.Session.add_all(objs)
        model.repo.commit_and_remove()
        return org["id"]
    def test_limit_default(self):
        """Without a limit argument the stream is capped at the default (31)."""
        id = self._create_bulk_org_activities(35)
        results = helpers.call_action("organization_activity_list", id=id)
        assert len(results) == 31  # i.e. default value
    @pytest.mark.ckan_config("ckan.activity_list_limit", "5")
    def test_limit_configured(self):
        """ckan.activity_list_limit caps the stream length."""
        id = self._create_bulk_org_activities(7)
        results = helpers.call_action("organization_activity_list", id=id)
        assert len(results) == 5  # i.e. ckan.activity_list_limit
    @pytest.mark.ckan_config("ckan.activity_list_limit", "5")
    @pytest.mark.ckan_config("ckan.activity_list_limit_max", "7")
    def test_limit_hits_max(self):
        """A caller-supplied limit is clamped to ckan.activity_list_limit_max."""
        id = self._create_bulk_org_activities(9)
        results = helpers.call_action(
            "organization_activity_list", id=id, limit="9"
        )
        assert len(results) == 7  # i.e. ckan.activity_list_limit_max
    def test_normal_user_doesnt_see_hidden_activities(self):
        """Site-user activity is hidden from a normal caller by default."""
        # activity is 'hidden' because org is created by site_user
        org = factories.Organization()
        activities = helpers.call_action(
            "organization_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == []
    def test_sysadmin_user_doesnt_see_hidden_activities_by_default(self):
        """Even sysadmins do not see hidden activity without opting in."""
        # activity is 'hidden' because org is created by site_user
        org = factories.Organization()
        activities = helpers.call_action(
            "organization_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == []
    def test_sysadmin_user_can_include_hidden_activities(self):
        """Passing include_hidden_activity=True reveals site_user activity."""
        # activity is 'hidden' because org is created by site_user
        org = factories.Organization()
        activities = helpers.call_action(
            "organization_activity_list",
            include_hidden_activity=True,
            id=org["id"],
        )
        assert [activity["activity_type"] for activity in activities] == [
            "new organization"
        ]
@pytest.mark.ckan_config("ckan.plugins", "activity")
@pytest.mark.usefixtures("clean_db", "with_plugins")
class TestRecentlyChangedPackagesActivityList:
    """Tests for the ``recently_changed_packages_activity_list`` action,
    the site-wide stream of package activities.

    Fixes applied in review:
    * ``test_create_dataset`` used the misleading local name ``org`` for a
      dataset -- renamed to ``dataset``.
    * ``test_delete_dataset`` queried ``organization_activity_list`` (an
      apparent copy/paste error), so the deletion case of the action under
      test was never exercised -- now queries
      ``recently_changed_packages_activity_list``.
    """
    def test_create_dataset(self):
        """Creating a dataset yields a 'new package' activity."""
        user = factories.User()
        dataset = factories.Dataset(user=user)
        activities = helpers.call_action(
            "recently_changed_packages_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "new package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
    def test_change_dataset(self):
        """Updating a dataset prepends 'changed package'; the older
        'new package' activity keeps the original title."""
        user = factories.User()
        org = factories.Organization(user=user)
        _clear_activities()
        dataset = factories.Dataset(owner_org=org["id"], user=user)
        original_title = dataset["title"]
        dataset["title"] = "Dataset with changed title"
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "recently_changed_packages_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package",
            "new package",
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
        # the old dataset still has the old title
        assert activities[1]["activity_type"] == "new package"
        assert activities[1]["data"]["package"]["title"] == original_title
    def test_change_dataset_add_extra(self):
        """Adding an extra to a dataset counts as 'changed package'."""
        user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(owner_org=org["id"], user=user)
        _clear_activities()
        dataset["extras"].append(dict(key="rating", value="great"))
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "recently_changed_packages_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
    def test_change_dataset_add_tag(self):
        """Adding a tag to a dataset counts as 'changed package'."""
        user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(owner_org=org["id"], user=user)
        _clear_activities()
        dataset["tags"].append(dict(name="checked"))
        helpers.call_action(
            "package_update", context={"user": user["name"]}, **dataset
        )
        activities = helpers.call_action(
            "recently_changed_packages_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "changed package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
    def test_delete_dataset(self):
        """Deleting a dataset yields a 'deleted package' activity."""
        user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(owner_org=org["id"], user=user)
        _clear_activities()
        helpers.call_action(
            "package_delete", context={"user": user["name"]}, **dataset
        )
        # BUGFIX: previously queried "organization_activity_list" here.
        activities = helpers.call_action(
            "recently_changed_packages_activity_list", id=org["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "deleted package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
    def _create_bulk_package_activities(self, count):
        """Insert ``count`` minimal package-typed Activity rows (no object
        id / data) for a fresh user -- just enough rows for the limit tests."""
        from ckan import model
        user = factories.User()
        objs = [
            Activity(
                user_id=user["id"],
                object_id=None,
                activity_type="new_package",
                data=None,
            )
            for _ in range(count)
        ]
        model.Session.add_all(objs)
        model.repo.commit_and_remove()
    def test_limit_default(self):
        """Without a limit argument the stream is capped at the default (31)."""
        self._create_bulk_package_activities(35)
        results = helpers.call_action(
            "recently_changed_packages_activity_list"
        )
        assert len(results) == 31  # i.e. default value
    @pytest.mark.ckan_config("ckan.activity_list_limit", "5")
    def test_limit_configured(self):
        """ckan.activity_list_limit caps the stream length."""
        self._create_bulk_package_activities(7)
        results = helpers.call_action(
            "recently_changed_packages_activity_list"
        )
        assert len(results) == 5  # i.e. ckan.activity_list_limit
    @pytest.mark.ckan_config("ckan.activity_list_limit", "5")
    @pytest.mark.ckan_config("ckan.activity_list_limit_max", "7")
    def test_limit_hits_max(self):
        """A caller-supplied limit is clamped to ckan.activity_list_limit_max."""
        self._create_bulk_package_activities(9)
        results = helpers.call_action(
            "recently_changed_packages_activity_list", limit="9"
        )
        assert len(results) == 7  # i.e. ckan.activity_list_limit_max
@pytest.mark.ckan_config("ckan.plugins", "activity")
@pytest.mark.usefixtures("clean_db", "with_plugins")
class TestDashboardActivityList(object):
    """Tests for the ``dashboard_activity_list`` action."""

    def test_create_user(self):
        """Registering appears as 'new user' on the user's own dashboard."""
        user = factories.User()
        stream = helpers.call_action(
            "dashboard_activity_list", context={"user": user["id"]}
        )
        assert ["new user"] == [act["activity_type"] for act in stream]
        newest = stream[0]
        assert newest["user_id"] == user["id"]
        assert newest["object_id"] == user["id"]
        # user's own activities are always marked ``'is_new': False``
        assert not newest["is_new"]

    def test_create_dataset(self):
        """Dataset creation appears on the creator's dashboard."""
        user = factories.User()
        _clear_activities()
        dataset = factories.Dataset(user=user)
        stream = helpers.call_action(
            "dashboard_activity_list", context={"user": user["id"]}
        )
        assert ["new package"] == [act["activity_type"] for act in stream]
        newest = stream[0]
        assert newest["user_id"] == user["id"]
        assert newest["object_id"] == dataset["id"]
        assert newest["data"]["package"]["title"] == dataset["title"]
        # user's own activities are always marked ``'is_new': False``
        assert not newest["is_new"]

    def test_create_group(self):
        """Group creation appears on the creator's dashboard."""
        user = factories.User()
        _clear_activities()
        group = factories.Group(user=user)
        stream = helpers.call_action(
            "dashboard_activity_list", context={"user": user["id"]}
        )
        assert ["new group"] == [act["activity_type"] for act in stream]
        newest = stream[0]
        assert newest["user_id"] == user["id"]
        assert newest["object_id"] == group["id"]
        assert newest["data"]["group"]["title"] == group["title"]
        # user's own activities are always marked ``'is_new': False``
        assert not newest["is_new"]

    def test_create_organization(self):
        """Org creation appears on the creator's dashboard."""
        user = factories.User()
        _clear_activities()
        org = factories.Organization(user=user)
        stream = helpers.call_action(
            "dashboard_activity_list", context={"user": user["id"]}
        )
        assert ["new organization"] == [
            act["activity_type"] for act in stream
        ]
        newest = stream[0]
        assert newest["user_id"] == user["id"]
        assert newest["object_id"] == org["id"]
        assert newest["data"]["group"]["title"] == org["title"]
        # user's own activities are always marked ``'is_new': False``
        assert not newest["is_new"]

    def _create_bulk_package_activities(self, count):
        """Insert ``count`` bare Activity rows for a new user; return the
        user's id (used by the limit tests below)."""
        from ckan import model

        user = factories.User()
        rows = []
        for _ in range(count):
            rows.append(
                Activity(
                    user_id=user["id"],
                    object_id=None,
                    activity_type=None,
                    data=None,
                )
            )
        model.Session.add_all(rows)
        model.repo.commit_and_remove()
        return user["id"]

    def test_limit_default(self):
        """The dashboard list is capped at the default of 31 activities."""
        user_id = self._create_bulk_package_activities(35)
        results = helpers.call_action(
            "dashboard_activity_list", context={"user": user_id}
        )
        assert len(results) == 31  # i.e. default value

    @pytest.mark.ckan_config("ckan.activity_list_limit", "5")
    def test_limit_configured(self):
        """ckan.activity_list_limit caps the dashboard list length."""
        user_id = self._create_bulk_package_activities(7)
        results = helpers.call_action(
            "dashboard_activity_list", context={"user": user_id}
        )
        assert len(results) == 5  # i.e. ckan.activity_list_limit

    @pytest.mark.ckan_config("ckan.activity_list_limit", "5")
    @pytest.mark.ckan_config("ckan.activity_list_limit_max", "7")
    def test_limit_hits_max(self):
        """A caller-supplied limit is clamped to ckan.activity_list_limit_max."""
        user_id = self._create_bulk_package_activities(9)
        results = helpers.call_action(
            "dashboard_activity_list", limit="9", context={"user": user_id}
        )
        assert len(results) == 7  # i.e. ckan.activity_list_limit_max
@pytest.mark.ckan_config("ckan.plugins", "activity")
@pytest.mark.usefixtures("clean_db", "with_plugins")
class TestDashboardNewActivities(object):
def test_users_own_activities(self):
# a user's own activities are not shown as "new"
user = factories.User()
dataset = factories.Dataset(user=user)
dataset["title"] = "Dataset with changed title"
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
helpers.call_action(
"package_delete", context={"user": user["name"]}, **dataset
)
group = factories.Group(user=user)
group["title"] = "Group with changed title"
helpers.call_action(
"group_update", context={"user": user["name"]}, **group
)
helpers.call_action(
"group_delete", context={"user": user["name"]}, **group
)
new_activities = helpers.call_action(
"dashboard_activity_list", context={"user": user["id"]}
)
assert [activity["is_new"] for activity in new_activities] == [
False
] * 7
new_activities_count = helpers.call_action(
"dashboard_new_activities_count", context={"user": user["id"]}
)
assert new_activities_count == 0
def test_activities_by_a_followed_user(self):
user = factories.User()
followed_user = factories.User()
helpers.call_action(
"follow_user", context={"user": user["name"]}, **followed_user
)
_clear_activities()
dataset = factories.Dataset(user=followed_user)
dataset["title"] = "Dataset with changed title"
helpers.call_action(
"package_update",
context={"user": followed_user["name"]},
**dataset,
)
helpers.call_action(
"package_delete",
context={"user": followed_user["name"]},
**dataset,
)
group = factories.Group(user=followed_user)
group["title"] = "Group with changed title"
helpers.call_action(
"group_update", context={"user": followed_user["name"]}, **group
)
helpers.call_action(
"group_delete", context={"user": followed_user["name"]}, **group
)
activities = helpers.call_action(
"dashboard_activity_list", context={"user": user["id"]}
)
assert [
activity["activity_type"] for activity in activities[::-1]
] == [
"new package",
"changed package",
"deleted package",
"new group",
"changed group",
"deleted group",
]
assert [activity["is_new"] for activity in activities] == [True] * 6
assert (
helpers.call_action(
"dashboard_new_activities_count", context={"user": user["id"]}
)
== 6
)
def test_activities_on_a_followed_dataset(self):
user = factories.User()
another_user = factories.Sysadmin()
_clear_activities()
dataset = factories.Dataset(user=another_user)
helpers.call_action(
"follow_dataset", context={"user": user["name"]}, **dataset
)
dataset["title"] = "Dataset with changed title"
helpers.call_action(
"package_update", context={"user": another_user["name"]}, **dataset
)
activities = helpers.call_action(
"dashboard_activity_list", context={"user": user["id"]}
)
assert [
(activity["activity_type"], activity["is_new"])
for activity in activities[::-1]
] == [
("new package", True),
# NB The 'new package' activity is in our activity stream and shows
# as "new" even though it occurred before we followed it. This is
# known & intended design.
("changed package", True),
]
assert (
helpers.call_action(
"dashboard_new_activities_count", context={"user": user["id"]}
)
== 2
)
def test_activities_on_a_followed_group(self):
user = factories.User()
another_user = factories.Sysadmin()
_clear_activities()
group = factories.Group(user=user)
helpers.call_action(
"follow_group", context={"user": user["name"]}, **group
)
group["title"] = "Group with changed title"
helpers.call_action(
"group_update", context={"user": another_user["name"]}, **group
)
activities = helpers.call_action(
"dashboard_activity_list", context={"user": user["id"]}
)
assert [
(activity["activity_type"], activity["is_new"])
for activity in activities[::-1]
] == [
("new group", False), # False because user did this one herself
("changed group", True),
]
assert (
helpers.call_action(
"dashboard_new_activities_count", context={"user": user["id"]}
)
== 1
)
def test_activities_on_a_dataset_in_a_followed_group(self):
user = factories.User()
another_user = factories.Sysadmin()
group = factories.Group(user=user)
helpers.call_action(
"follow_group", context={"user": user["name"]}, **group
)
_clear_activities()
dataset = factories.Dataset(
groups=[{"name": group["name"]}], user=another_user
)
dataset["title"] = "Dataset with changed title"
helpers.call_action(
"package_update", context={"user": another_user["name"]}, **dataset
)
activities = helpers.call_action(
"dashboard_activity_list", context={"user": user["id"]}
)
assert [
(activity["activity_type"], activity["is_new"])
for activity in activities[::-1]
] == [("new package", True), ("changed package", True)]
assert (
helpers.call_action(
"dashboard_new_activities_count", context={"user": user["id"]}
)
== 2
)
def test_activities_on_a_dataset_in_a_followed_org(self):
user = factories.User()
another_user = factories.Sysadmin()
org = factories.Organization(user=user)
helpers.call_action(
"follow_group", context={"user": user["name"]}, **org
)
_clear_activities()
dataset = factories.Dataset(owner_org=org["id"], user=another_user)
dataset["title"] = "Dataset with changed title"
helpers.call_action(
"package_update", context={"user": another_user["name"]}, **dataset
)
activities = helpers.call_action(
"dashboard_activity_list", context={"user": user["id"]}
)
assert [
(activity["activity_type"], activity["is_new"])
for activity in activities[::-1]
] == [("new package", True), ("changed package", True)]
assert (
helpers.call_action(
"dashboard_new_activities_count", context={"user": user["id"]}
)
== 2
)
def test_activities_that_should_not_show(self):
user = factories.User()
_clear_activities()
# another_user does some activity unconnected with user
another_user = factories.Sysadmin()
group = factories.Group(user=another_user)
dataset = factories.Dataset(
groups=[{"name": group["name"]}], user=another_user
)
dataset["title"] = "Dataset with changed title"
helpers.call_action(
"package_update", context={"user": another_user["name"]}, **dataset
)
activities = helpers.call_action(
"dashboard_activity_list", context={"user": user["id"]}
)
assert [
(activity["activity_type"], activity["is_new"])
for activity in activities[::-1]
] == []
assert (
helpers.call_action(
"dashboard_new_activities_count", context={"user": user["id"]}
)
== 0
)
@pytest.mark.ckan_config("ckan.activity_list_limit", "5")
def test_maximum_number_of_new_activities(self):
"""Test that the new activities count does not go higher than 5, even
if there are more than 5 new activities from the user's followers."""
user = factories.User()
another_user = factories.Sysadmin()
dataset = factories.Dataset()
helpers.call_action(
"follow_dataset", context={"user": user["name"]}, **dataset
)
for n in range(0, 7):
dataset["notes"] = "Updated {n} times".format(n=n)
helpers.call_action(
"package_update",
context={"user": another_user["name"]},
**dataset,
)
assert (
helpers.call_action(
"dashboard_new_activities_count", context={"user": user["id"]}
)
== 5
)
@pytest.mark.ckan_config("ckan.plugins", "activity")
@pytest.mark.usefixtures("clean_db", "with_request_context", "with_plugins")
class TestSendEmailNotifications(object):
    """Tests for the ``send_email_notifications`` action."""

    # TODO: this action doesn't do much. Maybe it would be better to move
    # tests into lib.email_notifications eventually
    def check_email(self, email, address, name, subject):
        # NOTE(review): the tuple layout (sender at [1], recipients at [2],
        # body at [3]) is assumed from the mail_server fixture - confirm there.
        assert email[1] == "<EMAIL>"
        assert email[2] == [address]
        assert subject in email[3]
        # TODO: Check that body contains link to dashboard and email prefs.
    def test_fresh_setupnotifications(self, mail_server):
        """No email is sent when there is no activity at all."""
        helpers.call_action("send_email_notifications")
        assert (
            len(mail_server.get_smtp_messages()) == 0
        ), "Notification came out of nowhere"
    def test_single_notification(self, mail_server):
        """A single new activity produces exactly one notification email."""
        pkg = factories.Dataset()
        user = factories.User(activity_streams_email_notifications=True)
        helpers.call_action(
            "follow_dataset", {"user": user["name"]}, id=pkg["id"]
        )
        helpers.call_action("package_update", id=pkg["id"], notes="updated")
        helpers.call_action("send_email_notifications")
        messages = mail_server.get_smtp_messages()
        assert len(messages) == 1
        self.check_email(
            messages[0],
            user["email"],
            user["name"],
            "1 new activity from CKAN",
        )
    def test_multiple_notifications(self, mail_server):
        """Several new activities are batched into one email with a count."""
        pkg = factories.Dataset()
        user = factories.User(activity_streams_email_notifications=True)
        helpers.call_action(
            "follow_dataset", {"user": user["name"]}, id=pkg["id"]
        )
        for i in range(3):
            helpers.call_action(
                "package_update", id=pkg["id"], notes=f"updated {i} times"
            )
        helpers.call_action("send_email_notifications")
        messages = mail_server.get_smtp_messages()
        assert len(messages) == 1
        self.check_email(
            messages[0],
            user["email"],
            user["name"],
            "3 new activities from CKAN",
        )
    def test_no_notifications_if_dashboard_visited(self, mail_server):
        """Marking activities as old suppresses the notification email."""
        pkg = factories.Dataset()
        user = factories.User(activity_streams_email_notifications=True)
        helpers.call_action(
            "follow_dataset", {"user": user["name"]}, id=pkg["id"]
        )
        helpers.call_action("package_update", id=pkg["id"], notes="updated")
        new_activities_count = helpers.call_action(
            "dashboard_new_activities_count",
            {"user": user["name"]},
            id=pkg["id"],
        )
        assert new_activities_count == 1
        # Simulate the user visiting their dashboard.
        helpers.call_action(
            "dashboard_mark_activities_old",
            {"user": user["name"]},
            id=pkg["id"],
        )
        helpers.call_action("send_email_notifications")
        messages = mail_server.get_smtp_messages()
        assert len(messages) == 0
    def test_notifications_disabled_by_default(self):
        """New users have email notifications switched off."""
        user = factories.User()
        assert not user["activity_streams_email_notifications"]
    def test_no_emails_when_notifications_disabled(self, mail_server):
        """Activities still accumulate, but no email is sent when disabled."""
        pkg = factories.Dataset()
        user = factories.User()
        helpers.call_action(
            "follow_dataset", {"user": user["name"]}, id=pkg["id"]
        )
        helpers.call_action("package_update", id=pkg["id"], notes="updated")
        helpers.call_action("send_email_notifications")
        messages = mail_server.get_smtp_messages()
        assert len(messages) == 0
        # The activity itself is still recorded as new.
        new_activities_count = helpers.call_action(
            "dashboard_new_activities_count",
            {"user": user["name"]},
            id=pkg["id"],
        )
        assert new_activities_count == 1
    @pytest.mark.ckan_config(
        "ckan.activity_streams_email_notifications", False
    )
    def test_send_email_notifications_feature_disabled(self, mail_server):
        """The action raises (and sends nothing) when the feature is off."""
        with pytest.raises(tk.ValidationError):
            helpers.call_action("send_email_notifications")
        messages = mail_server.get_smtp_messages()
        assert len(messages) == 0
    @pytest.mark.ckan_config("ckan.email_notifications_since", ".000001")
    def test_email_notifications_since(self, mail_server):
        """Activities older than the configured window are not emailed."""
        pkg = factories.Dataset()
        user = factories.User(activity_streams_email_notifications=True)
        helpers.call_action(
            "follow_dataset", {"user": user["name"]}, id=pkg["id"]
        )
        helpers.call_action("package_update", id=pkg["id"], notes="updated")
        # Wait past the (tiny) notification window before sending.
        time.sleep(0.01)
        helpers.call_action("send_email_notifications")
        messages = mail_server.get_smtp_messages()
        assert len(messages) == 0
@pytest.mark.ckan_config("ckan.plugins", "activity")
@pytest.mark.usefixtures("non_clean_db", "with_plugins")
class TestDashboardMarkActivitiesOld(object):
    """Tests for the ``dashboard_mark_activities_old`` action."""

    def test_mark_as_old_some_activities_by_a_followed_user(self):
        """Marking activities old zeroes the count and clears is_new flags."""
        # do some activity that will show up on user's dashboard
        user = factories.User()
        # now some activity that is "new" because it is by a followed user
        followed_user = factories.User()
        helpers.call_action(
            "follow_user", context={"user": user["name"]}, **followed_user
        )
        dataset = factories.Dataset(user=followed_user)
        dataset["title"] = "Dataset with changed title"
        helpers.call_action(
            "package_update",
            context={"user": followed_user["name"]},
            **dataset,
        )
        # Three "new" items: the followed user's creation, their new dataset
        # and the dataset update. The user's own "new user" entry is not new.
        assert (
            helpers.call_action(
                "dashboard_new_activities_count", context={"user": user["id"]}
            )
            == 3
        )
        activities = helpers.call_action(
            "dashboard_activity_list", context={"user": user["id"]}
        )
        assert [
            (activity["activity_type"], activity["is_new"])
            for activity in activities[::-1]
        ] == [
            ("new user", False),
            ("new user", True),
            ("new package", True),
            ("changed package", True),
        ]
        helpers.call_action(
            "dashboard_mark_activities_old", context={"user": user["name"]}
        )
        # After marking: count is zero and every entry loses its is_new flag.
        assert (
            helpers.call_action(
                "dashboard_new_activities_count", context={"user": user["id"]}
            )
            == 0
        )
        activities = helpers.call_action(
            "dashboard_activity_list", context={"user": user["id"]}
        )
        assert [
            (activity["activity_type"], activity["is_new"])
            for activity in activities[::-1]
        ] == [
            ("new user", False),
            ("new user", False),
            ("new package", False),
            ("changed package", False),
        ]
@pytest.mark.ckan_config("ckan.plugins", "activity")
@pytest.mark.usefixtures("non_clean_db", "with_plugins")
class TestFollow:
    """Following an object must not itself create an activity-stream entry."""

    @pytest.mark.usefixtures("app")
    def test_follow_dataset_no_activity(self):
        follower = factories.User()
        dataset = factories.Dataset()
        _clear_activities()
        helpers.call_action(
            "follow_dataset",
            context={"user": follower["name"]},
            id=dataset["id"],
        )
        assert not helpers.call_action("user_activity_list", id=follower["id"])

    @pytest.mark.usefixtures("app")
    def test_follow_group_no_activity(self):
        follower = factories.User()
        group = factories.Group()
        _clear_activities()
        helpers.call_action(
            "follow_group", context={"user": follower["name"]}, **group
        )
        assert not helpers.call_action("user_activity_list", id=follower["id"])

    @pytest.mark.usefixtures("app")
    def test_follow_organization_no_activity(self):
        follower = factories.User()
        organization = factories.Organization()
        _clear_activities()
        # Organizations are groups under the hood, hence follow_group.
        helpers.call_action(
            "follow_group", context={"user": follower["name"]}, **organization
        )
        assert not helpers.call_action("user_activity_list", id=follower["id"])

    @pytest.mark.usefixtures("app")
    def test_follow_user_no_activity(self):
        follower = factories.User()
        someone_else = factories.User()
        _clear_activities()
        helpers.call_action(
            "follow_user", context={"user": follower["name"]}, **someone_else
        )
        assert not helpers.call_action("user_activity_list", id=follower["id"])
|
# # ⚠ Warning
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# [🥭 Mango Markets](https://mango.markets/) support is available at:
# [Docs](https://docs.mango.markets/)
# [Discord](https://discord.gg/67jySBhxrg)
# [Twitter](https://twitter.com/mangomarkets)
# [Github](https://github.com/blockworks-foundation)
# [Email](mailto:<EMAIL>)
import abc
import logging
import typing
from decimal import Decimal
from .context import Context
from .group import Group
from .token import Token
from .tokenvalue import TokenValue
from .tradeexecutor import TradeExecutor
from .wallet import Wallet
# # 🥭 WalletBalancer
#
# This notebook deals with balancing a wallet after processing liquidations, so that it has
# appropriate funds for the next liquidation.
#
# We want to be able to maintain liquidity in our wallet. For instance if there are a lot of
# ETH shorts being liquidated, we'll need to supply ETH, but what do we do when we run out
# of ETH and there are still liquidations to perform?
#
# We 'balance' our wallet tokens, buying or selling or swapping them as required.
#
# # 🥭 Target Balances
#
# To be able to maintain the right balance of tokens, we need to know what the right
# balance is. Different people have different opinions, and we don't all have the same
# value in our liquidator accounts, so we need a way to allow whoever is running the
# liquidator to specify what the target token balances should be.
#
# There are two possible approaches to specifying the target value:
# * A 'fixed' value, like 10 ETH
# * A 'percentage' value, like 20% ETH
#
# Percentage is trickier, because to come up with the actual target we need to take into
# account the wallet value and the current price of the target token.
#
# The way this all hangs together is:
# * A parser parses string values (probably from a command-line) into `TargetBalance`
# objects.
# * There are two types of `TargetBalance` objects - `FixedTargetBalance` and
# `PercentageTargetBalance`.
# * To get the actual `TokenValue` for balancing, the `TargetBalance` must be 'resolved'
# by calling `resolve()` with the appropriate token price and wallet value.
#
# # 🥭 TargetBalance class
#
# This is the abstract base class for our target balances, to allow them to be treated polymorphically.
#
class TargetBalance(metaclass=abc.ABCMeta):
    """Abstract base class for a desired balance of a single token.

    Subclasses decide how the concrete amount is derived (a fixed amount or
    a percentage of the wallet value) by implementing `resolve()`.
    """

    def __init__(self, token: Token):
        # The token this target applies to.
        self.token = token

    @abc.abstractmethod
    def resolve(self, current_price: Decimal, total_value: Decimal) -> TokenValue:
        """Return the concrete `TokenValue` target, given the token's current
        price and the total wallet value."""
        raise NotImplementedError("TargetBalance.resolve() is not implemented on the base type.")

    def __repr__(self) -> str:
        # Delegate to the subclass's __str__ for a readable representation.
        return f"{self}"
# # 🥭 FixedTargetBalance class
#
# This is the simple case, where the `FixedTargetBalance` object contains enough information on its own to build the resolved `TokenValue` object.
#
class FixedTargetBalance(TargetBalance):
    """A target balance expressed as an absolute token amount.

    The amount is fully known up front, so `resolve()` ignores both the
    current price and the total wallet value.
    """

    def __init__(self, token: Token, value: Decimal):
        super().__init__(token)
        # The absolute number of tokens to hold.
        self.value = value

    def resolve(self, current_price: Decimal, total_value: Decimal) -> TokenValue:
        """Return the fixed amount, regardless of market conditions."""
        return TokenValue(self.token, self.value)

    def __str__(self) -> str:
        return f"""« FixedTargetBalance [{self.value} {self.token.name}] »"""
# # 🥭 PercentageTargetBalance
#
# This is the more complex case, where the target is a percentage of the total wallet
# balance.
#
# So, to actually calculate the right target, we need to know the total wallet balance and
# the current price. Once we have those the calculation is just:
# >
# > _wallet fraction_ is _percentage_ of _wallet value_
# >
# > _target balance_ is _wallet fraction_ divided by _token price_
#
class PercentageTargetBalance(TargetBalance):
    """A target balance expressed as a percentage of the total wallet value.

    Resolving needs the current token price and the wallet's total value:
    the target size is (percentage of wallet value) divided by token price.
    """

    def __init__(self, token: Token, target_percentage: Decimal):
        super().__init__(token)
        # Stored as a fraction (e.g. 20% -> 0.2) to simplify resolve().
        self.target_fraction = target_percentage / 100

    def resolve(self, current_price: Decimal, total_value: Decimal) -> TokenValue:
        portion_of_wallet = total_value * self.target_fraction
        return TokenValue(self.token, portion_of_wallet / current_price)

    def __str__(self) -> str:
        return f"""« PercentageTargetBalance [{self.target_fraction * 100}% {self.token.name}] »"""
# # 🥭 TargetBalanceParser class
#
# The `TargetBalanceParser` takes a string like "BTC:0.2" or "ETH:20%" and returns the appropriate TargetBalance object.
#
# This has a lot of manual error handling because it's likely the error messages will be seen by people and so we want to be as clear as we can what specifically is wrong.
#
class TargetBalanceParser:
    """Parses strings such as "BTC:0.2" or "ETH:20%" into `TargetBalance`s.

    Error handling is deliberately verbose because these messages are shown
    directly to whoever is configuring the liquidator, so they should say
    exactly what is wrong.
    """

    def __init__(self, tokens: typing.List[Token]):
        self.tokens = tokens

    def parse(self, to_parse: str) -> TargetBalance:
        try:
            token_name, value = to_parse.split(":")
        except Exception as exception:
            raise Exception(f"Could not parse target balance '{to_parse}'") from exception

        token = Token.find_by_symbol(self.tokens, token_name)

        # Split off an optional trailing percent sign; the number of pieces
        # tells us whether this is a fixed amount or a percentage.
        pieces = value.split("%")
        number_text = pieces[0]
        try:
            number = Decimal(number_text)
        except Exception as exception:
            raise Exception(
                f"Could not parse '{number_text}' as a decimal number. It should be formatted as a decimal number, e.g. '2.345', with no surrounding spaces.") from exception

        if len(pieces) > 2:
            raise Exception(
                f"Could not parse '{value}' as a decimal percentage. It should be formatted as a decimal number followed by a percentage sign, e.g. '30%', with no surrounding spaces.")

        if len(pieces) == 1:
            return FixedTargetBalance(token, number)
        return PercentageTargetBalance(token, number)
# # 🥭 sort_changes_for_trades function
#
# It's important to process SELLs first, so we have enough funds in the quote balance for the
# BUYs.
#
# It looks like this function takes size into account, but it doesn't really - 2 ETH is
# smaller than 1 BTC (for now?) but the value 2 will be treated as bigger than 1. We don't
# really care that much as long as we have SELLs before BUYs. (We could, later, take price
# into account for this sorting but we don't need to now so we don't.)
#
def sort_changes_for_trades(changes: typing.List[TokenValue]) -> typing.List[TokenValue]:
    """Order changes so SELLs (negative values) come before BUYs (positive).

    Sorting by the raw signed value is enough - we only need SELLs first so
    the quote balance can fund the subsequent BUYs.
    """
    def signed_amount(change: TokenValue) -> Decimal:
        return change.value

    return sorted(changes, key=signed_amount)
# # 🥭 calculate_required_balance_changes function
#
# Takes a list of current balances, and a list of desired balances, and returns the list of changes required to get us to the desired balances.
#
def calculate_required_balance_changes(current_balances: typing.List[TokenValue], desired_balances: typing.List[TokenValue]) -> typing.List[TokenValue]:
    """Return, per desired balance, the delta needed to reach it.

    Each entry is desired minus currently-held, so negative values mean the
    token must be sold and positive values mean it must be bought.
    """
    def delta_for(desired: TokenValue) -> TokenValue:
        held = TokenValue.find_by_token(current_balances, desired.token)
        return TokenValue(desired.token, desired.value - held.value)

    return [delta_for(desired) for desired in desired_balances]
# # 🥭 FilterSmallChanges class
#
# Allows us to filter out changes that aren't worth the effort.
#
# For instance, if our desired balance requires changing less than 1% of our total balance,
# it may not be worth bothering with right not.
#
# Calculations are based on the total wallet balance, rather than the magnitude of the
# change per-token, because a change of 0.01 of one token may be worth more than a change
# of 10 in another token. Normalising values to our wallet balance makes these changes
# easier to reason about.
#
class FilterSmallChanges:
    """Predicate that drops balance changes too small to be worth trading.

    Thresholding is done against the *total wallet value* so that changes in
    different tokens are comparable regardless of per-token price.
    """

    def __init__(self, action_threshold: Decimal, balances: typing.List[TokenValue], prices: typing.List[TokenValue]):
        self.logger: logging.Logger = logging.getLogger(self.__class__.__name__)
        # Price lookup keyed by the token's mint address (stringified).
        self.prices: typing.Dict[str, TokenValue] = {}
        total = Decimal(0)
        for balance in balances:
            price = TokenValue.find_by_token(prices, balance.token)
            self.prices[f"{price.token.mint}"] = price
            total += price.value * balance.value
        self.total_balance = total
        # A change only passes allow() if its value exceeds this fraction of
        # the whole wallet.
        self.action_threshold_value = total * action_threshold
        self.logger.info(
            f"Wallet total balance of {total} gives action threshold value of {self.action_threshold_value}")

    def allow(self, token_value: TokenValue) -> bool:
        """Return True when the change's absolute value clears the threshold."""
        price = self.prices[f"{token_value.token.mint}"]
        value = price.value * token_value.value
        absolute_value = value.copy_abs()
        result = absolute_value > self.action_threshold_value
        self.logger.info(
            f"Value of {token_value.token.name} trade is {absolute_value}, threshold value is {self.action_threshold_value}. Is this worth doing? {result}.")
        return result
# # 🥭 WalletBalancers
#
# We want two types of this class:
# * A 'null' implementation that adheres to the interface but doesn't do anything, and
# * A 'live' implementation that actually does the balancing.
#
# This allows us to have code that implements logic including wallet balancing, without
# having to worry about whether the user wants to re-balance or not - we can just plug
# in the 'null' variant and the logic all still works.
#
# To have this work we define an abstract base class `WalletBalancer` which defines the
# interface, then a `NullWalletBalancer` which adheres to this interface but doesn't
# perform any action, and finally the real `LiveWalletBalancer` which can perform the
# balancing action.
#
# # 🥭 WalletBalancer class
#
# This is the abstract class which defines the interface.
#
class WalletBalancer(metaclass=abc.ABCMeta):
    """Abstract interface for wallet-balancing strategies."""

    @abc.abstractmethod
    def balance(self, prices: typing.List[TokenValue]):
        """Rebalance the wallet given the current `prices` of its tokens."""
        raise NotImplementedError("WalletBalancer.balance() is not implemented on the base type.")
# # 🥭 NullWalletBalancer class
#
# This is the 'empty', 'no-op', 'dry run' wallet balancer which doesn't do anything but
# which can be plugged into algorithms that may want balancing logic.
#
class NullWalletBalancer(WalletBalancer):
    """No-op balancer: satisfies the interface but never trades.

    Lets calling code include balancing logic unconditionally - plugging in
    this variant effectively disables re-balancing.
    """

    def balance(self, prices: typing.List[TokenValue]):
        # Deliberately do nothing.
        return None
# # 🥭 LiveWalletBalancer class
#
# This is the high-level class that does much of the work.
#
class LiveWalletBalancer(WalletBalancer):
    """Wallet balancer that actually trades via the given `TradeExecutor`.

    `balance()` compares current holdings against the configured targets,
    drops changes below `action_threshold` (a fraction of total wallet
    value), sorts SELLs before BUYs, and executes the remaining trades.
    """

    def __init__(self, context: Context, wallet: Wallet, group: Group, trade_executor: TradeExecutor, action_threshold: Decimal, tokens: typing.List[Token], target_balances: typing.List[TargetBalance]):
        self.logger: logging.Logger = logging.getLogger(self.__class__.__name__)
        self.context: Context = context
        self.wallet: Wallet = wallet
        self.group: Group = group
        self.trade_executor: TradeExecutor = trade_executor
        self.action_threshold: Decimal = action_threshold
        self.tokens: typing.List[Token] = tokens
        self.target_balances: typing.List[TargetBalance] = target_balances

    def balance(self, prices: typing.List[TokenValue]):
        """Bring wallet balances towards the configured targets.

        `prices` must contain a price for every token held and targeted
        (looked up via `TokenValue.find_by_token`).
        """
        padding = "\n    "

        def balances_report(balances) -> str:
            # One balance per line, indented for readable log output.
            return padding.join(list([f"{bal}" for bal in balances]))

        current_balances = self._fetch_balances()
        total_value = Decimal(0)
        for bal in current_balances:
            price = TokenValue.find_by_token(prices, bal.token)
            value = bal.value * price.value
            total_value += value
        self.logger.info(f"Starting balances: {padding}{balances_report(current_balances)} - total: {total_value}")
        # Resolve each (possibly percentage-based) target to a concrete amount.
        resolved_targets: typing.List[TokenValue] = []
        for target in self.target_balances:
            price = TokenValue.find_by_token(prices, target.token)
            resolved_targets += [target.resolve(price.value, total_value)]
        balance_changes = calculate_required_balance_changes(current_balances, resolved_targets)
        self.logger.info(f"Full balance changes: {padding}{balances_report(balance_changes)}")
        # Drop changes whose value is too small a fraction of the wallet.
        dont_bother = FilterSmallChanges(self.action_threshold, current_balances, prices)
        filtered_changes = list(filter(dont_bother.allow, balance_changes))
        self.logger.info(f"Filtered balance changes: {padding}{balances_report(filtered_changes)}")
        if len(filtered_changes) == 0:
            self.logger.info("No balance changes to make.")
            return
        # SELLs first, so the quote balance can fund the subsequent BUYs.
        sorted_changes = sort_changes_for_trades(filtered_changes)
        self._make_changes(sorted_changes)
        updated_balances = self._fetch_balances()
        self.logger.info(f"Finishing balances: {padding}{balances_report(updated_balances)}")

    def _make_changes(self, balance_changes: typing.List[TokenValue]):
        """Execute one buy/sell per change against the shared quote market."""
        self.logger.info(f"Balance changes to make: {balance_changes}")
        quote = self.group.shared_quote_token.token.symbol
        for change in balance_changes:
            market_symbol = f"{change.token.symbol}/{quote}"
            # Negative delta means we hold too much: sell the excess.
            if change.value < 0:
                self.trade_executor.sell(market_symbol, change.value.copy_abs())
            else:
                self.trade_executor.buy(market_symbol, change.value.copy_abs())

    def _fetch_balances(self) -> typing.List[TokenValue]:
        """Fetch the wallet's current balance for every known token."""
        balances: typing.List[TokenValue] = []
        for token in self.tokens:
            balance = TokenValue.fetch_total_value(self.context, self.wallet.address, token)
            balances += [balance]
        return balances
|
# Copyright 2016 OVH SAS
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import math
import re
from neutron_lib import exceptions
from oslo_log import log as logging
from neutron._i18n import _
from neutron.agent.linux import ip_lib
from neutron.common import constants
from neutron.services.qos import qos_consts
LOG = logging.getLogger(__name__)

# Symbolic qdisc anchors used when building `tc` command lines.
ROOT_QDISC = "root"
INGRESS_QDISC = "ingress"
# The kernel reports the ingress qdisc's parent with this hex id; the handle
# below is how it is addressed when adding it.
INGRESS_QDISC_HEX = "ffff:fff1"
INGRESS_QDISC_HANDLE = "ffff:"

QDISC_TYPE_HTB = "htb"
QDISC_TYPE_DEFAULT = "pfifo_fast"

# SI base (1000) is used for rates, IEC base (1024) for sizes.
SI_BASE = 1000
IEC_BASE = 1024

BW_LIMIT_UNIT = "kbit"  # kilobits per second in tc's notation
BURST_UNIT = "kbit"  # kilobits in tc's notation

# Those are RATES (bits per second) and SIZE (bytes) unit names from tc manual
# mapped to the exponent applied to the base (k=base^1, m=base^2, ...).
UNITS = {
    "k": 1,
    "m": 2,
    "g": 3,
    "t": 4
}
class InvalidUnit(exceptions.NeutronException):
    """Raised when a tc value string carries an unknown unit prefix."""
    message = _("Unit name '%(unit)s' is not valid.")
class InvalidPolicyClassParameters(exceptions.NeutronException):
    """Raised when neither 'rate' nor 'ceil' is available for a tc class."""
    message = _("'rate' or 'ceil' parameters must be defined")
def kilobits_to_bits(value, base):
    """Scale a kilobit quantity up to bits using `base` (SI 1000 or IEC 1024)."""
    return base * value
def bits_to_kilobits(value, base):
    """Convert bits to whole kilobits, rounding any remainder up."""
    kilobits = math.ceil(float(value) / base)
    return int(kilobits)
def bytes_to_bits(value):
    """Convert a byte count to bits (one byte is eight bits)."""
    return 8 * value
def bits_to_bytes(value):
    """Convert bits to bytes, truncating any partial byte toward zero."""
    whole_bytes = value / 8
    return int(whole_bytes)
def convert_to_kilo(value, base):
    """Parse a tc-style size/rate string into whole kilo-units.

    tc reports rates in bit units (e.g. "10Kbit") and sizes in byte units
    (e.g. "1600b"); both forms are accepted.  `base` selects SI (1000) or
    IEC (1024) scaling.  Raises InvalidUnit for an unknown unit prefix.
    """
    value = value.lower()
    # The suffix tells us whether the number counts bits or bytes.
    input_in_bits = "bit" in value
    if input_in_bits:
        value = value.replace("bit", "")
    else:
        value = value.replace("b", "")

    if value.isdigit():
        # No k/m/g/t prefix left: a bare number of bits or bytes.
        amount = int(value)
        if input_in_bits:
            return bits_to_kilobits(amount, base)
        return bits_to_kilobits(bytes_to_bits(amount), base)

    prefix = value[-1:]
    if prefix not in UNITS.keys():
        raise InvalidUnit(unit=prefix)

    magnitude = int(value[:-1])
    if input_in_bits:
        scaled_bits = magnitude * (base ** UNITS[prefix])
    else:
        scaled_bits = bytes_to_bits(magnitude * (base ** UNITS[prefix]))
    return bits_to_kilobits(scaled_bits, base)
class TcCommand(ip_lib.IPDevice):
    """Wrapper around the ``tc`` command for QoS shaping on a device.

    Traffic arriving on the tap device's ingress qdisc is mirrored to an IFB
    device, where an HTB qdisc/class pair applies rate/ceil/burst limits.
    Rates are handled in kilobits (SI base), burst sizes in kilobytes
    (IEC base); tc itself is fed bits and bytes.
    """

    def _execute_tc_cmd(self, cmd, **kwargs):
        """Run ``tc <cmd>`` inside this device's namespace as root."""
        cmd = ['tc'] + cmd
        ip_wrapper = ip_lib.IPWrapper(self.namespace)
        return ip_wrapper.netns.execute(cmd, run_as_root=True, **kwargs)

    @staticmethod
    def get_ingress_qdisc_burst_value(bw_limit, burst_limit):
        """Return burst value used in ingress qdisc.

        If a burst value is not given it defaults to a fraction of the
        bandwidth limit, so that limiting TCP traffic still works well.
        """
        if not burst_limit:
            return int(float(bw_limit) * qos_consts.DEFAULT_BURST_RATE)
        return burst_limit

    def set_bw(self, max, burst, min, direction):
        """Apply bandwidth limits (kilo-units) for the given direction.

        NOTE(review): EGRESS is shaped via the *ingress* qdisc (traffic is
        mirrored to an IFB) - presumably direction names are from the
        instance's point of view; confirm with callers.
        """
        # tc expects rates in bits and burst sizes in bytes.
        max = kilobits_to_bits(max, SI_BASE) if max else max
        burst = (bits_to_bytes(kilobits_to_bits(burst, IEC_BASE)) if burst
                 else burst)
        min = kilobits_to_bits(min, SI_BASE) if min else min
        if direction == constants.EGRESS_DIRECTION:
            return self._set_ingress_bw(max, burst, min)
        else:
            raise NotImplementedError()

    def delete_bw(self, direction):
        """Remove any configured bandwidth limits for the given direction."""
        if direction == constants.EGRESS_DIRECTION:
            return self._delete_ingress()
        else:
            raise NotImplementedError()

    def get_limits(self, direction):
        """Return the (max, burst, min) limits configured for `direction`."""
        if direction == constants.EGRESS_DIRECTION:
            return self._get_ingress_limits()
        else:
            raise NotImplementedError()

    def _set_ingress_bw(self, max, burst, min):
        """Ensure the ingress qdisc exists, then program limits on the IFB."""
        self._add_policy_qdisc(INGRESS_QDISC, INGRESS_QDISC_HANDLE)
        self._configure_ifb(max=max, burst=burst, min=min)

    def _delete_ingress(self):
        """Delete the mirrored IFB device (if any) and the ingress qdisc."""
        ifb = self._find_mirrored_ifb()
        if ifb:
            self._del_ifb(ifb)
        self._del_policy_qdisc(INGRESS_QDISC)

    def _add_policy_qdisc(self, parent, handle, qdisc_type=None, dev=None):
        """Add a qdisc with `handle` under `parent` on `dev` (idempotent)."""
        def check_qdisc(qdisc, qdisc_type, handle, parent, device):
            # True if an equivalent qdisc is already in place; a mismatched
            # one is deleted so the caller can re-add it.
            if not qdisc or qdisc.get('type') == QDISC_TYPE_DEFAULT:
                return False
            elif ((qdisc_type and (qdisc.get('type') != qdisc_type or
                   qdisc.get('handle') != handle)) or
                  (not qdisc_type and qdisc.get('handle') != handle)):
                self._del_policy_qdisc(parent, dev=device)
                return False
            return True
        device = str(dev) if dev else self.name
        qdisc = self._show_policy_qdisc(parent, dev=device)
        if check_qdisc(qdisc, qdisc_type, handle, parent, device):
            return
        cmd = ['qdisc', 'add', 'dev', device]
        # "root" and "ingress" are positional keywords, not parent ids.
        if parent in [ROOT_QDISC, INGRESS_QDISC]:
            cmd += [parent]
        else:
            cmd += ['parent', parent]
        cmd += ['handle', handle]
        if qdisc_type:
            cmd += [qdisc_type]
        LOG.debug("Add policy qdisc cmd: %s", cmd)
        return self._execute_tc_cmd(cmd)

    def _del_policy_qdisc(self, parent, dev=None):
        """Delete the qdisc under `parent` on `dev`, if one exists."""
        device = str(dev) if dev else self.name
        if not self._show_policy_qdisc(parent, dev=device):
            return
        cmd = ['qdisc', 'del', 'dev', device]
        if parent in [ROOT_QDISC, INGRESS_QDISC]:
            cmd += [parent]
        else:
            cmd += ['parent', parent]
        LOG.debug("Delete policy qdisc cmd: %s", cmd)
        self._execute_tc_cmd(cmd)

    def _list_policy_qdisc(self, dev=None):
        """Return {parent-ref: qdisc info} parsed from ``tc qdisc show``."""
        device = str(dev) if dev else self.name
        cmd = ['qdisc', 'show', 'dev', device]
        LOG.debug("List policy qdisc cmd: %s", cmd)
        result = self._execute_tc_cmd(cmd)
        pat = re.compile(r'qdisc (\w+) (\w+\:) (root|parent (\w*\:\w+))')
        qdiscs = collections.defaultdict(dict)
        for match in (pat.match(line) for line in result.splitlines()
                      if pat.match(line)):
            qdisc = {}
            qdisc['type'] = match.groups()[0]
            qdisc['handle'] = match.groups()[1]
            if match.groups()[2] == ROOT_QDISC:
                qdisc['parentid'] = ROOT_QDISC
            else:
                qdisc['parentid'] = match.groups()[3]
            # Normalize the kernel's hex ingress parent id to its symbolic name.
            qdisc_ref = INGRESS_QDISC if qdisc['parentid'] == \
                INGRESS_QDISC_HEX else qdisc['parentid']
            qdiscs[qdisc_ref] = qdisc
        LOG.debug("List of policy qdiscs: %s", qdiscs)
        return qdiscs

    def _show_policy_qdisc(self, parent, dev=None):
        """Return qdisc info under `parent`, or None if absent."""
        device = str(dev) if dev else self.name
        return self._list_policy_qdisc(device).get(parent)

    def _add_policy_class(self, parent, classid, qdisc_type, rate=None,
                          ceil=None, burst=None, dev=None):
        """Add new TC class.

        Parameters the caller leaves unset are back-filled from the existing
        class (if any); at least one of rate/ceil must end up defined.
        """
        device = str(dev) if dev else self.name
        policy = self._show_policy_class(classid, dev=device)
        if policy:
            # Existing values come back in kilo-units; convert to tc's
            # bit/byte units before reuse.
            rate = (kilobits_to_bits(policy['rate'], SI_BASE) if not rate
                    else rate)
            ceil = (kilobits_to_bits(policy['ceil'], SI_BASE) if not ceil
                    else ceil)
            burst = (bits_to_bytes(kilobits_to_bits(policy['burst'], IEC_BASE))
                     if not burst else burst)
        if not rate and not ceil:
            raise InvalidPolicyClassParameters
        if not rate:
            rate = ceil
        cmd = self._cmd_policy_class(classid, qdisc_type, rate, device, parent,
                                     ceil, burst)
        LOG.debug("Add/replace policy class cmd: %s", cmd)
        return self._execute_tc_cmd(cmd)

    def _cmd_policy_class(self, classid, qdisc_type, rate, device, parent,
                          ceil, burst):
        """Build the ``tc class replace`` argument list."""
        cmd = ['class', 'replace', 'dev', device]
        if parent:
            cmd += ['parent', parent]
        # Clamp to tc's minimum rate of 8 bit/s and keep ceil >= rate.
        rate = 8 if rate < 8 else rate
        cmd += ['classid', classid, qdisc_type, 'rate', rate]
        if ceil:
            ceil = rate if ceil < rate else ceil
            cmd += ['ceil', ceil]
        if burst:
            cmd += ['burst', burst]
        return cmd

    def _list_policy_class(self, dev=None):
        """Return {classid: class info} parsed from ``tc class show``."""
        device = str(dev) if dev else self.name
        cmd = ['class', 'show', 'dev', device]
        result = self._execute_tc_cmd(cmd, check_exit_code=False)
        if not result:
            return {}
        classes = collections.defaultdict(dict)
        pat = re.compile(r'class (\S+) ([0-9a-fA-F]+\:[0-9a-fA-F]+) '
                         r'(root|parent ([0-9a-fA-F]+\:[0-9a-fA-F]+))'
                         r'( prio ([0-9]+))* rate (\w+) ceil (\w+) burst (\w+)'
                         r' cburst (\w+)')
        for match in (pat.match(line) for line in result.splitlines()
                      if pat.match(line)):
            _class = {}
            _class['type'] = match.groups()[0]
            classid = match.groups()[1]
            if match.groups()[2] == ROOT_QDISC:
                _class['parentid'] = None
            else:
                _class['parentid'] = match.groups()[3]
            _class['prio'] = match.groups()[5]
            # Values are normalized to kilo-units for comparisons elsewhere.
            _class['rate'] = convert_to_kilo(match.groups()[6], SI_BASE)
            _class['ceil'] = convert_to_kilo(match.groups()[7], SI_BASE)
            _class['burst'] = convert_to_kilo(match.groups()[8], IEC_BASE)
            _class['cburst'] = convert_to_kilo(match.groups()[9], IEC_BASE)
            classes[classid] = _class
        LOG.debug("Policy classes: %s", classes)
        return classes

    def _show_policy_class(self, classid, dev=None):
        """Return class info for `classid`, or None if absent."""
        device = str(dev) if dev else self.name
        return self._list_policy_class(device).get(classid)

    def _add_policy_filter(self, parent, protocol, filter, dev=None,
                           action=None):
        """Add a new filter"""
        device = str(dev) if dev else self.name
        cmd = ['filter', 'add', 'dev', device, 'parent', parent]
        cmd += ['protocol'] + protocol
        cmd += filter
        if action:
            cmd += ['action'] + action
        LOG.debug("Add policy filter cmd: %s", cmd)
        return self._execute_tc_cmd(cmd)

    def _list_policy_filters(self, parent, dev=None):
        """Returns the output of showing the filters in a device"""
        device = dev if dev else self.name
        cmd = ['filter', 'show', 'dev', device, 'parent', parent]
        LOG.debug("List policy filter cmd: %s", cmd)
        return self._execute_tc_cmd(cmd)

    def _add_ifb(self, dev_name):
        """Create a new IFB device"""
        ns_ip = ip_lib.IPWrapper(namespace=self.namespace)
        if self._find_mirrored_ifb():
            ifb = ip_lib.IPDevice(dev_name, namespace=self.namespace)
            if not ifb.exists():
                # Stale state: clean up and recreate the device.
                self._del_ifb(dev_name=dev_name)
                ifb = ns_ip.add_ifb(dev_name)
        else:
            self._del_ifb(dev_name=dev_name)
            ifb = ns_ip.add_ifb(dev_name)
        ifb.disable_ipv6()
        ifb.link.set_up()
        return ifb

    def _del_ifb(self, dev_name):
        """Delete a IFB device"""
        ns_ip = ip_lib.IPWrapper(namespace=self.namespace)
        devices = ns_ip.get_devices(exclude_loopback=True)
        for device in (dev for dev in devices if dev.name == dev_name):
            ns_ip.del_ifb(device.name)

    def _find_mirrored_ifb(self):
        """Return the name of the IFB device where the traffic is mirrored"""
        # Naming convention: "tapXXX" mirrors to "ifbXXX".
        ifb_name = self.name.replace("tap", "ifb")
        ifb = ip_lib.IPDevice(ifb_name, namespace=self.namespace)
        if not ifb.exists():
            return None
        return ifb_name

    def _configure_ifb(self, max=None, burst=None, min=None):
        """Mirror tap ingress traffic to the IFB and program HTB limits."""
        ifb = self._find_mirrored_ifb()
        if not ifb:
            ifb = self.name.replace("tap", "ifb")
            self._add_ifb(ifb)
            # Redirect every ingress packet from the tap device to the IFB.
            protocol = ['all', 'u32']
            filter = ['match', 'u32', '0', '0']
            action = ['mirred', 'egress', 'redirect', 'dev', '%s' % ifb]
            self._add_policy_filter(INGRESS_QDISC_HANDLE, protocol, filter,
                                    dev=self.name, action=action)
        self._add_policy_qdisc(ROOT_QDISC, "1:", qdisc_type=QDISC_TYPE_HTB,
                               dev=ifb)
        self._add_policy_class("1:", "1:1", QDISC_TYPE_HTB, rate=min,
                               ceil=max, burst=burst, dev=ifb)

    def _get_ingress_limits(self):
        """Return (max, burst, min) from the IFB's class 1:1, or all Nones."""
        ifb = self._find_mirrored_ifb()
        if ifb:
            policy = self._show_policy_class("1:1", dev=ifb)
            if policy:
                return policy['ceil'], policy['burst'], policy['rate']
        return None, None, None
|
<filename>src/email_verification/views.py
import base64
import io
import os
import re
import json
import time
from datetime import datetime
import qrcode
import requests
from django.http import (
JsonResponse,
HttpResponse,
HttpResponseRedirect,
HttpResponseBadRequest,
)
from django.template import loader
from django.core.mail import send_mail
from django.shortcuts import get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from django.core.cache import cache
from .forms import EmailForm
from .models import Verification, SessionState
import logging
logger = logging.getLogger(__name__)
AGENT_URL = os.environ.get("AGENT_URL")
API_KEY = os.environ.get("AGENT_ADMIN_API_KEY", "")
def index(request):
    """Render the landing page with a blank email form."""
    context = {"form": EmailForm()}
    return HttpResponse(loader.get_template("index.html").render(context, request))
def submit(request):
    """Handle the email form: create an agent invitation and email it.

    On a valid POST, asks the agent for a connection invitation, stores
    the connection id and invite URL on the Verification record, emails
    the user a verification link, records the session state, and
    redirects to the thanks page.  Non-POST requests and invalid forms
    get a 400 response.
    """
    if request.method != "POST":
        # Bug fix: the original fell through and implicitly returned None
        # for non-POST requests, which crashes Django's response handling.
        return HttpResponseBadRequest()
    form = EmailForm(request.POST)
    if not form.is_valid():
        return HttpResponseBadRequest()
    response = requests.post(
        f"{AGENT_URL}/connections/create-invitation",
        headers={"x-api-key": API_KEY},
    )
    invite = response.json()
    connection_id = invite["connection_id"]
    invite_url = invite["invitation_url"]
    form.instance.connection_id = connection_id
    form.instance.invite_url = invite_url
    form.save()
    email = form.instance.email
    redirect_url = f"{os.environ.get('SITE_URL')}/verify/{connection_id}"
    template = loader.get_template("email.html")
    email_html = template.render({"redirect_url": redirect_url}, request)
    send_mail(
        "BC Email Verification Invite",
        (
            "Follow this link to connect with our "
            f"verification service: {redirect_url}"
        ),
        "Email Verification Service <<EMAIL>>",
        [email],
        fail_silently=False,
        html_message=email_html,
    )
    SessionState.objects.get_or_create(
        connection_id=connection_id, state="invite-created"
    )
    return HttpResponseRedirect(f"/thanks?email={form.instance.email}")
def thanks(request):
    """Show the thank-you page for the submitted email address."""
    try:
        email = request.GET["email"]
    except KeyError:
        # Missing query parameter -> 400.  Narrowed from a broad
        # ``except Exception`` that could mask unrelated errors.
        return HttpResponseBadRequest()
    template = loader.get_template("thanks.html")
    return HttpResponse(template.render({"email": email}, request))
def state(request, connection_id):
    """Return the session state (and email, when known) as JSON."""
    session = SessionState.objects.get(connection_id=connection_id)
    resp = {"state": session.state}
    try:
        attendee = Verification.objects.get(connection_id=connection_id)
        resp["email"] = attendee.email
    except (Verification.DoesNotExist, Verification.MultipleObjectsReturned):
        # No (unambiguous) verification record yet; the email is simply
        # omitted.  Narrowed from a broad ``except Exception: pass``.
        pass
    return JsonResponse(resp)
def in_progress(request, connection_id):
    """Render the verification-in-progress page for a connection."""
    session = SessionState.objects.get(connection_id=connection_id)
    template = loader.get_template("in_progress.html")
    # Bug fix: the original used the SessionState object itself as a dict
    # key (``state: state.state``) instead of the string "state", so the
    # template never received a "state" variable.
    context = {"connection_id": connection_id, "state": session.state}
    return HttpResponse(template.render(context, request))
def verify_redirect(request, connection_id):
    """Render the verification page with a QR code and didcomm deep link."""
    verification = get_object_or_404(Verification, connection_id=connection_id)
    invitation_url = verification.invite_url
    # Rewrite the http(s) invitation URL into a didcomm:// deep link.
    didcomm_url = re.sub(r"^https?:\/\/\S*\?", "didcomm://invite?", invitation_url)
    # Encode the invitation as a base64 PNG QR code for inline display.
    buffer = io.BytesIO()
    qrcode.make(invitation_url).save(buffer, "PNG")
    qr_png_b64 = base64.b64encode(buffer.getvalue()).decode("utf-8")
    context = {
        "qr_png": qr_png_b64,
        "didcomm_url": didcomm_url,
        "invitation_url": invitation_url,
        "connection_id": verification.connection_id,
    }
    return HttpResponse(loader.get_template("verify.html").render(context, request))
@csrf_exempt
def webhooks(request, topic):
    """Process agent webhook callbacks.

    Handles three events:
      * ``connections`` / ``request``  -> record the connection request.
      * ``connections`` / ``response`` -> connection formed; send the
        email credential offer.
      * ``issue_credential`` / ``credential_issued`` -> record completion.
    Any other topic/state combination is logged and ignored.
    """
    message = json.loads(request.body)
    logger.info(f"webhook received - topic: {topic} body: {request.body}")
    # Handle an incoming connection request
    if topic == "connections" and message["state"] == "request":
        connection_id = message["connection_id"]
        SessionState.objects.filter(connection_id=connection_id).update(
            state="connection-request-received"
        )
        # Bug fix: without this return, the generic "not implemented"
        # warning below also fired for this handled topic/state.
        return HttpResponse()
    # Handle new invites, send cred offer
    if topic == "connections" and message["state"] == "response":
        credential_definition_id = cache.get("credential_definition_id")
        if credential_definition_id is None:
            # Bug fix: was ``assert credential_definition_id is not None``,
            # which is stripped under ``python -O``; fail explicitly.
            logger.error("No credential_definition_id in cache; cannot send offer")
            return HttpResponse(status=500)
        connection_id = str(message["connection_id"])
        SessionState.objects.filter(connection_id=connection_id).update(
            state="connection-formed"
        )
        # NOTE(review): blocking sleep inside a request handler -- confirm
        # why the agent needs this delay before the offer is sent.
        time.sleep(5)
        logger.info(
            f"Sending credential offer for connection {connection_id} "
            f"and credential definition {credential_definition_id}"
        )
        verification = get_object_or_404(Verification, connection_id=connection_id)
        request_body = {
            "auto_issue": True,
            "connection_id": connection_id,
            "cred_def_id": credential_definition_id,
            "credential_preview": {
                "attributes": [
                    {
                        "name": "email",
                        "value": verification.email,
                        "mime-type": "text/plain",
                    },
                    {
                        "name": "time",
                        "value": str(datetime.utcnow()),
                        "mime-type": "text/plain",
                    },
                ]
            },
        }
        try:
            response = requests.post(
                f"{AGENT_URL}/issue-credential/send-offer",
                headers={"x-api-key": API_KEY},
                json=request_body,
            )
            response.raise_for_status()
        except Exception:
            logger.exception("Error sending credential offer:")
            SessionState.objects.filter(connection_id=connection_id).update(
                state="offer-error"
            )
        else:
            SessionState.objects.filter(connection_id=connection_id).update(
                state="offer-sent"
            )
        return HttpResponse()
    # Handle completion of credential issue
    if topic == "issue_credential" and message["state"] == "credential_issued":
        credential_exchange_id = message["credential_exchange_id"]
        connection_id = message["connection_id"]
        logger.info(
            "Completed credential issue for credential exchange "
            f"{credential_exchange_id} and connection {connection_id}"
        )
        SessionState.objects.filter(connection_id=connection_id).update(
            state="credential-issued"
        )
        return HttpResponse()
    # Robustness fix: some webhook topics carry no "state" key; use .get()
    # so logging the unhandled event cannot itself raise KeyError.
    logger.warning(
        f"Webhook for topic {topic} and state {message.get('state')} is not implemented"
    )
    return HttpResponse()
|
# coding=utf-8
# Copyright (c) 2020 Alibaba PAI team and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, text_a, text_b=None, label=None, guid=None):
        """Construct an InputExample.

        Args:
            text_a: string. The untokenized text of the first sequence.
                For single-sequence tasks, only this sequence must be
                specified.
            text_b: (Optional) string. The untokenized text of the second
                sequence. Only must be specified for sequence pair tasks.
            label: (Optional) string. The label of the example. This should
                be specified for train and dev examples, but not for test
                examples.
            guid: Unique id for the example.
        """
        self.guid = guid
        self.label = label
        self.text_a = text_a
        self.text_b = text_b
class InputFeatures(object):
    """A single set of features of data for text classification/match."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id, seq_length=None, guid=None):
        # input_ids / input_mask / segment_ids are parallel lists already
        # padded to the model's maximum sequence length.
        self.guid = guid
        self.seq_length = seq_length
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.label_id = label_id
class LabelingFeatures(object):
    """A single set of features of data for sequence labeling."""

    def __init__(self, input_ids, input_mask, segment_ids, all_tokens, label_ids,
                 tok_to_orig_index, seq_length=None, guid=None):
        # Per-token id/mask/segment lists plus, for labeling tasks, the
        # wordpiece tokens, their label ids and a map from each wordpiece
        # back to its original word index.
        self.guid = guid
        self.seq_length = seq_length
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.all_tokens = all_tokens
        self.label_ids = label_ids
        self.tok_to_orig_index = tok_to_orig_index
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def bert_cls_convert_example_to_feature(example, tokenizer, max_seq_length, label_map=None):
    """ Convert `InputExample` into `InputFeature` For classification task

        Args:
            example (`InputExample`): an input example
            tokenizer (`BertTokenizer`): BERT Tokenizer
            max_seq_length (`int`): Maximum sequence length while truncating
            label_map (`dict`): a map from label_value --> label_idx,
                "regression" task if it is None else "classification"
        Returns:
            feature (`InputFeatures`): an input feature
    """
    tokens_a = tokenizer.tokenize(example.text_a)
    tokens_b = None
    if example.text_b:
        tokens_b = tokenizer.tokenize(example.text_b)
        # Reserve room for [CLS], [SEP], [SEP].
        _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
    else:
        # Reserve room for [CLS] and [SEP].
        if len(tokens_a) > max_seq_length - 2:
            tokens_a = tokens_a[:(max_seq_length - 2)]
    tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
    segment_ids = [0] * len(tokens)
    if tokens_b:
        tokens += tokens_b + ["[SEP]"]
        segment_ids += [1] * (len(tokens_b) + 1)
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    input_mask = [1] * len(input_ids)
    seq_length = len(input_ids)
    # Zero-pad up to max_seq_length.
    padding = [0] * (max_seq_length - len(input_ids))
    input_ids += padding
    input_mask += padding
    segment_ids += padding
    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length
    if label_map:
        # Classification: map the label string to its index.
        label_id = label_map[example.label]
    else:
        # Regression: parse the label as a float; unlabeled (e.g. test)
        # examples get label_id None.
        try:
            label_id = float(example.label)
        except (TypeError, ValueError):
            # Bug fix: was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit.
            label_id = None
    feature = InputFeatures(input_ids=input_ids,
                            input_mask=input_mask,
                            segment_ids=segment_ids,
                            label_id=label_id,
                            seq_length=seq_length,
                            guid=example.guid)
    return feature
def bert_labeling_convert_example_to_feature(example, tokenizer, max_seq_length, label_map=None):
    """ Convert `InputExample` into `LabelingFeatures` For sequence labeling task

        Args:
            example (`InputExample`): an input example whose text_a is a
                space-separated word sequence and whose label (optional)
                is a space-separated tag sequence of the same length
            tokenizer (`BertTokenizer`): BERT Tokenizer
            max_seq_length (`int`): Maximum sequence length while truncating
            label_map (`dict`): a map from label_value --> label_idx
        Returns:
            feature (`LabelingFeatures`): an input feature
    """
    content_tokens = example.text_a.split(" ")
    if example.label is not None:
        label_tags = example.label.split(" ")
    else:
        label_tags = None
    # Build wordpiece tokens, their labels and the map back to the original
    # word index in lockstep; [CLS]/[SEP] positions map to -100 (ignored).
    all_tokens = ["[CLS]"]
    all_labels = [""]
    tok_to_orig_index = [-100]
    for i, token in enumerate(content_tokens):
        sub_tokens = tokenizer.tokenize(token)
        all_tokens.extend(sub_tokens)
        tok_to_orig_index.extend([i] * len(sub_tokens))
        if label_tags is None:
            all_labels.extend(["" for _ in range(len(sub_tokens))])
        else:
            # Every wordpiece of a word inherits that word's tag.
            all_labels.extend([label_tags[i] for _ in range(len(sub_tokens))])
    all_tokens = all_tokens[:max_seq_length - 1]
    all_labels = all_labels[:max_seq_length - 1]
    # Bug fix: tok_to_orig_index was not truncated alongside all_tokens and
    # all_labels, so for long inputs it grew past max_seq_length and became
    # misaligned with input_ids.
    tok_to_orig_index = tok_to_orig_index[:max_seq_length - 1]
    all_tokens.append("[SEP]")
    all_labels.append("")
    tok_to_orig_index.append(-100)
    input_ids = tokenizer.convert_tokens_to_ids(all_tokens)
    segment_ids = [0] * len(input_ids)
    input_mask = [1] * len(input_ids)
    # Empty labels ([CLS]/[SEP]/unlabeled) become the ignore index -100.
    label_ids = [label_map[label] if label else -100 for label in all_labels]
    # Zero-pad everything up to max_seq_length.
    while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)
        label_ids.append(-100)
    feature = LabelingFeatures(input_ids=input_ids,
                               input_mask=input_mask,
                               segment_ids=segment_ids,
                               label_ids=label_ids,
                               all_tokens=all_tokens,
                               seq_length=max_seq_length,
                               tok_to_orig_index=tok_to_orig_index,
                               guid=example.guid)
    return feature
# Copyright 2018 BigBitBus Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys, time, pickle, boto3, botocore
from os.path import join, getsize, exists
from botocore.client import ClientError
from traceback import format_exc
from pprint import pprint
from exercizer import Exercizer
class AWSExercizer(Exercizer):
def __init__(
self,
env_credentials= {},
region_name ='us-east-1',
container_name='blobtester',
fileSizeskb = [],
localDir= '/tmp/localDir',
storageClass = 'STANDARD',
numIters = 1):
Exercizer.__init__(
self,
fileSizeskb,
localDir,
numIters)
if region_name == 'us-east-1': # aws quirk - you can't specify us-east-1 explicitly
self.storage_client = boto3.client('s3',
aws_access_key_id = os.environ.get(env_credentials['account']),
aws_secret_access_key = os.environ.get(env_credentials['secret']),
)
else:
self.storage_client = boto3.client('s3',
aws_access_key_id = os.environ.get(env_credentials['account']),
aws_secret_access_key = os.environ.get(env_credentials['secret']),
region_name=region_name)
self.region_name = region_name
self.container_name = container_name
self.storageClass = storageClass
def UploadObjectsToContainer(self):
# create bucket if it does not exist
try:
self.storage_client.head_bucket(Bucket=self.container_name)
except ClientError:
if self.region_name != 'us-east-1':
container = self.storage_client.create_bucket(
Bucket=self.container_name,
CreateBucketConfiguration={
'LocationConstraint': self.region_name
})
else:
container = self.storage_client.create_bucket(
Bucket=self.container_name)
list_uploadData = []
for eachFile in self.manifest:
filePath, intfilesize = eachFile
print "U",
print eachFile
self.makeOneRandomBinFile(filePath, intfilesize)
self.startTimer()
try:
self.storage_client.upload_file(
filePath, self.container_name, filePath,
ExtraArgs = {
'StorageClass': self.storageClass}
)
list_uploadData.append(
(self.endTimer(), getsize(filePath), 'aws_upload'))
except:
print ('Failure uploading {}'.format(filePath))
print (format_exc())
self.endTimer()
os.remove(filePath)
return list_uploadData
def ListObjectsInContainer(self):
'''
Return generator with the list of blobs
'''
objList = self.storage_client.list_objects_v2(
Bucket=self.container_name)
if 'Contents' in objList:
return objList['Contents']
else:
return []
def DownloadObjectsFromContainer(self):
if not exists(self.localDir):
os.makedirs(self.localDir)
list_downloadData = []
blobList = self.ListObjectsInContainer()
for aBlob in blobList:
self.startTimer()
localPath = join(self.localDir,aBlob['Key'].split('/')[-1])
self.storage_client.download_file(
self.container_name,
aBlob['Key'], localPath)
blobsize = getsize(localPath)
list_downloadData.append(
(self.endTimer(), blobsize, 'aws_download'))
print "D",
print (localPath, blobsize)
os.remove(localPath)
self.startTimer()
self.storage_client.delete_object(
Bucket = self.container_name,
Key = aBlob['Key'])
list_downloadData.append((self.endTimer(), blobsize, 'aws_delete'))
return list_downloadData
def DeleteContainer(self, container_name='blobtester'):
self.startTimer()
blobList = self.ListObjectsInContainer()
deleteList = []
for aBlob in blobList:
deleteList.append({'Key': aBlob['Key']})
if len(deleteList) > 0:
self.storage_client.delete_objects(
Bucket = self.container_name,
Delete = { 'Objects': deleteList })
self.storage_client.delete_bucket(Bucket = self.container_name)
return {self.container_name: self.endTimer(), 'operation':'Deleted'}
if __name__=="__main__":
# These are names of the environmental variables (not the actual values)
env_credentials = {
'account': 'S3KEY',
'secret':'S3SECRET'
}
awsex = AWSExercizer(
env_credentials = env_credentials,
localDir = sys.argv[1],
storageClass = sys.argv[2],
numIters = sys.argv[3],
fileSizeskb = sys.argv[4:],
region_name ='ca-central-1') # us-east-1
pickle.dump(
awsex.UploadObjectsToContainer(),
open('/tmp/outputdata/objbench/aws_upload.pkl','wb'))
# Download
time.sleep(100)
pickle.dump(
awsex.DownloadObjectsFromContainer(),
open('/tmp/outputdata/objbench/aws_download.pkl','wb'))
# print "Delete bucket"
pprint(awsex.DeleteContainer()) |
import appdaemon.plugins.hass.hassapi as hass
class PresenceAggregator(hass.Hass):
    """App to determine current presence for a person.

    Args:
        trackers: Mapping of device_tracker entity -> the state value that
            tracker reports when home. Each should use a unique method of
            tracking.
        presence_select: The input_select object to change when home
        home_consensus[optional]: Number of trackers that must agree before we switch from away to home. Defaults to 1.
        away_consensus[optional]: Number of trackers that must agree before we switch from home to away. Defaults to all.
        home_delay[optional]: Amount of time to delay before someone is listed as home. No delay is the default.
        away_delay[optional]: Amount of time to delay before someone is listed as not home. No delay is the default.
        home_state[optional]: The "Home" state to select when we are home (defaults to "Home")
        away_state[optional]: The "Away" state to select when we are away (defaults to "Away")

    Release Notes
    Version 1.0:
        Initial Version
    """

    def initialize(self):
        """Validate config, subscribe to tracker state changes and set the
        initial presence from the trackers' current states."""
        self.log("Starting Presence Aggregator.")
        self.home_delay_timer = None
        self.away_delay_timer = None
        self.home_count = 0
        self.away_count = 0
        # Check required Params
        if "trackers" not in self.args or not self.args["trackers"]:
            self.error("No tracker(s) specified, doing nothing.")
            return
        # Also check if each one is a device_tracker object?
        if "presence_select" not in self.args:
            self.error("No input_select specified to change, doing nothing.")
            return
        # Handle optional/defaulted params
        self.log("Args: %s" % self.args)
        if "home_consensus" in self.args:
            self.home_consensus = self.args["home_consensus"]
        else:
            self.home_consensus = 1
        if "away_consensus" in self.args:
            self.away_consensus = self.args["away_consensus"]
        else:
            # Bug fix: the documented default for away_consensus is *all*
            # trackers, but the code used 1, which flipped presence to
            # Away as soon as any single tracker left.
            self.away_consensus = len(self.args["trackers"])
        if "home_state" in self.args:
            self.home_state = self.args["home_state"]
        else:
            self.home_state = "Home"
        if "away_state" in self.args:
            self.away_state = self.args["away_state"]
        else:
            self.away_state = "Away"
        self.log("Number of trackers: %s" % len(self.args["trackers"]))
        # Subscribe to trackers
        for (tracker, tracker_home) in self.args["trackers"].items():
            # Listen for trackers arriving home
            self.log("Registering a Home arrival tracker for %s, arriving in %s state." % (tracker, tracker_home))
            self.listen_state(self.tracker_is_home, tracker, new=tracker_home)
            # Listen for trackers leaving home
            self.log("Registering a Home departure tracker for %s, leaving %s state." % (tracker, tracker_home))
            self.listen_state(self.tracker_is_away, tracker, old=tracker_home)
            cur_state = self.get_state(entity = tracker, attribute = "state")
            self.log("Current state of %s is %s." % (tracker, cur_state))
            if cur_state == tracker_home:
                self.home_count += 1
            else:
                self.away_count += 1
        self.log("Away count is %d, Home count is %d." % (self.away_count, self.home_count))
        if self.home_count >= self.home_consensus:
            self.log("Home consensus reached at init (%s of %s, needed %s), setting presence to %s." %
                     (self.home_count, len(self.args["trackers"]), self.home_consensus, self.home_state))
            self.select_option(self.args["presence_select"], self.home_state)
        else:
            self.log("Home consensus not reached at init (%s of %s, needed %s), setting presence to %s." %
                     (self.home_count, len(self.args["trackers"]), self.home_consensus, self.away_state))
            self.select_option(self.args["presence_select"], self.away_state)

    def tracker_is_home(self, entity, attribute, old, new, kwargs):
        """State callback: one tracker reports the person arrived home."""
        # If the old state was previously home as well, we should do nothing
        if old == self.args["trackers"][entity]:
            #self.log("Old and new stats are both home, not marking %s as home." % (entity))
            return
        # Change the away/home balance
        self.away_count -= 1
        self.home_count += 1
        self.log("Tracker %s is now at Home, away count is %d, home count is %d." %
                 (entity, self.away_count, self.home_count))
        # If are below the consensus for away, and we were running an away timer, cancel it
        if self.away_count < self.away_consensus and self.away_delay_timer:
            self.cancel_timer(self.away_delay_timer)
            self.away_delay_timer = None
            self.log("Dropped below Away Concensus %d < %d, cancelling timer." %
                     (self.away_count, self.away_consensus))
        # If we are now above the consensus for home...
        if self.home_count >= self.home_consensus:
            # If we above consensus and not on a delay, change now
            if "home_delay" not in self.args:
                self.select_option(self.args["presence_select"], self.home_state)
            # If we are on a delay and don't have a timer, start one
            elif not self.home_delay_timer:
                self.home_delay_timer = self.run_in(self.home_consensus_delay_callback, self.args["home_delay"])

    def tracker_is_away(self, entity, attribute, old, new, kwargs):
        """State callback: one tracker reports the person left home."""
        # If the new state is home as well, we should do nothing
        if new == self.args["trackers"][entity]:
            return
        # Change the away/home balance
        self.away_count += 1
        self.home_count -= 1
        # If are below the consensus for away, and we were running an away timer, cancel it
        self.log("Tracker %s is now not at Home, away count is %d, home count is %d." %
                 (entity, self.away_count, self.home_count))
        if self.home_count < self.home_consensus and self.home_delay_timer:
            self.cancel_timer(self.home_delay_timer)
            self.home_delay_timer = None
            self.log("Dropped below Home Concensus %d < %d, cancelling timer." %
                     (self.home_count, self.home_consensus))
        # If we are now above the consensus for away...
        if self.away_count >= self.away_consensus:
            # If we above consensus and not on a delay, change now
            if "away_delay" not in self.args:
                self.select_option(self.args["presence_select"], self.away_state)
            # If we are on a delay and don't have a timer, start one
            elif not self.away_delay_timer:
                self.away_delay_timer = self.run_in(self.away_consensus_delay_callback, self.args["away_delay"])

    def home_consensus_delay_callback(self, kwargs):
        """Timer callback: apply Home state if consensus still holds."""
        self.home_delay_timer = None
        if self.home_count >= self.home_consensus:
            self.log("Home consensus delay completed, changing state to %s" % self.home_state)
            self.select_option(self.args["presence_select"], self.home_state)
        else:
            self.log("Home consensus delay completed, but no longer have consensus, no change.")

    def away_consensus_delay_callback(self, kwargs):
        """Timer callback: apply Away state if consensus still holds."""
        self.away_delay_timer = None
        if self.away_count >= self.away_consensus:
            self.log("Away consensus delay completed, changing state to %s" % self.away_state)
            self.select_option(self.args["presence_select"], self.away_state)
        else:
            self.log("Away consensus delay completed, but no longer have consensus, no change.")
|
<filename>sc-project/SC101_Assignment3_DS/stanCodoshop.py
"""
File: stanCodoshop.py
----------------------------------------------
SC101_Assignment3
Adapted from <NAME>'s
Ghost assignment by <NAME>.
-----------------------------------------------
The code in the function solve(images) mainly uses a double for loop to manipulate each pixel on result.
Before processing result, another for loop is used to get pixels on (x, y) on each image stored in images,
and then a list pixels is created to store all the pixels get from (x, y) on each image.
Next, the function get_best_pixel() is used to pick the best pixel within pixels.
Finally, blank image result is filled by all the best pixel picked form steps above,
and then a new image with 'ghost' effect is created.
"""
import os
import sys
from simpleimage import SimpleImage
def get_pixel_dist(pixel, red, green, blue):
    """
    Returns the color distance between pixel and mean RGB value

    Input:
        pixel (Pixel): pixel with RGB values to be compared
        red (int): average red value across all images
        green (int): average green value across all images
        blue (int): average blue value across all images

    Returns:
        dist (float): Euclidean distance between the pixel's RGB values
        and the given averages
    """
    d_red = pixel.red - red
    d_green = pixel.green - green
    d_blue = pixel.blue - blue
    return (d_red * d_red + d_green * d_green + d_blue * d_blue) ** 0.5
def get_average(pixels):
    """
    Given a list of pixels, finds the average red, blue, and green values

    Input:
        pixels (List[Pixel]): list of pixels to be averaged

    Returns:
        rgb (List[int]): list of average red, green, blue values across
        pixels respectively (integer division), in the order
        [red, green, blue]
    """
    # Idiom fix: replace the manual accumulator loop with sum() over
    # generator expressions.
    count = len(pixels)
    avg_red = sum(p.red for p in pixels) // count
    avg_green = sum(p.green for p in pixels) // count
    avg_blue = sum(p.blue for p in pixels) // count
    return [avg_red, avg_green, avg_blue]
def get_best_pixel(pixels):
    """
    Given a list of pixels, returns the pixel with the smallest
    distance from the average red, green, and blue values across all pixels.

    Input:
        pixels (List[Pixel]): list of pixels to be averaged and compared

    Returns:
        best (Pixel): pixel closest to RGB averages (ties keep the first)
    """
    red, green, blue = get_average(pixels)
    # Idiom fix: min() with a key replaces the manual tracking loop, which
    # also left ``best`` unbound when ``pixels`` was empty. min() keeps
    # the first of equally-distant pixels, matching the original's strict
    # ``<`` comparison.
    return min(pixels, key=lambda p: get_pixel_dist(p, red, green, blue))
def solve(images):
    """
    Given a list of image objects, compute and display a Ghost solution image
    based on these images. There will be at least 3 images and they will all
    be the same size.

    Input:
        images (List[SimpleImage]): list of images to be processed
    """
    width = images[0].width
    height = images[0].height
    result = SimpleImage.blank(width, height)
    ######## YOUR CODE STARTS HERE #########
    # For each coordinate, gather that pixel from every source image and
    # copy the one closest to their average colour into the result.
    for x in range(width):
        for y in range(height):
            target = result.get_pixel(x, y)
            candidates = [img.get_pixel(x, y) for img in images]
            chosen = get_best_pixel(candidates)
            target.red = chosen.red
            target.green = chosen.green
            target.blue = chosen.blue
    ######## YOUR CODE ENDS HERE ###########
    print("Displaying image!")
    result.show()
def jpgs_in_dir(dir):
    """
    (provided, DO NOT MODIFY)
    Given the name of a directory, returns a list of the .jpg filenames
    within it.

    Input:
        dir (string): name of directory

    Returns:
        filenames(List[string]): names of jpg files in directory
    """
    # Code left untouched per the "DO NOT MODIFY" directive.
    filenames = []
    for filename in os.listdir(dir):
        # Case-sensitive match: only lowercase '.jpg' extensions qualify.
        if filename.endswith('.jpg'):
            filenames.append(os.path.join(dir, filename))
    return filenames
def load_images(dir):
    """
    (provided, DO NOT MODIFY)
    Given a directory name, reads all the .jpg files within it into memory and
    returns them in a list. Prints the filenames out as it goes.

    Input:
        dir (string): name of directory

    Returns:
        images (List[SimpleImages]): list of images in directory
    """
    # Code left untouched per the "DO NOT MODIFY" directive.
    images = []
    jpgs = jpgs_in_dir(dir)
    for filename in jpgs:
        print("Loading", filename)
        image = SimpleImage(filename)
        images.append(image)
    return images
def main():
    # (provided, DO NOT MODIFY)
    args = sys.argv[1:]
    # We just take 1 argument, the folder containing all the images.
    # The load_images() capability is provided above.
    images = load_images(args[0])
    solve(images)


if __name__ == '__main__':
    # Script entry point: python stanCodoshop.py <image_directory>
    main()
|
<reponame>harewei/reinforcement_learning<filename>agents/DDQN.py
# Double DQN. Compared to DQN, it uses q_network rather than target_q_network
# when selecting next action when extracting next q value.
import numpy as np
import os
import random
from agent import Agent
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
from tensorflow.keras.layers import Dense, Input, Activation
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adam
from tqdm import trange
from utils.logger import Logger
from utils.visualizer import visualize
class DDQN(Agent):
    """Double DQN agent.

    Differs from vanilla DQN in how the TD target is built: the *online*
    q_network selects the next action while the target_q_network scores
    it, which reduces Q-value over-estimation.
    """

    def __init__(self, config, env):
        self.gamma = config["gamma"]  # reward discount
        self.learning_rate = config["learning_rate"]
        self.memory_size = config["memory_size"]  # replay buffer capacity
        self.epsilon = config["epsilon"]  # Exploration rate
        self.epsilon_min = config["epsilon_min"]
        self.epsilon_decay = config["epsilon_decay"]
        self.batch_size = config["batch_size"]
        self.update_frequency = config["update_frequency"]  # target-net sync period
        self.num_actions = env.action_space.n
        self.num_states = env.observation_space.shape[0]
        self.max_episode = config["max_episode"]
        self.max_step = config["max_step"]
        self.render_environment = config["render_environment"]
        self.result_path = config["result_path"]
        self.memory = []  # replay buffer of [s, a, r, s', done] entries
        self.env = env
        self.q_network = self.build_agent()
        self.target_q_network = self.build_agent()
        self.logger = Logger(config["slide_window"])

    def build_agent(self):
        """Build the Q network: one hidden ReLU layer, linear Q outputs."""
        input = Input(shape=(self.num_states,))
        layer = Dense(24, activation='relu')(input)
        layer = Dense(self.num_actions)(layer)
        output = Activation('linear')(layer)
        model = Model(input, output)
        # NOTE(review): ``lr=`` is deprecated in newer Keras releases
        # (use ``learning_rate=``) -- confirm the pinned TF version.
        adam = Adam(lr=self.learning_rate)
        model.compile(loss='mse', optimizer=adam)
        return model

    # Save <s, a ,r, s'> of each step
    def store_memory(self, state, action, reward, next_state, done):
        """Append one transition, evicting the oldest entry when full."""
        if len(self.memory) > self.memory_size:
            self.memory.pop(0)
        self.memory.append([state, action, reward, next_state, done])

    def train(self):
        """Run the training loop for ``max_episode`` episodes."""
        # Initialize q_network and target_q_network
        self.target_q_network.set_weights(self.q_network.get_weights())
        # (Dead locals max_episode/max_step/slide_window removed: the
        # config-driven self.* attributes are what the loop actually uses.)
        # Populate memory first
        state = self.env.reset()
        print("Warming up...")
        while len(self.memory) < self.batch_size:
            action = self.env.action_space.sample()
            next_state, reward, done, info = self.env.step(action)
            self.store_memory(state, action, reward, next_state, done)
            if done:
                state = self.env.reset()
        print("Warm up complete.")
        t = trange(self.max_episode)
        for episode_count in t:
            state = self.env.reset()
            current_step = 0
            episode_reward = 0
            while True:
                if self.render_environment:
                    self.env.render()
                # Network predict
                q_values = self.q_network.predict(np.reshape(state, (1, self.num_states))).ravel()
                # Epsilon-greedy action selection
                if np.random.rand() >= self.epsilon:
                    action = np.argmax(q_values)
                else:
                    action = random.randrange(self.num_actions)
                # Perform action
                next_state, reward, done, info = self.env.step(action)
                # Store transition
                episode_reward += reward
                self.store_memory(state, action, reward, next_state, done)
                # Decrease exploration
                if self.epsilon > self.epsilon_min:
                    self.epsilon *= self.epsilon_decay
                # Sample minibatch from memory
                minibatch = random.sample(self.memory, self.batch_size)
                # Transform the minibatch for processing
                minibatch = list(zip(*minibatch))
                # Calculate all td_targets for current minibatch
                states, actions, rewards, next_states, dones = minibatch
                batch_q_values = self.q_network.predict_on_batch(np.array(states))
                batch_next_q_values = self.target_q_network.predict_on_batch(np.array(next_states))
                # Main difference between DDQN and DQN: the online network
                # picks the next action, the target network evaluates it.
                next_actions = np.argmax(self.q_network.predict_on_batch(np.array(next_states)), axis=1)
                td_targets = batch_q_values.copy()
                for i in range(self.batch_size):
                    td_targets[i][actions[i]] = rewards[i] + self.gamma * (1 - dones[i]) * batch_next_q_values[i][next_actions[i]]
                # Train network
                self.q_network.train_on_batch(np.array(states), np.array(td_targets))
                # Hard copy q_network to target_q_network
                # Bug fix: was ``current_step % self.update_frequency is 0``;
                # ``is`` compares object identity, not value, and only worked
                # by accident of CPython's small-int caching.
                if done or current_step % self.update_frequency == 0:
                    self.target_q_network.set_weights(self.q_network.get_weights())
                # For logging and visualizing data
                if done or current_step > self.max_step:
                    self.logger.log_history(episode_reward, episode_count)
                    self.logger.show_progress(t, episode_reward, episode_count)
                    if episode_count % self.logger.slide_window == 0:
                        visualize(self.logger.rewards,
                                  self.logger.running_rewards,
                                  self.logger.episode_counts,
                                  os.path.join(self.result_path, "DDQN.png"))
                    break
                state = next_state
                current_step += 1
if __name__ == '__main__':
    # NOTE(review): DDQN.__init__ requires ``config`` and ``env`` arguments;
    # calling it with none will raise TypeError at runtime -- confirm the
    # intended entry point for this agent.
    agent = DDQN()
    agent.train()
# coding=utf-8
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import logging
import os
import pandas as pd
from sklearn.utils import shuffle
from cabby.geo import regions
from cabby.geo import util as gutil
class RUNDataset:
    """Loads the RUN dataset splits and the S2-cell label mappings."""

    def __init__(self, data_dir: str, s2level: int, lines: bool = False):
        """
        Args:
            data_dir: directory containing dataset.json.
            s2level: S2 cell level used to discretise the maps.
            lines: whether dataset.json is in JSON-lines format.
        """
        train_ds, valid_ds, test_ds, ds = self.load_data(data_dir, lines=lines)
        # Get labels: the label space is the union of the S2 cells covering
        # the three RUN maps.
        map_1 = regions.get_region("RUN-map1")
        map_2 = regions.get_region("RUN-map2")
        map_3 = regions.get_region("RUN-map3")
        logging.info(map_1.polygon.wkt)
        logging.info(map_2.polygon.wkt)
        logging.info(map_3.polygon.wkt)
        unique_cellid_map_1 = gutil.cellids_from_polygon(map_1.polygon, s2level)
        unique_cellid_map_2 = gutil.cellids_from_polygon(map_2.polygon, s2level)
        unique_cellid_map_3 = gutil.cellids_from_polygon(map_3.polygon, s2level)
        unique_cellid = (
            unique_cellid_map_1 + unique_cellid_map_2 + unique_cellid_map_3)
        # Bidirectional label <-> cellid lookup tables.
        label_to_cellid = {idx: cellid for idx, cellid in enumerate(unique_cellid)}
        cellid_to_label = {cellid: idx for idx, cellid in enumerate(unique_cellid)}
        self.train = train_ds
        self.valid = valid_ds
        self.test = test_ds
        self.ds = ds
        self.unique_cellid = unique_cellid
        self.label_to_cellid = label_to_cellid
        self.cellid_to_label = cellid_to_label

    def load_data(self, data_dir: str, lines: bool):
        """Read dataset.json, merge per-id instructions, shuffle and split 80/10/10."""
        ds = pd.read_json(os.path.join(data_dir, 'dataset.json'), lines=lines)
        # Concatenate all instruction rows that share an id into one string,
        # then keep a single row per id.
        ds['instructions'] = ds.groupby(
            ['id'])['instruction'].transform(lambda x: ' '.join(x))
        ds = ds.drop_duplicates(subset='id', keep="last")
        columns_keep = ds.columns.difference(
            ['map', 'id', 'instructions', 'end_point', 'start_point'])
        # Bug fix: the positional-axis form `ds.drop(columns_keep, 1)` was
        # deprecated in pandas 1.3 and removed in pandas 2.0; use the
        # keyword form, which is equivalent.
        ds.drop(columns=columns_keep, inplace=True)
        ds = shuffle(ds)
        ds.reset_index(inplace=True, drop=True)
        dataset_size = ds.shape[0]
        logging.info(f"Size of dataset: {ds.shape[0]}")
        # 80/10/10 train/valid/test split.
        train_size = round(dataset_size * 80 / 100)
        valid_size = round(dataset_size * 10 / 100)
        train_ds = ds.iloc[:train_size]
        valid_ds = ds.iloc[train_size:train_size + valid_size]
        test_ds = ds.iloc[train_size + valid_size:]
        return train_ds, valid_ds, test_ds, ds
class RVSDataset:
    """Loads the RVS dataset and the S2-cell label mappings for one region."""

    def __init__(self, data_dir: str, s2level: int, region: str, lines: bool = True):
        ds = pd.read_json(os.path.join(data_dir, 'dataset.json'), lines=lines)
        logging.info(f"Size of dataset before removal of duplication: {ds.shape[0]}")
        # Expand the geo_landmarks dict column into top-level columns.
        ds = pd.concat([ds.drop(['geo_landmarks'], axis=1), ds['geo_landmarks'].apply(pd.Series)], axis=1)
        # Fix: removed an unused `lengths` computation that inspected
        # len(end_point) but was never read.
        # end_point/start_point entries: index 1 holds the OSM id, index 3
        # the point geometry - TODO confirm against the dataset builder.
        ds['end_osmid'] = ds.end_point.apply(lambda x: x[1])
        ds['start_osmid'] = ds.start_point.apply(lambda x: x[1])
        ds['end_pivot'] = ds.end_point
        ds['end_point'] = ds.end_point.apply(lambda x: x[3])
        ds['start_point'] = ds.start_point.apply(lambda x: x[3])
        ds = ds.drop_duplicates(subset=['end_osmid', 'start_osmid'], keep='last')
        logging.info(f"Size of dataset after removal of duplication: {ds.shape[0]}")
        # 80/10/10 split in file order (unlike RUNDataset, no shuffle here).
        dataset_size = ds.shape[0]
        train_size = round(dataset_size * 80 / 100)
        valid_size = round(dataset_size * 10 / 100)
        train_ds = ds.iloc[:train_size]
        valid_ds = ds.iloc[train_size:train_size + valid_size]
        test_ds = ds.iloc[train_size + valid_size:]
        # Get labels: S2 cells covering the requested region polygon.
        active_region = regions.get_region(region)
        unique_cellid = gutil.cellids_from_polygon(active_region.polygon, s2level)
        label_to_cellid = {idx: cellid for idx, cellid in enumerate(unique_cellid)}
        cellid_to_label = {cellid: idx for idx, cellid in enumerate(unique_cellid)}
        self.train = train_ds
        self.valid = valid_ds
        self.test = test_ds
        self.unique_cellid = unique_cellid
        self.label_to_cellid = label_to_cellid
        self.cellid_to_label = cellid_to_label
|
# <gh_stars>0  (scrape artifact, not Python code - commented out)
import flatbuffers
import File
import numpy
from websocket import create_connection
from time import time
# Chunk size for both file reads and flatbuffer payloads.
# 1.99 GB
#BUFFER_SIZE = 1990000000
# 1.99 MB
BUFFER_SIZE = 1990000
# 500 MB (the original comment said 50 MB, but 500000000 bytes is 500 MB)
#BUFFER_SIZE = 500000000
# Wall-clock start, used for the final timing printout in __main__.
START_TIME = time()
# TO-DO:
# the file transfer is extremely slow
# time the individual steps and find out which one is slowest
# currently at 100% CPU usage
# maybe the bytearray keeps resizing the array all the time?
# or maybe it is the flatbuffer that is very slow?
# main function, turn file -> flatbuffers and send them
# main function, turn file -> flatbuffers and send them
def send_fb_file(filename):
    """Stream *filename* to the debug consumer in BUFFER_SIZE chunks.

    Each chunk is wrapped in a File flatbuffer by debug_use_flatbuffer; a
    final packet with an empty payload and eof=True tells the consumer to
    close its output file.
    """
    packet_count = 0
    # output_file is closed by debug_use_flatbuffer when the EOF packet is
    # processed; the source file is closed by the with-statement (the
    # original leaked both handles on an exception, and shadowed the
    # `file` builtin).
    output_file = open("test.zip", "wb")
    with open(filename, "rb") as source:
        chunk = source.read(BUFFER_SIZE)
        while chunk:
            #print("progress: ", packet_count, "/?")
            debug_use_flatbuffer(chunk, filename, packet_count, output_file)
            packet_count += 1
            chunk = source.read(BUFFER_SIZE)
    # Bug fix: the original also shipped one redundant empty non-EOF packet
    # produced by the final b"" read; that packet is dropped here.
    debug_use_flatbuffer(bytearray(), filename, packet_count, output_file, True)
'''
table File {
filename:string;
packetnumber:int;
eof:bool;
data:[byte];
}
'''
def debug_use_flatbuffer(byte, filename, nr, output_file, eof=False):
    """Round-trip one chunk through a flatbuffer and append it to output_file.

    Builds a File flatbuffer from `byte`, immediately re-parses it and
    writes the decoded payload to `output_file`. When the packet carries
    eof=True the output file is closed instead of written. Timing prints
    are kept to profile the slow path (see module TO-DO).
    """
    func_start_time = time()
    output = build_flatbuffer(byte, filename, nr, eof)
    print("build_flatbuffers", time() - func_start_time)
    # Re-parse the buffer we just built (debug round-trip).
    fb_file = File.File.GetRootAsFile(output, 0)
    name = fb_file.Filename()
    file_eof = fb_file.Eof()
    print("OPEN FLATBUFFERS", time() - func_start_time)
    print("writing data to ", name, ", eof: ", file_eof, ", data len: ", fb_file.DataLength(), "...")
    # Note: earlier byte-by-byte extraction experiments were removed;
    # DataAsNumpy() below copies the payload in one C-level call.
    if file_eof:
        output_file.close()
        print("closed file.")
    else:
        arr = fb_file.DataAsNumpy()
        print("MAKE ARR", time() - func_start_time)
        output_file.write(bytearray(arr))
        print("WRITE TO FILE", time() - func_start_time)
    print("debug_use_flatbuffers", time() - func_start_time)
def build_flatbuffer(byte, filename, nr, eof=False):
    """Serialise one chunk of file data into a File flatbuffer.

    Returns the finished buffer as produced by builder.Output().
    """
    builder = flatbuffers.Builder(0)
    # Strings and vectors must be created before FileStart().
    name_offset = builder.CreateString(filename)
    data_offset = builder.CreateByteVector(byte)
    # if something is not working,
    # check that function names are correct!!!
    File.FileStart(builder)
    File.FileAddFilename(builder, name_offset)
    File.FileAddPacketnumber(builder, nr)
    File.FileAddEof(builder, eof)
    File.FileAddData(builder, data_offset)
    builder.Finish(File.FileEnd(builder))
    return builder.Output()
# WebSocket connection setup was planned here (the create_connection
# import above is currently unused).
if __name__ == "__main__":
    send_fb_file("portal2.zip")
    print("final time", time() - START_TIME)
    print("done!! enjoy")
|
"""
File: chapter04/mqtt_led.py
A full life-cycle Python + MQTT program to control an LED.
Dependencies:
pip3 install paho-mqtt gpiozero pigpio
Built and tested with Python 3.7 on Raspberry Pi 4 Model B
"""
import logging
import signal
import sys
import json
from time import sleep
from gpiozero import Device, PWMLED
from gpiozero.pins.pigpio import PiGPIOFactory
import paho.mqtt.client as mqtt # (1)
# Initialize Logging
logging.basicConfig(level=logging.WARNING)  # Global logging configuration
logger = logging.getLogger("main")  # Logger for this module
logger.setLevel(logging.INFO)  # Debugging for this file.
# Initialize GPIO
Device.pin_factory = PiGPIOFactory()  # Set GPIOZero to use PiGPIO by default.
# Global Variables
LED_GPIO_PIN = 21  # assumes BCM numbering (gpiozero default) - confirm wiring
BROKER_HOST = "localhost"  # (2)
BROKER_PORT = 1883  # standard unencrypted MQTT port
CLIENT_ID = "LEDClient"  # (3)
TOPIC = "led"  # (4)
client = None  # MQTT client instance. See init_mqtt()  # (5)
led = None  # PWMLED Instance. See init_led()
"""
GPIO Related Functions
"""
def init_led():
    """Create and initialise an LED Object"""
    global led
    led = PWMLED(LED_GPIO_PIN)  # PWM allows 0..1 brightness, not just on/off.
    led.off()  # Start dark so the hardware state is known at boot.
def set_led_level(data):  # (6)
    """Set LED brightness from an MQTT message payload.

    'data' expected to be a dictionary with the following format:
    {
        "level": a number between 0 and 100,
    }
    Non-dict payloads and payloads without 'level' are logged and ignored;
    unparseable levels turn the LED off.
    """
    # Robustness fix: the original did `"level" in data` which raises
    # TypeError when data is None (e.g. after a failed JSON decode).
    if not isinstance(data, dict) or "level" not in data:
        logger.info("Message '{}' did not contain property 'level'.".format(data))
        return
    level = data["level"]
    # Bug fix: the original called level.isdigit() on any non-numeric value,
    # raising AttributeError inside the MQTT callback for payloads such as
    # None or a list. Only strings get the isdigit() check now.
    if isinstance(level, (int, float)) or (isinstance(level, str) and level.isdigit()):
        # State is a number
        level = max(0, min(100, int(level)))  # Bound state to range 0..100
        led.value = level / 100  # Scale 0..100% back to 0..1
        logger.info("LED at brightness {}%".format(level))
    else:
        logger.info("Request for unknown LED level of '{}'. We'll turn it Off instead.".format(level))
        led.value = 0  # 0% = Led off.
"""
MQTT Related Functions and Callbacks
"""
def on_connect(client, user_data, flags, connection_result_code):  # (7)
    """on_connect is called when our program connects to the MQTT Broker.
    Always subscribe to topics in an on_connect() callback.
    This way if a connection is lost, the automatic
    re-connection will also result in the re-subscription occurring."""
    if connection_result_code == 0:  # (8)
        # 0 = successful connection
        logger.info("Connected to MQTT Broker")
        # Bug fix: subscribe only on a successful connection. The original
        # called subscribe() unconditionally, i.e. even after the broker
        # refused the connection.
        client.subscribe(TOPIC, qos=2)  # (9)
    else:
        # connack_string() gives us a user friendly string for a connection code.
        logger.error("Failed to connect to MQTT Broker: " + mqtt.connack_string(connection_result_code))
def on_disconnect(client, user_data, disconnection_result_code):  # (10)
    """Called when the client disconnects from the MQTT Broker."""
    logger.error("Disconnected from MQTT Broker")
def on_message(client, userdata, msg):  # (11)
    """Callback called when a message is received on a subscribed topic."""
    logger.debug("Received message for topic {}: {}".format(msg.topic, msg.payload))
    data = None
    try:
        data = json.loads(msg.payload.decode("UTF-8"))  # (12)
    except json.JSONDecodeError:
        logger.error("JSON Decode Error: " + msg.payload.decode("UTF-8"))
        # Bug fix: stop here - the original fell through and passed
        # data=None to set_led_level(), which then crashed.
        return
    if msg.topic == TOPIC:  # (13)
        set_led_level(data)  # (14)
    else:
        # Bug fix: the original used str(msg.topic, msg.payload), which is
        # the two-argument bytes-decoding form of str() and raises
        # TypeError; format the message instead.
        logger.error("Unhandled message topic {} with payload {}".format(msg.topic, msg.payload))
def signal_handler(sig, frame):
    """Capture Control+C and disconnect from Broker."""
    # Fix: removed the bogus `global led_state` - no such module variable
    # exists and nothing here assigns a global.
    logger.info("You pressed Control + C. Shutting down, please wait...")
    client.disconnect()  # Graceful disconnection.
    led.off()
    sys.exit(0)
def init_mqtt():
    """Create the global MQTT client, wire the callbacks and connect to the Broker."""
    global client
    # Our MQTT Client. See PAHO documentation for all configurable options.
    # "clean_session=True" means we don"t want Broker to retain QoS 1 and 2 messages
    # for us when we"re offline. You"ll see the "{"session present": 0}" logged when
    # connected.
    # NOTE(review): the comment above describes clean_session=True but the
    # code passes clean_session=False - confirm which is intended.
    client = mqtt.Client(  # (15)
        client_id=CLIENT_ID,
        clean_session=False)
    # Route Paho logging to Python logging.
    client.enable_logger()  # (16)
    # Setup callbacks
    client.on_connect = on_connect  # (17)
    client.on_disconnect = on_disconnect
    client.on_message = on_message
    # Connect to Broker.
    client.connect(BROKER_HOST, BROKER_PORT)  # (18)
# Initialise Module
init_led()
init_mqtt()
if __name__ == "__main__":
    signal.signal(signal.SIGINT, signal_handler)  # Capture Control + C  # (19)
    logger.info("Listening for messages on topic '" + TOPIC + "'. Press Control + C to exit.")
    client.loop_start()  # (20)  network loop runs on a background thread
    signal.pause()  # sleep the main thread until a signal arrives
|
# pylint: disable=unnecessary-pass,logging-fstring-interpolation,logging-format-interpolation,raise-missing-from, unused-argument, line-too-long, too-many-arguments, no-self-use,missing-function-docstring,protected-access
"""Common functions used in BDD"""
import json
import boto3
import time
import botocore.exceptions
import logging
# Shared logger for all BDD helper functions in this module.
LOGGER = logging.getLogger(name="TestHelpers")
LOGGER.setLevel(logging.INFO)
def count_s3_files_at_prefix(bucket, prefix, account_alias, s3_client=None):
    """
    Count S3 files in a bucket with a certain prefix
    Args:
        bucket: S3 bucket name
        prefix: file prefix (listed under "<account_alias>/<prefix>")
        account_alias: account alias used as the top-level key prefix
        s3_client: s3 boto client; created lazily when not supplied
    Returns: num files with prefix
    """
    # Fix: the original default `s3_client=boto3.client('s3')` was evaluated
    # once at import time (network/credential work on import, and a single
    # shared client). A None default keeps the signature compatible while
    # deferring client creation to the call.
    if s3_client is None:
        s3_client = boto3.client('s3')
    response = s3_client.list_objects(
        Bucket=bucket,
        Prefix="{}/{}".format(account_alias, prefix)
    )
    # NOTE(review): list_objects returns at most one page of keys; this
    # counts only the first page - confirm callers never exceed it.
    contents = response.get('Contents', None)
    if contents:
        return len(contents)
    else:
        return 0
def round_down_to_nearest_power_of_2(input_num):
    """
    Round down to the nearest power of 2.

    Args:
        input_num: input number (int or float; may be non-integral)
    Returns: the largest power of 2 <= input_num, or 1 for inputs <= 1
    """
    # Bug fix: the original loop fell through and returned None for
    # non-integral inputs such as 1.5 or 2.5 (range(1, int(n)) ended before
    # 2**i could exceed n). int.bit_length() gives the position of the
    # highest set bit, i.e. floor(log2(n)) + 1 for positive integers.
    if input_num <= 1:
        return 1
    return 2 ** (int(input_num).bit_length() - 1)
def delete_existing_vpcs(cf_client, stack_to_delete=None):
    """
    Delete all VPCx stacks in account or a specific stack id specified
    Args:
        cf_client: boto3 CloudFormation client
        stack_to_delete: optional stack name to limit to single delete. Deletes all vpcx stacks if default
    Returns: None
    """
    # List stacks
    # NOTE(review): list_stacks() is paginated; only the first page is
    # examined - confirm test accounts never exceed one page of summaries.
    response = cf_client.list_stacks()
    LOGGER.info(f"Trying to delete VPC of {stack_to_delete}")
    if not stack_to_delete:
        # Iterate over stacks
        stacks = response.get('StackSummaries', [])
        # Keep only live stacks (no DeletionTime yet), newest first.
        sorted_stacks = list(filter(lambda x: x.get('DeletionTime', None) is None, stacks))
        sorted_stacks.sort(key=lambda summary: summary.get('CreationTime', ""), reverse=True)
        for stack in sorted_stacks:
            stack_name = stack['StackName']
            # Delete any primary or hpc VPC stacks
            if 'vpc-primary' in stack_name or 'vpc-hpc' in stack_name:
                LOGGER.info(f"Trying to delete stack {stack_name} in status {stack['StackStatus']}")
                # Delete stack
                cf_client.delete_stack(
                    StackName=stack_name
                )
                # Wait for stack to delete (5s delay * 60 attempts = 5 min cap).
                waiter = cf_client.get_waiter('stack_delete_complete')
                waiter.wait(
                    StackName=stack_name,
                    WaiterConfig={
                        'Delay': 5,
                        'MaxAttempts': 60
                    }
                )
    else:
        for stack in response.get('StackSummaries', None):
            # Match by name, skipping summaries of already-deleted stacks.
            if stack['StackName'] == stack_to_delete and stack['StackStatus'] != "DELETE_COMPLETE":
                LOGGER.info(f"Deleting stack {stack['StackName']} in status {stack['StackStatus']} created {stack['CreationTime']}")
                if stack['StackStatus'] == "DELETE_IN_PROGRESS":
                    # A delete is already underway; just wait for it below.
                    LOGGER.info(f"Delete in progress for {stack['StackId']}")
                else:
                    cf_client.delete_stack(StackName=stack_to_delete)
                try:
                    LOGGER.info(f"Waiting on delete of {stack['StackId']}")
                    waiter = cf_client.get_waiter('stack_delete_complete')
                    waiter.wait(
                        StackName=stack_to_delete,
                        WaiterConfig={
                            'Delay': 5,
                            'MaxAttempts': 60
                        }
                    )
                except botocore.exceptions.WaiterError as error:
                    # Best-effort: log the failure and continue rather than
                    # abort the whole test run.
                    LOGGER.info(f"Failed to delete {stack_to_delete}{stack['StackId']}: {error}")
def mock_log_buckets(credentials, test_account, regions, metadata):
    """
    Create and update mock data for log buckets in Accounts Table
    Args:
        credentials: AWS credentials
        test_account: test account alias
        regions: list of regions to create an S3 bucket
        metadata: current metadata from Accounts Table
    Returns: updated metadata
    """
    # S3 client bound to the supplied temporary credentials.
    s3_client = boto3.client(
        's3',
        aws_access_key_id=credentials['AccessKeyId'],
        aws_secret_access_key=credentials['SecretAccessKey'],
        aws_session_token=credentials['SessionToken']
    )
    # Names of every bucket that already exists in the account.
    existing_buckets = [entry['Name'] for entry in s3_client.list_buckets().get('Buckets')]
    # Ensure one mock log bucket per requested region.
    new_buckets = {}
    for region in regions:
        log_bucket_name = "{}-temp-log-{}".format(test_account, region)
        new_buckets[region] = log_bucket_name
        if log_bucket_name not in existing_buckets:
            s3_client.create_bucket(
                ACL='private',
                Bucket=log_bucket_name,
                CreateBucketConfiguration={'LocationConstraint': region}
            )
    # Record the bucket map on the account metadata.
    metadata['log_buckets'] = json.dumps(new_buckets)
    return metadata
|
# coding=utf-8
# @Author : zhzhx2008
# @Time : 18-10-9
# Reference: https://github.com/airalcorn2/Recurrent-Convolutional-Neural-Network-Text-Classifier
import os
import warnings
import jieba
import keras.backend as K
import numpy as np
from keras import Input
from keras import Model
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Embedding, Dense, concatenate, LSTM, TimeDistributed, Lambda
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
warnings.filterwarnings("ignore")
# Fix the NumPy RNG so operations that rely on np.random are repeatable.
seed = 2019
np.random.seed(seed)
def get_labels_datas(input_dir):
    """Walk a THUCNews-style directory tree and load one sample per file.

    Each sub-directory of input_dir is a class label; each text file inside
    it contributes its first line as one sample.

    Returns:
        labels: directory name (label) per sample
        datas_word: samples tokenised by jieba, space-joined
        datas_char: samples split into single characters, space-joined
    """
    datas_word = []
    datas_char = []
    labels = []
    label_dirs = os.listdir(input_dir)
    for label_dir in label_dirs:
        txt_names = os.listdir(os.path.join(input_dir, label_dir))
        for txt_name in txt_names:
            # Robustness fix: read as UTF-8 explicitly; the original relied
            # on the platform default encoding, which breaks on Chinese
            # text under non-UTF-8 locales.
            with open(os.path.join(input_dir, label_dir, txt_name), 'r', encoding='utf-8') as fin:
                content = fin.readline()  # only the first line is used
                content = content.strip().replace(' ', '')
                datas_word.append(' '.join(jieba.cut(content)))
                datas_char.append(' '.join(list(content)))
                labels.append(label_dir)
    return labels, datas_word, datas_char
def get_label_id_map(labels):
    """Build the id->label and label->id lookup tables for a label list."""
    distinct_labels = set(labels)
    # Both tables enumerate the same set object, so the pairings agree.
    id_label_map = dict(enumerate(distinct_labels))
    label_id_map = {name: idx for idx, name in enumerate(distinct_labels)}
    return id_label_map, label_id_map
# Corpus root: one sub-directory per class label.
input_dir = './data/THUCNews'
labels, datas_word, datas_char = get_labels_datas(input_dir)
id_label_map, label_id_map = get_label_id_map(labels)
# First split: 70% train+dev / 30% test, stratified by label (note that
# `labels`/`datas_word`/`datas_char` are rebound to the 70% portion).
labels, labels_test, datas_word, datas_word_test, datas_char, datas_char_test = train_test_split(labels, datas_word, datas_char, test_size=0.3, shuffle=True, stratify=labels)
# Second split: carve a 10% dev set out of that remaining 70%.
labels_train, labels_dev, datas_word_train, datas_word_dev, datas_char_train, datas_char_dev = train_test_split(labels, datas_word, datas_char, test_size=0.1, shuffle=True, stratify=labels)
y_train = [label_id_map.get(x) for x in labels_train]
y_dev = [label_id_map.get(x) for x in labels_dev]
y_test = [label_id_map.get(x) for x in labels_test]
num_classes = len(set(y_train))
# One-hot encode the integer labels.
y_train_index = to_categorical(y_train, num_classes)
y_dev_index = to_categorical(y_dev, num_classes)
y_test_index = to_categorical(y_test, num_classes)
# keras extract feature: vocabulary fitted on the training words only.
tokenizer = Tokenizer()
tokenizer.fit_on_texts(datas_word_train)
vocabulary_length = len(tokenizer.word_index)
# feature5: word index for deep learning
x_train_word_index = tokenizer.texts_to_sequences(datas_word_train)
x_dev_word_index = tokenizer.texts_to_sequences(datas_word_dev)
x_test_word_index = tokenizer.texts_to_sequences(datas_word_test)
# RCNN inputs: shift each sequence right/left to obtain per-token left and
# right contexts. Index vocabulary_length + 1 acts as the out-of-sentence
# padding token.
x_train_word_index_left = [[vocabulary_length + 1] + x[:-1] for x in x_train_word_index]
x_dev_word_index_left = [[vocabulary_length + 1] + x[:-1] for x in x_dev_word_index]
x_test_word_index_left = [[vocabulary_length + 1] + x[:-1] for x in x_test_word_index]
x_train_word_index_right = [x[1:] + [vocabulary_length + 1] for x in x_train_word_index]
x_dev_word_index_right = [x[1:] + [vocabulary_length + 1] for x in x_dev_word_index]
x_test_word_index_right = [x[1:] + [vocabulary_length + 1] for x in x_test_word_index]
# Pad everything to the longest training sequence.
max_word_length = max(
    max([len(x) for x in x_train_word_index]),
    max([len(x) for x in x_train_word_index_left]),
    max([len(x) for x in x_train_word_index_right])
)
x_train_word_index_left = pad_sequences(x_train_word_index_left, maxlen=max_word_length)
x_dev_word_index_left = pad_sequences(x_dev_word_index_left, maxlen=max_word_length)
x_test_word_index_left = pad_sequences(x_test_word_index_left, maxlen=max_word_length)
x_train_word_index_right = pad_sequences(x_train_word_index_right, maxlen=max_word_length)
x_dev_word_index_right = pad_sequences(x_dev_word_index_right, maxlen=max_word_length)
x_test_word_index_right = pad_sequences(x_test_word_index_right, maxlen=max_word_length)
x_train_word_index = pad_sequences(x_train_word_index, maxlen=max_word_length)
x_dev_word_index = pad_sequences(x_dev_word_index, maxlen=max_word_length)
x_test_word_index = pad_sequences(x_test_word_index, maxlen=max_word_length)
# Three parallel inputs: the sentence, its left context and its right context.
# NOTE(review): `input` shadows the builtin; harmless in a script, but
# rename it if this is ever refactored into functions.
input = Input(shape=(max_word_length,), dtype='int32')
input_left = Input(shape=(max_word_length,), dtype='int32')
input_right = Input(shape=(max_word_length,), dtype='int32')
# +1 for Keras' reserved index 0, +1 for the padding token added above.
embedding = Embedding(vocabulary_length + 1 + 1, 100)
embedding_input = embedding(input)
embedding_input_left = embedding(input_left)
embedding_input_right = embedding(input_right)
# SimpleRNN, GRU(CuDNNGRU), or LSTM(CuDNNLSTM)
forward = LSTM(128, return_sequences=True)(embedding_input_left)
backward = LSTM(128, return_sequences=True, go_backwards=True)(embedding_input_right)
# Keras returns the output sequences in reverse order.
backward = Lambda(lambda x: K.reverse(x, axes=1))(backward)
# RCNN representation: [left context; word embedding; right context].
together = concatenate([forward, embedding_input, backward], axis=2)
# semantic = Conv1D(128, kernel_size = 1, activation = "tanh")(together)
semantic = TimeDistributed(Dense(128, activation='tanh'))(together)
# Max-pool over time to get a fixed-size sentence vector.
pool_rnn = Lambda(lambda x: K.max(x, axis=1), output_shape=(128,))(semantic)
output = Dense(num_classes, activation='softmax')(pool_rnn)
model = Model(inputs=[input, input_left, input_right], outputs=output)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
model_weight_file = './model_rcnn.h5'
model_file = './model_rcnn.model'
# Stop after 5 epochs without val_loss improvement; checkpoint keeps only
# the best weights.
early_stopping = EarlyStopping(monitor='val_loss', patience=5)
model_checkpoint = ModelCheckpoint(model_weight_file, save_best_only=True, save_weights_only=True)
model.fit([x_train_word_index, x_train_word_index_left, x_train_word_index_right],
          y_train_index,
          batch_size=32,
          epochs=1000,
          verbose=2,
          callbacks=[early_stopping, model_checkpoint],
          validation_data=([x_dev_word_index, x_dev_word_index_left, x_dev_word_index_right], y_dev_index),
          shuffle=True)
# Restore the best checkpoint before saving and evaluating.
model.load_weights(model_weight_file)
model.save(model_file)
evaluate = model.evaluate([x_test_word_index, x_test_word_index_left, x_test_word_index_right], y_test_index, batch_size=32, verbose=2)
print('loss value=' + str(evaluate[0]))
print('metrics value=' + str(evaluate[1]))
# Reference results from a previous run:
# loss value=1.4838370917335388
# metrics value=0.45238095332705786
# <reponame>nikolarobottesla/PyLTSpice <gh_stars>0  (scrape artifact, not Python code - commented out)
# -------------------------------------------------------------------------------
# Name: Histogram.py
# Purpose: Make an histogram plot based on the results of LTSpice.py
#
# Author: <NAME> (<EMAIL>)
#
# Created: 17-01-2017
# Licence: Free
# -------------------------------------------------------------------------------
__author__ = "<NAME> <<EMAIL>>"
__copyright__ = "Copyright 2017, Fribourg Switzerland"
#!/usr/bin/env python
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from optparse import OptionParser
usage = "usage: %prog [options] LOG_FILE TRACE"
# NOTE(review): optparse has been deprecated in favour of argparse since
# Python 2.7; kept as-is to preserve this standalone script's behaviour.
opts = OptionParser(usage=usage, version="%prog 0.1")
#opts.add_option('v', "var", action="store", type="string", dest="trace", help="The trace to be used in the histogram")
# -s: how many standard deviations the fitted band covers.
opts.add_option('-s', "--sigma", action="store", type="int", dest="sigma", default=3, help="Sigma to be used in the distribution fit. Default=3")
opts.add_option('-n', "--nbins", action="store", type="int", dest="nbins", default=20, help="Number of bins to be used in the histogram. Default=20")
# -c may be repeated; each expression is eval()'d against every data row.
opts.add_option('-c', "--condition", action="append", type="string", dest="filters",
                help="Filter condition writen in python. More than one expression can be added but each expression should be preceded by -f.\n" +
                     "EXAMPLE: -c V(N001)>4 -c parameter==1 -c I(V1)<0.5" )
opts.add_option('-f', "--format", action="store", type="string", dest="format", help="Format string for the X axis. Example: -f %3.4f")
#opts.add_option('-p', "--scaling",action="store", type="string", dest="prescaling", help="Prescaling function to be applied to the input value.")
opts.add_option('-t', "--title", action="store", type="string", dest="title", help="Title to appear on the top of the histogram.")
opts.add_option('-r', "--range", action="store", type="string", dest="range", help="Range of the X axis to use for the histogram in the form min:max. Example: -r -1:1")
opts.add_option('-C', "--clipboard", action="store_true", dest="clipboard", help="If the data from the clipboard is to be used.")
#opts.add_option('-x', "--xname", action="store", dest="xname", help="Name for the variable displayed")
opts.add_option('-i', "--image", action="store", type="string", dest="imagefile", help="Name of the image File. extension 'png'")
(options, args) = opts.parse_args()
values = []
if options.clipboard:
    # Clipboard mode: one numeric value per line; TRACE name is optional.
    try:
        import clipboard
    except ImportError:
        print("Failed to load clipboard package. Use PiP to install it.")
        exit(1)
    if len(args) > 0:
        TRACE = args[-1]
    else:
        TRACE = "var"
    text = clipboard.paste()
    for line in text.split('\n'):
        try:
            values.append(float(line))
        except ValueError:
            # Non-numeric clipboard lines are reported and skipped.
            print("Failed to process ")
            print(line)
elif len(args)==0:
    opts.print_help()
    exit(-1)
else:
    if len(args) < 2:
        # NOTE(review): opts.error() raises SystemExit itself, so the
        # print_help()/exit(-1) below are unreachable.
        opts.error("Wrong number of parameters.")
        opts.print_help()
        exit(-1)
    # if (len(args)==1): # This will search for the most recent file
    #     newer_date = 0
    #     filename = None
    #     for f in os.listdir():
    #         date = os.path.getmtime(f)
    #         if date > newer_date and f.endswith(".tlog"):
    #             newer_date = date
    #             filename = f
    #     if filename == None:
    #         opts.error("A LOG_FILE should be given")
    TRACE = args[1]
    logfile = args[0]
if not options.filters is None:
    print("Filters Applied:", options.filters)
else:
    print("No filters defined")
# Parse the log file: the first line is a tab-separated header, the
# remaining lines are tab-separated measurement rows.
with open(logfile, 'r') as log:
    # The with-statement closes the file on every path (the original leaked
    # the handle if a later line raised outside the except branch).
    header = log.readline().rstrip('\n')
    # Renamed from `vars`, which shadowed the vars() builtin.
    column_names = header.split('\t')
    try:
        sav_col = column_names.index(TRACE)
    except ValueError:
        print("File '%s' doesn't have trace '%s'" % (logfile, TRACE))
        print("LOG FILE contains %s" % column_names)
        exit(-1)
    if (options.filters is None) or (len(options.filters) == 0):
        # No filters: collect the trace column from every row.
        for line in log:
            vs = line.split('\t')
            values.append(float(vs[sav_col]))
    else:
        for line in log:
            vs = map(float, line.split('\t'))
            env = dict(zip(column_names, vs))
            for expression in options.filters:
                # SECURITY: eval() executes arbitrary user-supplied Python.
                # Acceptable for a local analysis script, but never expose
                # this path to untrusted input.
                test = eval(expression, None, env)
                if not test:
                    break
            else:
                # for/else: runs only when no filter expression failed.
                values.append(float(env[TRACE]))
if len(values) == 0:
    print("No elements found")
elif len(values) < options.nbins:
    print("Not enough elements for an histogram")
else:
    # Basic statistics of the collected samples.
    x = np.array(values, dtype=float)
    mu = x.mean()
    mn = x.min()
    mx = x.max()
    sd = np.std(x)
    # Boundaries of the +/- sigma band shaded on the plot.
    sigmin = mu - options.sigma*sd
    sigmax = mu + options.sigma*sd
    if options.range is None:
        # Automatic calculation of the range: one sigma beyond the band,
        # widened to include the observed min/max.
        axisXmin = mu - (options.sigma+1)*sd
        axisXmax = mu + (options.sigma + 1) * sd
        if mn < axisXmin:
            axisXmin = mn
        if mx > axisXmax:
            axisXmax = mx
    else:
        try:
            smin, smax = options.range.split(":")
            axisXmin = float(smin)
            axisXmax = float(smax)
        except:  # NOTE(review): bare except also swallows KeyboardInterrupt; ValueError would suffice.
            opts.error("Invalid range setting")
            exit(-1)
    if options.format:
        fmt = options.format
    else:
        fmt = "%f"
    print("Collected %d elements" % len(values))
    print("Distributing in %d bins" % options.nbins)
    print("Minimum is " + fmt % mn)
    print("Maximum is " + fmt % mx)
    print("Mean is " + fmt % mu)
    print("Standard Deviation is " + fmt % sd)
    print(("Sigma %d boundaries are " + fmt + " and " + fmt) % (options.sigma, sigmin, sigmax))
    # NOTE(review): hist(..., normed=True) was removed in matplotlib 3.1
    # (density=True is the replacement); kept for the matplotlib version
    # this script was written against.
    n, bins, patches = plt.hist(x, options.nbins, normed=True, facecolor='green', alpha=0.75, range=(axisXmin, axisXmax))
    axisYmax = n.max() * 1.1
    # add a 'best fit' line
    # NOTE(review): mlab.normpdf was also removed in matplotlib 3.1
    # (scipy.stats.norm.pdf is the modern replacement).
    y = mlab.normpdf( bins, mu, sd)
    l = plt.plot(bins, y, 'r--', linewidth=1)
    # Shade the +/- sigma band.
    plt.axvspan(mu - options.sigma*sd, mu + options.sigma*sd, alpha=0.2, color="cyan")
    plt.xlabel(TRACE)
    plt.ylabel('Distribution [Normalised]')
    if options.title is None:
        title = (r'$\mathrm{Histogram\ of\ %s:}\ \mu='+fmt+r',\ stdev='+fmt+r',\ \sigma=%d$') % (TRACE, mu, sd, options.sigma)
    else:
        title = options.title
    plt.title(title)
    plt.axis([axisXmin, axisXmax, 0, axisYmax ])
    plt.grid(True)
    if options.imagefile is not None:
        plt.savefig(options.imagefile)
    else:
        plt.show()
import os
from skimage.filters import gaussian
from PIL import Image
import numpy as np
import cv2
def compress_JPG_image(image, path_original, size=(1920, 1080)) -> str:
    """Downscale *image* to fit within *size* and save it as a quality-85 JPG.

    Returns the path of the written .jpg file (same directory and basename
    as path_original). Note: despite the original docstring, this function
    resizes/compresses; the format conversion happens in convert_to_JPG().
    """
    width, height = size
    name = os.path.basename(path_original).split(".")
    first_name = os.path.join(os.path.dirname(path_original), name[0] + ".jpg")
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (Image.LANCZOS
    # is the replacement); kept for the Pillow version pinned here.
    if image.size[0] > width and image.size[1] > height:
        # Both dimensions too large: thumbnail resizes in place, keeping
        # the aspect ratio.
        image.thumbnail(size, Image.ANTIALIAS)
        image.save(first_name, quality=85)
    elif image.size[0] > width:
        # Only too wide: scale the height by the same ratio as the width.
        wpercent = width / float(image.size[0])
        height = int((float(image.size[1]) * float(wpercent)))
        image = image.resize((width, height), Image.ANTIALIAS)
        image.save(first_name, quality=85)
    elif image.size[1] > height:
        # Only too tall: scale the width by the same ratio as the height.
        wpercent = height / float(image.size[1])
        width = int((float(image.size[0]) * float(wpercent)))
        image = image.resize((width, height), Image.ANTIALIAS)
        image.save(first_name, quality=85)
    else:
        # Already small enough: just re-save as a JPG.
        image.save(first_name, quality=85)
    return first_name
def convert_to_JPG(path_original) -> str:
    """Convert a JPEG/GIF/PNG/BMP file to a compressed RGB JPG.

    The converted file is written next to the original by
    compress_JPG_image(); note that the ORIGINAL path is returned, not the
    new .jpg path. Unrecognised formats fall through untouched (and the
    opened image is not closed on that path).
    """
    img = Image.open(path_original)
    name = os.path.basename(path_original).split(".")
    # NOTE(review): first_name is computed but never used here -
    # compress_JPG_image recomputes the same path internally.
    first_name = os.path.join(os.path.dirname(path_original), name[0] + ".jpg")
    if img.format == "JPEG":
        image = img.convert("RGB")
        compress_JPG_image(image, path_original)
        img.close()
    elif img.format == "GIF":
        # Flatten the first GIF frame onto a transparent background.
        i = img.convert("RGBA")
        bg = Image.new("RGBA", i.size)
        image = Image.composite(i, bg, i)
        compress_JPG_image(image, path_original)
        img.close()
    elif img.format == "PNG":
        try:
            # Composite onto white so transparent areas don't turn black.
            image = Image.new("RGB", img.size, (255, 255, 255))
            # NOTE(review): paste(img, img) passes an Image where the box
            # argument is expected - presumably paste(img, mask=img) was
            # intended; the ValueError fallback below masks this. Confirm.
            image.paste(img, img)
            compress_JPG_image(image, path_original)
        except ValueError:
            image = img.convert("RGB")
            compress_JPG_image(image, path_original)
        img.close()
    elif img.format == "BMP":
        image = img.convert("RGB")
        compress_JPG_image(image, path_original)
        img.close()
    return path_original
def blur_image(image, x0, x1, y0, y1, sigma=1, multichannel=True):
    """Square blur in image: gaussian-blur the rectangle (x0..x1, y0..y1)."""
    # Normalise the rectangle so the slice bounds are ascending.
    if y1 < y0:
        y0, y1 = y1, y0
    if x1 < x0:
        x0, x1 = x1, x0
    result = image.copy()
    region = result[y0:y1, x0:x1].copy()
    blurred = gaussian(region, sigma=sigma, multichannel=multichannel)
    # gaussian() returns floats in 0..1; rescale back to 0..255.
    result[y0:y1, x0:x1] = np.round(255 * blurred)
    return result
def draw_segment(baseImg, matImg):
    """Return an RGBA image keeping only pixels where matImg is non-zero.

    baseImg: PIL image, assumed RGB (the original unpacked 3-channel
    pixels, so other modes already failed) - TODO confirm callers.
    matImg: (height, width) mask; zero entries become fully transparent.
    """
    width, height = baseImg.size
    dummyImg = np.zeros([height, width, 4], dtype=np.uint8)
    # Performance fix: the original called getpixel() once per pixel in a
    # nested Python loop (O(width*height) interpreter-level calls); this
    # vectorised form produces the identical array.
    rgb = np.asarray(baseImg)[:, :, :3]
    mask = np.asarray(matImg) != 0
    dummyImg[:, :, :3][mask] = rgb[mask]
    dummyImg[:, :, 3][mask] = 255
    img = Image.fromarray(dummyImg)
    return img
|
import json
import time
from random import uniform
import pandas as pd
from processing.utils import infer_gender_image, infer_gender_name, download_images
import os
import re
from bs4 import BeautifulSoup
import pandas as pd
import requests
from constants import *
import urllib
from pathlib import Path
import datetime
def visit_language(list_path: str,
                   html_path: str,
                   language: str,
                   file_regex: str = "([0-9]+)\_([0-9]+)"):
    """Parse all saved teacher HTML pages for one language into a DataFrame.

    File names are expected to be "<position>_<date>"; the date selects the
    matching teacher list JSON loaded from list_path.
    """
    # Bug fix: the original filtered with `~file.startswith('.')` - bitwise
    # NOT of a bool is -1 or -2, both truthy, so hidden files were never
    # actually excluded. `not` is the boolean negation intended here.
    files = set(file for file in os.listdir(html_path + language) if not file.startswith('.'))
    teachers = []
    # Read teachers lists, keyed by date (file name without .json).
    list_files = set(file for file in os.listdir(list_path + language) if not file.startswith('.'))
    lists = {}
    for list_file in list_files:
        lists[list_file.replace('.json', '')] = pd.read_json(os.path.join(list_path + language, list_file))
    for file in files:
        match = re.match(file_regex, file)
        # Robustness: skip files that don't follow the <pos>_<date> pattern
        # (the original crashed on match.group for such names).
        if not match:
            continue
        pos = match.group(1)
        date = match.group(2)
        if date in lists:
            soup = generate_soup(os.path.join(html_path + language, file), pos, language, lists[date])
            if soup:
                info = crawl_teacher(pos, date, language, lists[date], soup)
                teachers.append(info)
    df = pd.DataFrame(teachers)
    return df
def generate_soup(file_path: str,
                  position: int,
                  language: str,
                  prior_info: dict):
    """Load a saved teacher page, re-downloading it when it looks incomplete.

    prior_info is indexed like a pandas DataFrame below, despite the `dict`
    annotation - NOTE(review): confirm and fix the annotation upstream.
    Returns a BeautifulSoup of the page, or None when the page cannot be
    recovered, or when the URL contains the language name (presumably a
    redirect/invalid-profile marker - TODO confirm why those are skipped).
    """
    soup = BeautifulSoup(open(file_path), "html.parser")
    url = prior_info[prior_info['position'] == int(position)]['url'].values[0]
    if language.lower() in url:
        return None
    elif '?ssr=true' in url:
        url = url.replace('/?ssr=true', '')
    attempts = 0
    # The teacher-name header is missing when the saved HTML is incomplete
    # (e.g. a bot-check page); retry the download up to 3 times with a
    # browser User-Agent and a randomised 1-2s delay between attempts.
    while soup.find("h2", {'class': 'name___10LsT'}) is None and attempts < 3:
        opener = urllib.request.build_opener()
        opener.addheaders = [('User-agent', 'Mozilla/5.0'),
                             ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')]
        urllib.request.install_opener(opener)
        urllib.request.urlretrieve(url, file_path)
        soup = BeautifulSoup(open(file_path), "html.parser")
        time.sleep(uniform(1, 2))
        attempts += 1
    if soup.find("h2", {'class': 'name___10LsT'}) is None:
        return None
    else:
        return soup
def crawl_teacher_old(position: int,
                      date: int,
                      language: str,
                      prior_info: dict,
                      soup):
    """Extract teacher fields from the *old* page layout.

    Each field is scraped independently; a failed lookup stores the sentinel
    'ERROR' so one broken selector does not abort the whole record.

    :param position: teacher's position in the list snapshot
    :param date: retrieval date of the snapshot
    :param language: language being crawled
    :param prior_info: DataFrame of list metadata ('position', 'is_featured')
    :param soup: parsed BeautifulSoup of the teacher page
    :return: dict of scraped teacher attributes
    """
    # FIX: bare ``except:`` also swallows KeyboardInterrupt/SystemExit;
    # narrowed to ``except Exception`` throughout.
    info = {}
    info["language"] = language
    info["position"] = position
    info["retrieval_date"] = date
    info['is_featured'] = prior_info[prior_info['position'] == int(position)]['is_featured'].values[0]
    try:
        info["user_name"] = soup.find("span", {"class": "ecom-name"}).text.strip()
    except Exception:
        info['user_name'] = 'ERROR'
    try:
        info["url"] = "https:" + soup.find("link", {"rel": "alternate"}).get('href')
    except Exception:
        info['url'] = 'ERROR'
    try:
        info['nationality'] = soup.find("span", {"class": "hint"}).text.strip()
    except Exception:
        info['nationality'] = 'ERROR'
    try:
        info['avg_rating'] = soup.find("span", {"class": "ts-tutor-rating--total"}).text.strip()
    except Exception:
        # new tutors have no rating widget at all
        if soup.find("div", {"class": "tutor-stats__item tutor-stats__item--new-tutor"}):
            info['avg_rating'] = 'NEW TUTOR'
        else:
            info['avg_rating'] = 'ERROR'
    try:
        info['num_ratings'] = soup.findAll("div", {"class": "box__title box__title--md"})[-1].text.split()[1]
    except Exception:
        if soup.find("div", {"class": "tutor-stats__item tutor-stats__item--new-tutor"}):
            info['num_ratings'] = 'NEW TUTOR'
        else:
            info['num_ratings'] = 'ERROR'
    try:
        teaches = soup.findAll("div", {"class": "tabs__item"})
        info['teaches'] = [lang.text.strip() for lang in teaches if 'language' in lang.text]
    except Exception:
        info['teaches'] = 'ERROR'
    try:
        subjects = soup.findAll("h6", {"class": "p-subjects__title"})
        if len(subjects):
            info['subjects'] = [s.text.strip() for s in subjects]
        else:
            info['subjects'] = None
    except Exception:
        info['subjects'] = 'ERROR'
    try:
        # {spoken language: proficiency level}
        speaks = {}
        for _ in soup.findAll("span", {"data-qa-group": "tutor-speaks-elements"}):
            l = _.text.split()
            speaks[l[0]] = l[1]
        info['speaks'] = speaks
    except Exception:
        info['speaks'] = 'ERROR'
    try:
        # the lesson counter is the only two-token entry among the list icons
        lessons = soup.findAll("div", {"class": "list-icon__text"})
        for l in lessons:
            if len(l.text.split()) == 2:
                info['lessons'] = l.text.split()[0]
                break
    except Exception:
        info['lessons'] = 'ERROR'
    try:
        info['price'] = soup.find("span", {"class": "tutor-price__value"}).text
    except Exception:
        info['price'] = 'ERROR'
    try:
        info['price_currency'] = soup.find("span", {"class": "tutor-price__currency"}).text
    except Exception:
        info['price_currency'] = 'ERROR'
    try:
        # avatar URL is embedded in the inline CSS ``style`` attribute
        info['avatar_url'] = re.findall(r'(https?://[^\s\)\']+)',
                                        soup.find("div", {"class": "avatar avatar--profile avatar--square"}).get('style'))[0]
    except Exception:
        info['avatar_url'] = 'ERROR'
    return info
def crawl_teacher(position: int,
                  date: int,
                  language: str,
                  prior_info: dict,
                  soup):
    """Extract teacher fields from the *current* page layout.

    Each field is scraped independently; a failed lookup stores the sentinel
    'ERROR' so one broken selector does not abort the whole record.

    :param position: teacher's position in the list snapshot
    :param date: retrieval date of the snapshot
    :param language: language being crawled
    :param prior_info: DataFrame of list metadata ('position', 'is_featured')
    :param soup: parsed BeautifulSoup of the teacher page
    :return: dict of scraped teacher attributes
    """
    # FIX: bare ``except:`` narrowed to ``except Exception`` throughout.
    info = {}
    info["language"] = language
    info["position"] = position
    info["retrieval_date"] = date
    info['is_featured'] = prior_info[prior_info['position'] == int(position)]['is_featured'].values[0]
    try:
        info["user_name"] = soup.find("h2", {'class': 'name___10LsT'}).text.strip()
    except Exception:
        info['user_name'] = 'ERROR'
    try:
        info["url"] = soup.find("meta", {'property': 'og:url'}).get('content')
    except Exception:
        info['url'] = 'ERROR'
    try:
        info['nationality'] = soup.find("img", {'class': 'flag___26DQj'}).get('alt')
    except Exception:
        info['nationality'] = 'ERROR'
    try:
        info['avg_rating'] = soup.find("div", {'class': 'RatingIndicatorRating___374zP'}).text
    except Exception:
        if soup.find("div", {"class": "NewTutorBadge___sVfwo"}):
            info['avg_rating'] = 'NEW TUTOR'
        else:
            info['avg_rating'] = 'ERROR'
    try:
        info['num_ratings'] = soup.find("div", {'class': 'ReviewsNumber___1enrU'}).text
    except Exception:
        # BUG FIX: the original assigned info['avg_rating'] in this fallback,
        # clobbering the rating and leaving 'num_ratings' unset.
        if soup.find("div", {"class": "NewTutorBadge___sVfwo"}):
            info['num_ratings'] = 'NEW TUTOR'
        else:
            info['num_ratings'] = 'ERROR'
    try:
        # the taught-languages tab is sometimes an <a>, sometimes an <li>
        teaches = soup.findAll("a", {"class": "item___2xHv5"})
        info['teaches'] = [lang.text.strip() for lang in teaches if 'language' in lang.text]
        if len(info['teaches']) == 0:
            teaches = soup.findAll("li", {"class": "item___2xHv5"})
            info['teaches'] = [lang.text.strip() for lang in teaches if 'language' in lang.text]
        if len(info['teaches']) == 0:
            info['teaches'] = None
    except Exception:
        info['teaches'] = 'ERROR'
    try:
        subjects = soup.findAll("h5", {"class": "bold___1WVGs"})
        if len(subjects):
            info['subjects'] = [s.text.strip().replace('blank', '') for s in subjects]
        else:
            info['subjects'] = None
    except Exception:
        info['subjects'] = 'ERROR'
    try:
        # {spoken language: proficiency level}; the level is in a nested span
        speaks = {}
        for s in soup.findAll("li", {"class": "item___18Wix"}):
            level = s.find('span').text
            lang = s.text.replace(level, '')
            speaks[lang] = level
        info['speaks'] = speaks
    except Exception:
        info['speaks'] = 'ERROR'
    try:
        info['lessons'] = soup.find("span", {'class': 'totalLessons___1m96F'}).text
    except Exception:
        info['lessons'] = 'ERROR'
    try:
        # a single node carries "<amount> <currency>"
        price = soup.find("div", {'class': 'PriceIndicatorPrice___w9jW1'}).text.split()
        info['price'] = price[0]
        info['price_currency'] = price[1]
    except Exception:
        info['price'] = 'ERROR'
    try:
        info['avatar_url'] = soup.find("img", {'class': 'AvatarImg___2dRk2 AvatarImgLoaded___1em79'}).get('src')
    except Exception:
        info['avatar_url'] = 'ERROR'
    return info
def obtain_teachers_info(teachers, crawler):
    """Crawl a list of teachers, throttling between requests.

    NOTE(review): appears to be legacy — ``crawl_teacher`` in this module
    takes 5 arguments (position, date, language, prior_info, soup), so this
    2-argument call would raise TypeError if executed; confirm before use.
    """
    teacher_info = []
    for teacher in teachers:
        teacher_info.append(crawl_teacher(teacher, crawler))
        time.sleep(uniform(2, 5))  # jittered delay to avoid hammering the site
    return teacher_info
def read_teachers(data_path):
    """Load and return the teachers dict stored as JSON at *data_path*."""
    with open(data_path, "r") as fh:
        return json.load(fh)
def infer_gender(df, column_name, prob_bound, img_url_col, images_path):
    """Infer gender from names, falling back to avatar images for rows whose
    name-based probability is at or below *prob_bound*."""
    df = infer_gender_name(df, column_name)
    # Rows the name-based model could not classify confidently enough.
    uncertain = df[df['gender_name_prob'] <= prob_bound]
    download_images(list(uncertain[img_url_col].unique()), images_path,
                    delete_folder=True)
    by_image = (infer_gender_image(images_path)
                .rename(columns={'image': 'avatar_url'})
                .set_index('avatar_url'))
    # Strip the CDN prefix so avatar URLs line up with the image file names.
    df['avatar_url'] = df['avatar_url'].str.replace(
        'https://res.cloudinary.com/verbling/image/fetch/c_fill,f_png,f_auto,g_face,h_150,w_150/', '')
    return df.join(by_image, on='avatar_url', how='left')
def map_nationality(df):
    """Map full nationality names to ISO country codes.

    Unmatched names are appended to ``wrong_codes.csv`` under DATA_PATH for
    later manual curation.

    :param df: DataFrame with a 'nationality' column of full country names
    :return: frame with 'nationality' as a code, the original full name kept
        in 'nationality_full'
    """
    codes = pd.read_csv(country_codes_path)
    df = df.rename(columns={'nationality': 'nationality_full'})
    df = pd.merge(df, codes, how='left', left_on='nationality_full', right_on='Name')
    df = df.drop(columns='Name').rename(columns={'Code': 'nationality'})
    # BUG FIX: os.path.join(DATA_PATH, '/wrong_codes.csv') discarded DATA_PATH
    # entirely — a leading '/' makes the second component absolute.  The log
    # file must be a relative component to land under DATA_PATH.
    df[df['nationality'].isnull()][['nationality', 'nationality_full']].drop_duplicates().to_csv(
        os.path.join(DATA_PATH, 'wrong_codes.csv'), mode='a', header=False)
    return df
def main():
    """Crawl every language directory that has no results yet and write one
    dated CSV of teacher info per language."""
    list_path = os.path.join(DATA_PATH, "preply/teachers_list/")
    html_path = os.path.join(DATA_PATH, "preply/teachers_html/")
    output_path = os.path.join(DATA_PATH, "preply/results/")
    languages = {lang for lang in os.listdir(list_path) if not lang.startswith('.')}
    done = {name for name in os.listdir(os.path.join(DATA_PATH, "preply/results/"))}
    # Only languages without an existing results directory are (re)crawled.
    for language in languages - done:
        df = visit_language(list_path, html_path, language)
        Path(os.path.join(output_path, language)).mkdir(parents=True, exist_ok=True)
        df = map_nationality(df)
        stamp = datetime.datetime.today().strftime('%Y%m%d')
        df.to_csv(output_path + language + "/{}.csv".format(stamp))
if __name__ == "__main__":
main() |
<filename>aphla/gui/elempickdlg.py
#!/usr/bin/env python
from __future__ import print_function, division, absolute_import
"""
:author: <NAME> <<EMAIL>>
A dialog for picking elements.
"""
# Copyright (c) 2011 Lingyun Yang @ BNL.
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import (Qt, SIGNAL)
class ElementPickDlg(QtGui.QDialog):
    """Modal dialog listing elements with checkboxes, plus select-all /
    select-none / invert-selection controls."""
    def __init__(self, allelems, parent=None, **kwargs):
        """Build the dialog.

        :param allelems: sequence of (name, state) pairs; *state* is a
            Qt.CheckState used as the initial checkbox state of the row.
        :param kwargs: 'title' (window title and label text) and
            'extra_cols', a list of (header, values) pairs rendered as
            additional read-only columns.
        """
        super(ElementPickDlg, self).__init__(parent)
        title = kwargs.get("title", 'Choose Elements:')
        extra_cols = kwargs.get("extra_cols", [])
        self.setWindowTitle(title)
        self.elemlst = QtGui.QTreeWidget()
        # enable multi-selection
        self.elemlst.setHeaderLabels(["Name"] + [v[0] for v in extra_cols])
        for i,row in enumerate(allelems):
            name, status = row
            w = QtGui.QTreeWidgetItem()
            # make column 0 user-checkable
            w.setFlags(w.flags() | Qt.ItemIsUserCheckable)
            w.setText(0, name)
            # extra, read-only columns supplied by the caller (one value per row)
            for j,c in enumerate(extra_cols):
                w.setText(j+1, str(c[1][i]))
            w.setCheckState(0, status)
            self.elemlst.addTopLevelItem(w)
        #self.elemlst.setSortingEnabled(True)
        elemLabel = QtGui.QLabel(title)
        buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok|
                                           QtGui.QDialogButtonBox.Cancel)
        btnAll = QtGui.QPushButton("Select All")
        btnNone = QtGui.QPushButton("Select None")
        btnInv = QtGui.QPushButton("Invert Selection")
        hbox = QtGui.QHBoxLayout()
        hbox.addWidget(btnAll)
        hbox.addWidget(btnNone)
        hbox.addWidget(btnInv)
        hbox.addStretch()
        layout = QtGui.QGridLayout()
        layout.addWidget(elemLabel, 0, 0)
        layout.addWidget(self.elemlst, 1, 0)
        layout.addLayout(hbox, 2, 0)
        layout.addWidget(buttonBox, 3, 0)
        self.setLayout(layout)
        # old-style PyQt4 signal wiring
        self.connect(btnAll, SIGNAL("clicked()"), self._sel_all)
        self.connect(btnNone, SIGNAL("clicked()"), self._sel_none)
        self.connect(btnInv, SIGNAL("clicked()"), self._sel_inv)
        self.connect(buttonBox, SIGNAL("accepted()"), self.accept)
        self.connect(buttonBox, SIGNAL("rejected()"), self.reject)
    def _sel_all(self):
        """Check every top-level item."""
        for i in range(self.elemlst.topLevelItemCount()):
            it = self.elemlst.topLevelItem(i)
            it.setCheckState(0, Qt.Checked)
    def _sel_none(self):
        """Uncheck every top-level item."""
        for i in range(self.elemlst.topLevelItemCount()):
            it = self.elemlst.topLevelItem(i)
            it.setCheckState(0, Qt.Unchecked)
    def _sel_inv(self):
        """Toggle checked <-> unchecked; partially-checked items are left
        untouched (no else branch)."""
        for i in range(self.elemlst.topLevelItemCount()):
            it = self.elemlst.topLevelItem(i)
            if it.checkState(0) == Qt.Checked:
                it.setCheckState(0, Qt.Unchecked)
            elif it.checkState(0) == Qt.Unchecked:
                it.setCheckState(0, Qt.Checked)
    def checkStates(self):
        """Return the Qt.CheckState of every item, in display order."""
        #print self.elemlst.selectedItems()
        ret = []
        for i in range(self.elemlst.topLevelItemCount()):
            it = self.elemlst.topLevelItem(i)
            ret.append(it.checkState(0))
        return ret
    def checkedNames(self):
        """Return the names (column 0) of all checked items."""
        ret = []
        for i in range(self.elemlst.topLevelItemCount()):
            it = self.elemlst.topLevelItem(i)
            if it.checkState(0) != Qt.Checked: continue
            ret.append(str(it.data(0, Qt.DisplayRole).toString()))
        return ret
    def checkedIndices(self):
        """Return the row indices of all checked items."""
        return [ i for i in range(self.elemlst.topLevelItemCount())
                if self.elemlst.topLevelItem(i).checkState(0) == Qt.Checked]
if __name__ == "__main__":
    # Manual smoke test: show the dialog with two elements and one extra
    # column ("s"), then print the resulting selection after it closes.
    import sys
    app = QtGui.QApplication(sys.argv)
    form = ElementPickDlg([('elem 1', Qt.Unchecked), ('elem 2', Qt.Checked)],
                          extra_cols=[("s", [0, 1])])
    form.show()
    app.exec_()
    print("selected: ", form.checkedNames())
    print("selected: ", form.checkedIndices())
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from tornado.web import HTTPError
from tornado.gen import coroutine
from .base_handlers import BaseHandler
from qiita_db.study import Study
from qiita_db.artifact import Artifact
from qiita_db.util import get_artifacts_information
from qiita_db.exceptions import QiitaDBUnknownIDError
from qiita_core.util import execute_as_transaction
from qiita_pet.util import EBI_LINKIFIER
from qiita_pet.handlers.util import doi_linkifier, pubmed_linkifier
class PublicHandler(BaseHandler):
    """Render the public view of a study, selected either directly by
    ``study_id`` or indirectly through a public ``artifact_id``."""
    @coroutine
    @execute_as_transaction
    def get(self):
        # FIX: the original had ``self.finish()`` after every ``raise
        # HTTPError`` — unreachable dead code (raise terminates the request);
        # all of those calls have been removed.
        study_id = self.get_argument("study_id", None)
        artifact_id = self.get_argument("artifact_id", None)

        if study_id is None and artifact_id is None:
            raise HTTPError(
                422, reason='You need to specify study_id or artifact_id')
        elif study_id is not None:
            try:
                study = Study(int(study_id))
            except QiitaDBUnknownIDError:
                raise HTTPError(
                    422, reason="Study %s doesn't exist" % study_id)
            artifact_ids = [a.id for a in study.artifacts()
                            if a.visibility == 'public']
        else:
            try:
                artifact = Artifact(int(artifact_id))
            except QiitaDBUnknownIDError:
                raise HTTPError(
                    422, reason="Artifact %s doesn't exist" % artifact_id)
            if artifact.visibility != 'public':
                raise HTTPError(
                    422, reason="Artifact %s is not public" % artifact_id)
            study = artifact.study
            if study is None:
                raise HTTPError(422, reason="Artifact %s doesn't belong to "
                                "a study" % artifact_id)
            artifact_ids = [artifact.id]

        if study.status != 'public':
            raise HTTPError(
                422, reason='Not a public study')

        study_info = study.info
        study_info['study_id'] = study.id
        study_info['study_title'] = study.title
        study_info['shared_with'] = [s.id for s in study.shared_with]
        study_info['status'] = study.status
        study_info['ebi_study_accession'] = study.ebi_study_accession
        study_info['ebi_submission_status'] = study.ebi_submission_status

        # Clean up StudyPerson objects to string for display
        email = '<a href="mailto:{email}">{name} ({affiliation})</a>'
        pi = study.info['principal_investigator']
        study_info['principal_investigator'] = email.format(**{
            'name': pi.name,
            'email': pi.email,
            'affiliation': pi.affiliation})
        study_info['owner'] = study.owner.id

        # Add needed info that is not part of the initial info pull
        study_info['publications'] = []
        for pub, is_doi in study.publications:
            # NOTE(review): the branch looks inverted (is_doi -> pubmed
            # linkifier); preserved as-is — confirm against the linkifiers.
            if is_doi:
                study_info['publications'].append(pubmed_linkifier([pub]))
            else:
                study_info['publications'].append(doi_linkifier([pub]))
        study_info['publications'] = ', '.join(study_info['publications'])

        if study_info['ebi_study_accession']:
            links = ''.join([EBI_LINKIFIER.format(a) for a in study_info[
                'ebi_study_accession'].split(',')])
            study_info['ebi_study_accession'] = '%s (%s)' % (
                links, study_info['ebi_submission_status'])

        self.render("public.html", study_info=study_info,
                    artifacts_info=get_artifacts_information(
                        artifact_ids, False))
|
<gh_stars>0
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from django.template import loader
import json
import logging
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.db.models import Max
from django.db import transaction
from website.models import Browser_setting
logger = logging.getLogger('mylogger')
# 系统配置-浏览器设置
def browser_setting(request):
    """Render the browser-settings page (system configuration)."""
    tpl = loader.get_template('website/pages/browserSetting.html')
    return HttpResponse(tpl.render({}, request))
# 获取系统配置-浏览器配置表,列表数据
def get_browser_settings(request):
    """Return one page of browser settings as JSON grid data.

    GET params: 'page' (1-based page number) and 'rows' (page size).
    A non-integer page falls back to the first page; an out-of-range page
    falls back to the last page.

    :return: HttpResponse whose body is {"total": <count>, "rows": [...]}
    """
    griddata = {"total": 0, "rows": []}
    # All settings ordered by descending display order.
    envs = Browser_setting.objects.all().order_by('-order').values()
    griddata["total"] = len(envs)
    page_num = request.GET.get('page')   # requested page number
    rows_num = request.GET.get('rows')   # requested page size
    paginator = Paginator(envs, rows_num)
    try:
        page = paginator.page(page_num)
    except PageNotAnInteger as e:
        # FIX: Logger.warn() is a deprecated alias; use warning().
        logger.warning('%s' % e)
        page = paginator.page(1)
    except EmptyPage as e:
        logger.warning('%s' % e)
        page = paginator.page(paginator.num_pages)
    # Materialize the page rows (already plain dicts thanks to .values()).
    griddata["rows"] = list(page.object_list)
    return HttpResponse(json.dumps(griddata))
# 新增浏览器配置
def add_browser_setting(request):
    """Create a browser setting, appended at the end or inserted at a given
    order slot (shifting later rows down by one).

    POST params: 'browser' (name, required, unique) and 'order'
    ('' means append).

    :return: 'success' or a human-readable error message.
    """
    try:
        params = request.POST
        browser_name = params['browser']
        order = params['order']
        if not browser_name:
            return HttpResponse('浏览器名称不能为空')
        if_name_exists = Browser_setting.objects.filter(browser=browser_name).exists()
        if if_name_exists:
            # FIX: the message said 环境名称 (environment name); this view
            # validates the *browser* name — report 浏览器名称, consistent
            # with edit_browser_setting.
            return HttpResponse('浏览器名称(%s)已存在' % browser_name)
        if order == '':  # empty order means append at the end
            all_objects = Browser_setting.objects.all()
            if all_objects.exists():
                max_order = all_objects.aggregate(Max('order'))['order__max']
                order = max_order + 1
            else:
                order = 1
            obj = Browser_setting(browser=browser_name, order=order)
            obj.save()
        else:  # insert: shift every row at/after the slot down by one
            logger.info('即将插入新记录,正在调整记录的顺序')
            all_objects = Browser_setting.objects.filter(order__gte=order)
            try:
                # Shifts and the insert succeed or fail together.
                with transaction.atomic():
                    for item in all_objects:
                        item.order = item.order + 1
                        item.save()
                    obj = Browser_setting(browser=browser_name, order=order)
                    obj.save()
            except Exception as e:
                logger.error('%s' % e)
                return HttpResponse('%s' % e)
        return HttpResponse('success')
    except Exception as e:
        return HttpResponse('%s' % e)
# 系统设置-浏览器配置,修改浏览器名称
def edit_browser_setting(request):
    """Rename an existing browser setting; the new name must stay unique."""
    try:
        params = request.POST
        setting_id = params['id']
        new_name = params['browser']
        if not new_name:
            return HttpResponse('浏览器名称不能为空')
        # Another record (excluding this one) already using the name?
        duplicated = (Browser_setting.objects
                      .filter(browser=new_name)
                      .exclude(id=setting_id)
                      .exists())
        if duplicated:
            return HttpResponse('浏览器名称(%s)已存在' % new_name)
        record = Browser_setting.objects.get(id=setting_id)
        record.browser = new_name
        record.save()
        return HttpResponse('success')
    except Exception as e:
        logger.error('%s' % e)
        return HttpResponse('%s' % e)
|
<gh_stars>10-100
import json
from rest_framework import authentication
from django.http import *
from rest_framework.authentication import *
from django.views.decorators.csrf import csrf_exempt
from rest_framework.decorators import *
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from api.serializers import RequestSerializer
from app.models import CustomUser, Message, Vehicle, VehicleSharing, Request, Follow, Profile
from broadcast.models import *
from .serializers import UserSerializer,BroadcastSerializer,MessageSerializer,VehicleSerializer, \
VehicleSharingSerializer, VehicleSerializerAdd, VehicleSharingSerializerAdd
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from rest_framework.authtoken.models import Token
class UserList(APIView):
    """List every registered user."""
    def get(self, request):
        serializer = UserSerializer(CustomUser.objects.all(), many=True)
        return Response(serializer.data)
class UserDetail(APIView):
    """Return the authenticated user's own profile as JSON."""
    authentication_classes = (BasicAuthentication, TokenAuthentication)
    permission_classes = [IsAuthenticated]

    def get(self, request):
        # FIX: removed the leftover debug ``print(request.user)``.
        user = CustomUser.objects.get(username=request.user.username)
        serializer = UserSerializer(user)
        return JsonResponse(serializer.data)
class BroadcastList(APIView):
    """List all broadcasts, resolved to their concrete subclasses."""
    def get(self, request):
        items = Broadcast.objects.select_subclasses()
        return Response(BroadcastSerializer(items, many=True).data)
class MessageList(APIView):
    """List every message."""
    def get(self, request):
        items = Message.objects.all()
        return Response(MessageSerializer(items, many=True).data)
class Login(APIView):
    """Token login: POST a 'json' field containing 'email' and 'password'."""
    def post(self, request):
        payload = json.loads(str(request.POST['json']))
        user = authenticate(email=payload['email'], password=payload['password'])
        if user is not None:
            # get_or_create returns (token, created); only the token is used
            token = Token.objects.get_or_create(user=user)
            print(token[0])
            login(request, user)
            body = {
                'message': 'valid',
                'token': str(token[0])
            }
        else:
            body = {
                'message': 'invalid'
            }
        return JsonResponse(body)
@api_view(['POST'])
@csrf_exempt
def process_login(request):
    """Token login by username/password, mirroring ``Login.post``."""
    payload = json.loads(str(request.POST['json']))
    user = authenticate(username=payload['username'], password=payload['password'])
    if user is not None:
        # get_or_create returns (token, created); only the token is used
        token = Token.objects.get_or_create(user=user)
        print(token[0])
        login(request, user)
        body = {
            'message': 'valid',
            'token': str(token[0])
        }
    else:
        body = {
            'message': 'invalid'
        }
    return JsonResponse(body)
class DashStuff(APIView):
    """Dashboard counters for the authenticated driver."""
    authentication_classes = (BasicAuthentication, TokenAuthentication)
    permission_classes = [IsAuthenticated]

    def get(self, request):
        # FIX: the original fell off the end (returned None -> server error)
        # for any non-driver user; respond explicitly instead.
        if request.user.user_type != 'Driver':
            return JsonResponse(
                {'detail': 'dashboard is only available to drivers'},
                status=403)
        data = {
            'vehicles': Vehicle.objects.filter(user=request.user).count(),
            'ride_requests': Request.objects.filter(ride__user=request.user).count(),
            'shared_rides': VehicleSharing.objects.filter(user=request.user).count(),
            'followers': Follow.objects.filter(followee=request.user).count(),
        }
        return JsonResponse(data)
@api_view(['POST'])
@authentication_classes((BasicAuthentication, TokenAuthentication))
@permission_classes((IsAuthenticated,))
@csrf_exempt
def addride(request):
    """Create a vehicle for the authenticated user from a JSON 'data' field."""
    params = json.loads(request.POST['data'])
    params['user'] = request.user.id  # the vehicle always belongs to the caller
    serializer = VehicleSerializerAdd(data=params)
    if serializer.is_valid():
        serializer.save()
    else:
        # FIX: errors were printed in the success branch (where they are
        # always empty); log them only when validation fails.  The dead
        # ``result`` local and debug print(request.user) were removed.
        print(serializer.errors)
    # NOTE(review): the response is always str(serializer.error_messages),
    # even on success — looks unintended, but kept to avoid breaking the
    # client; confirm the expected contract.
    return HttpResponse(str(serializer.error_messages))
class UserVehicles(APIView):
    """List and delete the authenticated user's vehicles."""
    authentication_classes = (SessionAuthentication, BasicAuthentication, TokenAuthentication)
    permission_classes = [IsAuthenticated]

    def get(self, request):
        vehicles = Vehicle.objects.filter(user=request.user)
        return Response(VehicleSerializer(vehicles, many=True).data)

    def delete(self, request):
        """Delete one vehicle by id (JSON 'data' payload with an 'id' key).

        Response body: 1 on success, 0 when the vehicle does not exist,
        -1 when it belongs to another user.
        """
        params = json.loads(request.data['data'])
        # FIX: .get(pk=...) raises DoesNotExist (-> 500) for unknown ids,
        # which made the intended result-0 branch unreachable; .first()
        # returns None instead.
        vehicle = Vehicle.objects.filter(pk=params['id']).first()
        result = 0
        if vehicle:
            if vehicle.user == request.user:
                vehicle.delete()
                result = 1
            else:
                result = -1
        return HttpResponse(result)
class UserSharedVehicles(APIView):
    """List and create the authenticated user's shared rides."""
    authentication_classes = (SessionAuthentication, BasicAuthentication, TokenAuthentication)
    permission_classes = [IsAuthenticated]

    def get(self, request):
        shares = VehicleSharing.objects.filter(user=request.user).order_by('date').reverse()
        return Response(VehicleSharingSerializer(shares, many=True).data)

    def post(self, request):
        """Create a sharing entry from the JSON 'data' payload.

        Always responds 'OK'; validation problems are only logged.
        """
        params = json.loads(request.POST['data'])
        serializer = VehicleSharingSerializerAdd(data=params)
        if serializer.is_valid():
            serializer.save()
        else:
            # FIX: errors were printed in both branches (always empty on
            # success); log only on failure.  Debug print(params) removed.
            print(serializer.errors)
        return HttpResponse('OK')
class Requests(APIView):
    """Ride requests addressed to the authenticated user's rides, newest
    first; optionally filtered to one ride via the GET 'id' parameter."""
    authentication_classes = (SessionAuthentication, BasicAuthentication, TokenAuthentication)
    permission_classes = [IsAuthenticated]

    def get(self, request):
        # FIX: removed the dead ``user = request.user`` (immediately
        # overwritten on the next line) and the debug print(request.GET).
        user = CustomUser.objects.get(pk=request.user.id)
        if int(request.GET['id']) > 0:
            # filter to a single shared ride
            ride = VehicleSharing.objects.get(pk=request.GET['id'])
            pass_requests = Request.objects.filter(ride__user=user, ride=ride).order_by('reg_date').reverse()
        else:
            pass_requests = Request.objects.filter(ride__user=user).order_by('reg_date').reverse()
        serializer = RequestSerializer(pass_requests, many=True, context={'request': request})
        return Response(serializer.data)
#
# def process_login(request):
# """
# Default handler to login user
# :param request: HttpRequest
# """
# code = request.GET.get('code', '')
# json_header = {'content-type': 'application/json'}
# token_url = 'https://%s/oauth/token' % settings.AUTH0_DOMAIN
#
# token_payload = {
# 'client_id': settings.AUTH0_CLIENT_ID,
# 'client_secret': settings.AUTH0_SECRET,
# 'redirect_uri': settings.AUTH0_CALLBACK_URL,
# 'code': code,
# 'grant_type': 'authorization_code'
# }
#
# token_info = requests.post(token_url,
# data=json.dumps(token_payload),
# headers=json_header).json()
#
# url = 'https://%s/userinfo?access_token=%s'
# user_url = url % (settings.AUTH0_DOMAIN, token_info['access_token'])
# user_info = requests.get(user_url).json()
#
# # We're saving all user information into the session
# request.session['profile'] = user_info
# json_data = user_info
#
# try:
# user = authenticate(**user_info)
# except:
# return redirect('app:login')
#
#
# if user.last_login is None:
# createuser(user,json_data)
#
# if user:
# login(request, user)
#
# return redirect(settings.AUTH0_SUCCESS_URL)
#
# return HttpResponse(status=400)
#
#
# def createuser(user,data):
# # person = CustomUser.objects.get(user=user)
# print('this will handle a new login')
# print(user.last_login)
#
# the_user = CustomUser.objects.get(pk=user.id)
# try:
# the_user.full_name = data['name']
# the_user.short_name = data['given_name']
# the_user.user_type = 'Passenger'
# the_user.sex = str(data['gender']).capitalize()
# the_user.address = data['location']['name']
#
# except:
# pass
# the_user.save()
#
# url = 'http://localhost:8000/api/upload/'
# image = requests.get(data['picture_large']).content
# files = {'file': image}
# payload = {'name':data['context']['id']}
# r = requests.post(url,files=files,data=payload)
#
# profile = Profile()
# profile.user = user
# profile.picture.name = r.content
#
# profile.save()
#
#
# from django.views.decorators.csrf import csrf_exempt
# @csrf_exempt
# def upload_file(request):
# if request.method == 'POST':
# file = request.FILES['file']
# name = request.POST['name']
# destination = open(settings.BASE_DIR+settings.MEDIA_URL+name, 'wb+')
# for chunk in file.chunks():
# destination.write(chunk)
#
#
# return HttpResponse(name,content_type="text/plain")
|
import re
import tempfile
import pandas as pd
import camelot
import pandas as pd
import requests
import us
import textract
from can_tools.scrapers.official.base import StateDashboard
from can_tools.scrapers import variables, CMU
from typing import Any, Dict
class FloridaCountyVaccine(StateDashboard):
    """Florida DOH county-level COVID-19 vaccination counts, parsed out of
    the state's "latest" vaccine-report PDF with camelot."""
    has_location = False  # rows carry county names, not FIPS codes
    source = "https://floridahealthcovid19.gov/#latest-stats"
    location_type = "county"
    state_fips = int(us.states.lookup("Florida").fips)
    fetch_url = "http://ww11.doh.state.fl.us/comm/_partners/covid19_report_archive/vaccine/vaccine_report_latest.pdf"
    source_name = "Florida Department of Health"

    # report column -> CMU variable definition
    variables = {
        "total_people_vaccinated": variables.INITIATING_VACCINATIONS_ALL,
        "series_complete_total": variables.FULLY_VACCINATED_ALL,
    }

    def fetch(self) -> camelot.core.TableList:
        """Extract every table from page 2 onward of the report PDF."""
        return camelot.read_pdf(self.fetch_url, pages="2-end", flavor="stream")

    def normalize(self, data: Any) -> pd.DataFrame:
        """Reshape the raw camelot tables into the tidy variable format.

        :param data: camelot TableList as returned by :meth:`fetch`
        :return: tidy DataFrame of county-level vaccination variables
        """
        # locate where the table starts and parse data
        # loop is needed for if table overflows onto second page
        dfs = []
        for chunk in data:
            df = chunk.df
            # the header row is the one whose first cell is the column title
            header_loc = df.index[df.iloc[:, 0] == "County of residence"].values[0]
            # skip the header row plus its sub-header line (hence +2)
            df = df.iloc[header_loc + 2 :, :].reset_index(drop=True)
            dfs.append(df)

        # combine and set colnames
        df = pd.concat(dfs)
        df.columns = [
            "location_name",
            "first_dose_new",
            "series_complete_new",
            "total_people_vaccinated_new",
            "first_dose_total",
            "series_complete_total",
            "total_people_vaccinated",
        ]

        df = self._rename_or_add_date_and_location(
            data=df,
            location_name_column="location_name",
            location_names_to_drop=["Unknown", "Out-Of-State", "Total"],
            location_names_to_replace={"Desoto": "DeSoto", "Dade": "Miami-Dade"},
            date=self._get_date(),
        )
        out = self._reshape_variables(data=df, variable_map=self.variables)
        return out

    def _get_date(self):
        """
        retrieve the date that the PDF was last updated minus one day, return as date.
        if connection to source cannot be made, use yesterday's date.
        """
        res = requests.get(self.fetch_url)
        # if the connection fails, use yesterday's date as date
        if not res.ok:
            dt = self._retrieve_dtm1d("US/Eastern")
        else:
            dt = pd.to_datetime(
                res.headers["Last-Modified"], format="%a, %d %b %Y %H:%M:%S GMT"
            ) - pd.Timedelta(days=1)
        return dt.date()
class FloridaCountyVaccineDemographics(FloridaCountyVaccine):
    """Demographic (age/race/sex/ethnicity) vaccination breakdowns from the
    per-county section of Florida DOH's vaccine PDF.

    The per-county tables are positional: each demographic sub-table is
    located by its header label and then sliced by a fixed row count, so the
    parsing is sensitive to report-layout changes.
    """
    variables = {
        "series_complete_total": variables.FULLY_VACCINATED_ALL,
        "total_people_vaccinated_total": variables.INITIATING_VACCINATIONS_ALL,
    }
    fetch_url_for_counties = (
        "http://ww11.doh.state.fl.us/comm/_partners/covid19_report_archive/vaccine-county"
        "/vaccine_county_report_latest.pdf"
    )

    def fetch(self) -> Dict[str, camelot.core.TableList]:
        """Download the county report and return its tables plus the county
        name of each page (extracted from the page headers, which camelot's
        table extraction does not capture)."""
        county_names = []
        results = requests.get(self.fetch_url_for_counties)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tmp_file = "{}/vaccine_county_report_latest.pdf".format(tmp_dir)
            with open(tmp_file, "wb") as f:
                f.write(results.content)
            # pull the raw page text to recover the per-page county headers
            pdf_pages_headers = textract.process(tmp_file)
            county_names = re.findall(
                r"COVID-19: (?P<countyName>.*?) vaccine summary",
                pdf_pages_headers.decode("utf-8"),
            )
            county_names = [x.replace(" County", "") for x in county_names]
        county_demographics_data = camelot.read_pdf(
            self.fetch_url_for_counties, pages="1-end", flavor="stream", row_tol=9
        )
        return {
            "county_demographics_data": county_demographics_data,
            "county_names": county_names,
            "headers": pdf_pages_headers,
        }

    def normalize(self, data):
        """Slice each county page into the four demographic sub-tables and
        reshape everything into tidy long format.

        Tables and county names are zipped pairwise — one camelot table per
        PDF page, one county header per page.
        """
        dfs = []
        if "county_demographics_data" in data:
            for dataset, name in zip(
                data["county_demographics_data"], data["county_names"]
            ):
                df = dataset.df
                dfs.append(self._truncate_demographics_age_data(df, name))
                dfs.append(self._truncate_demographics_race_data(df, name))
                dfs.append(self._truncate_demographics_sex_data(df, name))
                dfs.append(self._truncate_demographics_etn_data(df, name))
        out = (
            pd.concat(dfs, axis=0, ignore_index=True)
            .dropna()
            .drop(["first_dose_total"], axis="columns")
            .melt(
                id_vars=["location_name", "age", "race", "ethnicity", "sex"],
            )
            .pipe(
                self.extract_CMU,
                skip_columns=["age", "race", "ethnicity", "sex"],
                cmu=self.variables,
            )
            .assign(
                dt=self._get_date(),
                location_type="county",
                # counts appear as strings with thousands separators
                vintage=self._retrieve_vintage(),
                value=lambda x: x["value"].str.replace(",", "").astype(int),
            )
            .replace({"location_name": {"Desoto": "DeSoto", "Dade": "Miami-Dade"}})
        )
        # the statewide summary page is labeled "Florida"
        out.loc[out["location_name"] == "Florida", "location_type"] = "state"
        return out

    def _truncate_demographics_age_data(self, data, county_name):
        """Slice out the age sub-table (9 rows after the 'Age group' header)."""
        out = data.copy()
        out.columns = [
            "location_name",
            "age",
            "first_dose_total",
            "series_complete_total",
            "total_people_vaccinated_total",
        ]
        out.loc[:, "location_name"] = county_name
        startIndex = out.query("age == 'Age group'").index[0] + 1
        result = out[startIndex : startIndex + 9]
        result["race"] = result["ethnicity"] = result["sex"] = "all"
        # report labels -> canonical age-bucket names
        age_replace = {
            "12-14 years": "12-14",
            "15-24 years": "15-24",
            "25-34 years": "25-34",
            "35-44 years": "35-44",
            "45-54 years": "45-54",
            "55-64 years": "55-64",
            "65-74 years": "65-74",
            "75-84 years": "75-84",
            "85+ years": "85_plus",
            "Age Unknown": "unknown",
        }
        result["age"] = result["age"].map(age_replace)
        return result

    def _truncate_demographics_race_data(self, data, county_name):
        """Slice out the race sub-table (6 rows after the 'Race' header)."""
        out = data.copy()
        out.columns = [
            "location_name",
            "race",
            "first_dose_total",
            "series_complete_total",
            "total_people_vaccinated_total",
        ]
        out.loc[:, "location_name"] = county_name
        startIndex = out.query("race == 'Race'").index[0] + 1
        result = out[startIndex : startIndex + 6]
        # drop blank spacer rows camelot sometimes emits
        result.drop(result[result.race == ""].index, inplace=True)
        result["age"] = result["ethnicity"] = result["sex"] = "all"
        race_replace = {
            "American Indian/Alaskan": "ai_an",
            "Unknown": "unknown",
            "Black": "black",
            "Other": "other",
            "White": "white",
        }
        result["race"] = result["race"].map(race_replace)
        return result

    def _truncate_demographics_sex_data(self, data, county_name):
        """Slice out the sex sub-table (3 rows after the 'Gender' header)."""
        out = data.copy()
        out.columns = [
            "location_name",
            "sex",
            "first_dose_total",
            "series_complete_total",
            "total_people_vaccinated_total",
        ]
        out.loc[:, "location_name"] = county_name
        startIndex = out.query("sex == 'Gender'").index[0] + 1
        result = out[startIndex : startIndex + 3]
        result["age"] = result["ethnicity"] = result["race"] = "all"
        gender_replace = {
            "Female": "female",
            "Male": "male",
            "Unknown": "unknown",
        }
        result["sex"] = result["sex"].map(gender_replace)
        return result

    def _truncate_demographics_etn_data(self, data, county_name):
        """Slice out the ethnicity sub-table (3 rows after the 'Ethnicity'
        header)."""
        out = data.copy()
        out.columns = [
            "location_name",
            "ethnicity",
            "first_dose_total",
            "series_complete_total",
            "total_people_vaccinated_total",
        ]
        out.loc[:, "location_name"] = county_name
        startIndex = out.query("ethnicity == 'Ethnicity'").index[0] + 1
        result = out[startIndex : startIndex + 3]
        result["age"] = result["sex"] = result["race"] = "all"
        ethnicity_replace = {
            "Hispanic": "hispanic",
            "Non-Hispanic": "non-hispanic",
            "Unknown": "unknown",
        }
        result["ethnicity"] = result["ethnicity"].map(ethnicity_replace)
        return result
|
<filename>src/Modules/Computer/Mqtt/mqtt_xml.py
"""
@name: PyHouse/src/Modules/Computer/Mqtt/mqtt_xml.py
@author: <NAME>
@contact: <EMAIL>
@copyright: (c) 2015-2016 by <NAME>
@license: MIT License
@note: Created on Jun 4, 2015
@Summary:
"""
# Import system type stuff
import xml.etree.ElementTree as ET
# Import PyMh files
from Modules.Core.data_objects import MqttBrokerData
from Modules.Computer import logging_pyh as Logger
from Modules.Utilities.xml_tools import PutGetXML, XmlConfigTools
LOG = Logger.getLogger('PyHouse.Mqtt_Xml ')
DIVISION = 'ComputerDivision'
SECTION = 'MqttSection'
BROKER = 'Broker'
class Xml(object):
    """Read/write the MqttSection (broker list) of the PyHouse XML config."""

    @staticmethod
    def _read_one_broker(p_xml):
        """
        @param p_xml: XML information for one Broker.
        @return: a MqttBrokerData object filled in with data from the XML passed in
        """
        l_obj = MqttBrokerData()
        try:
            XmlConfigTools.read_base_object_xml(l_obj, p_xml)
            l_obj.BrokerAddress = PutGetXML.get_text_from_xml(p_xml, 'BrokerAddress')
            l_obj.BrokerPort = PutGetXML.get_int_from_xml(p_xml, 'BrokerPort')
            l_obj.UserName = PutGetXML.get_text_from_xml(p_xml, 'BrokerUser')
            l_obj.Password = PutGetXML.get_text_from_xml(p_xml, 'BrokerPassword')
        except Exception:
            # best-effort: a partially filled broker object is still returned
            pass
        return l_obj

    @staticmethod
    def read_mqtt_xml(p_pyhouse_obj):
        """Read all the broker information.
        Allow for several brokers.
        @return: a dict of broker objects keys = 0, 1, 2...
        """
        l_dict = {}
        l_count = 0
        try:
            # FIX: ``== None`` replaced with the idiomatic ``is None``.
            l_section = p_pyhouse_obj.Xml.XmlRoot.find(DIVISION)
            if l_section is None:
                return l_dict
            l_section = l_section.find(SECTION)
            if l_section is None:
                return l_dict
        except AttributeError as e_err:
            LOG.error('Reading MQTT Configuration information - {}'.format(e_err))
            l_section = None
        try:
            for l_xml in l_section.iterfind(BROKER):
                l_broker = Xml._read_one_broker(l_xml)
                l_broker.Key = l_count
                l_dict[l_count] = l_broker
                l_count += 1
        except AttributeError as e_err:
            LOG.error('Mqtt Errors: {}'.format(e_err))
        return l_dict

    @staticmethod
    def _write_one_broker(p_mqtt):
        """
        @param p_obj: is one broker object.
        @return: the XML for one Broker System
        """
        l_entry = XmlConfigTools.write_base_object_xml('Broker', p_mqtt)
        # FIX: BrokerAddress is text (the read side uses get_text_from_xml);
        # it was written with put_int_element.
        PutGetXML().put_text_element(l_entry, 'BrokerAddress', p_mqtt.BrokerAddress)
        PutGetXML().put_int_element(l_entry, 'BrokerPort', p_mqtt.BrokerPort)
        PutGetXML().put_text_element(l_entry, 'BrokerUser', p_mqtt.UserName)
        PutGetXML().put_text_element(l_entry, 'BrokerPassword', p_mqtt.Password)
        return l_entry

    def write_mqtt_xml(self, p_obj):
        """
        @param p_obj: is the Mqtt sub-object in p_pyhouse_obj
        @return: XML for the MqttSection
        """
        l_count = 0
        l_xml = ET.Element(SECTION)
        if p_obj == {}:
            LOG.info('No MQTT config to write.')
            return l_xml
        try:
            # FIX: .values() instead of the Py2-only .itervalues()
            for l_obj in p_obj.values():
                l_sys = Xml._write_one_broker(l_obj)
                l_xml.append(l_sys)
                l_count += 1
        except AttributeError as e_err:
            LOG.error('Writing MQTT XML {}'.format(e_err))
        # FIX: this log line (and a second return) sat after an unconditional
        # ``return l_xml`` and never executed; log before the single return.
        LOG.info('Wrote {} Mqtt XML entries'.format(l_count))
        return l_xml
# ## END DBK
|
# -*- coding: utf-8 -*-
# 1 2 3 4 5 6 7 |
# 23456789012345678901234567890123456789012345678901234567890123456789012
#
# sim-C-on-Cu-w-Img.py
# jrm 2015-07-08 - use mc3 to simulate C on Cu
import sys
sys.packageManager.makeJavaPackage("gov.nist.microanalysis.NISTMonte.Gen3", "CharacteristicXRayGeneration3, BremsstrahlungXRayGeneration3, FluorescenceXRayGeneration3, XRayTransport3", None)
import gov.nist.microanalysis.EPQLibrary as epq
import gov.nist.microanalysis.EPQLibrary.Detector as epd
import gov.nist.microanalysis.NISTMonte as nm
import gov.nist.microanalysis.NISTMonte.Gen3 as nm3
import gov.nist.microanalysis.EPQTools as et
import dtsa2.mcSimulate3 as mc3
import dtsa2.jmGen as jmg
import java.util as jutil
import java.io as jio
import java.nio.charset as cs
import os
import shutil
def ensureDir(d):
    """ensureDir(d)
    Check if the directory, d, exists, and if not create it."""
    if os.path.exists(d):
        return
    os.makedirs(d)
tNmC = 25.0 # nm of C on Cu
nTraj = 10000 # num Traj to run per pt 250 for a long run
charF = True # include characteristic fluorescence
bremF = True # include continuum fluorescence
pc = 2.5 # probe current, nA
lt = 100.0 # live time, sec
e0 = 7.0 # beam energy, keV
imgSize = 512 # pixel size for images
imgSzUm = 1.0 # image size in microns
vmrlEl = 40 # number of el for VMRL
dose = pc * lt # electron dose, nA sec
# Resolve project directories relative to $GIT_HOME
gitDir = os.environ['GIT_HOME']
relPrj = "/dtsa2Scripts/sim-C-on-Cu-w-Img"
datDir = gitDir + relPrj + "/msa"
csvDir = gitDir + relPrj + "/csv"
simDir = gitDir + relPrj + "/"
ensureDir(simDir)
ensureDir(datDir)
wd = gitDir + relPrj + "/py"
os.chdir(wd)
pyrDir = wd + "/sim-C-on-Cu-w-Img Results"
# findDetector, DataManager and display are globals injected by the
# DTSA-II Jython scripting environment (this script is Jython/Python 2).
det = findDetector("Oxford p4 05eV 2K")
print(det)
# start clean
DataManager.clearSpectrumList()
# create the materials (densities in g/cc)
c = epq.Material(epq.Composition([epq.Element.C], [1.0],"C"), epq.ToSI.gPerCC(2.25))
cu = epq.Material(epq.Composition([epq.Element.Cu],[1.0],"Cu"), epq.ToSI.gPerCC(8.96))
# define the desired transitions
xrts=mc3.suggestTransitions("CCu")
# set up the extra parameters
xtraParams={}
xtraParams.update(mc3.configureXRayAccumulators(xrts, charAccum=charF, charFluorAccum=charF, bremFluorAccum=bremF))
# note that the image size on the specimen is in meters...
xtraParams.update(mc3.configureEmissionImages(xrts, imgSzUm*1.0e-6, imgSize))
xtraParams.update(mc3.configurePhiRhoZ(imgSzUm*1.0e-6))
xtraParams.update(mc3.configureTrajectoryImage(imgSzUm*1.0e-6, imgSize))
xtraParams.update(mc3.configureVRML(nElectrons = vmrlEl))
xtraParams.update(mc3.configureOutput(simDir))
print(xtraParams)
# layer stack: tNmC nm of C on a 10 um Cu substrate (thicknesses in meters)
sLay = [[c, tNmC/1.0e9], [cu, 10.0/1.0e6]]
sSpc = mc3.multiFilm(sLay, det, e0=e0, withPoisson=True, nTraj=nTraj, dose=dose, sf=charF, bf=bremF, xtraParams=xtraParams)
display(sSpc)
# clean up cruft
shutil.rmtree(pyrDir)
print "Done!"
|
"""
Module that extracts product values from pyccd results.
All functions assume the proleptic Gregorian ordinal,
where January 1 of year 1 has ordinal 1.
"""
import numpy as np
from datetime import date
def lastchange(pyccd_result, ordinal):
    """
    Number of days since last detected change.
    Defaults to 0 in cases where the given ordinal day to calculate from
    is either < 1 or no change was detected before it.
    Args:
        pyccd_result: dict return from pyccd
        ordinal: ordinal day to calculate from
    Returns:
        int
    """
    if ordinal <= 0:
        return 0
    # Break days of segments where pyccd is certain a change occurred.
    breaks = [seg['break_day']
              for seg in pyccd_result['change_models']
              if seg['change_probability'] == 1]
    deltas = [ordinal - b for b in breaks if ordinal - b > 0]
    return min(deltas) if deltas else 0
def changemag(pyccd_result, ordinal):
    """
    The magnitude of change if it occurred in the same calendar year.
    Defaults to 0 in cases where the given ordinal day to calculate from
    is either < 1 or no change was detected in the same year.
    Args:
        pyccd_result: dict return from pyccd
        ordinal: ordinal day to calculate from
    Returns:
        int
    """
    if ordinal <= 0:
        return 0
    query_date = date.fromordinal(ordinal)
    for seg in pyccd_result['change_models']:
        break_date = date.fromordinal(seg['break_day'])
        if query_date.year == break_date.year and seg['change_probability'] == 1:
            # NOTE(review): magnitudes are read from the top-level result
            # dict, not from the matching segment - confirm against the
            # pyccd result schema.
            mags = [pyccd_result[band]['magnitude']
                    for band in ('nir', 'swir1', 'swir2', 'green', 'red')]
            return np.linalg.norm(mags)
    return 0
def changedate(pyccd_result, ordinal):
    """
    The day of year of change if it occurred in the same calendar year.
    Defaults to 0 in cases where the given ordinal day to calculate from
    is either < 1 or no change was detected in the same year.
    Args:
        pyccd_result: dict return from pyccd
        ordinal: ordinal day to calculate from
    Returns:
        int
    """
    if ordinal <= 0:
        return 0
    query_year = date.fromordinal(ordinal).year
    for seg in pyccd_result['change_models']:
        break_date = date.fromordinal(seg['break_day'])
        if break_date.year == query_year and seg['change_probability'] == 1:
            # First qualifying break wins; report its day-of-year.
            return break_date.timetuple().tm_yday
    return 0
def seglength(pyccd_result, ordinal, series_start):
    """
    The number of days since the beginning of the segment that the ordinal
    intersects with. The days between and around segments identified through
    the change detection process comprise valid segments for this. This why we
    need to know when the actual start ordinal, as the segments identified
    through change detection might not include it.
    Defaults to 0 in cases where the given ordinal day to calculate from
    is either < 1 or is before the start of the time series.
    Args:
        pyccd_result: dict return from pyccd
        ordinal: ordinal day to calculate from
        series_start: ordinal day when the change detection was started from
    Returns:
        int
    """
    if ordinal <= 0:
        return 0
    # Candidate boundaries: series start plus each segment's start/end.
    boundaries = [series_start]
    for seg in pyccd_result['change_models']:
        boundaries.extend((seg['start_day'], seg['end_day']))
    positive = [ordinal - d for d in boundaries if ordinal - d > 0]
    return min(positive) if positive else 0
def curveqa(pyccd_result, ordinal):
    """
    Curve fit information for the segment in which the ordinal intersects with.
    Defaults to 0 in cases where the given ordinal day to calculate from
    is either < 1 or it does not intersect with a segment identified in
    pyccd.
    Args:
        pyccd_result: dict return from pyccd
        ordinal: ordinal day to calculate from
    Returns:
        int
    """
    if ordinal <= 0:
        return 0
    hit = next((seg for seg in pyccd_result['change_models']
                if seg['start_day'] <= ordinal <= seg['end_day']), None)
    return 0 if hit is None else hit['curve_qa']
import cv2
import numpy as np
def find_lane_pixels(image):
    """Locate left/right lane-line pixels with a sliding-window search.

    Args:
        image: binary warped road image (2-D array; nonzero pixels are
            lane-paint candidates).

    Returns:
        Tuple (leftx, lefty, rightx, righty, out_img): pixel coordinates
        assigned to each lane line, plus an RGB visualization image with
        the green search windows drawn on it.
    """
    # Column histogram of the bottom half: its two peaks are the lane bases.
    histogram = np.sum(image[image.shape[0] // 2:, :], axis=0)
    out_img = np.dstack((image, image, image)) * 255
    # np.int was deprecated in NumPy 1.20 and removed in 1.24 - use builtin int.
    midpoint = int(histogram.shape[0] // 2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    nwindows = 9   # number of vertically stacked search windows
    margin = 100   # half-width of each window, pixels
    minpix = 50    # min pixels found before a window is recentered
    window_height = int(image.shape[0] // nwindows)
    nonzero = image.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    leftx_current = leftx_base
    rightx_current = rightx_base
    left_lane_inds = []
    right_lane_inds = []
    for window in range(nwindows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = image.shape[0] - (window + 1) * window_height
        win_y_high = image.shape[0] - window * window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Draw the windows on the visualization image
        cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 4)
        cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 4)
        # Identify the nonzero pixels in x and y within the window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (
                nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (
                nonzerox < win_xright_high)).nonzero()[0]
        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If we found > minpix pixels, recenter next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Concatenate the arrays of indices (previously was a list of lists of pixels)
    try:
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)
    except ValueError:
        # Avoids an error if the above is not implemented fully
        pass
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    return leftx, lefty, rightx, righty, out_img
# Fit a poly to perform a directed search in well known areas
def fit_poly(img_shape, leftx, lefty, rightx, righty):
    """Fit second-order polynomials x = f(y) to each lane and evaluate them.

    Returns (left_fitx, right_fitx, ploty): the fitted x positions for
    every image row, plus the row coordinates themselves.
    """
    # One y sample per image row.
    ploty = np.linspace(0, img_shape[0] - 1, img_shape[0])
    left_coeffs = np.polyfit(lefty, leftx, 2)
    right_coeffs = np.polyfit(righty, rightx, 2)
    # Evaluate both quadratics along ploty.
    left_fitx = np.polyval(left_coeffs, ploty)
    right_fitx = np.polyval(right_coeffs, ploty)
    return left_fitx, right_fitx, ploty
def search_around_poly(image):
    """Detect both lane lines, their curvature, and render the lane area.

    Runs find_lane_pixels(), refits both lanes around the resulting
    polynomials, converts to meters to compute curvature radii, and draws
    the detected pixels and lane polygon onto an RGB visualization.

    Args:
        image: binary warped road image (2-D array).

    Returns:
        (out_img, left_curverad, right_curverad); both curvatures are 0
        when no pixels were found for at least one lane.
    """
    margin = 100  # +/- search corridor around each fitted polynomial, pixels
    # Grab activated pixels
    nonzero = image.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    leftx, lefty, rightx, righty, out_img = find_lane_pixels(image)
    if ((len(leftx) == 0) or (len(rightx) == 0) or (len(righty) == 0) or (len(lefty) == 0)):
        # Nothing found for at least one lane: blank visualization, zero curvature.
        out_img = np.dstack((image, image, image)) * 255
        left_curverad = 0
        right_curverad = 0
    else:
        left_fit = np.polyfit(lefty, leftx, 2)
        right_fit = np.polyfit(righty, rightx, 2)
        ### Set the area of search based on activated x-values ###
        ### within the +/- margin of our polynomial function ###
        left_lane_inds = ((nonzerox > (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy +
                                       left_fit[2] - margin)) & (nonzerox < (left_fit[0] * (nonzeroy ** 2) +
                                                                             left_fit[1] * nonzeroy + left_fit[
                                                                                 2] + margin)))
        right_lane_inds = ((nonzerox > (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy +
                                        right_fit[2] - margin)) & (nonzerox < (right_fit[0] * (nonzeroy ** 2) +
                                                                               right_fit[1] * nonzeroy + right_fit[
                                                                                   2] + margin)))
        # Again, extract left and right line pixel positions
        leftx = nonzerox[left_lane_inds]
        lefty = nonzeroy[left_lane_inds]
        rightx = nonzerox[right_lane_inds]
        righty = nonzeroy[right_lane_inds]
        # Fit new polynomials
        left_fitx, right_fitx, ploty = fit_poly(image.shape, leftx, lefty, rightx, righty)
        # Pixel -> meter conversion factors (assumes standard US lane geometry
        # and a 720-row warp - TODO confirm for this camera setup).
        ym_per_pix = 30 / 720  # meters per pixel in y dimension
        xm_per_pix = 3.7 / 650  # meters per pixel in x dimension
        # Calculate the curvature (radius formula for x = f(y), in meters)
        left_fit_cr = np.polyfit(ploty * ym_per_pix, left_fitx * xm_per_pix, 2)
        right_fit_cr = np.polyfit(ploty * ym_per_pix, right_fitx * xm_per_pix, 2)
        y_eval = np.max(ploty)  # evaluate at the bottom of the image
        left_curverad = ((1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(
            2 * left_fit_cr[0])
        right_curverad = ((1 + (
                2 * right_fit_cr[0] * y_eval * ym_per_pix + right_fit_cr[1]) ** 2) ** 1.5) / np.absolute(
            2 * right_fit_cr[0])
        ## Visualization ##
        # Create an image to draw on and an image to show the selection window
        out_img = np.dstack((image, image, image)) * 255
        window_img = np.zeros_like(out_img)
        # Color in left and right line pixels
        out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
        out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
        # Generate and draw a poly to illustrate the lane area
        left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
        right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
        points = np.hstack((left, right))
        out_img = cv2.fillPoly(out_img, np.int_(points), (0, 200, 255))
    return out_img, left_curverad, right_curverad
from turtle import *
import math
w = Screen()
w.tracer(0)  # disable auto-refresh; frames are flushed manually via multitask()
siz = 20     # base glyph half-height in pixels, shared by all letter functions
def turset(t):
    """Prepare a turtle for drawing: hide it, draw in black, lift the pen."""
    t.ht()
    t.color("black")
    t.pu()
def multitask():
    """Flush pending drawing to the screen (tracer is turned off)."""
    w.update()
def A(x=0,y=0,t = Turtle(),tt = Turtle(),ttt = Turtle(),z = siz) :
    """Draw the letter 'A' near (x, y) with half-height z.

    Three turtles draw simultaneously: two slanted legs and the crossbar.
    NOTE(review): the Turtle() defaults are created once at definition time
    and shared across calls - fine while each letter is drawn only once.
    """
    T = (t,tt,ttt)
    for _ in T : turset(_)
    # t: apex down the right leg; tt: left leg upward; ttt: crossbar.
    t.goto(x,y+z) ; t.setheading(t.towards(x+(z/2),y-z))
    tt.goto(x-(z/2),y-z) ; tt.setheading(tt.towards(x,y+z))
    ttt.goto(x+(z/4),y) ; ttt.setheading(ttt.towards(x-(z/4),y))
    for _ in T : _.pd()
    for _ in range(2*z) :
        if _ == 0 or _ == z : t.fd(2.5*z/50)  # small nudge at start and midpoint
        t.fd(1)
        tt.fd(1)
        if _ <= 7*z/8 : ttt.fd(1)  # crossbar is shorter than the legs
        multitask()
def I(x=0,y=0,t = Turtle(),tt = Turtle(),ttt = Turtle(),z = siz) :
    """Draw the letter 'I': a vertical stem plus top and bottom serif bars."""
    T = (t,tt,ttt)
    for _ in T : turset(_)
    # t: stem downward; tt: top bar rightward; ttt: bottom bar leftward.
    t.goto(x,y+z) ; t.setheading(-90)
    tt.goto(x-(z/2),y+z) ; tt.setheading(0)
    ttt.goto(x+(z/2),y-z); ttt.setheading(180)
    for _ in T : _.pd()
    for _ in range(2*z):
        t.fd(1)
        if _ < z : tt.fd(1) ; ttt.fd(1)  # bars are half the stem's length
        multitask()
def L(x=0,y=0,t = Turtle(),tt = Turtle(),z = siz) :
    """Draw the letter 'L': a vertical stem and a bottom bar."""
    pens = (t, tt)
    for pen in pens:
        turset(pen)
    # Stem starts top-left heading down; bar starts bottom-right heading left.
    t.goto(x - (z / 2), y + z)
    t.setheading(-90)
    tt.goto(x + (z / 2), y - z)
    tt.setheading(180)
    for pen in pens:
        pen.pd()
    for step in range(2 * z):
        t.fd(1)
        if step < z:
            tt.fd(1)  # the bar is half as long as the stem
        multitask()
def N(x=0,y=0,t = Turtle(),tt = Turtle(),ttt = Turtle(),z = siz) :
    """Draw the letter 'N': two vertical strokes plus the connecting diagonal."""
    T = (t,tt,ttt)
    for _ in T : turset(_)
    # t: left vertical downward; tt: right vertical upward; ttt: diagonal.
    t.goto(x-(z/2),y+z) ; t.setheading(t.towards(x-(z/2),y-z))
    tt.goto(x+(z/2),y-z) ; tt.setheading(tt.towards(x+(z/2),y+z))
    ttt.goto(x+(z/2),y-z) ; ttt.setheading(ttt.towards(x-(z/2),y+z))
    for _ in T : _.pd()
    for _ in range(2*z) :
        #if _ == 0 or _ == z : t.fd(2.5*z/50)
        t.fd(1)
        tt.fd(1)
        # The diagonal is longer than the verticals; the 1.12 step
        # presumably approximates the length ratio - TODO confirm.
        if math.sin(math.radians(ttt.towards(x-(z/2),y+z)))*_ <= 2*z :
            ttt.fd(1.12)
        multitask()
def Q(x=0,y=0,t = Turtle(),tt = Turtle(),z = siz) :
    """Draw the letter 'Q': a circle with a short diagonal tail."""
    T = (t,tt)
    ang = 45        # tail angle below horizontal, degrees
    exten = z/10    # NOTE(review): unused - looks like leftover tuning
    for _ in T : turset(_)
    # t draws the tail from the centre; tt draws the circle from the bottom.
    t.goto(x,y) ; t.setheading(-ang)
    tt.goto(x,y-z) ; tt.setheading(0)
    for _ in T : _.pd()
    for _ in range(360):
        if _ < 360*z*math.sin(math.radians(ang)) : t.fd(z/360) ; t.fd(siz/1000)
        tt.circle(z,1)
        if _%5 == 0 : multitask()  # refresh every 5 steps to limit redraw cost
def R(x=0,y=0,t = Turtle(),tt = Turtle(),ttt = Turtle(),z = siz) :
    """Draw the letter 'R': vertical stem, curved bowl, and diagonal leg."""
    T = (t,tt,ttt)
    for _ in T : turset(_)
    # t: stem downward; tt: top edge then bowl; ttt: diagonal leg.
    t.goto(x,y+z) ; t.setheading(-90)
    tt.goto(x,y+z) ; tt.setheading(0)
    ttt.goto(x,y) ; ttt.setheading(ttt.towards(x+(3*z/4),y-z))
    for _ in T : _.pd()
    for _ in range(360) :
        t.fd(z/180)
        if _ > 90 and _ < 270 : tt.circle(-z/2,1)  # curved bowl section
        else : tt.fd(z/180)
        # Leg stops once it has covered roughly z of vertical travel.
        if math.sin(math.radians(ttt.towards(x+(3*z/4),y-z)))*_ <= z :
            ttt.fd(z/180)
        if _%5 == 0 : multitask()
def T(x=0,y=0,t = Turtle(),tt = Turtle(),z = siz) :
    """Draw the letter 'T': a vertical stem and the top bar."""
    pens = (t, tt)
    for pen in pens:
        turset(pen)
    # Stem starts at the top centre heading down; bar starts top-left heading right.
    t.goto(x, y + z)
    t.setheading(-90)
    tt.goto(x - (z / 2), y + z)
    tt.setheading(0)
    for pen in pens:
        pen.pd()
    for step in range(2 * z):
        t.fd(1)
        if step < z:
            tt.fd(1)  # the bar is half as long as the stem
        multitask()
def U(x=0,y=0,t = Turtle(),z = siz) :
    """Draw the letter 'U' with one turtle: down, bottom half-circle, back up."""
    turset(t)
    t.goto(x-(z/2),y+z) ; t.setheading(-90)
    t.pd()
    zl = z/2  # radius of the bottom arc
    for _ in range(int(z+zl)):
        t.fd(1)
        if _%2 == 0 : multitask()
    for _ in range(180) :
        t.circle(zl,1)
        if _%5 == 0 : multitask()
    for _ in range(int(z+zl)):
        t.fd(1)
        if _%2 == 0 : multitask()
def Y(x=0,y=0,t = Turtle(),tt = Turtle(),z = siz) :
    """Draw the letter 'Y' as two strokes converging toward (x+z/2, y).

    NOTE(review): tt moves at double speed, so it overshoots the junction -
    presumably that overshoot forms the stem; confirm visually.
    """
    T = (t,tt)
    for _ in T : turset(_)
    t.goto(x,y+z) ; t.setheading(t.towards(x+z/2,y))
    tt.goto(x+z,y+z) ; tt.setheading(tt.towards(x+z/2,y))
    for _ in T : _.pd()
    for _ in range(int(9*z/8)):
        t.fd(1)
        tt.fd(2)
# Draw the word letter by letter, spacing the glyphs 45 px apart.
out = [T, R, Y, A, L, I, N, Q, U]
x_pos = -50
for letter in out:
    letter(x_pos)
    x_pos += 45
|
# -*- coding: utf-8 -*-
import logging
from datetime import datetime
from pathlib import Path
from _pytest.logging import LogCaptureFixture
from snooze.parser import SnoozeMatch, SnoozeParser
def test__file_ext() -> None:
    """_file_ext returns the extension without the dot, or None if absent."""
    cases = [(Path("foo/bar.py"), "py"), (Path("foo"), None)]
    for path, expected in cases:
        assert SnoozeParser._file_ext(path) == expected
def test__mk_snooze_regex() -> None:
    """The generated regex matches '# snooze: <date>' comments in .py lines."""
    regex = SnoozeParser._mk_snooze_regex("py")
    matching = [" # snooze: 1900-01-01", "print('foo') # snooze: 1900-01-01"]
    non_matching = ["dummy", "# dummy", " snooze: 1900-01-01"]
    for line in matching:
        assert regex.match(line)
    for line in non_matching:
        assert not regex.match(line)
def test__matches_in_file(test_resources: Path) -> None:
    """_matches_in_file yields the snooze when `now` is on or after the
    snoozed date, and nothing when `now` is still before it."""
    hw_py = test_resources / "hello_world.py"
    expected = [
        SnoozeMatch(
            path=Path("hello_world.py"),
            line='print("Hello, world!") # snooze: 1900-01-01',
            line_nb=1,
            time=datetime(1900, 1, 1, 0, 0),
        )
    ]
    # Day after the snooze date: expired, so it is reported.
    matches = list(
        SnoozeParser._matches_in_file(
            hw_py, now=datetime(year=1900, month=1, day=2), root=test_resources
        )
    )
    assert matches == expected
    # Exactly on the snooze date: also reported.
    matches = list(
        SnoozeParser._matches_in_file(
            hw_py, now=datetime(year=1900, month=1, day=1), root=test_resources
        )
    )
    assert matches == expected
    # Day before the snooze date: still snoozed, nothing reported.
    matches = list(
        SnoozeParser._matches_in_file(
            hw_py, now=datetime(year=1899, month=12, day=31), root=test_resources
        )
    )
    assert matches == []
def test__try_parse_time(caplog: LogCaptureFixture) -> None:
    """_try_parse_time extracts the date from a snooze comment, and returns
    None (logging a warning) for lines without one or with a bad date."""
    regex = SnoozeParser._mk_snooze_regex("py")
    # No snooze comment at all -> None.
    time = SnoozeParser._try_parse_time(regex=regex, line="", path=Path("/dummy"), i=42)
    assert time is None
    # Well-formed snooze comment -> parsed datetime.
    time = SnoozeParser._try_parse_time(
        regex=regex, line="foo # snooze: 1900-01-01", path=Path("/dummy"), i=42
    )
    assert time == datetime(1900, 1, 1)
    # Invalid date -> None, plus a warning naming file, line and bad value.
    with caplog.at_level(logging.WARNING):
        time = SnoozeParser._try_parse_time(
            regex=regex, line="foo # snooze: 1900-33-33", path=Path("/dummy.py"), i=42
        )
    assert time is None
    assert caplog.text.strip().startswith("WARNING root:parser.py:")
    # This expression previously lacked its `assert`, so it checked nothing.
    assert caplog.text.strip().endswith(
        'Could not parse time "1900-33-33" in /dummy.py:42'
    )
def test_search_all_files(test_resources: Path) -> None:
    """search_all_files finds expired snoozes across every supported
    language in the resource tree (order-independent comparison)."""
    matches = list(
        SnoozeParser.search_all_files(
            test_resources, now=datetime(year=1900, month=1, day=1)
        )
    )
    # Sort both sides: directory traversal order is not guaranteed.
    assert sorted(matches) == sorted(
        [
            SnoozeMatch(
                path=Path("hello_world.py"),
                line='print("Hello, world!") # snooze: 1900-01-01',
                line_nb=1,
                time=datetime(1900, 1, 1, 0, 0),
            ),
            SnoozeMatch(
                path=Path("hello_world.c"),
                line='  printf("Hello, World!"); // snooze: 1900-01-01',
                line_nb=3,
                time=datetime(1900, 1, 1, 0, 0),
            ),
            SnoozeMatch(
                path=Path("hello_world.js"),
                line='console.log("Hello, World!"); // snooze: 1900-01-01',
                line_nb=1,
                time=datetime(1900, 1, 1, 0, 0),
            ),
            SnoozeMatch(
                path=Path("hello_world.cpp"),
                line='  std::cout << "Hello World!"; // snooze: 1900-01-01',
                line_nb=3,
                time=datetime(1900, 1, 1, 0, 0),
            ),
            SnoozeMatch(
                path=Path("hello_world.java"),
                line='        System.out.println("Hello, World!"); // snooze: 1900-01-01',
                line_nb=3,
                time=datetime(1900, 1, 1, 0, 0),
            ),
        ]
    )
def test_sort() -> None:
    """SnoozeMatch instances order by path first, then by line content."""
    specs = [
        (Path("/path/2"), "line1", 1),
        (Path("/path/1"), "line2", 2),
        (Path("/path/1"), "line1", 1),
    ]
    matches = [
        SnoozeMatch(path=p, line=line, line_nb=nb, time=datetime(1900, 1, 1))
        for p, line, nb in specs
    ]
    sorted_matches = sorted(matches)
    assert [(m.path, m.line) for m in sorted_matches] == [
        (Path("/path/1"), "line1"),
        (Path("/path/1"), "line2"),
        (Path("/path/2"), "line1"),
    ]
|
import math
from pyspark import SparkConf, SparkContext
from pyspark.mllib.recommendation import ALS, MatrixFactorizationModel
def para_set(training_RDD,validation_for_predict_RDD):
    """Grid-search ALS hyper-parameters (rank, iterations, lambda) by RMSE.

    Each sweep fixes the best value found by the previous one.
    NOTE(review): reads `validation_RDD` from module scope instead of
    taking it as a parameter - confirm it is defined before calling.

    Returns:
        (best_iteration, best_rank, best_lambda,
         res_rank, res_iteration, res_lambda): the winning values plus the
        full (value, RMSE) history of each sweep. Python 2 / PySpark code.
    """
    res_rank = []
    iterations = 5
    seed = 5L
    regularization_parameter = 0.1
    # Sweep 1: rank with fixed iterations/lambda.
    for rank in range(1,10):
        model = ALS.train(training_RDD, rank=rank, seed=seed, iterations=iterations, lambda_=regularization_parameter)
        predictions = model.predictAll(validation_for_predict_RDD).map(lambda r: ((r[0], r[1]), r[2]))
        rates_and_preds = validation_RDD.map(lambda r: ((int(r[0]), int(r[1])), float(r[2]))).join(predictions)
        error = math.sqrt(rates_and_preds.map(lambda r: (r[1][0] - r[1][1])**2).mean())
        print 'For rank %s the RMSE is %s' % (rank, error)
        res_rank.append((rank,error))
    best_rank = sorted(res_rank,key=lambda x:x[1])[0][0]
    res_iteration = []
    # Sweep 2: iteration count using the best rank.
    for iterations in range(2,20):
        model = ALS.train(training_RDD, rank=best_rank, seed=seed, iterations=iterations, lambda_=regularization_parameter)
        predictions = model.predictAll(validation_for_predict_RDD).map(lambda r: ((r[0], r[1]), r[2]))
        rates_and_preds = validation_RDD.map(lambda r: ((int(r[0]), int(r[1])), float(r[2]))).join(predictions)
        error = math.sqrt(rates_and_preds.map(lambda r: (r[1][0] - r[1][1])**2).mean())
        print 'For iteration %s the RMSE is %s' % (iterations, error)
        res_iteration.append((iterations,error))
    best_iteration = sorted(res_iteration,key=lambda x:x[1])[0][0]
    res_lambda = []
    # Sweep 3: regularization strength using the best rank and iterations.
    for lambda_ in [i/10.0 for i in range(1,90)]:
        model = ALS.train(training_RDD, rank=best_rank, seed=seed, iterations=best_iteration, lambda_=lambda_)
        predictions = model.predictAll(validation_for_predict_RDD).map(lambda r: ((r[0], r[1]), r[2]))
        rates_and_preds = validation_RDD.map(lambda r: ((int(r[0]), int(r[1])), float(r[2]))).join(predictions)
        error = math.sqrt(rates_and_preds.map(lambda r: (r[1][0] - r[1][1])**2).mean())
        print 'For lambda %s the RMSE is %s' % (lambda_, error)
        res_lambda.append((lambda_,error))
    best_lambda = sorted(res_lambda,key=lambda x:x[1])[0][0]
    print best_iteration,best_rank,best_lambda
    return best_iteration,best_rank,best_lambda,res_rank,res_iteration,res_lambda
conf = SparkConf().setAppName("LYCA").set("spark.executor.memory", "8g")
sc = SparkContext(conf=conf)
# Load the complete dataset file
data = sc.textFile("data.csv")
header = data.take(1)[0]
# Parse
data = data.filter(lambda line: line!=header).map(lambda line: line.split(",")).map(lambda tokens: (tokens[0],tokens[1],tokens[2]))
training_RDD, validation_RDD, test_RDD= data.randomSplit([7, 2, 1], seed=0L)
validation_for_predict_RDD = validation_RDD.map(lambda x: (x[0], x[1]))
test_for_predict_RDD = test_RDD.map(lambda x: (x[0], x[1]))
iteration,rank,lambda_ = para_set(training_RDD,validation_for_predict_RDD)
model = ALS.train(training_RDD, rank=rank, seed=5L, iterations=iteration, lambda_=lambda_)
predictions = model.predictAll(validation_for_predict_RDD).map(lambda r: ((r[0], r[1]), r[2]))
rates_and_preds = validation_RDD.map(lambda r: ((int(r[0]), int(r[1])), float(r[2]))).join(predictions)
error = math.sqrt(rates_and_preds.map(lambda r: (r[1][0] - r[1][1])**2).mean())
model.save(sc, "CollaborativeFilter")
#sameModel = MatrixFactorizationModel.load(sc, "/tmp/data/myCollaborativeFilter")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import fontforge
import psMat
import os
import sys
import math
import glob
from datetime import datetime
ASCENT = 1600   # em-square ascent shared by all generated variants
DESCENT = 400   # em-square descent
SOURCE = os.getenv("JULIAMONO_SB2_SOURCE_FONTS_PATH", "./sourceFonts")
# Read the license text once at import time; `with` closes the handle
# (the previous bare open().read() leaked the file object).
with open("./LICENSE.txt") as _license_file:
    LICENSE = _license_file.read()
COPYRIGHT = "Copyright (c) 2021, <NAME> (<EMAIL>)"
VERSION = "0.0.5"
FAMILY = "JuliaMono_Sb2"
# One entry per generated variant. Keys:
#   juliamono / mgen_plus: source font file names looked up under SOURCE
#   hack_weight_reduce / mgen_weight_add: stroke-weight adjustments (0 = off)
#   italic: whether glyphs are skewed during the merge
fonts = [
    {
        "family": FAMILY,
        "name": FAMILY + "-Regular",
        "filename": FAMILY + "-Regular.ttf",
        "weight": 400,
        "weight_name": "Regular",
        "style_name": "Regular",
        "juliamono": "JuliaMono_wo_lg-Regular.ttf",
        "mgen_plus": "mgenplus-1m-regular.ttf",
        "hack_weight_reduce": 0,
        "mgen_weight_add": 0,
        "italic": False,
    },
    {
        "family": FAMILY,
        "name": FAMILY + "-RegularItalic",
        "filename": FAMILY + "-RegularItalic.ttf",
        "weight": 400,
        "weight_name": "Regular",
        "style_name": "Italic",
        "juliamono": "JuliaMono_wo_lg-RegularItalic.ttf",
        "mgen_plus": "mgenplus-1m-regular.ttf",
        "hack_weight_reduce": 0,
        "mgen_weight_add": 0,
        "italic": True,
    },
    {
        "family": FAMILY,
        "name": FAMILY + "-Bold",
        "filename": FAMILY + "-Bold.ttf",
        "weight": 700,
        "weight_name": "Bold",
        "style_name": "Bold",
        "juliamono": "JuliaMono_wo_lg-Bold.ttf",
        "mgen_plus": "mgenplus-1m-bold.ttf",
        "hack_weight_reduce": 0,
        "mgen_weight_add": 0,
        "italic": False,
    },
    {
        "family": FAMILY,
        "name": FAMILY + "-BoldItalic",
        "filename": FAMILY + "-BoldItalic.ttf",
        "weight": 700,
        "weight_name": "Bold",
        "style_name": "Bold Italic",
        "juliamono": "JuliaMono_wo_lg-BoldItalic.ttf",
        "mgen_plus": "mgenplus-1m-bold.ttf",
        "hack_weight_reduce": 0,
        "mgen_weight_add": 0,
        "italic": True,
    },
]
def log(_str):
    """Print *_str* prefixed with the current 'YYYY-mm-dd HH:MM:SS' timestamp."""
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    print("{} {}".format(timestamp, _str))
def remove_glyph_from_juliamono(_font):
    """Delete from JuliaMono the glyphs that Rounded Mgen+ should provide."""
    ranges = (
        (0x3001, 0x3040),
        (0xFE10, 0xFE49),
        (0xFF01, 0xFF66),
        (0xFFE0, 0xFFE8),
    )
    for start, stop in ranges:
        for code in range(start, stop):
            _font.selection.select(code)
            _font.clear()
    return _font
def remove_glyph_from_mgenplus(_font):
    """Delete from Mgen+ the glyphs that JuliaMono should provide."""
    ranges = (
        (0x0000, 0x2E52),
        (0x1D538, 0x1D7FF),
        (0x110000, 0x110254),
    )
    for start, stop in ranges:
        for code in range(start, stop):
            _font.selection.select(code)
            _font.clear()
    return _font
def check_files():
    """Verify every source font exists under SOURCE; exit(1) if any is missing."""
    missing = False
    for f in fonts:
        for key in ("juliamono", "mgen_plus"):
            if not os.path.isfile(os.path.join(SOURCE, f.get(key))):
                log("%s not exists." % f)
                missing = True
    if missing:
        sys.exit(1)
def set_os2_values(_font, _info, _org):
    """Copy OS/2 and hhea table values from _org onto _font, overriding
    the weight and style-map bits from the variant description.

    @param _font: destination fontforge font being built
    @param _info: dict for this variant (provides 'weight' and 'style_name')
    @param _org: source font whose OS/2 metrics are propagated
    @return: _font with the tables populated
    """
    weight = _info.get("weight")
    style_name = _info.get("style_name")
    _font.os2_weight = weight
    _font.os2_width = _org.os2_width
    _font.os2_fstype = _org.os2_fstype
    # fsSelection/macStyle bit patterns per style.
    if style_name == "Regular":
        _font.os2_stylemap = 64
    elif style_name == "Bold":
        _font.os2_stylemap = 32
    elif style_name == "Italic":
        _font.os2_stylemap = 1
    elif style_name == "Bold Italic":
        _font.os2_stylemap = 33
    _font.os2_vendor = _org.os2_vendor  # 'TMNM'
    _font.os2_version = _org.os2_version  # 1
    _font.os2_winascent = _org.os2_winascent  # ASCENT
    _font.os2_winascent_add = _org.os2_winascent_add  # False
    _font.os2_windescent = _org.os2_windescent  # DESCENT
    _font.os2_windescent_add = _org.os2_windescent_add  # False
    _font.os2_typoascent = _org.os2_typoascent  # -150
    _font.os2_typoascent_add = _org.os2_typoascent_add  # True
    _font.os2_typodescent = _org.os2_typodescent  # 100
    _font.os2_typodescent_add = _org.os2_typodescent_add  # True
    _font.os2_typolinegap = _org.os2_typolinegap  # 0
    # hhea metrics expressed as offsets (the *_add flags make them relative).
    _font.hhea_ascent = -150
    _font.hhea_ascent_add = True
    _font.hhea_descent = 100
    _font.hhea_descent_add = True
    _font.hhea_linegap = 0
    _font.os2_panose = (
        _org.os2_panose
    )  # (2, 11, int(weight / 100), 9, 2, 2, 3, 2, 2, 7)
    return _font
def align_to_center(_g):
    """Centre a glyph horizontally in a full (2400) or half (1200) width cell."""
    cell = 2400 if _g.width > 1600 else 1200
    _g.width = cell
    # Split the combined side bearing evenly between left and right.
    _g.left_side_bearing = _g.right_side_bearing = (
        _g.left_side_bearing + _g.right_side_bearing
    ) / 2
    # Re-assert the advance width: changing bearings can shift it.
    _g.width = cell
    return _g
def align_to_left(_g):
    """Zero the left side bearing while keeping the advance width unchanged."""
    original_width = _g.width
    _g.left_side_bearing = 0
    _g.width = original_width
def align_to_right(_g):
    """Right-align the glyph outline within its advance width."""
    original_width = _g.width
    x_min, _ymin, x_max, _ymax = _g.boundingBox()
    # Left bearing = advance minus the outline's horizontal extent.
    _g.left_side_bearing = original_width - (x_max - x_min)
    _g.width = original_width
def reiwa(_f, _weight):
    """Copy the Reiwa-era square ligature (U+32FF) from the reiwa SFD into _f.

    Chooses the bold source file when _weight is "Bold".
    """
    # Local renamed from `reiwa` - the original shadowed the function name.
    src_font = fontforge.open(os.path.join(SOURCE, "reiwa_Sb2.sfd"))
    if _weight == "Bold":
        src_font.close()
        src_font = fontforge.open(os.path.join(SOURCE, "reiwa_Sb2-Bold.sfd"))
    for g in src_font.glyphs():
        if g.isWorthOutputting:
            g = align_to_center(g)
    src_font.selection.select(0x00)
    src_font.copy()
    _f.selection.select(0x32FF)
    _f.paste()
    src_font.close()
    return _f
def fix_overflow(glyph):
    """Shrink/shift a full-width glyph so it fits the 2000x2400 em box.

    Only full-width glyphs (advance >= 2400) are touched: the outline is
    scaled down when taller than 2000 units, then translated back inside
    the [-400, 1600] vertical band.
    """
    if glyph.width < 2400:
        return glyph  # half-width glyphs are left untouched
    if glyph.isWorthOutputting:
        _xmin, y_min, _xmax, y_max = glyph.boundingBox()
        height = y_max - y_min
        if height > 2000:
            shrink = 2000 / height
            glyph.transform(psMat.scale(shrink, shrink))
        # Re-measure: scaling above may have moved the extents.
        _xmin, bottom, _xmax, top = glyph.boundingBox()
        if bottom < -400:
            glyph.transform(psMat.translate(0, -400 - bottom))
        elif top > 1600:
            glyph.transform(psMat.translate(0, 1600 - top))
    return glyph
def build_font(_f):
    """Merge JuliaMono and Rounded Mgen+ into one variant and write the TTF.

    @param _f: one entry of the `fonts` list describing the variant
        (source files, weight, style, italic flag, output filename).
    Writes the generated font to ./dist/<filename>.
    """
    juliamono = fontforge.open(os.path.join(SOURCE, _f.get("juliamono")))
    mgenplus = fontforge.open(os.path.join(SOURCE, _f.get("mgen_plus")))
    log("remove_glyph_from_mgenplus()")
    mgenplus = remove_glyph_from_mgenplus(mgenplus)
    # Optionally fatten Mgen+ strokes to visually match the target weight.
    if _f.get("mgen_weight_add") != 0:
        for g in mgenplus.glyphs():
            # g.changeWeight(_f.get('mgen_weight_add'), 'auto', 0, 0, 'auto')
            g.stroke(
                "caligraphic",
                _f.get("mgen_weight_add"),
                _f.get("mgen_weight_add"),
                45,
                "removeinternal",
            )
            # g.stroke("circular", _f.get('mgen_weight_add'), 'butt', 'round', 'removeinternal')
    # Punctuation whose off-centre placement is deliberate - skip re-centring.
    ignoring_center = [
        0x3001,
        0x3002,
        0x3008,
        0x3009,
        0x300A,
        0x300B,
        0x300C,
        0x300D,
        0x300E,
        0x300F,
        0x3010,
        0x3011,
        0x3014,
        0x3015,
        0x3016,
        0x3017,
        0x3018,
        0x3019,
        0x301A,
        0x301B,
        0x301D,
        0x301E,
        0x3099,
        0x309A,
        0x309B,
        0x309C,
    ]
    FULLWIDTH_LEFT_BRACKETS = (0xFF08, 0xFF3B, 0xFF5B, 0xFF5F)
    FULLWIDTH_RIGHT_BRACKETS = (0xFF09, 0xFF3D, 0xFF5D, 0xFF60)
    log("transform Mgen+")
    for g in mgenplus.glyphs():
        # Scale Mgen+ up to JuliaMono's em size.
        g.transform((1.82, 0, 0, 1.82, 0, 0))
        full_half_threshold = 1600
        if _f.get("italic"):
            g.transform(psMat.skew(0.25))
            skew_amount = g.font.ascent * 0.91 * 0.25
            g.width = g.width + skew_amount
            full_half_threshold += skew_amount
        # Snap every glyph to a full (2400) or half (1200) width cell.
        if g.width > full_half_threshold:
            width = 2400
        else:
            width = 1200
        g.transform(psMat.translate((width - g.width) / 2, 0))
        g.width = width
        if g.encoding in ignoring_center:
            pass
        else:
            g = align_to_center(g)
        if g.encoding in FULLWIDTH_LEFT_BRACKETS:
            # Fullwidth left bracket: push the outline toward the right edge.
            width = g.width  # 1200
            # g.right_side_bearing = g.right_side_bearing - 300
            g.transform(psMat.translate(500, 0))
            g.width = width
        elif g.encoding in FULLWIDTH_RIGHT_BRACKETS:
            # Fullwidth right bracket: push the outline toward the left edge.
            width = g.width  # 1200
            # g.left_side_bearing = g.left_side_bearing - 300
            g.transform(psMat.translate(-500, 0))
            g.width = width
    log("modify border glyphs")
    for g in mgenplus.glyphs():
        if g.isWorthOutputting:
            if _f.get("italic"):
                g.transform(psMat.skew(0.25))
            if g.encoding >= 0x2500 and g.encoding <= 0x257F:
                # Park a fullwidth copy of the box-drawing glyphs at 0xF0000+.
                mgenplus.selection.select(g.encoding)
                mgenplus.copy()
                mgenplus.selection.select(g.encoding + 0xF0000)
                mgenplus.paste()
            # Merge every Mgen+ glyph into the JuliaMono font.
            mgenplus.selection.select(g.encoding)
            mgenplus.copy()
            try:
                juliamono.selection.select(g.encoding)
                juliamono.paste()
            except Exception as ex:
                log("WARN: " + str(ex))
    juliamono = reiwa(juliamono, _f.get("weight_name"))
    log("fix_overflow()")
    for g in juliamono.glyphs():
        g = fix_overflow(g)
    juliamono.ascent = ASCENT
    juliamono.descent = DESCENT
    juliamono.upos = 45
    juliamono.fontname = _f.get("family")
    juliamono.familyname = _f.get("family")
    juliamono.fullname = _f.get("name")
    juliamono.weight = _f.get("weight_name")
    # juliamono = set_os2_values(juliamono, _f, juliamono)
    # Japanese (0x411) name-table entries.
    juliamono.appendSFNTName(0x411, 0, COPYRIGHT)
    juliamono.appendSFNTName(0x411, 1, _f.get("family"))
    juliamono.appendSFNTName(0x411, 2, _f.get("style_name"))
    # juliamono.appendSFNTName(0x411,3, "")
    juliamono.appendSFNTName(0x411, 4, _f.get("name"))
    juliamono.appendSFNTName(0x411, 5, "Version " + VERSION)
    juliamono.appendSFNTName(0x411, 6, _f.get("family") + "-" + _f.get("weight_name"))
    # juliamono.appendSFNTName(0x411,7, "")
    # juliamono.appendSFNTName(0x411,8, "")
    # juliamono.appendSFNTName(0x411,9, "")
    # juliamono.appendSFNTName(0x411,10, "")
    # juliamono.appendSFNTName(0x411,11, "")
    # juliamono.appendSFNTName(0x411,12, "")
    juliamono.appendSFNTName(0x411, 13, LICENSE)
    # juliamono.appendSFNTName(0x411,14, "")
    # juliamono.appendSFNTName(0x411,15, "")
    juliamono.appendSFNTName(0x411, 16, _f.get("family"))
    juliamono.appendSFNTName(0x411, 17, _f.get("style_name"))
    # English (0x409) name-table entries.
    juliamono.appendSFNTName(0x409, 0, COPYRIGHT)
    juliamono.appendSFNTName(0x409, 1, _f.get("family"))
    juliamono.appendSFNTName(0x409, 2, _f.get("style_name"))
    juliamono.appendSFNTName(
        0x409, 3, VERSION + ";" + _f.get("family") + "-" + _f.get("style_name")
    )
    juliamono.appendSFNTName(0x409, 4, _f.get("name"))
    juliamono.appendSFNTName(0x409, 5, "Version " + VERSION)
    juliamono.appendSFNTName(0x409, 6, _f.get("name"))
    # juliamono.appendSFNTName(0x409,7, "")
    # juliamono.appendSFNTName(0x409,8, "")
    # juliamono.appendSFNTName(0x409,9, "")
    # juliamono.appendSFNTName(0x409,10, "")
    # juliamono.appendSFNTName(0x409,11, "")
    # juliamono.appendSFNTName(0x409,12, "")
    juliamono.appendSFNTName(0x409, 13, LICENSE)
    # juliamono.appendSFNTName(0x409,14, "")
    # juliamono.appendSFNTName(0x409,15, "")
    juliamono.appendSFNTName(0x409, 16, _f.get("family"))
    juliamono.appendSFNTName(0x409, 17, _f.get("style_name"))
    fontpath = "./dist/%s" % _f.get("filename")
    juliamono.generate(fontpath)
    mgenplus.close()
    juliamono.close()
def main():
    """Build every font listed in ``fonts``, logging progress per file.

    ``check_files()`` validates that the required source fonts exist
    before any build starts; each entry is then rendered into ``dist/``.
    """
    check_files()
    for font_spec in fonts:
        log("Started: dist/" + font_spec["filename"])
        build_font(font_spec)
        log("Finished: dist/" + font_spec["filename"])
        log("")


if __name__ == "__main__":
    main()
|
import sys
sys.path.append('/home/ggoyal/code/yarp/build/lib/python3')
import yarp
import numpy as np
import cv2
import experimenting
import event_library as el
import torch
from os.path import join
from experimenting.utils.visualization import plot_skeleton_2d, plot_skeleton_3d, plot_skeleton_2d_lined
from experimenting.utils.skeleton_helpers import Skeleton
from experimenting.dataset.factory import Joints3DConstructor, BaseDataFactory, SimpleReadConstructor, \
MinimalConstructor
from experimenting.utils import utilities
from experimenting import utils
import matplotlib.pyplot as plt
# setting the directories and variables
# Depends on the input camera!
class GlHpeModule(yarp.RFModule):
    """YARP module that runs a human-pose-estimation model on incoming frames.

    Reads mono images from an input port, crops/scales them to the model's
    expected size, runs the MargiposeEstimator network, and shows the
    predicted 2D skeleton in an OpenCV window.  Pressing space in that
    window stops the module (``updateModule`` returns ``False``).

    NOTE(review): image sizes (400x300 in, 346x260 model, 640x480 out) and
    all data/checkpoint paths are hard-coded for one specific machine and
    camera setup.
    """

    def __init__(self):
        yarp.RFModule.__init__(self)
        # I/O ports: mono frames in, RGB visualisation out.
        self.input_port = yarp.BufferedPortImageMono()
        self.output_port = yarp.Port()
        self.counter = 0  # frames processed so far
        self.image_w = 400  # Size of image expected from the framer.
        self.image_h = 300  #
        # self.np_input = np.ones((self.image_h, self.image_w), dtype=np.uint8)
        self.yarp_image = yarp.ImageMono()     # reusable wrapper for input frames
        self.yarp_image_out = yarp.ImageRgb()  # reusable wrapper for output frames
        # Machine-specific data locations.
        self.datadir = "/media/ggoyal/Shared/data/dhp19_sample/"
        self.ch_idx = 3  # camera channel index; selects the projection matrix in configure()
        self.P_mat_dir = join(self.datadir, 'P_matrices/')
        self.checkpoint_path = "/media/ggoyal/Shared/data/checkpoint_dhp19"
        self.resultsPath = join(self.datadir, 'outputs/')
        self.image_w_model = 346  # Size of the image expected by the model
        self.image_h_model = 260  #
        self.output_w = 640  # Size of the output visualisation image
        self.output_h = 480  #
        self.model = None       # loaded in configure()
        self.read_image = None  # NOTE(review): never used; updateModule uses a local instead

    def configure(self, rf):
        """Open ports, build the dataset descriptor, and load the model.

        Returns ``False`` (module does not start) if a port cannot be
        opened; exits the process if no YARP network is found.
        """
        # Initialise YARP
        yarp.Network.init()
        if not yarp.Network.checkNetwork(2):
            print("Could not find network! Run yarpserver and try again.")
            exit(-1)

        # set the module name used to name ports
        self.setName((rf.check("name", yarp.Value("/glHpeModule")).asString()))

        # open io ports
        if not self.input_port.open(self.getName() + "/img:i"):
            print("Could not open input port")
            return False
        if not self.output_port.open(self.getName() + "/img:o"):
            print("Could not open output port")
            return False
        #
        # read flags and parameters
        # Dataset core handed to the model loader below; paths/splits are
        # fixed for this sample dataset.
        self.dhpcore = experimenting.dataset.DHP19Core('test', data_dir=join(self.datadir,
                                                                             'time_count_dataset/movements_per_frame'), \
                                                       joints_dir=join(self.datadir,
                                                                       "time_count_dataset/labels_full_joints/"), \
                                                       hm_dir="", labels_dir="", preload_dir="", n_joints=13,
                                                       n_classes=33, \
                                                       partition='cross-subject', n_channels=1, cams=[1, 3],
                                                       movements=None, test_subjects=[6, 7])
        # example_flag = rf.check("example_flag") and rf.check("example_flag", yarp.Value(True)).asBool()
        # default_value = 0.1
        # example_parameter = rf.check("example_parameter", yarp.Value(default_value)).asDouble()
        #
        # # do any other set-up required here
        self.model = utilities.load_model(self.checkpoint_path, "MargiposeEstimator",
                                          core=self.dhpcore).eval().double()
        # Pick the projection matrix for the selected camera channel.
        # NOTE(review): the channel->file mapping (0->P1, 3->P2, 2->P3, 1->P4)
        # is not in numeric order — confirm it matches the DHP19 camera rig.
        if self.ch_idx == 0:
            self.P_mat_cam = np.load(join(self.P_mat_dir, 'P1.npy'))
        elif self.ch_idx == 3:
            self.P_mat_cam = np.load(join(self.P_mat_dir, 'P2.npy'))
        elif self.ch_idx == 2:
            self.P_mat_cam = np.load(join(self.P_mat_dir, 'P3.npy'))
        elif self.ch_idx == 1:
            self.P_mat_cam = np.load(join(self.P_mat_dir, 'P4.npy'))
        self.extrinsics_matrix, self.camera_matrix = utils.decompose_projection_matrix(self.P_mat_cam)
        return True

    def getPeriod(self):
        # period of synchronous thread, return 0 update module called as fast as it can
        return 0

    def interruptModule(self):
        # interrupting all the ports
        self.input_port.interrupt()
        return True

    def close(self):
        # closing ports
        self.input_port.close()
        cv2.destroyAllWindows()
        return True

    def updateModule(self):
        """Process one frame: read, predict, render, display.

        Returns ``False`` (stopping the module) when space is pressed in
        the OpenCV window; ``True`` otherwise.
        """
        # synchronous update called every get period seconds.
        print("Press space at the image window to end the program.")

        # Preparing input and output image buffers.  setExternal binds the
        # yarp images to the numpy arrays' memory, so writes to the yarp
        # image land directly in np_input / np_output.
        np_input = np.ones((self.image_h, self.image_w), dtype=np.uint8)
        self.yarp_image.resize(self.image_w, self.image_h)
        self.yarp_image.setExternal(np_input.data, np_input.shape[1], np_input.shape[0])
        np_output = np.ones((self.output_h, self.output_w, 3), dtype=np.uint8)
        self.yarp_image_out.resize(self.output_w, self.output_h)
        self.yarp_image_out.setExternal(np_output.data, np_output.shape[1], np_output.shape[0])

        # Read the image (blocking port read), then copy into np_input.
        read_image = self.input_port.read()
        self.counter += 1  # can be used to interrupt the program
        self.yarp_image.copy(read_image)
        # Crop to the model's expected size and scale pixels to [0, 1].
        input_image = np.copy(np_input[:self.image_h_model, :self.image_w_model]) / 255.0
        if len(input_image.shape) == 2:
            input_image = np.expand_dims(input_image, -1)  # add channel axis
        input_image = np.expand_dims(input_image, 0)  # add batch axis

        # Predict the pose (permute converts NHWC -> NCHW for the model).
        torch_image = torch.from_numpy(input_image)
        preds, outs = self.model(torch_image.permute(0, -1, 1, 2))
        # Denormalize to the 260x346 camera frame, lift to world coordinates,
        # then reproject to 2D joints with the camera's projection matrix.
        pred_sk = Skeleton(preds[0].detach().numpy()).denormalize(260, 346,
                                                                  camera=torch.tensor(self.camera_matrix)). \
            reproject_onto_world(torch.tensor(self.extrinsics_matrix))
        pred_joints = pred_sk.get_2d_points(260, 346, p_mat=torch.tensor(self.P_mat_cam))

        # Obtain the 2D prediction as an image (render the matplotlib figure
        # into an RGB byte buffer, then convert to BGR for OpenCV).
        fig2D = plot_skeleton_2d_lined(input_image[0].squeeze(), pred_joints, return_figure=True)
        fig2D.canvas.draw()
        # NOTE(review): np.fromstring is deprecated for binary data;
        # np.frombuffer is the modern equivalent.
        img = np.fromstring(fig2D.canvas.tostring_rgb(), dtype=np.uint8, sep='')
        img = img.reshape(fig2D.canvas.get_width_height()[::-1] + (3,))
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

        # Visualize the result
        cv2.imshow("output", img)
        print(img.shape)
        k = cv2.waitKey(10)
        # cv2.imwrite(os.path.join(self.resultsPath,'input_'+str(self.counter),'.png'), input_image)
        # cv2.imwrite(os.path.join(self.resultsPath,'output_2D_'+str(self.counter),'.png'), img)
        # NOTE(review): assumes the rendered figure is exactly output_h x output_w;
        # a size mismatch would raise here.
        np_output[:,:] = img
        # self.output_port.write(self.yarp_image_out)
        if k == 32:  # space key ends the module
            return False
        return True
if __name__ == '__main__':
    # Build the YARP resource finder that supplies configuration to the module.
    resource_finder = yarp.ResourceFinder()
    resource_finder.setVerbose(False)
    resource_finder.setDefaultContext("event-driven")
    # resource_finder.setDefaultConfigFile("exampleModule.ini")
    resource_finder.configure(sys.argv)
    # Instantiate the pose-estimation module and hand control to YARP's loop.
    pose_module = GlHpeModule()
    pose_module.runModule(resource_finder)
|
# Repository: prculley/GeoFinder
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2019. <NAME>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import logging
import tkinter as tk
from tkinter import *
from tkinter import ttk
from tkinter.ttk import *
from geofinder import CachedDictionary, AppStyle
from geofinder import TKHelper as Widge
# Width (in characters) of the ttk buttons used by this frame.
BUTTON_WIDTH = 6

# tags to alternate colors in list box
odd_tag = ('odd',)
even_tag = ('even',)
class ListboxFrame:
    """
    ListboxFrame - GUI to display and remove lines from a Cached Dictionary
    Displays a scrolling list box of data with read/write from a CachedDictionary
    Allows user to delete items from list
    Defines overall grid layout for derived classes
    """

    def __init__(self, frame, title, dir_name, cache_filename):
        """Build the widgets, load the cached dictionary, and display it.

        frame: parent tk/ttk container the widgets are gridded into.
        title: text for the frame's title label.
        dir_name / cache_filename: location of the CachedDictionary file.
        """
        self.logger = logging.getLogger(__name__)
        # Grid positions per widget name: Col, Row, padx, pady, sticky.
        # Includes slots for widgets that only derived classes create.
        self.grd = {"title_label": [0, 0, 5, 5, "EW"],
                    "listbox": [0, 1, 5, 5, "EW"], "scrollbar": [1, 1, 0, 5, "WNS"],
                    "status": [0, 2, 5, 5, "EW"], "load_button": [2, 2, 5, 5, "W"], "remove_button": [2, 2, 5, 5, "W"],
                    "pad": [0, 3, 5, 5, "EW"],
                    "add_label": [0, 4, 5, 5, "EW"], "add_button": [2, 4, 5, 5, "W"],
                    "add_entry": [0, 5, 5, 5, "EW"], "listbox_all_countries": [0, 5, 5, 5, "EW"], "scrollbar2": [1, 5, 0, 5, "WNS"],
                    "country_label": [0, 5, 5, 5, "EW"],
                    "country_entry": [0, 7, 5, 5, "W"], "country_button": [2, 6, 5, 5, "W"],
                    "country_label2": [0, 6, 5, 5, "EW"],
                    "country_entry2": [0, 9, 5, 5, "W"],
                    "country_label3": [0, 8, 5, 5, "EW"],
                    }

        self.title = title
        self.frame = frame
        self.default = []  # default items a derived class may preload via load_defaults()
        self.separator = " :: "
        self.dirty_flag = False  # Flag to track if data was modified
        self.odd = False  # toggled per inserted row to alternate row colors

        # Title and status labels, plus a two-column Treeview with scrollbar.
        self.title_label = Widge.CLabel(self.frame, text=self.title, width=80, style='Info.TLabel')
        self.status = Widge.CLabel(self.frame, text="Highlight items above to remove and click Remove.", width=80, style='Info.TLabel')
        self.scrollbar = Scrollbar(self.frame)
        self.tree = ttk.Treeview(self.frame, style="Plain.Treeview")  # , selectmode="browse")
        self.tree.tag_configure('odd', background=AppStyle.ODD_ROW_COLOR)
        self.tree.tag_configure('even', background='white')
        self.tree["columns"] = ("pre",)
        self.tree.column("#0", width=400, minwidth=100, stretch=tk.NO)
        self.tree.column("pre", width=500, minwidth=50, stretch=tk.NO)
        self.tree.heading("#0", text="Name", anchor=tk.W)
        self.tree.heading("pre", text=" ", anchor=tk.W)
        self.tree.config(yscrollcommand=self.scrollbar.set)
        self.scrollbar.config(command=self.tree.yview)
        self.remove_button = ttk.Button(self.frame, text="remove", command=self.delete_handler, width=BUTTON_WIDTH)

        # Load in list from cache file
        self.directory = dir_name
        self.cache = CachedDictionary.CachedDictionary(dir_name, cache_filename)
        self.cache.read()
        self.dict = self.cache.dict
        self.logger.debug(f'{self.title}')

        # Configure buttons and widgets
        self.configure_widgets(frame)

        # Display data
        self.load_handler()

    def list_insert(self, tree, col1, col2):
        """Append one (col1, col2) row to *tree* with alternating color tags."""
        self.odd = not self.odd
        if self.odd:
            tag = odd_tag
        else:
            tag = even_tag
        tree.insert(parent='', index="end", iid=None, text=col1, values=(col2,), tags=tag)

    def clear_display_list(self, tree):
        """Remove every row from *tree* and reset the color alternation."""
        self.odd = False
        for row in tree.get_children():
            tree.delete(row)

    def load_handler(self):
        # Load in list and display.
        # NOTE(review): a value is shown in the second column only when
        # len(value) > 1 — a single-character value displays as blank;
        # confirm this is intended.
        self.clear_display_list(self.tree)
        for item in sorted(self.dict):
            if len(self.dict[item]) > 1:
                self.list_insert(self.tree, f"{item}", f"{self.dict[item]}")
            else:
                self.list_insert(self.tree, f"{item}", '')

    def set_default(self, lst):
        """Store *lst* as the default items used by load_defaults()."""
        self.default = lst

    def load_defaults(self):
        """Add each default item to the dictionary and refresh the display."""
        for item in self.default:
            self.dict[item] = ""  # Add item to dict
            self.logger.debug(f'add {item}')
        self.add_handler()

    def delete_handler(self):
        # Delete selected items in list
        self.delete_items(self.tree, self.dict)
        self.dirty_flag = True

    def add_handler(self):
        # add item to list
        self.load_handler()  # Reload listbox with new data
        self.dirty_flag = True

    """
    def get_list_selection(self):
        # Get the items the user selected in list (tree)
        col1 = (self.tree.item(self.tree.selection(), "text"))
        col2 = (self.tree.item(self.tree.selection())['values'][0])
        return f'{prefix}, {loc}'
    """

    def delete_items(self, tree, dct):
        # Delete any items in the list that the user selected.
        # Removal is by the first-column text, which is the dict key.
        items = tree.selection()
        for line in items:
            col1 = self.tree.item(line, "text")
            # col2 =
            self.logger.debug(f'DEL {col1}')
            dct.pop(col1, None)
        self.load_handler()  # Reload display

    def is_dirty(self):
        return self.dirty_flag  # Tells whether the cache was modified

    def configure_widgets(self, frm):
        """Grid this frame's widgets at the positions given in self.grd."""
        Widge.TKHelper.set_grid_position(self.title_label, "title_label", grd=self.grd)
        Widge.TKHelper.set_grid_position(self.status, "status", grd=self.grd)
        Widge.TKHelper.set_grid_position(self.tree, "listbox", grd=self.grd)
        Widge.TKHelper.set_grid_position(self.scrollbar, "scrollbar", grd=self.grd)
        Widge.TKHelper.set_grid_position(self.remove_button, "remove_button", grd=self.grd)

    def write(self):
        # Write out cache file
        self.cache.write()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.