input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
#!/usr/bin/env python
import argparse
import logging
import math
import sys
import time
import random
import os
import json
import numpy as np
import pickle
import six
import threading
import pdb
from tqdm import tqdm
import torch
import torch.nn as nn
from pytorch_pretrained_bert import BertTokenizer, OpenAIGPTTokenizer, GPT2Tokenizer, TransfoXLTokenizer
import data_handler as dh
from model.mmseq2seq_model import MMSeq2SeqModel
from model.multimodal_encoder import MMEncoder
from model.lstm_encoder import LSTMEncoder
from model.hlstm_encoder import HLSTMEncoder
from model.hlstm_decoder import HLSTMDecoder
def fetch_batch(dh, data, index, separate_caption, result):
    """Build one mini-batch and append it to *result* (run in a prefetch thread).

    NOTE(review): relies on the module-level ``args`` and ``bert_tokenizer``
    being defined before this is called.
    """
    made = dh.make_batch(
        data, index,
        separate_caption=separate_caption,
        pretrained_elmo=args.pretrained_elmo,
        pretrained_bert=args.pretrained_bert,
        bert_tokenizer=bert_tokenizer,
        pretrained_all=args.pretrained_all,
        bert_model=args.bert_model,
        concat_his=args.concat_his,
    )
    result.append(made)
# Evaluation routine
def evaluate(model, data, indices, parallel=False):
    """Compute validation perplexity over all batches listed in *indices*.

    Args:
        model: the seq2seq model (or a DataParallel wrapper when
            ``parallel`` is True, in which case ``model.module.loss`` is used).
        data: dataset as returned by ``dh.load``.
        indices: list of batch indices from ``dh.make_batch_indices``.
        parallel (bool): True when *model* is wrapped in DataParallel.

    Returns:
        tuple: ``(perplexity, wall_time_seconds)``.
    """
    start_time = time.time()
    eval_loss = 0.
    eval_num_words = 0
    model.eval()
    with torch.no_grad():
        # Fetch the first batch synchronously; subsequent batches are
        # prefetched by a background thread while the current one is scored.
        batch = [dh.make_batch(data, indices[0],
                               separate_caption=args.separate_caption,
                               pretrained_elmo=args.pretrained_elmo,
                               pretrained_bert=args.pretrained_bert,
                               bert_tokenizer=bert_tokenizer,
                               pretrained_all=args.pretrained_all,
                               bert_model=args.bert_model,
                               concat_his=args.concat_his)]
        it = tqdm(six.moves.range(len(indices)), desc="evaluation", ncols=0)
        for j in it:
            b = batch.pop()
            if j < len(indices) - 1:
                prefetch = threading.Thread(
                    target=fetch_batch,
                    args=(dh, data, indices[j + 1], args.separate_caption, batch))
                prefetch.start()
            else:
                # Bug fix: `prefetch` used to be joined unconditionally, which
                # raised UnboundLocalError when len(indices) == 1 and re-joined
                # an already-finished thread on the final iteration.
                prefetch = None
            # Convert the numpy batch arrays to torch tensors.
            x = [torch.from_numpy(v) for v in b[0]]
            if args.concat_his:
                h = [torch.from_numpy(h_i) for h_i in b[1]]
            else:
                h = [[torch.from_numpy(h_i) for h_i in hb] for hb in b[1]]
            q = [torch.from_numpy(v) for v in b[2]]
            ai = [torch.from_numpy(v) for v in b[3]]
            ao = [torch.from_numpy(v) for v in b[4]]
            c = [torch.from_numpy(v) for v in b[5]] if args.separate_caption else None
            # Optional pretrained-embedding context features (ELMo / BERT).
            if args.pretrained_elmo or args.pretrained_bert:
                if args.pretrained_all:
                    context_q, context_h, context_ai = b[-3:]
                else:
                    context_q = b[-1]
                    context_h = None
                    context_ai = None
            else:
                context_q = None
                context_h = None
                context_ai = None
            if args.exclude_video:
                x = None
            if parallel:
                _, _, loss = model.module.loss(x, h, q, ai, ao, c,
                                               context_q, context_h, context_ai)
            else:
                _, _, loss = model.loss(x, h, q, ai, ao, c,
                                        context_q, context_h, context_ai)
            # Weight the per-batch loss by word count for a corpus-level average.
            num_words = sum(len(s) for s in ao)
            eval_loss += loss.cpu().data.numpy() * num_words
            eval_num_words += num_words
            if prefetch is not None:
                prefetch.join()
    model.train()
    wall_time = time.time() - start_time
    # Perplexity = exp(average per-word cross-entropy).
    return math.exp(eval_loss / eval_num_words), wall_time
##################################
# main
if __name__ =="__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', '-g', default=0, type=int,
help='GPU ID (negative value indicates CPU)')
# train, dev and test data
parser.add_argument('--vocabfile', default='', type=str,
help='Vocabulary file (.json)')
parser.add_argument('--dictmap', default='', type=str,
help='Dict id-map file (.json)')
parser.add_argument('--fea-type', nargs='+', type=str,
help='Image feature files (.pkl)')
parser.add_argument('--train-path', default='', type=str,
help='Path to training feature files')
parser.add_argument('--train-set', default='', type=str,
help='Filename of train data')
parser.add_argument('--valid-path', default='', type=str,
help='Path to validation feature files')
parser.add_argument('--valid-set', default='', type=str,
help='Filename of validation data')
parser.add_argument('--include-caption', action='store_true',
help='Include caption in the history')
parser.add_argument('--separate-caption', default=False, type=bool,
help='')
parser.add_argument('--exclude-video', action='store_true',
help='')
parser.add_argument('--pretrained-word-emb', default=None, type=str,
help='')
parser.add_argument('--pretrained-weights', default=None, type=str,
help='')
parser.add_argument('--pretrained-elmo', default=False, type=int,
help='')
parser.add_argument('--elmo-num-outputs', default=1, type=int,
help='')
parser.add_argument('--finetune-elmo', default=False, type=int,
help='')
parser.add_argument('--pretrained-bert', default=False, type=int,
help='')
parser.add_argument('--bert-model', default='bert-base-uncased', type=str,
help='')
parser.add_argument('--finetune-bert', default=False, type=int,
help='')
parser.add_argument('--add-word-emb', default=True, type=int,
help='')
parser.add_argument('--pretrained-all', default=True, type=int,
help='')
parser.add_argument('--concat-his', default=False, type=int,
help='')
# model parameters
parser.add_argument('--model', '-m', default='', type=str,
help='Attention model to be output')
# multimodal encoder parameters
parser.add_argument('--enc-psize', '-p', nargs='+', type=int,
help='Number of projection layer units')
parser.add_argument('--enc-hsize', '-u', nargs='+', type=int,
help='Number of hidden units')
parser.add_argument('--att-size', '-a', default=100, type=int,
help='Number of attention layer units')
parser.add_argument('--mout-size', default=100, type=int,
help='Number of output layer units')
parser.add_argument('--mm-att', default='baseline', type=str,
help="")
parser.add_argument('--mm-fusioning', default='baseline', type=str,
help="")
parser.add_argument('--mm-att-hops', default=1, type=int,
help='')
parser.add_argument('--caption-mm-att', action='store_true',
help='')
# input (question/caption) encoder parameters
parser.add_argument('--embed-size', default=200, type=int,
help='Word embedding size')
parser.add_argument('--in-enc-layers', default=2, type=int,
help='Number of input encoder layers')
parser.add_argument('--in-enc-hsize', default=200, type=int,
help='Number of input encoder hidden layer units')
parser.add_argument('--q-att', default=None, type=str,
help='')
parser.add_argument('--c-att', default=None, type=str,
help='')
parser.add_argument('--rnn-type', default='lstm', type=str,
help='')
parser.add_argument('--caption-states-att', default=False, type=bool,
help='')
# history (QA pairs) encoder parameters
parser.add_argument('--hist-enc-layers', nargs='+', type=int,
help='Number of history encoder layers')
parser.add_argument('--hist-enc-hsize', default=200, type=int,
help='History embedding size')
parser.add_argument('--hist-out-size', default=200, type=int,
help='History embedding size')
parser.add_argument('--ft-fusioning', default='baseline', type=str,
help='Fusioning fetures between images and text')
parser.add_argument('--caption-mm-fusion-out-size', default=-1, type=int,
help='')
# response (answer) decoder parameters
parser.add_argument('--dec-layers', default=2, type=int,
help='Number of decoder layers')
parser.add_argument('--dec-psize', '-P', default=200, type=int,
help='Number of decoder projection layer units')
parser.add_argument('--dec-hsize', '-d', default=200, type=int,
help='Number of decoder hidden layer units')
parser.add_argument('--classifier', default='baseline', type=str,
help='')
# Training conditions
parser.add_argument('--optimizer', '-o', default='AdaDelta', type=str,
choices=['SGD', 'Adam', 'AdaDelta', 'RMSprop'],
help="optimizer")
parser.add_argument('--rand-seed', '-s', default=1, type=int,
help="seed for generating random numbers")
parser.add_argument('--batch-size', '-b', default=20, type=int,
help='Batch size in training')
parser.add_argument('--num-epochs', '-e', default=15, type=int,
help='Number of epochs')
parser.add_argument('--max-length', default=20, type=int,
help='Maximum length for controling batch size')
parser.add_argument('--n-batches', default=-1, type=int,
help='Number of batches in training')
parser.add_argument('--weight-decay', default=0, type=float,
help='')
parser.add_argument('--lr-scheduler', action='store_true',
help='')
parser.add_argument('--lr', default=-1, type=float,
help='')
# others
parser.add_argument('--verbose', '-v', default=0, type=int,
help='verbose level')
args = parser.parse_args()
args.pretrained_elmo = bool(args.pretrained_elmo)
args.finetune_elmo = bool(args.finetune_elmo)
args.pretrained_bert = bool(args.pretrained_bert)
args.finetune_bert = bool(args.finetune_bert)
args.add_word_emb = bool(args.add_word_emb)
args.pretrained_all = bool(args.pretrained_all)
random.seed(args.rand_seed)
np.random.seed(args.rand_seed)
if args.dictmap != '':
dictmap = json.load(open(args.dictmap, 'r'))
else:
dictmap = None
if args.verbose >= 1:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s')
else:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s: %(message)s')
for arg in vars(args):
print("{}={}".format(arg, getattr(args, arg)))
# get vocabulary
if args.pretrained_bert:
if 'bert' in args.bert_model:
bert_tokenizer = BertTokenizer.from_pretrained(args.bert_model)
elif 'openai-gpt' in args.bert_model:
bert_tokenizer = OpenAIGPTTokenizer.from_pretrained(args.bert_model)
elif 'gpt2' in args.bert_model:
bert_tokenizer = GPT2Tokenizer.from_pretrained(args.bert_model)
elif 'transfo-xl' in args.bert_model:
bert_tokenizer = TransfoXLTokenizer.from_pretrained(args.bert_model)
else:
bert_tokenizer = None
logging.info('Extracting words from ' + args.train_set)
vocab = dh.get_vocabulary(args.train_set, include_caption=args.include_caption, tokenizer=bert_tokenizer)
if args.pretrained_word_emb is not None and 'none' not in args.pretrained_word_emb:
pretrained_word_emb = dh.get_word_emb(vocab, args.pretrained_word_emb)
else:
pretrained_word_emb = None
# load data
logging.info('Loading training data from ' + args.train_set)
train_data = dh.load(args.fea_type, args.train_path, args.train_set,
vocabfile=args.vocabfile,
include_caption=args.include_caption, separate_caption=args.separate_caption,
vocab=vocab, dictmap=dictmap,
pretrained_elmo=args.pretrained_elmo, pretrained_bert=args.pretrained_bert,
bert_model=args.bert_model, tokenizer=bert_tokenizer,
pretrained_all=args.pretrained_all, concat_his=args.concat_his)
logging.info('Loading validation data from ' + args.valid_set)
valid_data = dh.load(args.fea_type, args.valid_path, args.valid_set,
vocabfile=args.vocabfile,
include_caption=args.include_caption, separate_caption=args.separate_caption,
vocab=vocab, dictmap=dictmap,
pretrained_elmo=args.pretrained_elmo, pretrained_bert=args.pretrained_bert,
bert_model=args.bert_model, tokenizer=bert_tokenizer,
pretrained_all=args.pretrained_all, concat_his=args.concat_his)
feature_dims = dh.feature_shape(train_data)
logging.info("Detected feature dims: {}".format(feature_dims));
# Prepare RNN model and load data
caption_state_size = -1
if args.pretrained_weights:
pretrained_weights = pickle.load(open(args.pretrained_weights, 'rb'))
pretrained_conf = ('/').join(args.pretrained_weights.split('/')[:-1]) + '/avsd_model.conf'
pretrained_vocab, _ = pickle.load(open(pretrained_conf, 'rb'))
pretrained_weights = dh.align_vocab(pretrained_vocab, vocab, pretrained_weights)
else:
pretrained_weights = None
if args.q_att:
in_size_decoder = args.mout_size + args.hist_out_size + args.in_enc_hsize*2
state_size = args.in_enc_hsize*2
else:
in_size_decoder = args.mout_size + args.hist_out_size + args.in_enc_hsize
state_size = args.in_enc_hsize
if args.separate_caption:
if args.c_att == 'conv_sum':
caption_state_size = args.in_enc_hsize*2
if not args.caption_states_att:
in_size_decoder += caption_state_size
else:
caption_state_size = args.in_enc_hsize
if not args.caption_states_att:
in_size_decoder += caption_state_size
if args.exclude_video:
mm_encoder = None
in_size_decoder -= args.mout_size
else:
if args.caption_mm_att:
mm_state_size = caption_state_size
else:
mm_state_size = state_size
mm_encoder = MMEncoder(feature_dims, args.mout_size, enc_psize=args.enc_psize,
enc_hsize=args.enc_hsize, att_size=args.att_size,
state_size=mm_state_size, attention=args.mm_att, fusioning=args.mm_fusioning,
att_hops=args.mm_att_hops)
if args.ft_fusioning == 'caption_mm_nonlinear_multiply':
in_size_decoder = in_size_decoder - args.mout_size - caption_state_size + args.caption_mm_fusion_out_size
weights_init=pretrained_weights['history_encoder'] if pretrained_weights is not None else None
hlstm_encoder = HLSTMEncoder(args.hist_enc_layers[0], args.hist_enc_layers[1],
len(vocab), args.hist_out_size, args.embed_size,
args.hist_enc_hsize, rnn_type=args.rnn_type, embedding_init=pretrained_word_emb, weights_init=weights_init,
elmo_init=args.pretrained_elmo, elmo_num_outputs=args.elmo_num_outputs, finetune_elmo=args.finetune_elmo,
bert_init=args.pretrained_bert, bert_model=args.bert_model, finetune_bert=args.finetune_bert,
add_word_emb=args.add_word_emb, pretrained_all=args.pretrained_all,
concat_his=args.concat_his)
weights_init=pretrained_weights['input_encoder'] if pretrained_weights is not None else None
input_encoder = LSTMEncoder(args.in_enc_layers, len(vocab), args.in_enc_hsize,
args.embed_size, attention=args.q_att, rnn_type=args.rnn_type, embedding_init=pretrained_word_emb, weights_init=weights_init,
elmo_init=args.pretrained_elmo, elmo_num_outputs=args.elmo_num_outputs, finetune_elmo=args.finetune_elmo,
bert_init=args.pretrained_bert, bert_model=args.bert_model, finetune_bert=args.finetune_bert,
add_word_emb=args.add_word_emb)
weights_init=pretrained_weights['response_decoder'] if pretrained_weights is not None else None
hlstm_decoder = HLSTMDecoder(args.dec_layers, len(vocab), len(vocab), args.embed_size,
in_size_decoder,
args.dec_hsize, args.dec_psize,
independent=True, rnn_type=args.rnn_type,
classifier=args.classifier, states_att=args.caption_states_att, state_size=caption_state_size, embedding_init=pretrained_word_emb, weights_init=weights_init,
elmo_init=args.pretrained_elmo, elmo_num_outputs=args.elmo_num_outputs, finetune_elmo=args.finetune_elmo,
bert_init=args.pretrained_bert, bert_model=args.bert_model, finetune_bert=args.finetune_bert,
add_word_emb=args.add_word_emb, pretrained_all=args.pretrained_all)
if args.separate_caption:
weights_init=pretrained_weights['caption_encoder'] if pretrained_weights is not None else None
caption_encoder = LSTMEncoder(args.in_enc_layers, len(vocab), args.in_enc_hsize,
args.embed_size, attention=args.c_att, rnn_type=args.rnn_type, q_size=state_size, weights_init=weights_init)
else:
caption_encoder = None
model = MMSeq2SeqModel(mm_encoder, hlstm_encoder, input_encoder, hlstm_decoder, fusioning=args.ft_fusioning, caption_encoder=caption_encoder,
caption_states_att = args.caption_states_att, caption_mm_att = args.caption_mm_att, c_in_size=caption_state_size, mm_in_size=args.mout_size, out_size=args.caption_mm_fusion_out_size)
# report data summary
logging.info('#vocab = %d' % len(vocab))
# make batchset for training
logging.info('Making mini batches for training data')
train_indices, train_samples = dh.make_batch_indices(train_data, args.batch_size,
max_length=args.max_length, separate_caption=args.separate_caption)
logging.info('#train sample = %d' % train_samples)
logging.info('#train batch = %d' % len(train_indices))
# make batchset for validation
logging.info('Making mini batches for validation data')
valid_indices, valid_samples = dh.make_batch_indices(valid_data, args.batch_size,
max_length=args.max_length, separate_caption=args.separate_caption)
logging.info('#validation sample = %d' % valid_samples)
logging.info('#validation batch = %d' % len(valid_indices))
# copy model to gpu
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
parallel = False
path = args.model + '.conf'
with open(path, 'wb') as f:
pickle.dump((vocab, args), f, -1)
path2 = args.model + '_params.txt'
with open(path2, "w") as f:
for arg in vars(args):
f.write("{}={}\n".format(arg, getattr(args, arg)))
# start training
logging.info('----------------')
logging.info('Start training')
logging.info('----------------')
| |
<filename>framework/generated/vulkan_generators/vulkan_referenced_resource_consumer_body_generator.py
#!/usr/bin/python3 -i
#
# Copyright (c) 2020 LunarG, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os, re, sys
from base_generator import *
class VulkanReferencedResourceBodyGeneratorOptions(BaseGeneratorOptions):
    """Options for generating a C++ class for detecting unreferenced resource handles in a capture file"""

    def __init__(
        self,
        blacklists=None,      # Path to JSON file listing apicalls and structs to ignore.
        platformTypes=None,   # Path to JSON file listing platform (WIN32, X11, etc.) defined types.
        filename=None,
        directory='.',
        prefixText='',
        protectFile=False,
        protectFeature=True
    ):
        # This subclass adds no options of its own; it simply forwards
        # everything to the base options class.
        BaseGeneratorOptions.__init__(
            self,
            blacklists,
            platformTypes,
            filename,
            directory,
            prefixText,
            protectFile,
            protectFeature
        )
# VulkanReferencedResourceBodyGenerator - subclass of BaseGenerator.
# Generates C++ member definitions for the VulkanReferencedResource class responsible for
# determining which resource handles are used or unused in a capture file.
class VulkanReferencedResourceBodyGenerator(BaseGenerator):
"""Generate a C++ class for detecting unreferenced resource handles in a capture file"""
# All resource and resource associated handle types to be processed.
RESOURCE_HANDLE_TYPES = [
'VkBuffer', 'VkImage', 'VkBufferView', 'VkImageView', 'VkFramebuffer',
'VkDescriptorSet', 'VkCommandBuffer'
]
# Handle types that contain resource and child resource handle types.
CONTAINER_HANDLE_TYPES = ['VkDescriptorSet']
# Handle types that use resource and child resource handle types.
USER_HANDLE_TYPES = ['VkCommandBuffer']
    def __init__(
        self, errFile=sys.stderr, warnFile=sys.stderr, diagFile=sys.stdout
    ):
        """Initialize the generator with command and struct processing enabled."""
        BaseGenerator.__init__(
            self,
            processCmds=True,
            processStructs=True,
            featureBreak=False,
            errFile=errFile,
            warnFile=warnFile,
            diagFile=diagFile
        )
        # Map of Vulkan structs containing handles to a list values for handle members or struct members
        # that contain handles (eg. VkGraphicsPipelineCreateInfo contains a VkPipelineShaderStageCreateInfo
        # member that contains handles).
        self.structsWithHandles = dict()
        # Map of Vulkan structure types to sType value for structs that can be part of a pNext chain.
        self.pNextStructs = dict(
        )
        # Map of Vulkan commands to parameter info, populated by generateFeature
        # and consumed when the file is finalized in endFile.
        self.commandInfo = dict()
        # Determines if the 'isHandle' override limits the handle test to only the values
        # contained by RESOURCE_HANDLE_TYPES; temporarily cleared in endFile while
        # emitting function signatures.
        self.restrictHandles = True
# Method override
# yapf: disable
def beginFile(self, genOpts):
BaseGenerator.beginFile(self, genOpts)
write('#include "generated/generated_vulkan_referenced_resource_consumer.h"', file=self.outFile)
self.newline()
write('#include <cassert>', file=self.outFile)
self.newline()
write('GFXRECON_BEGIN_NAMESPACE(gfxrecon)', file=self.outFile)
write('GFXRECON_BEGIN_NAMESPACE(decode)', file=self.outFile)
# yapf: enable
# Method override
# yapf: disable
    def endFile(self):
        """Method override: emit one Process_<cmd> definition per tracked command,
        close the namespaces, and finish processing in the superclass.

        Only commands whose first parameter is a VkCommandBuffer and that
        reference at least one resource handle get a generated function.
        """
        for cmd, info in self.commandInfo.items():
            returnType = info[0]
            params = info[2]
            if params and params[0].baseType == 'VkCommandBuffer':
                # Check for parameters with resource handle types.
                handles = self.getParamListHandles(params[1:])
                if (handles):
                    # Generate a function to add handles to the command buffer's referenced handle list.
                    cmddef = '\n'
                    # Temporarily remove resource only matching restriction from isHandle() when generating the function signature.
                    self.restrictHandles = False
                    cmddef += self.makeConsumerFuncDecl(returnType, 'VulkanReferencedResourceConsumer::Process_' + cmd, params) + '\n'
                    self.restrictHandles = True
                    cmddef += '{\n'
                    indent = self.INDENT_SIZE * ' '
                    # Add unreferenced parameter macros for parameters that carry no handles.
                    unrefCount = 0
                    for param in params[1:]:
                        if not param in handles:
                            cmddef += indent + 'GFXRECON_UNREFERENCED_PARAMETER({});\n'.format(param.name)
                            unrefCount += 1
                    if unrefCount > 0:
                        cmddef += '\n'
                    # Emit the tracking code for each handle-bearing parameter.
                    for index, handle in enumerate(handles):
                        cmddef += self.trackCommandHandle(index, params[0].name, handle, indent=indent)
                    cmddef += '}'
                    write(cmddef, file=self.outFile)
                    self.newline()
        write('GFXRECON_END_NAMESPACE(decode)', file=self.outFile)
        write('GFXRECON_END_NAMESPACE(gfxrecon)', file=self.outFile)
        # Finish processing in superclass
        BaseGenerator.endFile(self)
# yapf: enable
#
# Method override
def genStruct(self, typeinfo, typename, alias):
BaseGenerator.genStruct(self, typeinfo, typename, alias)
if not alias:
self.checkStructMemberHandles(typename, self.structsWithHandles)
# Track this struct if it can be present in a pNext chain.
parentStructs = typeinfo.elem.get('structextends')
if parentStructs:
sType = self.makeStructureTypeEnum(typeinfo, typename)
if sType:
self.pNextStructs[typename] = sType
#
# Indicates that the current feature has C++ code to generate.
def needFeatureGeneration(self):
if self.featureCmdParams:
return True
return False
#
# Performs C++ code generation for the feature.
def generateFeature(self):
for cmd in self.getFilteredCmdNames():
self.commandInfo[cmd] = self.featureCmdParams[cmd]
#
# Override method to check for handle type, only matching resource handle types.
def isHandle(self, baseType):
if self.restrictHandles:
if baseType in self.RESOURCE_HANDLE_TYPES:
return True
return False
else:
return BaseGenerator.isHandle(self, baseType)
#
# Create list of parameters that have handle types or are structs that contain handles.
def getParamListHandles(self, values):
handles = []
for value in values:
if self.isHandle(value.baseType):
handles.append(value)
elif self.isStruct(
value.baseType
) and (value.baseType in self.structsWithHandles):
handles.append(value)
return handles
#
#
# yapf: disable
def trackCommandHandle(self, index, commandParamName, value, valuePrefix='', indent=''):
body = ''
tail = ''
indexName = None
countName = None
valueName = valuePrefix + value.name
isHandle = self.isHandle(value.baseType)
if (value.isPointer or value.isArray) and value.name != 'pnext_value':
if index > 0:
body += '\n'
accessOperator = '->'
if not valuePrefix:
# If there is no prefix, this is the pointer parameter received by the function, which should never be null.
body += indent + 'assert({} != nullptr);\n'.format(value.name)
body += '\n'
else:
# If there is a prefix, this is a struct member. We need to determine the type of access operator to use
# for the member of a 'decoded' struct type, where handle member types will be HandlePointerDecoder, but
# struct member types will be unique_ptr<StructPointerDecoder>.
if isHandle:
accessOperator = '.'
# Add IsNull and HasData checks for the pointer decoder, before accessing its data.
# Note that this does not handle the decoded struct member cases for static arrays, which would need to use '.' instead of '->'.
body += indent + 'if (!{prefix}{name}{op}IsNull() && ({prefix}{name}{op}HasData()))\n'.format(prefix=valuePrefix, name=value.name, op=accessOperator)
body += indent + '{\n'
tail = indent + '}\n' + tail
indent += ' ' * self.INDENT_SIZE
# Get the pointer from the pointer decoder object.
valueName = '{}_ptr'.format(value.name)
if isHandle:
body += indent + 'auto {} = {}{}{}GetPointer();\n'.format(valueName, valuePrefix, value.name, accessOperator)
else:
body += indent + 'auto {} = {}{}{}GetMetaStructPointer();\n'.format(valueName, valuePrefix, value.name, accessOperator)
# Add a for loop for an array of values.
if value.isArray:
indexName = '{}_index'.format(value.name)
countName = '{}_count'.format(value.name)
body += indent + 'size_t {} = {}{}{}GetLength();\n'.format(countName, valuePrefix, value.name, accessOperator)
body += indent + 'for (size_t {i} = 0; {i} < {}; ++{i})\n'.format(countName, i=indexName)
body += indent + '{\n'
tail = indent + '}\n' + tail
indent += ' ' * self.INDENT_SIZE
# Insert commands to add handles to a container, or to process struct members that contain handles.
if isHandle:
if value.isArray:
valueName = '{}[{}]'.format(valueName, indexName)
elif value.isPointer:
valueName = '(*{})'.format(valueName)
if value.baseType in self.CONTAINER_HANDLE_TYPES:
body += indent + 'GetTable().AddContainerToUser({}, {});\n'.format(commandParamName, valueName)
elif value.baseType in self.USER_HANDLE_TYPES:
body += indent + 'GetTable().AddUserToUser({}, {});\n'.format(commandParamName, valueName)
else:
body += indent + 'GetTable().AddResourceToUser({}, {});\n'.format(commandParamName, valueName)
elif self.isStruct(value.baseType) and (value.baseType in self.structsWithHandles):
if value.isArray:
accessOperator = '[{}].'.format(indexName)
else:
accessOperator = '->'
for index, entry in enumerate(self.structsWithHandles[value.baseType]):
if entry.name == 'pNext':
extStructsWithHandles = [extStruct for extStruct in self.registry.validextensionstructs[value.baseType] if extStruct in self.structsWithHandles]
if extStructsWithHandles:
body += indent + 'const VkBaseInStructure* pnext_header = nullptr;\n'
body += indent + 'if ({name}->pNext != nullptr)\n'.format(name=valueName)
body += indent + '{\n'
indent += ' ' * self.INDENT_SIZE
body += indent + 'pnext_header = reinterpret_cast<const VkBaseInStructure*>({}->pNext->GetPointer());\n'.format(valueName)
indent = indent[:-self.INDENT_SIZE]
body += indent + '}\n'
body += indent + 'while (pnext_header)\n'
body += indent + '{\n'
indent += ' ' * self.INDENT_SIZE
body += indent + 'switch (pnext_header->sType)\n'
body += indent + '{\n'
indent += ' ' * self.INDENT_SIZE
body += indent + 'default:\n'
indent += ' ' * self.INDENT_SIZE
body += indent + 'break;\n'
indent = indent[:-self.INDENT_SIZE]
for extStruct in extStructsWithHandles:
body += indent + 'case {}:\n'.format(self.pNextStructs[extStruct])
body += indent + '{\n'
indent += ' ' * self.INDENT_SIZE
body | |
<filename>sirepo/template/flash_views.py<gh_stars>10-100
# -*- coding: utf-8 -*-
u"""Flash Config parser.
:copyright: Copyright (c) 2021 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from pykern import pkio
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdp, pkdlog
import inspect
import re
def _fields(templates, values):
# template: [field template, label template]
# values: values to insert into the field/label templates
return {
t[0].format(v): t[1].format(v.upper()) for v in values for t in templates
}
class SpecializedViews:
# POSIT: FLASH field names are unique so flat list is ok
_LABELS = PKDict(
LimitedSlopeBeta='Limited Slope Beta',
RiemannSolver='Riemann Solver',
UnitSystem='System of Units',
allowDtSTSDominate='allowDtSTSDominate',
cfl='Courant Factor',
charLimiting='Characteristic Limiting',
cvisc='Artificial Viscosity Constant',
diff_eleFlCoef='Flux Limiter Coefficient',
diff_eleXlBoundaryType='X Left Boundary',
diff_eleXrBoundaryType='X Right Boundary',
diff_eleYlBoundaryType='Y Left Boundary',
diff_eleYrBoundaryType='Y Right Boundary',
diff_eleZlBoundaryType='Z Left Boundary',
diff_eleZrBoundaryType='Z Right Boundary',
diff_thetaImplct='Implicitness Factor',
diff_useEleCond='Use Ele Conduction',
dt_diff_factor='Timestep Factor',
dtinit='Initial Timestep [s]',
dtmax='Maximum Timestep',
dtmin='Minimum Timestep',
ed_crossSectionFunctionType_1='Cross Section Function Type',
ed_gaussianCenterMajor_1='Major Gaussian Center',
ed_gaussianCenterMinor_1='Minor Gaussian Center',
ed_gaussianExponent_1='Gaussian Exponent',
ed_gaussianRadiusMajor_1='Major Gaussian Radius',
ed_gaussianRadiusMinor_1='Minor Gaussian Radius',
ed_gradOrder='Gradient Order',
ed_gridType_1='Type of Beam Grid',
ed_laser3Din2D='3D Ray Tracing',
ed_laser3Din2DwedgeAngle='Wedge Angle',
ed_laserIOMaxNumberOfPositions='Max Ray Positions',
ed_laserIOMaxNumberOfRays='Max Rays',
ed_lensSemiAxisMajor_1='Lens Semi Axis Major',
ed_lensX_1='Lens X',
ed_lensY_1='Lens Y',
ed_lensZ_1='Lens Z',
ed_maxRayCount='Max Ray Count',
ed_numberOfBeams='Number of Beams',
ed_numberOfPulses='Number of Pulses',
ed_numberOfRays_1='Number of Rays',
ed_numberOfSections_1='Number of Sections',
ed_power_1_1='Laser Pulse Section 1',
ed_power_1_2='Laser Pulse Section 2',
ed_power_1_3='Laser Pulse Section 3',
ed_power_1_4='Laser Pulse Section 4',
ed_pulseNumber_1='Pulse Number',
ed_semiAxisMajorTorsionAngle_1='Major Semiaxis Torsion Angle',
ed_semiAxisMajorTorsionAxis_1='Major Semiaxis Torsion Axis',
ed_targetSemiAxisMajor_1='Major Target Semiaxis',
ed_targetSemiAxisMinor_1='Minor Target Semiaxis',
ed_targetX_1='X Target',
ed_targetY_1='Y Target',
ed_targetZ_1='Z Target',
ed_time_1_1='Laser Pulse Section 1',
ed_time_1_2='Laser Pulse Section 2',
ed_time_1_3='Laser Pulse Section 3',
ed_time_1_4='Laser Pulse Section 4',
ed_useLaserIO='Use Laser IO',
ed_wavelength_1='Wavelength',
entropy='Entropy Fix',
eosMode='Eos Mode',
eosModeInit='Initial Eos Mode',
fl_b='Flame Width',
fl_epsilon_0='Lower Sharpening Factor',
fl_epsilon_1='Upper Sharpening Factor',
fl_fsConstFlameSpeed='Constant Flame Speed',
fl_kpp_fact='Prefactor Adjustment',
flame_deltae='Flame Delta e',
gconst='Acceleration Constant',
gdirec='Direction of Acceleration',
geometry='Grid Geometry',
grav_boundary_type='Boundary Condition',
lrefine_max='Maximum Refinement Level',
lrefine_min='Minimum Refinement Level',
order='Order',
plotFileIntervalTime='Plot File Interval Time [s]',
refine_var_count='Refine Variable Count',
rt_dtFactor='Time Step Coefficient',
rt_mgdBounds_1='Boundary 1',
rt_mgdBounds_2='Boundary 2',
rt_mgdBounds_3='Boundary 3',
rt_mgdBounds_4='Boundary 4',
rt_mgdBounds_5='Boundary 5',
rt_mgdBounds_6='Boundary 6',
rt_mgdBounds_7='Boundary 7',
rt_mgdFlCoef='MGD Flux Limiter Coefficient',
rt_mgdFlMode='MGD Glux Limiter Mode',
rt_mgdNumGroups='Number of Groups',
rt_mgdXlBoundaryType='X MGD Left Boundary',
rt_mgdXrBoundaryType='X MGD Right Boundary',
rt_mgdYlBoundaryType='Y MGD Left Boundary',
rt_mgdYrBoundaryType='Y MGD Right Boundary',
rt_mgdZlBoundaryType='Z MGD Left Boundary',
rt_mgdZrBoundaryType='Z MGD Right Boundary',
rt_useMGD='Use Multigroup Radiation Diffusion',
shockDetect='Use Strong Compressive Shock Detection',
slopeLimiter='Slope Limiter',
sumyi_burned='Burned sumyi',
sumyi_unburned='Unburned sumyi',
threadHydroBlockList='Block List Threading',
threadHydroWithinBlock='Within Block Threading',
tmax='Maximum Simulation Time [s]',
updateHydroFluxes='Update Hydro Fluxes',
useDiffuse='Use Diffusive Effects',
useEnergyDeposition='Use Energy Deposition',
useFlame='Use Flame',
useGravity='Use Gravity',
useHydro='Use Hydro Calculation',
useRadTrans='Use Radiative Transfer',
use_cma_advection='Use CMA Advection',
use_cma_flattening='Use CMA Flattening',
ye_burned='Burned ye',
ye_unburned='Unburned ye',
**_fields([
['{}l_boundary_type', '{} Lower Boundary Type'],
['{}r_boundary_type', '{} Upper Boundary Type'],
['{}min', '{} Minimum'],
['{}max', '{} Maximum'],
['nblock{}', 'Blocks in {}'],
], ['x', 'y', 'z']),
**_fields([
['refine_var_{}', 'Name Variable {}'],
['refine_cutoff_{}', 'Refine Variable {}'],
['derefine_cutoff_{}', 'Derefine Variable {}'],
], [str(v) for v in range(1, 7)]),
)
_VIEW_FUNC_PREFIX = '_view_'
def __init__(self):
self._view_fns = PKDict()
for n, o in inspect.getmembers(self):
if n.startswith(self._VIEW_FUNC_PREFIX) and inspect.ismethod(o):
self._view_fns[n[len(self._VIEW_FUNC_PREFIX):]] = o
def update_schema(self, schema):
self._update_labels(schema)
self._update_views(schema)
return schema
def _assert_model_view_fields_exist(self, name, view, schema):
"""Check that model fields in view exist in models"""
def flatten(to_flatten):
def flatten_column(to_flatten):
if isinstance(to_flatten[0], str):
return flatten(to_flatten[1])
res = []
for f in to_flatten:
res += flatten_column(f)
return res
res = []
for f in to_flatten:
if isinstance(f, str):
res.append(f)
continue
assert isinstance(f, list), \
'uknown type f={f}'
res += flatten_column(f)
return res
for f in flatten(view.get('basic', []) + view.get('advanced', [])):
if '.' not in f:
f = f'{name}.{f}'
p = f.split('.')
assert p[0] in schema.model, \
f'model name={p[0]} does not exist in known models={schema.model.keys()}'
assert p[1] in schema.model[p[0]], \
f'field={p[1]} does not exist in model={schema.model[p[0]]} name={p[0]}'
def _get_species_list(self, schema):
res = []
for f in schema.model.Multispecies_MultispeciesMain:
m = re.search(r'eos_(.*)EosType', f)
if m:
res.append(m.group(1))
return res
def _update_labels(self, schema):
labels = self._LABELS.copy()
self._update_sim_labels(schema, labels)
self._update_multispecies_labels(schema, labels)
for m in schema.model.values():
for f in m:
if f not in labels:
continue
info = m[f]
if len(info) == 3:
info.append(f)
elif info[3]:
info[3] = '{} {}'.format(f, info[3])
else:
info[3] = f
info[0] = labels[f]
def _update_multispecies_labels(self, schema, labels):
if 'Multispecies_MultispeciesMain' not in schema.model:
return
for s in self._get_species_list(schema):
for f, label in {
'ms_{}A': 'Number of protons and neutrons in nucleus',
'ms_{}Z': 'Atomic number',
'ms_{}ZMin': 'Minimum allowed average ionization',
'eos_{}EosType': 'EOS type to use for MTMMMT EOS',
'eos_{}SubType': 'EOS subtype to use for MTMMMT EOS',
'ms_{}Gamma': 'Ratio of heat capacities',
'eos_{}TableFile': 'Tabulated EOS file name',
'op_{}Absorb': 'Absorption',
'op_{}Emiss': 'Emission',
'op_{}Trans': 'Transport',
}.items():
labels[f.format(s)] = f'{s.title()} {label}'
def _update_sim_labels(self, schema, labels):
#TODO(pjm): use constant for flashApp model
# special case for main simulation labels - use full description as label
for f, info in schema.model.Simulation_SimulationMain_flashApp.items():
if len(info) > 3:
labels[f] = info[3]
info[3] = ''
def _update_views(self, schema):
for n, f in self._view_fns.items():
if n not in schema.view:
continue
v = f(schema)
if v:
self._assert_model_view_fields_exist(n, v, schema)
schema.view[n].update(v)
    def _view_Driver_DriverMain(self, schema):
        """Declare the Simulation Driver view layout (basic/advanced tabs)."""
        # http://flash.uchicago.edu/site/flashcode/user_support/rpDoc_4p2.py?submit=rp_Driver.txt
        v = PKDict(
            title='Simulation Driver',
            advanced=[
                ['Driver', [
                    'dr_abortPause',
                    'dr_dtMinBelowAction',
                    'dr_dtMinContinue',
                    'dr_numPosdefVars',
                    'dr_posdefDtFactor',
                    'dr_posdefVar_1',
                    'dr_posdefVar_2',
                    'dr_posdefVar_3',
                    'dr_posdefVar_4',
                    'dr_printTStepLoc',
                    'dr_shortenLastStepBeforeTMax',
                    'dr_tstepSlowStartFactor',
                    'dr_usePosdefComputeDt',
                ]],
                ['Drift', [
                    'drift_break_inst',
                    'drift_trunc_mantissa',
                    'drift_tuples',
                    'drift_verbose_inst',
                ]],
                ['Time', [
                    'wall_clock_time_limit',
                    'tinitial',
                ]],
                ['Timestep', [
                    'tstep_change_factor',
                    'nbegin',
                    'nend',
                    'useSTS',
                    'useSTSforDiffusion',
                    'nuSTS',
                    'nstepTotalSTS',
                ]],
                ['Thread', [
                    'threadBlockListBuild',
                    'threadDriverBlockList',
                    'threadDriverWithinBlock',
                    'threadRayTraceBuild',
                    'threadWithinBlockBuild',
                ]],
                ['Redshift', [
                    'zInitial',
                    'zFinal',
                ]],
                ['Other', [
                    'meshCopyCount',
                    'sweepOrder',
                ]],
            ],
            basic=[
                'dtinit',
                'tmax',
                'dtmax',
                'dtmin',
                'allowDtSTSDominate',
            ],
        )
        # plotFileIntervalTime lives in the IO model, which not every
        # setup includes; only reference it when the model exists.
        if 'IO_IOMain' in schema.model:
            v.basic.append('IO_IOMain.plotFileIntervalTime')
        return v
    def _view_physics_Diffuse_DiffuseMain(self, schema):
        """Declare the Diffusive Effects view; model-qualified fields are
        inserted only when the corresponding models exist in the schema."""
        # http://flash.uchicago.edu/site/flashcode/user_support/rpDoc_4p2.py?submit=rp_Diffuse.txt
        v = PKDict(
            title='Diffusive Effects',
            basic=[
                'diff_eleFlMode',
                'diff_eleFlCoef',
                'dt_diff_factor',
                [
                    ['X', [
                        'diff_eleXlBoundaryType',
                        'diff_eleXrBoundaryType',
                    ]],
                    ['Y', [
                        'diff_eleYlBoundaryType',
                        'diff_eleYrBoundaryType',
                    ]],
                    ['Z', [
                        'diff_eleZlBoundaryType',
                        'diff_eleZrBoundaryType',
                    ]]
                ]
            ],
        )
        if 'physics_Diffuse_DiffuseMain_Unsplit' in schema.model:
            v.basic.insert(3, 'physics_Diffuse_DiffuseMain_Unsplit.diff_thetaImplct')
        # two insert(0) calls, so useDiffuse ends up first and
        # diff_useEleCond second in the final layout
        if 'physics_Diffuse_DiffuseMain' in schema.model :
            v.basic.insert(0, 'physics_Diffuse_DiffuseMain.diff_useEleCond')
            v.basic.insert(0, 'physics_Diffuse_DiffuseMain.useDiffuse')
        return v
def _view_physics_Gravity_GravityMain(self, schema):
# http://flash.uchicago.edu/site/flashcode/user_support/rpDoc_4p2.py?submit=rp_Gravity.txt
v = PKDict(
basic=[
'useGravity',
],
)
if 'physics_Gravity' in schema.model:
# Flash docs seem to be wrong. useGravity does not exist in
# physics/Gravity. Just physics/Gravity/GravityMain
v.basic.insert(1, 'physics_Gravity.grav_boundary_type')
if 'physics_Gravity_GravityMain_Constant' in schema.model:
v.basic.extend([
'physics_Gravity_GravityMain_Constant.gconst',
'physics_Gravity_GravityMain_Constant.gdirec',
])
return v
    def _view_Grid_GridMain(self, schema):
        """Declare the Grid view; the Paramesh section is appended only
        when the Grid_GridMain_paramesh model exists in the schema."""
        # http://flash.uchicago.edu/site/flashcode/user_support/rpDoc_4p2.py?submit=rp_Grid.txt
        v = PKDict(
            title='Grid',
            basic=[
                ['Main', [
                    'geometry',
                    'eosModeInit',
                    'eosMode',
                    [
                        ['X', [
                            'xl_boundary_type',
                            'xr_boundary_type',
                            'xmin',
                            'xmax'
                        ]],
                        ['Y', [
                            'yl_boundary_type',
                            'yr_boundary_type',
                            'ymin',
                            'ymax'
                        ]],
                        ['Z', [
                            'zl_boundary_type',
                            'zr_boundary_type',
                            'zmin',
                            'zmax'
                        ]]
                    ]
                ]],
            ],
        )
        if 'Grid_GridMain_paramesh' in schema.model:
            v.basic.append(
                ['Paramesh', [
                    'Grid_GridMain_paramesh.nblockx',
                    'Grid_GridMain_paramesh.nblocky',
                    'Grid_GridMain_paramesh.nblockz',
                    'Grid_GridMain_paramesh.lrefine_min',
                    'Grid_GridMain_paramesh.lrefine_max',
                    'Grid_GridMain_paramesh.refine_var_count',
                    [
                        ['Name', [
                            'Grid_GridMain_paramesh.refine_var_1',
                            'Grid_GridMain_paramesh.refine_var_2',
                            'Grid_GridMain_paramesh.refine_var_3',
                            'Grid_GridMain_paramesh.refine_var_4'
                        ]],
                        ['Refine Cutoff', [
                            'Grid_GridMain_paramesh.refine_cutoff_1',
                            'Grid_GridMain_paramesh.refine_cutoff_2',
                            'Grid_GridMain_paramesh.refine_cutoff_3',
                            'Grid_GridMain_paramesh.refine_cutoff_4'
                        ]],
                        ['Derefine Cutoff', [
                            'Grid_GridMain_paramesh.derefine_cutoff_1',
                            'Grid_GridMain_paramesh.derefine_cutoff_2',
                            'Grid_GridMain_paramesh.derefine_cutoff_3',
                            'Grid_GridMain_paramesh.derefine_cutoff_4'
                        ]],
                    ],
                ]],
            )
        return v
def _view_physics_Hydro_HydroMain(self, schema):
# http://flash.uchicago.edu/site/flashcode/user_support/rpDoc_4p2.py?submit=rp_Hydro.txt
v = PKDict(
title='Hydrodynamics',
basic=[
'useHydro',
'cfl',
],
fieldsPerTab=10,
)
if 'physics_Hydro_HydroMain_unsplit' in schema.model:
v.basic.extend([
'physics_Hydro_HydroMain_unsplit.order',
'physics_Hydro_HydroMain_unsplit.slopeLimiter',
'physics_Hydro_HydroMain_unsplit.LimitedSlopeBeta',
'physics_Hydro_HydroMain_unsplit.charLimiting',
'physics_Hydro_HydroMain_unsplit.cvisc',
'physics_Hydro_HydroMain_unsplit.RiemannSolver',
'physics_Hydro_HydroMain_unsplit.entropy',
'physics_Hydro_HydroMain_unsplit.shockDetect'
])
return v
def _view_physics_materialProperties_Opacity_OpacityMain_Multispecies(self, schema):
v = PKDict(
title='Material Properties',
basic=[]
)
if 'physics_materialProperties_Opacity_OpacityMain' in schema.model:
v.basic.append('physics_materialProperties_Opacity_OpacityMain.useOpacity')
if 'physics_materialProperties_Conductivity_ConductivityMain' in schema.model:
v.basic.append('physics_materialProperties_Conductivity_ConductivityMain.useConductivity')
if 'physics_materialProperties_MagneticResistivity_MagneticResistivityMain' in schema.model:
v.basic.append('physics_materialProperties_MagneticResistivity_MagneticResistivityMain.useMagneticResistivity')
v.basic.append([])
for s in self._get_species_list(schema):
v.basic[-1].append(
[s.title(), [
f'physics_materialProperties_Opacity_OpacityMain_Multispecies.op_{s}Absorb',
f'physics_materialProperties_Opacity_OpacityMain_Multispecies.op_{s}Emiss',
f'physics_materialProperties_Opacity_OpacityMain_Multispecies.op_{s}Trans',
]],
)
return v
def _view_Multispecies_MultispeciesMain(self, schema):
v = PKDict(
title='Multispecies',
basic=[
[],
],
)
for s in self._get_species_list(schema):
v.basic[-1].append(
[s.title(), [
f'ms_{s}A',
f'ms_{s}Z',
f'ms_{s}ZMin',
f'eos_{s}EosType',
f'eos_{s}SubType',
f'eos_{s}TableFile',
]],
)
return v
    def _view_physics_RadTrans_RadTransMain_MGD(self, schema):
        """Declare the Radiative Transfer (multigroup diffusion) view."""
        # http://flash.uchicago.edu/site/flashcode/user_support/rpDoc_4.py?submit=rp_RadTrans.txt
        v = PKDict(
            title='Radiative Transfer',
            basic=[
                ['Main', [
                    'rt_useMGD',
                    'rt_mgdFlMode',
                    'rt_mgdFlCoef',
                    [
                        ['X', [
                            'rt_mgdXlBoundaryType',
                            'rt_mgdXrBoundaryType',
                        ]],
                        ['Y', [
                            'rt_mgdYlBoundaryType',
                            'rt_mgdYrBoundaryType',
                        ]],
                        ['Z', [
                            'rt_mgdZlBoundaryType',
                            'rt_mgdZrBoundaryType',
                        ]],
                    ],
                ]],
                ['MGD Groups', [
                    'rt_mgdNumGroups',
                    'rt_mgdBounds_1',
                    'rt_mgdBounds_2',
                    'rt_mgdBounds_3',
                    'rt_mgdBounds_4',
                    'rt_mgdBounds_5',
                    'rt_mgdBounds_6',
                    'rt_mgdBounds_7',
                ]],
            ],
        )
        # two insert(0) calls into the Main tab, so useRadTrans ends up
        # first and rt_dtFactor second
        if 'physics_RadTrans_RadTransMain' in schema.model:
            v.basic[0][1].insert(0, 'physics_RadTrans_RadTransMain.rt_dtFactor')
            v.basic[0][1].insert(0, 'physics_RadTrans_RadTransMain.useRadTrans')
        return v
def _view_physics_sourceTerms_EnergyDeposition_EnergyDepositionMain_Laser(self, schema):
# http://flash.uchicago.edu/site/flashcode/user_support/rpDoc_4p22.py?submit=rp_EnergyDeposition.txt
v = PKDict(
title='Energy Deposition - Laser',
basic=[
['Bulk', [
'useEnergyDeposition',
'ed_maxRayCount',
'ed_gradOrder',
'ed_laser3Din2D',
'ed_laser3Din2DwedgeAngle',
'physics_sourceTerms_EnergyDeposition_EnergyDepositionMain_Laser_LaserIO.ed_useLaserIO',
'physics_sourceTerms_EnergyDeposition_EnergyDepositionMain_Laser_LaserIO.ed_laserIOMaxNumberOfPositions',
'physics_sourceTerms_EnergyDeposition_EnergyDepositionMain_Laser_LaserIO.ed_laserIOMaxNumberOfRays',
]],
['Pulse 1', [
'ed_numberOfPulses',
'ed_numberOfSections_1',
[
['Time', [
'ed_time_1_1',
'ed_time_1_2',
'ed_time_1_3',
'ed_time_1_4',
]],
['Power', [
'ed_power_1_1',
'ed_power_1_2',
'ed_power_1_3',
'ed_power_1_4',
]]
]
]],
['Beam 1', [
[
['X', [
'ed_lensX_1',
'ed_targetX_1',
]],
['Y', [
'ed_lensY_1',
'ed_targetY_1',
]],
['Z', [
'ed_lensZ_1',
'ed_targetZ_1',
]]
],
'ed_numberOfBeams',
'ed_lensSemiAxisMajor_1',
'ed_crossSectionFunctionType_1',
'ed_numberOfRays_1',
'ed_pulseNumber_1',
'ed_wavelength_1',
'ed_gridType_1',
'ed_gridnRadialTics_1',
'ed_gaussianExponent_1',
[
['Major', [
'ed_gaussianRadiusMajor_1',
'ed_gaussianCenterMajor_1',
'ed_targetSemiAxisMajor_1',
'ed_semiAxisMajorTorsionAngle_1',
'ed_semiAxisMajorTorsionAxis_1',
]],
['Minor', [
'ed_gaussianRadiusMinor_1',
'ed_gaussianCenterMinor_1',
'ed_targetSemiAxisMinor_1',
]],
],
]],
],
| |
# -*- coding: utf-8 -*-
"""
This module
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
@attr.s
class RuleBatchRetryStrategy(Property):
    """
    AWS Object Type = "AWS::Events::Rule.BatchRetryStrategy"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-batchretrystrategy.html
    Property Document:
    
    - ``p_Attempts``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-batchretrystrategy.html#cfn-events-rule-batchretrystrategy-attempts
    """
    AWS_OBJECT_TYPE = "AWS::Events::Rule.BatchRetryStrategy"
    # "p_" field: optional -- validator accepts None via attr.validators.optional.
    p_Attempts: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "Attempts"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-batchretrystrategy.html#cfn-events-rule-batchretrystrategy-attempts"""
@attr.s
class RuleHttpParameters(Property):
    """
    AWS Object Type = "AWS::Events::Rule.HttpParameters"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-httpparameters.html
    Property Document:
    
    - ``p_HeaderParameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-httpparameters.html#cfn-events-rule-httpparameters-headerparameters
    - ``p_PathParameterValues``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-httpparameters.html#cfn-events-rule-httpparameters-pathparametervalues
    - ``p_QueryStringParameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-httpparameters.html#cfn-events-rule-httpparameters-querystringparameters
    """
    AWS_OBJECT_TYPE = "AWS::Events::Rule.HttpParameters"
    # All properties optional ("p_" prefix); deep validators check key and
    # member types of the mapping/list values.
    p_HeaderParameters: typing.Dict[str, TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_mapping(key_validator=attr.validators.instance_of(str), value_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type))),
        metadata={AttrMeta.PROPERTY_NAME: "HeaderParameters"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-httpparameters.html#cfn-events-rule-httpparameters-headerparameters"""
    p_PathParameterValues: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "PathParameterValues"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-httpparameters.html#cfn-events-rule-httpparameters-pathparametervalues"""
    p_QueryStringParameters: typing.Dict[str, TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_mapping(key_validator=attr.validators.instance_of(str), value_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type))),
        metadata={AttrMeta.PROPERTY_NAME: "QueryStringParameters"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-httpparameters.html#cfn-events-rule-httpparameters-querystringparameters"""
@attr.s
class EventBusPolicyCondition(Property):
    """
    AWS Object Type = "AWS::Events::EventBusPolicy.Condition"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-eventbuspolicy-condition.html
    Property Document:
    
    - ``p_Key``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-eventbuspolicy-condition.html#cfn-events-eventbuspolicy-condition-key
    - ``p_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-eventbuspolicy-condition.html#cfn-events-eventbuspolicy-condition-type
    - ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-eventbuspolicy-condition.html#cfn-events-eventbuspolicy-condition-value
    """
    AWS_OBJECT_TYPE = "AWS::Events::EventBusPolicy.Condition"
    # All three properties are optional string-like values.
    p_Key: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Key"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-eventbuspolicy-condition.html#cfn-events-eventbuspolicy-condition-key"""
    p_Type: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Type"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-eventbuspolicy-condition.html#cfn-events-eventbuspolicy-condition-type"""
    p_Value: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Value"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-eventbuspolicy-condition.html#cfn-events-eventbuspolicy-condition-value"""
@attr.s
class RuleBatchArrayProperties(Property):
    """
    AWS Object Type = "AWS::Events::Rule.BatchArrayProperties"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-batcharrayproperties.html
    Property Document:
    
    - ``p_Size``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-batcharrayproperties.html#cfn-events-rule-batcharrayproperties-size
    """
    AWS_OBJECT_TYPE = "AWS::Events::Rule.BatchArrayProperties"
    # "p_" field: optional int.
    p_Size: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "Size"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-batcharrayproperties.html#cfn-events-rule-batcharrayproperties-size"""
@attr.s
class RuleBatchParameters(Property):
    """
    AWS Object Type = "AWS::Events::Rule.BatchParameters"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-batchparameters.html
    Property Document:
    
    - ``rp_JobDefinition``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-batchparameters.html#cfn-events-rule-batchparameters-jobdefinition
    - ``rp_JobName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-batchparameters.html#cfn-events-rule-batchparameters-jobname
    - ``p_ArrayProperties``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-batchparameters.html#cfn-events-rule-batchparameters-arrayproperties
    - ``p_RetryStrategy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-batchparameters.html#cfn-events-rule-batchparameters-retrystrategy
    """
    AWS_OBJECT_TYPE = "AWS::Events::Rule.BatchParameters"
    # "rp_" fields are required: their validators are not wrapped in
    # optional(), so the default None fails validation unless a value is given.
    rp_JobDefinition: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "JobDefinition"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-batchparameters.html#cfn-events-rule-batchparameters-jobdefinition"""
    rp_JobName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "JobName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-batchparameters.html#cfn-events-rule-batchparameters-jobname"""
    # Nested-property fields: converter accepts an instance or a plain dict.
    p_ArrayProperties: typing.Union['RuleBatchArrayProperties', dict] = attr.ib(
        default=None,
        converter=RuleBatchArrayProperties.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(RuleBatchArrayProperties)),
        metadata={AttrMeta.PROPERTY_NAME: "ArrayProperties"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-batchparameters.html#cfn-events-rule-batchparameters-arrayproperties"""
    p_RetryStrategy: typing.Union['RuleBatchRetryStrategy', dict] = attr.ib(
        default=None,
        converter=RuleBatchRetryStrategy.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(RuleBatchRetryStrategy)),
        metadata={AttrMeta.PROPERTY_NAME: "RetryStrategy"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-batchparameters.html#cfn-events-rule-batchparameters-retrystrategy"""
@attr.s
class RuleDeadLetterConfig(Property):
    """
    AWS Object Type = "AWS::Events::Rule.DeadLetterConfig"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-deadletterconfig.html
    Property Document:
    
    - ``p_Arn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-deadletterconfig.html#cfn-events-rule-deadletterconfig-arn
    """
    AWS_OBJECT_TYPE = "AWS::Events::Rule.DeadLetterConfig"
    # "p_" field: optional string-like value.
    p_Arn: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Arn"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-deadletterconfig.html#cfn-events-rule-deadletterconfig-arn"""
@attr.s
class RuleRunCommandTarget(Property):
    """
    AWS Object Type = "AWS::Events::Rule.RunCommandTarget"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-runcommandtarget.html
    Property Document:
    
    - ``rp_Key``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-runcommandtarget.html#cfn-events-rule-runcommandtarget-key
    - ``rp_Values``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-runcommandtarget.html#cfn-events-rule-runcommandtarget-values
    """
    AWS_OBJECT_TYPE = "AWS::Events::Rule.RunCommandTarget"
    # Both fields are required ("rp_" prefix): validators reject the default None.
    rp_Key: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Key"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-runcommandtarget.html#cfn-events-rule-runcommandtarget-key"""
    rp_Values: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list)),
        metadata={AttrMeta.PROPERTY_NAME: "Values"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-runcommandtarget.html#cfn-events-rule-runcommandtarget-values"""
@attr.s
class RuleInputTransformer(Property):
    """
    AWS Object Type = "AWS::Events::Rule.InputTransformer"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-inputtransformer.html
    Property Document:
    
    - ``rp_InputTemplate``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-inputtransformer.html#cfn-events-rule-inputtransformer-inputtemplate
    - ``p_InputPathsMap``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-inputtransformer.html#cfn-events-rule-inputtransformer-inputpathsmap
    """
    AWS_OBJECT_TYPE = "AWS::Events::Rule.InputTransformer"
    # Required ("rp_" prefix): validator rejects the default None.
    rp_InputTemplate: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "InputTemplate"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-inputtransformer.html#cfn-events-rule-inputtransformer-inputtemplate"""
    p_InputPathsMap: typing.Dict[str, TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_mapping(key_validator=attr.validators.instance_of(str), value_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type))),
        metadata={AttrMeta.PROPERTY_NAME: "InputPathsMap"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-inputtransformer.html#cfn-events-rule-inputtransformer-inputpathsmap"""
@attr.s
class RuleSqsParameters(Property):
    """
    AWS Object Type = "AWS::Events::Rule.SqsParameters"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-sqsparameters.html
    Property Document:
    
    - ``rp_MessageGroupId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-sqsparameters.html#cfn-events-rule-sqsparameters-messagegroupid
    """
    AWS_OBJECT_TYPE = "AWS::Events::Rule.SqsParameters"
    # Required ("rp_" prefix): validator rejects the default None.
    rp_MessageGroupId: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "MessageGroupId"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-sqsparameters.html#cfn-events-rule-sqsparameters-messagegroupid"""
@attr.s
class RuleRetryPolicy(Property):
    """
    AWS Object Type = "AWS::Events::Rule.RetryPolicy"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-retrypolicy.html
    Property Document:
    
    - ``p_MaximumEventAgeInSeconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-retrypolicy.html#cfn-events-rule-retrypolicy-maximumeventageinseconds
    - ``p_MaximumRetryAttempts``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-retrypolicy.html#cfn-events-rule-retrypolicy-maximumretryattempts
    """
    AWS_OBJECT_TYPE = "AWS::Events::Rule.RetryPolicy"
    # Both properties are optional ints.
    p_MaximumEventAgeInSeconds: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "MaximumEventAgeInSeconds"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-retrypolicy.html#cfn-events-rule-retrypolicy-maximumeventageinseconds"""
    p_MaximumRetryAttempts: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "MaximumRetryAttempts"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-retrypolicy.html#cfn-events-rule-retrypolicy-maximumretryattempts"""
@attr.s
class RuleKinesisParameters(Property):
    """
    AWS Object Type = "AWS::Events::Rule.KinesisParameters"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-kinesisparameters.html
    Property Document:
    
    - ``rp_PartitionKeyPath``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-kinesisparameters.html#cfn-events-rule-kinesisparameters-partitionkeypath
    """
    AWS_OBJECT_TYPE = "AWS::Events::Rule.KinesisParameters"
    # Required ("rp_" prefix): validator rejects the default None.
    rp_PartitionKeyPath: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "PartitionKeyPath"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-kinesisparameters.html#cfn-events-rule-kinesisparameters-partitionkeypath"""
@attr.s
class RuleRedshiftDataParameters(Property):
    """
    AWS Object Type = "AWS::Events::Rule.RedshiftDataParameters"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-redshiftdataparameters.html
    Property Document:
    
    - ``rp_Database``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-redshiftdataparameters.html#cfn-events-rule-redshiftdataparameters-database
    - ``rp_Sql``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-redshiftdataparameters.html#cfn-events-rule-redshiftdataparameters-sql
    - ``p_DbUser``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-redshiftdataparameters.html#cfn-events-rule-redshiftdataparameters-dbuser
    - ``p_SecretManagerArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-redshiftdataparameters.html#cfn-events-rule-redshiftdataparameters-secretmanagerarn
    - ``p_StatementName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-redshiftdataparameters.html#cfn-events-rule-redshiftdataparameters-statementname
    - ``p_WithEvent``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-redshiftdataparameters.html#cfn-events-rule-redshiftdataparameters-withevent
    """
    AWS_OBJECT_TYPE = "AWS::Events::Rule.RedshiftDataParameters"
    # "rp_" fields are required (non-optional validators); "p_" fields are optional.
    rp_Database: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Database"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-redshiftdataparameters.html#cfn-events-rule-redshiftdataparameters-database"""
    rp_Sql: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Sql"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-redshiftdataparameters.html#cfn-events-rule-redshiftdataparameters-sql"""
    p_DbUser: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "DbUser"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-redshiftdataparameters.html#cfn-events-rule-redshiftdataparameters-dbuser"""
    p_SecretManagerArn: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "SecretManagerArn"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-redshiftdataparameters.html#cfn-events-rule-redshiftdataparameters-secretmanagerarn"""
    p_StatementName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "StatementName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-redshiftdataparameters.html#cfn-events-rule-redshiftdataparameters-statementname"""
    p_WithEvent: bool = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(bool)),
        metadata={AttrMeta.PROPERTY_NAME: "WithEvent"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-redshiftdataparameters.html#cfn-events-rule-redshiftdataparameters-withevent"""
@attr.s
class RuleAwsVpcConfiguration(Property):
    """
    AWS Object Type = "AWS::Events::Rule.AwsVpcConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-awsvpcconfiguration.html
    Property Document:
    
    - ``rp_Subnets``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-awsvpcconfiguration.html#cfn-events-rule-awsvpcconfiguration-subnets
    - ``p_AssignPublicIp``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-awsvpcconfiguration.html#cfn-events-rule-awsvpcconfiguration-assignpublicip
    - ``p_SecurityGroups``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-awsvpcconfiguration.html#cfn-events-rule-awsvpcconfiguration-securitygroups
    """
    AWS_OBJECT_TYPE = "AWS::Events::Rule.AwsVpcConfiguration"
    # Required ("rp_" prefix): deep validator rejects the default None.
    rp_Subnets: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list)),
        metadata={AttrMeta.PROPERTY_NAME: "Subnets"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-awsvpcconfiguration.html#cfn-events-rule-awsvpcconfiguration-subnets"""
    p_AssignPublicIp: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "AssignPublicIp"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-awsvpcconfiguration.html#cfn-events-rule-awsvpcconfiguration-assignpublicip"""
    p_SecurityGroups: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "SecurityGroups"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-awsvpcconfiguration.html#cfn-events-rule-awsvpcconfiguration-securitygroups"""
@attr.s
class RuleRunCommandParameters(Property):
    """
    AWS Object Type = "AWS::Events::Rule.RunCommandParameters"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-runcommandparameters.html
    Property Document:
    - ``rp_RunCommandTargets``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-runcommandparameters.html#cfn-events-rule-runcommandparameters-runcommandtargets
    """
    AWS_OBJECT_TYPE = "AWS::Events::Rule.RunCommandParameters"
    # Required property ("rp_" prefix): the validator is NOT wrapped in
    # attr.validators.optional(), so leaving the default of None in place is
    # rejected when attrs runs validators at construction time.
    # The type hint admits either built RuleRunCommandTarget objects or plain
    # dicts; RuleRunCommandTarget.from_list normalizes the list before the
    # deep_iterable validator checks each member.
    rp_RunCommandTargets: typing.List[typing.Union['RuleRunCommandTarget', dict]] = attr.ib(
        default=None,
        converter=RuleRunCommandTarget.from_list,
        validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(RuleRunCommandTarget), iterable_validator=attr.validators.instance_of(list)),
        metadata={AttrMeta.PROPERTY_NAME: "RunCommandTargets"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-runcommandparameters.html#cfn-events-rule-runcommandparameters-runcommandtargets"""
@attr.s
class RuleNetworkConfiguration(Property):
    """
    AWS Object Type = "AWS::Events::Rule.NetworkConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-networkconfiguration.html
    Property Document:
    - ``p_AwsVpcConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-networkconfiguration.html#cfn-events-rule-networkconfiguration-awsvpcconfiguration
    """
    AWS_OBJECT_TYPE = "AWS::Events::Rule.NetworkConfiguration"
    # Optional property ("p_" prefix): the validator is wrapped in
    # attr.validators.optional(), so None (the default) is accepted.
    # The type hint admits either a plain dict or a built
    # RuleAwsVpcConfiguration; from_dict normalizes the value before the
    # instance_of validator runs.
    p_AwsVpcConfiguration: typing.Union['RuleAwsVpcConfiguration', dict] = attr.ib(
        default=None,
        converter=RuleAwsVpcConfiguration.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(RuleAwsVpcConfiguration)),
        metadata={AttrMeta.PROPERTY_NAME: "AwsVpcConfiguration"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-networkconfiguration.html#cfn-events-rule-networkconfiguration-awsvpcconfiguration"""
@attr.s
class RuleEcsParameters(Property):
    """
    AWS Object Type = "AWS::Events::Rule.EcsParameters"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-ecsparameters.html
    Property Document:
    - ``rp_TaskDefinitionArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-ecsparameters.html#cfn-events-rule-ecsparameters-taskdefinitionarn
    - ``p_Group``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-ecsparameters.html#cfn-events-rule-ecsparameters-group
    - ``p_LaunchType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-ecsparameters.html#cfn-events-rule-ecsparameters-launchtype
    - ``p_NetworkConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-ecsparameters.html#cfn-events-rule-ecsparameters-networkconfiguration
    - ``p_PlatformVersion``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-ecsparameters.html#cfn-events-rule-ecsparameters-platformversion
    - ``p_TaskCount``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-ecsparameters.html#cfn-events-rule-ecsparameters-taskcount
    """
    AWS_OBJECT_TYPE = "AWS::Events::Rule.EcsParameters"
    # Required ("rp_" prefix): validator is not optional(), so the default
    # None is rejected when attrs validates at construction time.
    rp_TaskDefinitionArn: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "TaskDefinitionArn"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-ecsparameters.html#cfn-events-rule-ecsparameters-taskdefinitionarn"""
    # All remaining fields carry the "p_" prefix and optional() validators:
    # None is a valid value for each of them.
    p_Group: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Group"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-ecsparameters.html#cfn-events-rule-ecsparameters-group"""
    p_LaunchType: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "LaunchType"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-ecsparameters.html#cfn-events-rule-ecsparameters-launchtype"""
    # Nested property: from_dict lets callers pass a plain dict, which is
    # converted to RuleNetworkConfiguration before validation.
    p_NetworkConfiguration: typing.Union['RuleNetworkConfiguration', dict] = attr.ib(
        default=None,
        converter=RuleNetworkConfiguration.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(RuleNetworkConfiguration)),
        metadata={AttrMeta.PROPERTY_NAME: "NetworkConfiguration"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-ecsparameters.html#cfn-events-rule-ecsparameters-networkconfiguration"""
    p_PlatformVersion: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "PlatformVersion"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-ecsparameters.html#cfn-events-rule-ecsparameters-platformversion"""
    # Plain int (no intrinsic-function wrapper type here, unlike the string
    # fields above).
    p_TaskCount: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "TaskCount"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-ecsparameters.html#cfn-events-rule-ecsparameters-taskcount"""
@attr.s
class RuleTarget(Property):
    """
    AWS Object Type = "AWS::Events::Rule.Target"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html
    Property Document:
    - ``rp_Arn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-arn
    - ``rp_Id``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-id
    - ``p_BatchParameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-batchparameters
    - ``p_DeadLetterConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-deadletterconfig
    - ``p_EcsParameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-ecsparameters
    - ``p_HttpParameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-httpparameters
    - ``p_Input``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-input
    - ``p_InputPath``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-inputpath
    - ``p_InputTransformer``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-inputtransformer
    - ``p_KinesisParameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-kinesisparameters
    - ``p_RedshiftDataParameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-redshiftdataparameters
    - ``p_RetryPolicy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-retrypolicy
    - ``p_RoleArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-rolearn
    - ``p_RunCommandParameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-runcommandparameters
    - ``p_SqsParameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-sqsparameters
    """
    AWS_OBJECT_TYPE = "AWS::Events::Rule.Target"
    # Naming convention used throughout this class:
    #   "rp_" fields are required -- their validators are not wrapped in
    #   attr.validators.optional(), so the default None fails validation.
    #   "p_" fields are optional -- None is accepted.
    # Nested-property fields carry a from_dict converter so callers may pass
    # either a plain dict or the already-built typed object.
    rp_Arn: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Arn"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-arn"""
    rp_Id: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Id"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-id"""
    p_BatchParameters: typing.Union['RuleBatchParameters', dict] = attr.ib(
        default=None,
        converter=RuleBatchParameters.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(RuleBatchParameters)),
        metadata={AttrMeta.PROPERTY_NAME: "BatchParameters"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-batchparameters"""
    p_DeadLetterConfig: typing.Union['RuleDeadLetterConfig', dict] = attr.ib(
        default=None,
        converter=RuleDeadLetterConfig.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(RuleDeadLetterConfig)),
        metadata={AttrMeta.PROPERTY_NAME: "DeadLetterConfig"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-deadletterconfig"""
    p_EcsParameters: typing.Union['RuleEcsParameters', dict] = attr.ib(
        default=None,
        converter=RuleEcsParameters.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(RuleEcsParameters)),
        metadata={AttrMeta.PROPERTY_NAME: "EcsParameters"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-ecsparameters"""
    p_HttpParameters: typing.Union['RuleHttpParameters', dict] = attr.ib(
        default=None,
        converter=RuleHttpParameters.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(RuleHttpParameters)),
        metadata={AttrMeta.PROPERTY_NAME: "HttpParameters"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-httpparameters"""
    p_Input: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Input"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-input"""
    p_InputPath: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "InputPath"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-inputpath"""
    p_InputTransformer: typing.Union['RuleInputTransformer', dict] = attr.ib(
        default=None,
        converter=RuleInputTransformer.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(RuleInputTransformer)),
        metadata={AttrMeta.PROPERTY_NAME: "InputTransformer"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-inputtransformer"""
    p_KinesisParameters: typing.Union['RuleKinesisParameters', dict] = attr.ib(
        default=None,
        converter=RuleKinesisParameters.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(RuleKinesisParameters)),
        metadata={AttrMeta.PROPERTY_NAME: "KinesisParameters"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-kinesisparameters"""
    p_RedshiftDataParameters: typing.Union['RuleRedshiftDataParameters', dict] = attr.ib(
        default=None,
        converter=RuleRedshiftDataParameters.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(RuleRedshiftDataParameters)),
        metadata={AttrMeta.PROPERTY_NAME: "RedshiftDataParameters"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-redshiftdataparameters"""
    p_RetryPolicy: typing.Union['RuleRetryPolicy', dict] = attr.ib(
        default=None,
        converter=RuleRetryPolicy.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(RuleRetryPolicy)),
        metadata={AttrMeta.PROPERTY_NAME: "RetryPolicy"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-retrypolicy"""
    p_RoleArn: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "RoleArn"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-rolearn"""
    p_RunCommandParameters: typing.Union['RuleRunCommandParameters', dict] = attr.ib(
        default=None,
        converter=RuleRunCommandParameters.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(RuleRunCommandParameters)),
        metadata={AttrMeta.PROPERTY_NAME: "RunCommandParameters"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-runcommandparameters"""
    p_SqsParameters: typing.Union['RuleSqsParameters', dict] = attr.ib(
        default=None,
        converter=RuleSqsParameters.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(RuleSqsParameters)),
        metadata={AttrMeta.PROPERTY_NAME: "SqsParameters"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-events-rule-target.html#cfn-events-rule-target-sqsparameters"""
#--- Resource declaration ---
@attr.s
class Connection(Resource):
    """
    AWS Object Type = "AWS::Events::Connection"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-connection.html
    Property Document:
    - ``rp_AuthParameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-connection.html#cfn-events-connection-authparameters
    - ``rp_AuthorizationType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-connection.html#cfn-events-connection-authorizationtype
    - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-connection.html#cfn-events-connection-description
    - ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-connection.html#cfn-events-connection-name
    """
    AWS_OBJECT_TYPE = "AWS::Events::Connection"
    # Required ("rp_"): free-form dict, only checked to be a dict -- no
    # typed wrapper class is used for the auth parameter payload here.
    rp_AuthParameters: dict = attr.ib(
        default=None,
        validator=attr.validators.instance_of(dict),
        metadata={AttrMeta.PROPERTY_NAME: "AuthParameters"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-connection.html#cfn-events-connection-authparameters"""
    # Required ("rp_"): validator is not optional(), so None is rejected.
    rp_AuthorizationType: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "AuthorizationType"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-connection.html#cfn-events-connection-authorizationtype"""
    # Optional ("p_") fields below accept None.
    p_Description: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Description"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-connection.html#cfn-events-connection-description"""
    p_Name: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Name"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-connection.html#cfn-events-connection-name"""
    @property
    def rv_Arn(self) -> GetAtt:
        """Return a GetAtt intrinsic referencing this resource's ``Arn`` attribute.

        Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-connection.html#aws-resource-events-connection-return-values
        """
        return GetAtt(resource=self, attr_name="Arn")
    @property
    def rv_SecretArn(self) -> GetAtt:
        """Return a GetAtt intrinsic referencing this resource's ``SecretArn`` attribute.

        Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-connection.html#aws-resource-events-connection-return-values
        """
        return GetAtt(resource=self, attr_name="SecretArn")
@attr.s
class EventBusPolicy(Resource):
"""
AWS Object Type = "AWS::Events::EventBusPolicy"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-eventbuspolicy.html
Property Document:
- ``rp_StatementId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-eventbuspolicy.html#cfn-events-eventbuspolicy-statementid
- ``p_Action``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-eventbuspolicy.html#cfn-events-eventbuspolicy-action
- ``p_Condition``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-eventbuspolicy.html#cfn-events-eventbuspolicy-condition
- ``p_EventBusName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-eventbuspolicy.html#cfn-events-eventbuspolicy-eventbusname
- ``p_Principal``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-eventbuspolicy.html#cfn-events-eventbuspolicy-principal
- ``p_Statement``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-eventbuspolicy.html#cfn-events-eventbuspolicy-statement
"""
AWS_OBJECT_TYPE = "AWS::Events::EventBusPolicy"
rp_StatementId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "StatementId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-eventbuspolicy.html#cfn-events-eventbuspolicy-statementid"""
p_Action: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Action"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-eventbuspolicy.html#cfn-events-eventbuspolicy-action"""
p_Condition: typing.Union['EventBusPolicyCondition', dict] = attr.ib(
default=None,
converter=EventBusPolicyCondition.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(EventBusPolicyCondition)),
metadata={AttrMeta.PROPERTY_NAME: "Condition"},
| |
# repo: paulzzh/mahjong
# -*- coding: utf-8 -*-
import unittest
from mahjong.constants import EAST, SOUTH, WEST, NORTH, FIVE_RED_SOU
from mahjong.hand_calculating.hand import HandCalculator
from mahjong.hand_calculating.hand_config import HandConfig, OptionalRules
from mahjong.hand_calculating.yaku_config import YakuConfig
from mahjong.meld import Meld
from mahjong.tests_mixin import TestMixin
from mahjong.tile import TilesConverter
class YakuCalculationTestCase(unittest.TestCase, TestMixin):
    def setUp(self):
        # Fresh YakuConfig per test; individual yaku checkers are read off
        # this object (e.g. self.config.chiitoitsu, self.config.tanyao).
        self.config = YakuConfig()
    def test_hands_calculation(self):
        """
        Regression cases: hands that were not properly calculated on tenhou
        replays. Fixes were made, and the hands stay here as tests to be sure
        the bugs remain fixed.
        """
        hand = HandCalculator()
        player_wind = EAST
        # open hand with a pon and two identical chis
        tiles = self._string_to_136_array(pin='112233999', honors='11177')
        win_tile = self._string_to_136_tile(pin='9')
        melds = [
            self._make_meld(Meld.PON, honors='111'),
            self._make_meld(Meld.CHI, pin='123'),
            self._make_meld(Meld.CHI, pin='123'),
        ]
        result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
        self.assertEqual(result.fu, 30)
        # we had a bug with multiple dora indicators and honor sets
        # this test is working with this situation
        tiles = self._string_to_136_array(pin='22244456799', honors='444')
        win_tile = self._string_to_136_tile(pin='2')
        dora_indicators = [self._string_to_136_tile(sou='3'), self._string_to_136_tile(honors='3')]
        melds = [self._make_meld(Meld.KAN, honors='4444')]
        result = hand.estimate_hand_value(tiles, win_tile, dora_indicators=dora_indicators, melds=melds)
        self.assertEqual(result.error, None)
        self.assertEqual(result.han, 6)
        self.assertEqual(result.fu, 50)
        self.assertEqual(len(result.yaku), 2)
        # if we can't add pinfu to the hand
        # we can add 2 fu to make the hand more expensive
        tiles = self._string_to_136_array(sou='678', man='11', pin='123345', honors='666')
        win_tile = self._string_to_136_tile(pin='3')
        result = hand.estimate_hand_value(tiles, win_tile, config=self._make_hand_config(is_tsumo=True))
        self.assertEqual(result.fu, 40)
        tiles = self._string_to_136_array(man='234789', pin='12345666')
        win_tile = self._string_to_136_tile(pin='6')
        result = hand.estimate_hand_value(tiles, win_tile)
        self.assertEqual(result.fu, 30)
        tiles = self._string_to_136_array(sou='678', pin='34555789', honors='555')
        win_tile = self._string_to_136_tile(pin='5')
        result = hand.estimate_hand_value(tiles, win_tile, config=self._make_hand_config(is_tsumo=True))
        self.assertEqual(result.fu, 40)
        tiles = self._string_to_136_array(sou='123345678', man='678', pin='88')
        win_tile = self._string_to_136_tile(sou='3')
        result = hand.estimate_hand_value(tiles, win_tile)
        self.assertEqual(result.error, None)
        self.assertEqual(result.han, 1)
        self.assertEqual(result.fu, 30)
        self.assertEqual(len(result.yaku), 1)
        tiles = self._string_to_136_array(sou='12399', man='123456', pin='456')
        win_tile = self._string_to_136_tile(sou='1')
        result = hand.estimate_hand_value(tiles, win_tile)
        self.assertEqual(result.error, None)
        self.assertEqual(result.han, 1)
        self.assertEqual(result.fu, 30)
        self.assertEqual(len(result.yaku), 1)
        # player wind (set up above as EAST) participates in this valuation
        tiles = self._string_to_136_array(sou='111123666789', honors='11')
        win_tile = self._string_to_136_tile(sou='1')
        melds = [self._make_meld(Meld.PON, sou='666')]
        dora_indicators = [self._string_to_136_tile(honors='4')]
        result = hand.estimate_hand_value(tiles, win_tile, melds=melds,
                                          dora_indicators=dora_indicators,
                                          config=self._make_hand_config(player_wind=player_wind))
        self.assertEqual(result.fu, 40)
        self.assertEqual(result.han, 4)
        tiles = self._string_to_136_array(pin='12333', sou='567', honors='666777')
        win_tile = self._string_to_136_tile(pin='3')
        melds = [self._make_meld(Meld.PON, honors='666'), self._make_meld(Meld.PON, honors='777')]
        result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
        self.assertEqual(result.fu, 30)
        self.assertEqual(result.han, 2)
        tiles = self._string_to_136_array(pin='12367778', sou='678', man='456')
        win_tile = self._string_to_136_tile(pin='7')
        result = hand.estimate_hand_value(tiles, win_tile, config=self._make_hand_config(is_riichi=True))
        self.assertEqual(result.fu, 40)
        self.assertEqual(result.han, 1)
        tiles = self._string_to_136_array(man='11156677899', honors='777')
        win_tile = self._string_to_136_tile(man='7')
        melds = [
            self._make_meld(Meld.KAN, honors='7777'),
            self._make_meld(Meld.PON, man='111'),
            self._make_meld(Meld.CHI, man='678'),
        ]
        result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
        self.assertEqual(result.fu, 40)
        self.assertEqual(result.han, 3)
        tiles = self._string_to_136_array(man='122223777888', honors='66')
        win_tile = self._string_to_136_tile(man='2')
        melds = [self._make_meld(Meld.CHI, man='123'), self._make_meld(Meld.PON, man='777')]
        result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
        self.assertEqual(result.fu, 30)
        self.assertEqual(result.han, 2)
        tiles = self._string_to_136_array(pin='11144678888', honors='444')
        win_tile = self._string_to_136_tile(pin='8')
        melds = [
            self._make_meld(Meld.PON, honors='444'),
            self._make_meld(Meld.PON, pin='111'),
            self._make_meld(Meld.PON, pin='888'),
        ]
        result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
        self.assertEqual(result.fu, 30)
        self.assertEqual(result.han, 2)
        tiles = self._string_to_136_array(sou='67778', man='345', pin='999', honors='222')
        win_tile = self._string_to_136_tile(sou='7')
        result = hand.estimate_hand_value(tiles, win_tile, config=self._make_hand_config(is_tsumo=True))
        self.assertEqual(result.fu, 40)
        self.assertEqual(result.han, 1)
        tiles = self._string_to_136_array(sou='33445577789', man='345')
        win_tile = self._string_to_136_tile(sou='7')
        result = hand.estimate_hand_value(tiles, win_tile, config=self._make_hand_config(is_tsumo=True))
        self.assertEqual(result.fu, 30)
        self.assertEqual(result.han, 2)
        tiles = self._string_to_136_array(pin='112233667788', honors='22')
        win_tile = self._string_to_136_tile(pin='3')
        melds = [self._make_meld(Meld.CHI, pin='123')]
        result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
        self.assertEqual(result.fu, 30)
        self.assertEqual(result.han, 2)
        tiles = self._string_to_136_array(sou='345', man='12333456789')
        win_tile = self._string_to_136_tile(man='3')
        result = hand.estimate_hand_value(tiles, win_tile)
        self.assertEqual(result.fu, 40)
        self.assertEqual(result.han, 2)
        tiles = self._string_to_136_array(sou='11123456777888')
        melds = [
            self._make_meld(Meld.CHI, sou='123'),
            self._make_meld(Meld.PON, sou='777'),
            self._make_meld(Meld.PON, sou='888'),
        ]
        win_tile = self._string_to_136_tile(sou='4')
        result = hand.estimate_hand_value(tiles, win_tile, melds=melds, config=self._make_hand_config(is_tsumo=True))
        self.assertEqual(result.fu, 30)
        self.assertEqual(result.han, 5)
        tiles = self._string_to_136_array(sou='112233789', honors='55777')
        melds = [self._make_meld(Meld.CHI, sou='123')]
        win_tile = self._string_to_136_tile(sou='2')
        result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
        self.assertEqual(result.fu, 40)
        self.assertEqual(result.han, 4)
        tiles = self._string_to_136_array(pin='234777888999', honors='22')
        melds = [self._make_meld(Meld.CHI, pin='234'), self._make_meld(Meld.CHI, pin='789')]
        win_tile = self._string_to_136_tile(pin='9')
        result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
        self.assertEqual(result.fu, 30)
        self.assertEqual(result.han, 2)
        tiles = self._string_to_136_array(pin='77888899', honors='777', man='444')
        melds = [self._make_meld(Meld.PON, honors='777'), self._make_meld(Meld.PON, man='444')]
        win_tile = self._string_to_136_tile(pin='8')
        result = hand.estimate_hand_value(tiles, win_tile, melds=melds, config=self._make_hand_config(is_tsumo=True))
        self.assertEqual(result.fu, 30)
        self.assertEqual(result.han, 1)
        tiles = self._string_to_136_array(pin='12333345', honors='555', man='567')
        win_tile = self._string_to_136_tile(pin='3')
        result = hand.estimate_hand_value(tiles, win_tile)
        self.assertEqual(result.fu, 40)
        self.assertEqual(result.han, 1)
        tiles = self._string_to_136_array(pin='34567777889', honors='555')
        win_tile = self._string_to_136_tile(pin='7')
        melds = [self._make_meld(Meld.CHI, pin='345')]
        result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
        self.assertEqual(result.fu, 30)
        self.assertEqual(result.han, 3)
        # closed kan (is_open=False) with riichi: 60 fu
        tiles = self._string_to_136_array(pin='567', sou='333444555', honors='77')
        win_tile = self._string_to_136_tile(sou='3')
        melds = [self._make_meld(Meld.KAN, is_open=False, sou='4444')]
        result = hand.estimate_hand_value(tiles, win_tile, melds=melds, config=self._make_hand_config(is_riichi=True))
        self.assertEqual(result.fu, 60)
        self.assertEqual(result.han, 1)
def test_is_riichi(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou='123444', man='234456', pin='66')
win_tile = self._string_to_136_tile(sou='4')
result = hand.estimate_hand_value(tiles, win_tile, config=self._make_hand_config(is_riichi=True))
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
melds = [self._make_meld(Meld.CHI, sou='123')]
result = hand.estimate_hand_value(tiles, win_tile, melds=melds, config=self._make_hand_config(is_riichi=True))
self.assertNotEqual(result.error, None)
def test_is_tsumo(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou='123444', man='234456', pin='66')
win_tile = self._string_to_136_tile(sou='4')
result = hand.estimate_hand_value(tiles, win_tile, config=self._make_hand_config(is_tsumo=True))
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 30)
self.assertEqual(len(result.yaku), 1)
# with open hand tsumo not giving yaku
melds = [self._make_meld(Meld.CHI, sou='123')]
result = hand.estimate_hand_value(tiles, win_tile, melds=melds, config=self._make_hand_config(is_tsumo=True))
self.assertNotEqual(result.error, None)
def test_is_ippatsu(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou='123444', man='234456', pin='66')
win_tile = self._string_to_136_tile(sou='4')
result = hand.estimate_hand_value(tiles, win_tile,
config=self._make_hand_config(is_riichi=True, is_ippatsu=True))
self.assertEqual(result.error, None)
self.assertEqual(result.han, 2)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 2)
# without riichi ippatsu is not possible
result = hand.estimate_hand_value(tiles, win_tile,
config=self._make_hand_config(is_riichi=False, is_ippatsu=True))
self.assertNotEqual(result.error, None)
def test_is_rinshan(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou='123444', man='234456', pin='66')
win_tile = self._string_to_136_tile(sou='4')
result = hand.estimate_hand_value(tiles, win_tile, config=self._make_hand_config(is_rinshan=True))
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
def test_is_chankan(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou='123444', man='234456', pin='66')
win_tile = self._string_to_136_tile(sou='4')
result = hand.estimate_hand_value(tiles, win_tile, config=self._make_hand_config(is_chankan=True))
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
def test_is_haitei(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou='123444', man='234456', pin='66')
win_tile = self._string_to_136_tile(sou='4')
result = hand.estimate_hand_value(tiles, win_tile, config=self._make_hand_config(is_haitei=True))
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
def test_is_houtei(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou='123444', man='234456', pin='66')
win_tile = self._string_to_136_tile(sou='4')
result = hand.estimate_hand_value(tiles, win_tile, config=self._make_hand_config(is_houtei=True))
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
def test_is_renhou(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou='123444', man='234456', pin='66')
win_tile = self._string_to_136_tile(sou='4')
result = hand.estimate_hand_value(tiles, win_tile, config=self._make_hand_config(is_renhou=True))
self.assertEqual(result.error, None)
self.assertEqual(result.han, 5)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
def test_is_daburu_riichi(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou='123444', man='234456', pin='66')
win_tile = self._string_to_136_tile(sou='4')
result = hand.estimate_hand_value(tiles, win_tile,
config=self._make_hand_config(is_daburu_riichi=True, is_riichi=True))
self.assertEqual(result.error, None)
self.assertEqual(result.han, 2)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
def test_is_nagashi_mangan(self):
    """Nagashi mangan is valued at 5 han / 30 fu; no winning tile is needed."""
    calculator = HandCalculator()
    tiles = self._string_to_136_array(sou='13579', man='234456', pin='66')
    # Win tile is None: the hand never actually completes in nagashi mangan.
    res = calculator.estimate_hand_value(
        tiles, None, config=self._make_hand_config(is_nagashi_mangan=True))
    self.assertIsNone(res.error)
    self.assertEqual(res.han, 5)
    self.assertEqual(res.fu, 30)
    self.assertEqual(len(res.yaku), 1)
def test_is_chitoitsu_hand(self):
    """Chiitoitsu (seven pairs): 2 han with the fixed 25 fu."""
    calculator = HandCalculator()

    # The yaku condition accepts both pure-suit and honor-bearing pair hands.
    tiles = self._string_to_34_array(sou='113355', man='113355', pin='11')
    self.assertTrue(self.config.chiitoitsu.is_condition_met(self._hand(tiles)))
    tiles = self._string_to_34_array(sou='2299', man='2299', pin='1199', honors='44')
    self.assertTrue(self.config.chiitoitsu.is_condition_met(self._hand(tiles)))

    # Full scoring path for a seven-pairs hand.
    tiles = self._string_to_136_array(sou='113355', man='113355', pin='11')
    win_tile = self._string_to_136_tile(pin='1')
    res = calculator.estimate_hand_value(tiles, win_tile)
    self.assertIsNone(res.error)
    self.assertEqual(res.han, 2)
    self.assertEqual(res.fu, 25)
    self.assertEqual(len(res.yaku), 1)
def test_is_tanyao(self):
    """Tanyao: all simples; open hands count only when the kuitan rule is on."""
    calculator = HandCalculator()

    # All-simples hand satisfies the condition.
    tiles = self._string_to_34_array(sou='234567', man='234567', pin='22')
    self.assertTrue(self.config.tanyao.is_condition_met(self._hand(tiles)))
    # A terminal breaks it...
    tiles = self._string_to_34_array(sou='123456', man='234567', pin='22')
    self.assertFalse(self.config.tanyao.is_condition_met(self._hand(tiles)))
    # ...and so does an honor pair.
    tiles = self._string_to_34_array(sou='234567', man='234567', honors='22')
    self.assertFalse(self.config.tanyao.is_condition_met(self._hand(tiles)))

    # Closed ron with riichi: three yaku for 3 han total.
    tiles = self._string_to_136_array(sou='234567', man='234567', pin='22')
    win_tile = self._string_to_136_tile(man='7')
    res = calculator.estimate_hand_value(
        tiles, win_tile,
        config=self._make_hand_config(is_tsumo=False, is_riichi=True))
    self.assertIsNone(res.error)
    self.assertEqual(res.han, 3)
    self.assertEqual(res.fu, 30)
    self.assertEqual(len(res.yaku), 3)

    # Open tanyao is valid when the kuitan rule is enabled.
    tiles = self._string_to_136_array(sou='234567', man='234567', pin='22')
    win_tile = self._string_to_136_tile(man='7')
    melds = [self._make_meld(Meld.CHI, sou='234')]
    res = calculator.estimate_hand_value(
        tiles, win_tile, melds=melds,
        config=self._make_hand_config(has_open_tanyao=True))
    self.assertIsNone(res.error)
    self.assertEqual(res.han, 1)
    self.assertEqual(res.fu, 30)
    self.assertEqual(len(res.yaku), 1)

    # With kuitan disabled the same open hand has no yaku at all.
    tiles = self._string_to_136_array(sou='234567', man='234567', pin='22')
    win_tile = self._string_to_136_tile(man='7')
    melds = [self._make_meld(Meld.CHI, sou='234')]
    res = calculator.estimate_hand_value(
        tiles, win_tile, melds=melds,
        config=self._make_hand_config(has_open_tanyao=False))
    self.assertIsNotNone(res.error)
def test_is_pinfu_hand(self):
    """Pinfu: sequences only, a valueless pair, and a two-sided wait."""
    player_wind, round_wind = EAST, WEST
    calculator = HandCalculator()

    # Valid pinfu hand.
    tiles = self._string_to_136_array(sou='123456', man='123456', pin='55')
    win_tile = self._string_to_136_tile(man='6')
    res = calculator.estimate_hand_value(tiles, win_tile)
    self.assertIsNone(res.error)
    self.assertEqual(res.han, 1)
    self.assertEqual(res.fu, 30)
    self.assertEqual(len(res.yaku), 1)

    # waiting in two pairs
    tiles = self._string_to_136_array(sou='123456', man='123555', pin='55')
    win_tile = self._string_to_136_tile(man='5')
    self.assertIsNotNone(calculator.estimate_hand_value(tiles, win_tile).error)

    # contains pon or kan
    tiles = self._string_to_136_array(sou='111456', man='123456', pin='55')
    win_tile = self._string_to_136_tile(man='6')
    self.assertIsNotNone(calculator.estimate_hand_value(tiles, win_tile).error)

    # penchan waiting
    tiles = self._string_to_136_array(sou='123456', man='123456', pin='55')
    win_tile = self._string_to_136_tile(sou='3')
    self.assertIsNotNone(calculator.estimate_hand_value(tiles, win_tile).error)

    # kanchan waiting
    tiles = self._string_to_136_array(sou='123567', man='123456', pin='55')
    win_tile = self._string_to_136_tile(sou='6')
    self.assertIsNotNone(calculator.estimate_hand_value(tiles, win_tile).error)

    # tanki waiting
    tiles = self._string_to_136_array(man='22456678', pin='123678')
    win_tile = self._string_to_136_tile(man='2')
    self.assertIsNotNone(calculator.estimate_hand_value(tiles, win_tile).error)

    # valued pair (seat/round wind) disqualifies pinfu
    tiles = self._string_to_136_array(sou='123678', man='123456', honors='11')
    win_tile = self._string_to_136_tile(sou='6')
    res = calculator.estimate_hand_value(
        tiles, win_tile,
        config=self._make_hand_config(player_wind=player_wind,
                                      round_wind=round_wind))
    self.assertIsNotNone(res.error)

    # a non-valued honor pair is still acceptable
    tiles = self._string_to_136_array(sou='123678', man='123456', honors='22')
    win_tile = self._string_to_136_tile(sou='6')
    res = calculator.estimate_hand_value(tiles, win_tile)
    self.assertIsNone(res.error)
    self.assertEqual(res.han, 1)
    self.assertEqual(res.fu, 30)
    self.assertEqual(len(res.yaku), 1)

    # an open hand can never be pinfu
    tiles = self._string_to_136_array(sou='12399', man='123456', pin='456')
    win_tile = self._string_to_136_tile(man='1')
    melds = [self._make_meld(Meld.CHI, sou='123')]
    res = calculator.estimate_hand_value(tiles, win_tile, melds=melds)
    self.assertIsNotNone(res.error)
def test_is_iipeiko(self):
    """Iipeiko (two identical sequences) counts only for a closed hand."""
    calculator = HandCalculator()

    tiles = self._string_to_34_array(sou='112233', man='123', pin='23444')
    self.assertTrue(self.config.iipeiko.is_condition_met(self._hand(tiles)))

    tiles = self._string_to_136_array(sou='112233', man='333', pin='12344')
    win_tile = self._string_to_136_tile(man='3')
    res = calculator.estimate_hand_value(tiles, win_tile)
    self.assertIsNone(res.error)
    self.assertEqual(res.han, 1)
    self.assertEqual(res.fu, 40)
    self.assertEqual(len(res.yaku), 1)

    # Calling a meld removes the yaku (and leaves the hand with none).
    melds = [self._make_meld(Meld.CHI, sou='123')]
    res = calculator.estimate_hand_value(tiles, win_tile, melds=melds)
    self.assertIsNotNone(res.error)
def test_is_ryanpeiko(self):
hand = HandCalculator()
tiles = self._string_to_34_array(sou='112233', | |
_convert_to_numpy_array(wind_speed_array_baseline)
wind_direction_array_baseline = _convert_to_numpy_array(
wind_direction_array_baseline
)
reference_power_controlled = _convert_to_numpy_array(reference_power_controlled)
test_power_controlled = _convert_to_numpy_array(test_power_controlled)
wind_speed_array_controlled = _convert_to_numpy_array(wind_speed_array_controlled)
wind_direction_array_controlled = _convert_to_numpy_array(
wind_direction_array_controlled
)
# Handle no overlap specificed (assume non-overlap)
if wind_direction_bin_p_overlap is None:
wind_direction_bin_p_overlap = 0
# Compute binning radius (is this right?)
wind_direction_bin_radius = (
(1.0 + wind_direction_bin_p_overlap / 100.0)
* (wind_direction_bins[1] - wind_direction_bins[0])
/ 2.0
)
ratio_array_base = np.zeros(len(wind_direction_bins)) * np.nan
lower_ratio_array_base = np.zeros(len(wind_direction_bins)) * np.nan
upper_ratio_array_base = np.zeros(len(wind_direction_bins)) * np.nan
counts_ratio_array_base = np.zeros(len(wind_direction_bins)) * np.nan
ratio_array_con = np.zeros(len(wind_direction_bins)) * np.nan
lower_ratio_array_con = np.zeros(len(wind_direction_bins)) * np.nan
upper_ratio_array_con = np.zeros(len(wind_direction_bins)) * np.nan
counts_ratio_array_con = np.zeros(len(wind_direction_bins)) * np.nan
diff_array = np.zeros(len(wind_direction_bins)) * np.nan
lower_diff_array = np.zeros(len(wind_direction_bins)) * np.nan
upper_diff_array = np.zeros(len(wind_direction_bins)) * np.nan
counts_diff_array = np.zeros(len(wind_direction_bins)) * np.nan
p_change_array = np.zeros(len(wind_direction_bins)) * np.nan
lower_p_change_array = np.zeros(len(wind_direction_bins)) * np.nan
upper_p_change_array = np.zeros(len(wind_direction_bins)) * np.nan
counts_p_change_array = np.zeros(len(wind_direction_bins)) * np.nan
for i, wind_direction_bin in enumerate(wind_direction_bins):
wind_dir_mask_baseline = (
wind_direction_array_baseline
>= wind_direction_bin - wind_direction_bin_radius
) & (
wind_direction_array_baseline
< wind_direction_bin + wind_direction_bin_radius
)
wind_dir_mask_controlled = (
wind_direction_array_controlled
>= wind_direction_bin - wind_direction_bin_radius
) & (
wind_direction_array_controlled
< wind_direction_bin + wind_direction_bin_radius
)
reference_power_baseline_wd = reference_power_baseline[wind_dir_mask_baseline]
test_power_baseline_wd = test_power_baseline[wind_dir_mask_baseline]
wind_speed_array_baseline_wd = wind_speed_array_baseline[wind_dir_mask_baseline]
wind_dir_array_baseline_wd = wind_direction_array_baseline[
wind_dir_mask_baseline
]
# wd_baseline_dist = (wind_dir_array_baseline_wd - wind_direction_bin)**2
# # if wind_direction_bin_p_overlap > 1:
# else:
# baseline_weight = np.ones_like(reference_power_baseline_wd)
# if wind_direction_bin_radius > 1.5:
if wind_direction_bin_p_overlap > 5.0:
baseline_weight = gaussian(
wind_dir_array_baseline_wd,
wind_direction_bin,
wind_direction_bin_radius / 2.0,
)
else:
baseline_weight = np.ones_like(wind_dir_array_baseline_wd)
baseline_weight = baseline_weight / np.sum(baseline_weight)
reference_power_controlled_wd = reference_power_controlled[
wind_dir_mask_controlled
]
test_power_controlled_wd = test_power_controlled[wind_dir_mask_controlled]
wind_speed_array_controlled_wd = wind_speed_array_controlled[
wind_dir_mask_controlled
]
wind_dir_array_controlled_wd = wind_direction_array_controlled[
wind_dir_mask_controlled
]
# if wind_direction_bin_radius > 1.5:
if wind_direction_bin_p_overlap > 5.0:
controlled_weight = gaussian(
wind_dir_array_controlled_wd,
wind_direction_bin,
wind_direction_bin_radius / 2.0,
)
else:
controlled_weight = np.ones_like(wind_dir_array_controlled_wd)
controlled_weight = controlled_weight / np.sum(controlled_weight)
# wd_controlled_dist = (wind_dir_array_controlled_wd - wind_direction_bin)**2
if (len(reference_power_baseline_wd) == 0) or (
len(reference_power_controlled_wd) == 0
):
continue
# Convert wind speed to integers
wind_speed_array_baseline_wd = wind_speed_array_baseline_wd.round().astype(int)
wind_speed_array_controlled_wd = wind_speed_array_controlled_wd.round().astype(
int
)
# compute the energy ratio
(
ratio_array_base[i],
ratio_array_con[i],
diff_array[i],
p_change_array[i],
counts_ratio_array_base[i],
counts_ratio_array_con[i],
counts_diff_array[i],
counts_p_change_array[i],
) = energy_ratio(
reference_power_baseline_wd,
test_power_baseline_wd,
wind_speed_array_baseline_wd,
reference_power_controlled_wd,
test_power_controlled_wd,
wind_speed_array_controlled_wd,
)
# Get the bounds through boot strapping
# determine the number of bootstrap iterations if not given
if n_boostrap is None:
n_boostrap = _calculate_bootstrap_iterations(
len(reference_power_baseline_wd)
)
ratio_base_bs = np.zeros(n_boostrap)
ratio_con_bs = np.zeros(n_boostrap)
diff_bs = np.zeros(n_boostrap)
p_change_bs = np.zeros(n_boostrap)
for i_bs in range(n_boostrap):
# random resampling w/ replacement
# ind_bs = np.random.randint(
# len(reference_power_baseline_wd), size=len(reference_power_baseline_wd))
if wind_direction_bin_p_overlap > 1:
ind_bs = np.random.choice(
len(reference_power_baseline_wd),
size=len(reference_power_baseline_wd),
p=baseline_weight,
)
else:
ind_bs = np.random.choice(
len(reference_power_baseline_wd),
size=len(reference_power_baseline_wd),
)
reference_power_binned_baseline = reference_power_baseline_wd[ind_bs]
test_power_binned_baseline = test_power_baseline_wd[ind_bs]
wind_speed_binned_baseline = wind_speed_array_baseline_wd[ind_bs]
# ind_bs = np.random.randint(
# len(reference_power_controlled_wd), size=len(reference_power_controlled_wd))
if wind_direction_bin_p_overlap > 1:
ind_bs = np.random.choice(
len(reference_power_controlled_wd),
size=len(reference_power_controlled_wd),
p=controlled_weight,
)
else:
ind_bs = np.random.choice(
len(reference_power_controlled_wd),
size=len(reference_power_controlled_wd),
)
reference_power_binned_controlled = reference_power_controlled_wd[ind_bs]
test_power_binned_controlled = test_power_controlled_wd[ind_bs]
wind_speed_binned_controlled = wind_speed_array_controlled_wd[ind_bs]
# compute the energy ratio
(
ratio_base_bs[i_bs],
ratio_con_bs[i_bs],
diff_bs[i_bs],
p_change_bs[i_bs],
_,
_,
_,
_,
) = energy_ratio(
reference_power_binned_baseline,
test_power_binned_baseline,
wind_speed_binned_baseline,
reference_power_binned_controlled,
test_power_binned_controlled,
wind_speed_binned_controlled,
)
# Get the confidence bounds
percentiles = _get_confidence_bounds(confidence)
# Compute the central over from the bootstrap runs
# print('wd',wind_direction_bin)
# print(ratio_base_bs)
# print(np.mean(ratio_base_bs))
# Trim out the nans
# print('size before',len(ratio_base_bs))
is_not_nan = (
(~np.isnan(ratio_base_bs))
& (~np.isnan(ratio_con_bs))
& (~np.isnan(diff_bs))
& (~np.isnan(p_change_bs))
)
ratio_base_bs = ratio_base_bs[is_not_nan]
ratio_con_bs = ratio_con_bs[is_not_nan]
diff_bs = diff_bs[is_not_nan]
p_change_bs = p_change_bs[is_not_nan]
# print('size after',len(ratio_base_bs))
if len(ratio_base_bs) == 0:
ratio_array_base[i] = np.nan
ratio_array_con[i] = np.nan
diff_array[i] = np.nan
p_change_array[i] = np.nan
lower_ratio_array_base[i] = np.nan
upper_ratio_array_base[i] = np.nan
lower_ratio_array_con[i] = np.nan
upper_ratio_array_con[i] = np.nan
lower_diff_array[i] = np.nan
upper_diff_array[i] = np.nan
lower_p_change_array[i] = np.nan
upper_p_change_array[i] = np.nan
else:
ratio_array_base[i] = np.mean(ratio_base_bs)
ratio_array_con[i] = np.mean(ratio_con_bs)
diff_array[i] = np.mean(diff_bs)
p_change_array[i] = np.mean(p_change_bs)
(
lower_ratio_array_base[i],
upper_ratio_array_base[i],
) = _calculate_lower_and_upper_bound(
ratio_base_bs,
percentiles,
central_estimate=ratio_array_base[i],
method="simple_percentile",
)
(
lower_ratio_array_con[i],
upper_ratio_array_con[i],
) = _calculate_lower_and_upper_bound(
ratio_con_bs,
percentiles,
central_estimate=ratio_array_con[i],
method="simple_percentile",
)
lower_diff_array[i], upper_diff_array[i] = _calculate_lower_and_upper_bound(
diff_bs,
percentiles,
central_estimate=diff_array[i],
method="simple_percentile",
)
(
lower_p_change_array[i],
upper_p_change_array[i],
) = _calculate_lower_and_upper_bound(
p_change_bs,
percentiles,
central_estimate=p_change_array[i],
method="simple_percentile",
)
return (
ratio_array_base,
lower_ratio_array_base,
upper_ratio_array_base,
counts_ratio_array_base,
ratio_array_con,
lower_ratio_array_con,
upper_ratio_array_con,
counts_ratio_array_con,
diff_array,
lower_diff_array,
upper_diff_array,
counts_diff_array,
p_change_array,
lower_p_change_array,
upper_p_change_array,
counts_p_change_array,
)
def _plot_with_bounds(ax, x, y, lower, upper, counts, color, label,
                      marker_scale, show_count):
    """Draw one quantity as a line with a shaded confidence band and,
    optionally, a scatter whose marker area scales with the bin counts."""
    ax.plot(x, y, label=label, color=color, ls="-", marker=".")
    ax.fill_between(x, lower, upper, alpha=0.3, color=color,
                    label="_nolegend_")
    if show_count:
        ax.scatter(x, y, s=counts * marker_scale, label="_nolegend_",
                   color=color, marker="o", alpha=0.2)


def plot_energy_ratio(
    reference_power_baseline,
    test_power_baseline,
    wind_speed_array_baseline,
    wind_direction_array_baseline,
    reference_power_controlled,
    test_power_controlled,
    wind_speed_array_controlled,
    wind_direction_array_controlled,
    wind_direction_bins,
    confidence=95,
    n_boostrap=None,
    wind_direction_bin_p_overlap=None,
    axarr=None,
    base_color="b",
    con_color="g",
    label_array=None,
    label_pchange=None,
    plot_simple=False,
    plot_ratio_scatter=False,
    marker_scale=1.0,
    show_count=True,
    hide_controlled_case=False,
    ls="--",
    marker=None,
):
    """
    Plot the balanced energy ratio.

    Wrapper around calculate_balanced_energy_ratio that renders the
    baseline/controlled energy ratios, their difference, and the percent
    change on three stacked axes.

    Args:
        reference_power_baseline (np.array): Power of the reference
            turbine in baseline conditions.
        test_power_baseline (np.array): Power of the test turbine in
            baseline conditions.
        wind_speed_array_baseline (np.array): Wind speeds in baseline
            conditions.
        wind_direction_array_baseline (np.array): Wind directions in the
            baseline case.
        reference_power_controlled (np.array): Power of the reference
            turbine in controlled conditions.
        test_power_controlled (np.array): Power of the test turbine in
            controlled conditions.
        wind_speed_array_controlled (np.array): Wind speeds in controlled
            conditions.
        wind_direction_array_controlled (np.array): Wind directions in the
            controlled case.
        wind_direction_bins (np.array): Wind direction bin centers.
        confidence (int, optional): Confidence level (percent) for the
            bootstrap bounds. Defaults to 95.
        n_boostrap (int, optional): Number of bootstraps; if None,
            _calculate_bootstrap_iterations is used. Defaults to None.
        wind_direction_bin_p_overlap (float, optional): Percentage overlap
            between wind direction bins. Defaults to None.
        axarr ([axes], optional): Three axes to plot into; a new figure is
            created when None. Defaults to None.
        base_color (str, optional): Color of baseline curves. Defaults to 'b'.
        con_color (str, optional): Color of controlled curves. Defaults to 'g'.
        label_array ([str], optional): Legend labels for the two cases.
            Defaults to ["Baseline", "Controlled"].
        label_pchange (str, optional): Label for the change panels.
            Defaults to "Energy Gain".
        plot_simple (bool, optional): Plot only the ratio lines, without
            confidence bands or count markers. Defaults to False.
        plot_ratio_scatter (bool, optional): Currently unused; retained for
            API compatibility. Defaults to False.
        marker_scale (float, optional): Scale factor for count markers.
            Defaults to 1.0.
        show_count (bool, optional): Overlay count-sized scatter markers.
        hide_controlled_case (bool, optional): Hide the controlled curves,
            e.g. for demonstrations. Defaults to False.
        ls (str, optional): Line style for the simple plot. Defaults to '--'.
        marker (str, optional): Marker for the simple plot. Defaults to None.

    Returns:
        np.array: Per-bin change in energy ratio (controlled - baseline).
    """
    if axarr is None:
        _, axarr = plt.subplots(3, 1, sharex=True)
    if label_array is None:
        label_array = ["Baseline", "Controlled"]
    if label_pchange is None:
        label_pchange = "Energy Gain"

    (
        ratio_array_base,
        lower_ratio_array_base,
        upper_ratio_array_base,
        counts_ratio_array_base,
        ratio_array_con,
        lower_ratio_array_con,
        upper_ratio_array_con,
        counts_ratio_array_con,
        diff_array,
        lower_diff_array,
        upper_diff_array,
        counts_diff_array,
        p_change_array,
        lower_p_change_array,
        upper_p_change_array,
        counts_p_change_array,
    ) = calculate_balanced_energy_ratio(
        reference_power_baseline,
        test_power_baseline,
        wind_speed_array_baseline,
        wind_direction_array_baseline,
        reference_power_controlled,
        test_power_controlled,
        wind_speed_array_controlled,
        wind_direction_array_controlled,
        wind_direction_bins,
        # BUG FIX: the caller's confidence level was previously ignored
        # (hard-coded to 95 here).
        confidence=confidence,
        n_boostrap=n_boostrap,
        wind_direction_bin_p_overlap=wind_direction_bin_p_overlap,
    )

    if plot_simple:
        # Lines only, using the caller-supplied line style and marker.
        ax = axarr[0]
        ax.plot(wind_direction_bins, ratio_array_base, label=label_array[0],
                color=base_color, ls=ls, marker=marker)
        if not hide_controlled_case:
            ax.plot(wind_direction_bins, ratio_array_con,
                    label=label_array[1], color=con_color, ls=ls,
                    marker=marker)
        ax.axhline(1, color="k")
        ax.set_ylabel("Energy Ratio (-)")

        ax = axarr[1]
        ax.plot(wind_direction_bins, diff_array, label=label_pchange,
                color=con_color, ls=ls, marker=marker)
        ax.axhline(0, color="k")
        ax.set_ylabel("Change in Energy Ratio (-)")

        ax = axarr[2]
        ax.plot(wind_direction_bins, p_change_array, label=label_pchange,
                color=con_color, ls=ls, marker=marker)
        ax.axhline(0, color="k")
        ax.set_ylabel("% Change in Energy Ratio (-)")
    else:
        # Full rendering: line + confidence band (+ optional count markers).
        ax = axarr[0]
        _plot_with_bounds(ax, wind_direction_bins, ratio_array_base,
                          lower_ratio_array_base, upper_ratio_array_base,
                          counts_ratio_array_base, base_color,
                          label_array[0], marker_scale, show_count)
        if not hide_controlled_case:
            _plot_with_bounds(ax, wind_direction_bins, ratio_array_con,
                              lower_ratio_array_con, upper_ratio_array_con,
                              counts_ratio_array_con, con_color,
                              label_array[1], marker_scale, show_count)
        ax.axhline(1, color="k")
        ax.set_ylabel("Energy Ratio (-)")

        ax = axarr[1]
        _plot_with_bounds(ax, wind_direction_bins, diff_array,
                          lower_diff_array, upper_diff_array,
                          counts_diff_array, con_color, label_pchange,
                          marker_scale, show_count)
        ax.axhline(0, color="k")
        ax.set_ylabel("Change in Energy Ratio (-)")

        ax = axarr[2]
        _plot_with_bounds(ax, wind_direction_bins, p_change_array,
                          lower_p_change_array, upper_p_change_array,
                          counts_p_change_array, con_color, label_pchange,
                          marker_scale, show_count)
        ax.axhline(0, color="k")
        ax.set_ylabel("% Change in Energy Ratio (-)")

    for ax in axarr:
        ax.grid(True)
        ax.set_xlabel("Wind Direction (Deg)")

    return diff_array
# ######WIND SPEED VERSIONS##################
def energy_ratio_ws(
ref_pow_base,
test_pow_base,
wd_base,
ref_pow_con,
test_pow_con,
wd_con,
use_absolutes=False,
use_mean=False,
):
"""
Compute the balanced energy ratio, however, inverted so balancing against wind direction, compute for a single wind speed bin
This function is typically called to compute a single balanced
energy ratio calculation for a particular wind speed bin. Note
the reference turbine should not be the turbine implementing
control, but should be an unaffected nearby turbine, or a synthetic
power | |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''Tests for task decoding'''
import numpy as np
import pytest
import jams
import pumpp
# Sampling rate and hop are simple here to keep things
# divisible for inverse checks
@pytest.fixture()
def sr():
    """Sampling rate; kept small so frame counts divide evenly for inverses."""
    return 10
@pytest.fixture()
def hop_length():
    """Hop length of one frame per sample, for exact inverse checks."""
    return 1
@pytest.fixture()
def ann_tag():
    """A 10-second tag_gtzan annotation with two time-overlapping tags."""
    annotation = jams.Annotation(namespace='tag_gtzan', duration=10)
    annotation.append(time=0, duration=5, value='blues')
    annotation.append(time=1.5, duration=1.5, value='reggae')
    return annotation
@pytest.fixture()
def ann_vector():
    """Vector annotation carrying one 32-dimensional observation."""
    annotation = jams.Annotation(namespace='vector', duration=1)
    annotation.append(time=0, duration=0, value=np.arange(32))
    return annotation
@pytest.fixture()
def ann_beat():
    """Beat annotation: two measures of 4/4 preceded by two pickup beats
    (the pickups use negative beat positions)."""
    events = [(0, -2), (0.5, -1),
              (1, 1), (1.5, 2), (2, 3), (3, 4),
              (3.5, 1), (4, 2), (4.5, 3), (5, 4),
              (5.5, 1), (6, 2), (6.5, 3), (7, 4)]
    annotation = jams.Annotation(namespace='beat', duration=10)
    for beat_time, position in events:
        annotation.append(time=beat_time, duration=0, value=position)
    return annotation
@pytest.fixture()
def ann_chord():
    """Chord annotation covering plain, slash, altered, and sus chords,
    one chord per second."""
    labels = ['C', 'C:maj', 'D:min/3', 'F#:7(*5)', 'G:sus2']
    annotation = jams.Annotation(namespace='chord', duration=5)
    for start, label in enumerate(labels):
        annotation.append(time=start, duration=1, value=label)
    return annotation
@pytest.fixture(params=[None, 0.5])
def p_self_chord(request):
    """Chord self-transition probability: absent or a scalar."""
    return request.param
@pytest.fixture(params=[False, True])
def p_init_chord(request):
    """Initial chord distribution: uniform over 170 classes, or absent."""
    return np.ones(170) / 170 if request.param else None
@pytest.fixture(params=[False, True])
def p_state_chord(request):
    """Marginal chord distribution: uniform over 170 classes, or absent."""
    return np.ones(170) / 170 if request.param else None
@pytest.fixture(params=[None, False, True])
def p_self_tags(request):
    """Tag self-transition: absent, a scalar, or one value per GTZAN tag."""
    if request.param is None:
        return None
    # 10 tags in GTZAN
    return 0.5 * np.ones(10) if request.param else 0.5
@pytest.fixture(params=[False, True])
def p_init_tags(request):
    """Initial tag probabilities: one value per GTZAN tag, or absent."""
    return 0.5 * np.ones(10) if request.param else None
@pytest.fixture(params=[False, True])
def p_state_tags(request):
    """Marginal tag probabilities: one value per GTZAN tag, or absent."""
    return 0.5 * np.ones(10) if request.param else None
@pytest.fixture(params=[None, False, True])
def p_self_beat(request):
    """Beat self-transition: absent, a scalar, or a two-state array."""
    if request.param is None:
        return None
    return np.asarray([0.5, 0.0]) if request.param else 0.5
@pytest.fixture(params=[None, False, True])
def p_self_down(request):
    """Downbeat self-transition: absent, a scalar, or a two-state array."""
    if request.param is None:
        return None
    return np.asarray([0.5, 0.0]) if request.param else 0.5
@pytest.fixture(params=[None, 0.5])
def p_init_beat(request):
    """Initial beat probability: absent or a scalar."""
    return request.param
@pytest.fixture(params=[None, 0.5])
def p_init_down(request):
    """Initial downbeat probability: absent or a scalar."""
    return request.param
@pytest.fixture(params=[None, 0.5])
def p_state_beat(request):
    """Marginal beat probability: absent or a scalar."""
    return request.param
@pytest.fixture(params=[None, 0.5])
def p_state_down(request):
    """Marginal downbeat probability: absent or a scalar."""
    return request.param
@pytest.fixture()
def ann_segment():
    """Open-vocabulary segment annotation with repeated section labels."""
    labels = ['A', 'B', 'A', 'B', 'C']
    annotation = jams.Annotation(namespace='segment_open', duration=5)
    for start, label in enumerate(labels):
        annotation.append(time=start, duration=1, value=label)
    return annotation
@pytest.fixture()
def ann_key():
    """Key annotation covering major, minor, and modal keys."""
    labels = ['A:major', 'Bb:lydian', 'A:minor', 'B:major', 'C:dorian']
    annotation = jams.Annotation(namespace='key_mode', duration=5)
    for start, label in enumerate(labels):
        annotation.append(time=start, duration=1, value=label)
    return annotation
@pytest.fixture(params=[None, 0.5])
def p_self_key(request):
    """Key self-transition probability: absent or a scalar."""
    return request.param
@pytest.fixture(params=[False, True])
def p_init_key(request):
    """Initial key distribution: uniform over 109 classes, or absent."""
    return np.ones(109) / 109 if request.param else None
@pytest.fixture(params=[False, True])
def p_state_key(request):
    """Marginal key distribution: uniform over 109 classes, or absent."""
    return np.ones(109) / 109 if request.param else None
def test_decode_tags_dynamic_hard(sr, hop_length, ann_tag, p_self_tags, p_init_tags, p_state_tags):
    """Hard dynamic-tag round trip: encode -> decode -> re-encode must match."""
    xform = pumpp.task.DynamicLabelTransformer('genre', 'tag_gtzan',
                                               hop_length=hop_length,
                                               sr=sr,
                                               p_self=p_self_tags,
                                               p_init=p_init_tags,
                                               p_state=p_state_tags)
    encoded = xform.transform_annotation(ann_tag, ann_tag.duration)
    decoded = xform.inverse(encoded['tags'], duration=ann_tag.duration)
    # Every decoded observation must carry a valid probability.
    assert all(0. <= obs.confidence <= 1. for obs in decoded)
    reencoded = xform.transform_annotation(decoded, ann_tag.duration)
    assert np.allclose(encoded['tags'], reencoded['tags'])
def test_decode_tags_dynamic_soft(sr, hop_length, ann_tag, p_self_tags, p_init_tags, p_state_tags):
    """Softened dynamic-tag encoding still decodes to the same hard labels."""
    xform = pumpp.task.DynamicLabelTransformer('genre', 'tag_gtzan',
                                               hop_length=hop_length,
                                               sr=sr,
                                               p_self=p_self_tags,
                                               p_init=p_init_tags,
                                               p_state=p_state_tags)
    encoded = xform.transform_annotation(ann_tag, ann_tag.duration)
    # Blend toward uniform without flipping any per-frame decision.
    n_tags = encoded['tags'].shape[1]
    soft = 0.9 * encoded['tags'] + 0.1 * np.ones_like(encoded['tags']) / n_tags
    decoded = xform.inverse(soft, duration=ann_tag.duration)
    assert all(0. <= obs.confidence <= 1. for obs in decoded)
    reencoded = xform.transform_annotation(decoded, ann_tag.duration)
    assert np.allclose(encoded['tags'], reencoded['tags'])
def test_decode_tags_static_hard(ann_tag):
    """Static-tag round trip with a hard (binary) encoding."""
    xform = pumpp.task.StaticLabelTransformer('genre', 'tag_gtzan')
    encoded = xform.transform_annotation(ann_tag, ann_tag.duration)
    decoded = xform.inverse(encoded['tags'], ann_tag.duration)
    assert all(0. <= obs.confidence <= 1. for obs in decoded)
    reencoded = xform.transform_annotation(decoded, ann_tag.duration)
    assert np.allclose(encoded['tags'], reencoded['tags'])
def test_decode_tags_static_soft(ann_tag):
    """Softened static-tag encoding still decodes to the same tag set."""
    xform = pumpp.task.StaticLabelTransformer('genre', 'tag_gtzan')
    encoded = xform.transform_annotation(ann_tag, ann_tag.duration)
    # Soften scores while keeping positives above the decision threshold.
    soft = encoded['tags'] * 0.51 + 0.1
    decoded = xform.inverse(soft, ann_tag.duration)
    assert all(0. <= obs.confidence <= 1. for obs in decoded)
    reencoded = xform.transform_annotation(decoded, ann_tag.duration)
    assert np.allclose(encoded['tags'], reencoded['tags'])
def test_decode_beat_hard(sr, hop_length, ann_beat,
                          p_self_beat, p_init_beat, p_state_beat):
    """Beat-only round trip with a hard beat encoding."""
    xform = pumpp.task.BeatTransformer('beat', sr=sr,
                                       hop_length=hop_length,
                                       p_self_beat=p_self_beat,
                                       p_init_beat=p_init_beat,
                                       p_state_beat=p_state_beat)
    encoded = xform.transform_annotation(ann_beat, ann_beat.duration)
    decoded = xform.inverse(encoded['beat'], duration=ann_beat.duration)
    assert all(0. <= obs.confidence <= 1. for obs in decoded)
    reencoded = xform.transform_annotation(decoded, ann_beat.duration)
    assert np.allclose(encoded['beat'], reencoded['beat'])
def test_decode_beat_soft(sr, hop_length, ann_beat,
                          p_self_beat, p_init_beat, p_state_beat):
    """Softened beat encoding still decodes to the same beat grid."""
    xform = pumpp.task.BeatTransformer('beat', sr=sr,
                                       hop_length=hop_length,
                                       p_self_beat=p_self_beat,
                                       p_init_beat=p_init_beat,
                                       p_state_beat=p_state_beat)
    encoded = xform.transform_annotation(ann_beat, ann_beat.duration)
    n_states = encoded['beat'].shape[-1]
    soft = 0.9 * encoded['beat'] + 0.1 * np.ones_like(encoded['beat']) / n_states
    decoded = xform.inverse(soft, duration=ann_beat.duration)
    assert all(0. <= obs.confidence <= 1. for obs in decoded)
    reencoded = xform.transform_annotation(decoded, ann_beat.duration)
    assert np.allclose(encoded['beat'], reencoded['beat'])
def test_decode_beat_downbeat_hard(sr, hop_length, ann_beat,
                                   p_self_beat, p_init_beat, p_state_beat,
                                   p_self_down, p_init_down, p_state_down):
    """Joint beat/downbeat round trip with hard encodings; only the beat
    grid is re-checked."""
    xform = pumpp.task.BeatTransformer('beat', sr=sr, hop_length=hop_length,
                                       p_self_beat=p_self_beat,
                                       p_init_beat=p_init_beat,
                                       p_state_beat=p_state_beat,
                                       p_self_down=p_self_down,
                                       p_init_down=p_init_down,
                                       p_state_down=p_state_down)
    encoded = xform.transform_annotation(ann_beat, ann_beat.duration)
    decoded = xform.inverse(encoded['beat'], downbeat=encoded['downbeat'],
                            duration=ann_beat.duration)
    assert all(0. <= obs.confidence <= 1. for obs in decoded)
    reencoded = xform.transform_annotation(decoded, ann_beat.duration)
    assert np.allclose(encoded['beat'], reencoded['beat'])
def test_decode_beat_downbeat_soft(sr, hop_length, ann_beat,
                                   p_self_beat, p_init_beat, p_state_beat,
                                   p_self_down, p_init_down, p_state_down):
    """Softened joint beat/downbeat encodings still decode to the same
    beat grid."""
    xform = pumpp.task.BeatTransformer('beat', sr=sr, hop_length=hop_length,
                                       p_self_beat=p_self_beat,
                                       p_init_beat=p_init_beat,
                                       p_state_beat=p_state_beat,
                                       p_self_down=p_self_down,
                                       p_init_down=p_init_down,
                                       p_state_down=p_state_down)
    encoded = xform.transform_annotation(ann_beat, ann_beat.duration)
    soft_beat = (0.9 * encoded['beat']
                 + 0.1 * np.ones_like(encoded['beat']) / encoded['beat'].shape[-1])
    soft_down = (0.9 * encoded['downbeat']
                 + 0.1 * np.ones_like(encoded['downbeat']) / encoded['downbeat'].shape[-1])
    decoded = xform.inverse(soft_beat, downbeat=soft_down,
                            duration=ann_beat.duration)
    assert all(0. <= obs.confidence <= 1. for obs in decoded)
    reencoded = xform.transform_annotation(decoded, ann_beat.duration)
    assert np.allclose(encoded['beat'], reencoded['beat'])
def test_decode_vector(ann_vector):
    """Vector encodings invert exactly (no per-observation confidence)."""
    xform = pumpp.task.VectorTransformer('cf', 'vector', 32)
    encoded = xform.transform_annotation(ann_vector, ann_vector.duration)
    decoded = xform.inverse(encoded['vector'], duration=ann_vector.duration)
    reencoded = xform.transform_annotation(decoded, ann_vector.duration)
    assert np.allclose(encoded['vector'], reencoded['vector'])
@pytest.mark.xfail(raises=NotImplementedError)
def test_decode_chord(sr, hop_length, ann_chord):
    """ChordTransformer.inverse is expected to raise NotImplementedError."""
    xform = pumpp.task.ChordTransformer('chord', sr=sr, hop_length=hop_length)
    encoded = xform.transform_annotation(ann_chord, ann_chord.duration)
    decoded = xform.inverse(encoded['pitch'], encoded['root'], encoded['bass'],
                            duration=ann_chord.duration)
    reencoded = xform.transform_annotation(decoded, ann_chord.duration)
    for field in ('pitch', 'root', 'bass'):
        assert np.allclose(encoded[field], reencoded[field])
@pytest.mark.xfail(raises=NotImplementedError)
def test_decode_simplechord(sr, hop_length, ann_chord):
    """SimpleChordTransformer.inverse is expected to raise NotImplementedError."""
    xform = pumpp.task.SimpleChordTransformer('chord', sr=sr,
                                              hop_length=hop_length)
    encoded = xform.transform_annotation(ann_chord, ann_chord.duration)
    decoded = xform.inverse(encoded['pitch'], duration=ann_chord.duration)
    reencoded = xform.transform_annotation(decoded, ann_chord.duration)
    assert np.allclose(encoded['pitch'], reencoded['pitch'])
def test_decode_chordtag_hard_dense(sr, hop_length, ann_chord):
    """Dense chord-tag round trip with hard targets."""
    xform = pumpp.task.ChordTagTransformer('chord', vocab='3567s',
                                           hop_length=hop_length,
                                           sr=sr, sparse=False)
    encoded = xform.transform_annotation(ann_chord, ann_chord.duration)
    decoded = xform.inverse(encoded['chord'], duration=ann_chord.duration)
    assert all(0 <= obs.confidence <= 1. for obs in decoded)
    reencoded = xform.transform_annotation(decoded, ann_chord.duration)
    assert np.allclose(encoded['chord'], reencoded['chord'])
def test_decode_chordtag_soft_dense(sr, hop_length, ann_chord, p_self_chord, p_init_chord, p_state_chord):
    """Softened dense chord-tag encoding still decodes to the same chords."""
    xform = pumpp.task.ChordTagTransformer('chord', vocab='3567s',
                                           hop_length=hop_length,
                                           sr=sr, sparse=False,
                                           p_self=p_self_chord,
                                           p_init=p_init_chord,
                                           p_state=p_state_chord)
    encoded = xform.transform_annotation(ann_chord, ann_chord.duration)
    n_classes = encoded['chord'].shape[1]
    soft = 0.9 * encoded['chord'] + 0.1 * np.ones_like(encoded['chord']) / n_classes
    decoded = xform.inverse(soft, duration=ann_chord.duration)
    assert all(0 <= obs.confidence <= 1. for obs in decoded)
    reencoded = xform.transform_annotation(decoded, ann_chord.duration)
    assert np.allclose(encoded['chord'], reencoded['chord'])
def test_decode_chordtag_hard_sparse_sparse(sr, hop_length, ann_chord):
    # Round-trip check for the sparse encoder: encode -> invert -> re-encode
    # must reproduce the initial sparse encoding.
    xform = pumpp.task.ChordTagTransformer('chord', vocab='3567s',
                                           hop_length=hop_length,
                                           sr=sr, sparse=True)

    encoded = xform.transform_annotation(ann_chord, ann_chord.duration)
    decoded = xform.inverse(encoded['chord'], duration=ann_chord.duration)

    # Every decoded observation must carry a valid confidence value
    assert all(0 <= obs.confidence <= 1. for obs in decoded)

    recoded = xform.transform_annotation(decoded, ann_chord.duration)
    assert np.allclose(encoded['chord'], recoded['chord'])
def test_decode_chordtag_hard_dense_sparse(sr, hop_length, ann_chord):
    # Encode densely, invert with a *sparse* transformer, and verify the
    # sparse re-encoding selects the active positions of the dense encoding.
    dense_tc = pumpp.task.ChordTagTransformer('chord', vocab='3567s',
                                              hop_length=hop_length,
                                              sr=sr, sparse=False)
    sparse_tc = pumpp.task.ChordTagTransformer('chord', vocab='3567s',
                                               hop_length=hop_length,
                                               sr=sr, sparse=True)

    # Hard, dense encoding of the annotation
    encoded = dense_tc.transform_annotation(ann_chord, ann_chord.duration)

    # Invert through the sparse encoder
    decoded = sparse_tc.inverse(encoded['chord'], duration=ann_chord.duration)
    assert all(0 <= obs.confidence <= 1. for obs in decoded)

    recoded = sparse_tc.transform_annotation(decoded, ann_chord.duration)

    # Column indices of the dense one-hots must match the sparse labels
    assert np.allclose(np.where(encoded['chord'])[1], recoded['chord'][:, 0])
def test_decode_chordtag_soft_dense_sparse(sr, hop_length, ann_chord, p_self_chord, p_init_chord, p_state_chord):
    # Encode densely, blur into soft likelihoods, invert with a *sparse*
    # transformer, and verify the sparse re-encoding selects the active
    # positions of the original dense encoding.
    dense_tc = pumpp.task.ChordTagTransformer('chord', vocab='3567s',
                                              hop_length=hop_length,
                                              sr=sr, sparse=False,
                                              p_self=p_self_chord,
                                              p_init=p_init_chord,
                                              p_state=p_state_chord)
    sparse_tc = pumpp.task.ChordTagTransformer('chord', vocab='3567s',
                                               hop_length=hop_length,
                                               sr=sr, sparse=True,
                                               p_self=p_self_chord,
                                               p_init=p_init_chord,
                                               p_state=p_state_chord)

    # Soft, dense encoding: 90/10 blend with a uniform distribution
    encoded = dense_tc.transform_annotation(ann_chord, ann_chord.duration)
    n_states = encoded['chord'].shape[1]
    soft = 0.9 * encoded['chord'] + 0.1 * np.ones_like(encoded['chord']) / n_states

    # Invert through the sparse encoder
    decoded = sparse_tc.inverse(soft, duration=ann_chord.duration)
    assert all(0 <= obs.confidence <= 1. for obs in decoded)

    recoded = sparse_tc.transform_annotation(decoded, ann_chord.duration)

    # Column indices of the dense one-hots must match the sparse labels
    assert np.allclose(np.where(encoded['chord'])[1], recoded['chord'][:, 0])
@pytest.mark.xfail(raises=NotImplementedError)
def test_decode_structure(sr, hop_length, ann_segment):
    # StructureTransformer is expected to refuse inversion
    # (hence the xfail on NotImplementedError).
    xform = pumpp.task.StructureTransformer('struct', sr=sr,
                                            hop_length=hop_length)

    encoded = xform.transform_annotation(ann_segment, ann_segment.duration)

    decoded = xform.inverse(encoded['agree'], duration=ann_segment.duration)
    recoded = xform.transform_annotation(decoded, ann_segment.duration)

    assert np.allclose(encoded['agree'], recoded['agree'])
@pytest.mark.xfail(raises=NotImplementedError)
def test_decode_beatpos(sr, hop_length, ann_beat):
    # BeatPositionTransformer is expected to refuse inversion
    # (hence the xfail on NotImplementedError).
    xform = pumpp.task.BeatPositionTransformer('beat', sr=sr,
                                               max_divisions=12,
                                               hop_length=hop_length)

    encoded = xform.transform_annotation(ann_beat, ann_beat.duration)

    decoded = xform.inverse(encoded['position'], duration=ann_beat.duration)
    recoded = xform.transform_annotation(decoded, ann_beat.duration)

    assert np.allclose(encoded['position'], recoded['position'])
@pytest.mark.xfail(raises=NotImplementedError)
def test_task_key_inverse_transform(sr, hop_length):
jam = jams.JAMS(file_metadata=dict(duration=4.0))
trans = pumpp.task.KeyTransformer(name='key',
sr=sr, hop_length=hop_length)
output = | |
# gh_stars: 1000+
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GraphSum model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import six
from collections import namedtuple
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from models.encoder import transformer_encoder, graph_encoder
from models.decoder import graph_decoder
from models.neural_modules import pre_process_layer
from models.trigram_blocking import TrigramBlocking
INF = 1. * 1e18
class GraphSumConfig(object):
    """Parser for configuration files"""

    def __init__(self, config_path):
        """Load the JSON configuration located at ``config_path``."""
        self._config_dict = self._parse(config_path)

    def _parse(self, config_path):
        """Read and decode the JSON config file; raise IOError on failure."""
        try:
            with open(config_path) as fin:
                return json.load(fin)
        except Exception:
            raise IOError("Error in parsing Ernie model config file '%s'" %
                          config_path)

    def __getitem__(self, key):
        """Dict-style read access to a configuration entry."""
        return self._config_dict[key]

    def __setitem__(self, key, value):
        """Dict-style write access to a configuration entry."""
        self._config_dict[key] = value

    def print_config(self):
        """Print configuration"""
        for key in sorted(self._config_dict):
            print('%s: %s' % (key, self._config_dict[key]))
        print('------------------------------------------------')
class GraphSumModel(object):
"""GraphSum Model"""
    def __init__(self, args, config, padding_idx, bos_idx, eos_idx, tokenizer):
        """Cache run-time arguments, model hyperparameters and special-token
        ids used throughout graph construction.

        Args:
            args: command-line arguments (argparse.Namespace-like).
            config: GraphSumConfig-like mapping of model hyperparameters.
            padding_idx (int): vocabulary id reserved for padding.
            bos_idx (int): begin-of-sequence token id.
            eos_idx (int): end-of-sequence token id.
            tokenizer: spm (sentencepiece) tokenizer; len() gives vocab size.
        """
        self.args = args
        # --- model-size hyperparameters from the config file ---
        self._emb_size = config['hidden_size']
        self._enc_word_layer = config['enc_word_layers']
        self._enc_graph_layer = config['enc_graph_layers']
        self._dec_n_layer = config['dec_graph_layers']
        self._n_head = config['num_attention_heads']
        self._max_position_seq_len = config['max_position_embeddings']
        self._hidden_act = config['hidden_act']
        self._prepostprocess_dropout = config['hidden_dropout_prob']
        self._attention_dropout = config['attention_probs_dropout_prob']
        self._preprocess_command = config['preprocess_command']
        self._postprocess_command = config['postprocess_command']
        # --- parameter names shared between builder methods ---
        self._word_emb_name = config['word_embedding_name']
        self._enc_word_pos_emb_name = config['enc_word_pos_embedding_name']
        self._enc_sen_pos_emb_name = config['enc_sen_pos_embedding_name']
        self._dec_word_pos_emb_name = config['dec_word_pos_embedding_name']
        # Initialize all weigths by truncated normal initializer, and all biases
        # will be initialized by constant zero by default.
        self._param_initializer = fluid.initializer.TruncatedNormal(
            scale=config['initializer_range'])
        # --- training / precision options from the command line ---
        self._label_smooth_eps = args.label_smooth_eps
        self._padding_idx = padding_idx
        self._weight_sharing = args.weight_sharing
        # compute dtype is fp16 only when requested; embeddings stay fp32
        self._dtype = "float16" if args.use_fp16 else "float32"
        self._use_fp16 = args.use_fp16
        self._emb_dtype = "float32"
        # --- decoding / data-shape options ---
        self.beam_size = args.beam_size
        self.eos_idx = eos_idx
        self.bos_idx = bos_idx
        self.tokenizer = tokenizer  # spm tokenizer
        self.voc_size = len(tokenizer)
        self.max_para_len = args.max_para_len
        self.max_para_num = args.max_para_num
        self.graph_type = args.graph_type
        self.max_tgt_len = args.max_tgt_len
        self.len_penalty = args.len_penalty
        self.max_out_len = args.max_out_len
        self.min_out_len = args.min_out_len
        self.block_trigram = args.block_trigram
        self.pos_win = args.pos_win
def _gen_enc_input(self, src_word, src_word_pos, src_sen_pos, word_slf_attn_bias,
sen_slf_attn_bias, graph_attn_bias):
# (batch_size, max_n_block, max_n_token, emb_dim)
word_emb_out = fluid.layers.embedding(
input=src_word,
size=[self.voc_size, self._emb_size],
padding_idx=self._padding_idx, # set embedding of pad to 0
dtype=self._emb_dtype,
param_attr=fluid.ParamAttr(
name=self._word_emb_name, initializer=self._param_initializer),
is_sparse=False)
word_emb_out = layers.scale(x=word_emb_out, scale=self._emb_size ** 0.5)
# (batch_size, max_n_block, max_n_token, emb_dim/2)
word_pos_out = fluid.layers.embedding(
input=src_word_pos,
size=[self._max_position_seq_len, self._emb_size // 2],
dtype=self._emb_dtype,
param_attr=fluid.ParamAttr(
name=self._enc_word_pos_emb_name, trainable=False))
word_pos_out.stop_gradient = True
# (batch_size, max_n_block, emb_dim/2)
sen_pos_out = fluid.layers.embedding(
input=src_sen_pos,
size=[self._max_position_seq_len, self._emb_size // 2],
dtype=self._emb_dtype,
param_attr=fluid.ParamAttr(
name=self._enc_sen_pos_emb_name, trainable=False))
sen_pos_out.stop_gradient = True
# (batch_size, max_n_block, max_n_token, emb_dim/2)
sen_pos_out = layers.expand(layers.unsqueeze(sen_pos_out, axes=[2]),
expand_times=[1, 1, self.max_para_len, 1])
# (batch_size, n_blocks, n_tokens, emb_dim)
combined_pos_enc = layers.concat([word_pos_out, sen_pos_out], axis=-1)
emb_out = word_emb_out + combined_pos_enc # (batch_size, n_blocks, n_tokens, emb_dim)
emb_out = layers.dropout(emb_out,
dropout_prob=self._prepostprocess_dropout,
dropout_implementation="upscale_in_train",
is_test=False) if self._prepostprocess_dropout else emb_out
if self._dtype is "float16":
emb_out = fluid.layers.cast(x=emb_out, dtype=self._dtype)
if word_slf_attn_bias is not None:
word_slf_attn_bias = fluid.layers.cast(x=word_slf_attn_bias, dtype=self._dtype)
if sen_slf_attn_bias is not None:
sen_slf_attn_bias = fluid.layers.cast(x=sen_slf_attn_bias, dtype=self._dtype)
if sen_slf_attn_bias is not None:
graph_attn_bias = fluid.layers.cast(x=graph_attn_bias, dtype=self._dtype)
res = namedtuple('results', ['emb_out', 'word_slf_attn_bias', 'sen_slf_attn_bias',
'graph_attn_bias'])
return res(emb_out=emb_out, word_slf_attn_bias=word_slf_attn_bias,
sen_slf_attn_bias=sen_slf_attn_bias, graph_attn_bias=graph_attn_bias)
def _gen_dec_input(self, trg_word, trg_pos, trg_slf_attn_bias, trg_src_words_attn_bias,
trg_src_sents_attn_bias, graph_attn_bias):
emb_out = fluid.layers.embedding(
input=trg_word,
size=[self.voc_size, self._emb_size],
padding_idx=self._padding_idx, # set embedding of pad to 0
dtype=self._emb_dtype,
param_attr=fluid.ParamAttr(
name=self._word_emb_name, initializer=self._param_initializer),
is_sparse=False)
emb_out = layers.scale(x=emb_out, scale=self._emb_size ** 0.5)
position_emb_out = fluid.layers.embedding(
input=trg_pos,
size=[self._max_position_seq_len, self._emb_size],
dtype=self._emb_dtype,
param_attr=fluid.ParamAttr(
name=self._dec_word_pos_emb_name, trainable=False))
position_emb_out.stop_gradient = True
emb_out = emb_out + position_emb_out
emb_out = layers.dropout(
emb_out,
dropout_prob=self._prepostprocess_dropout,
dropout_implementation="upscale_in_train",
is_test=False) if self._prepostprocess_dropout else emb_out
if self._dtype is "float16":
emb_out = fluid.layers.cast(x=emb_out, dtype=self._dtype)
if trg_slf_attn_bias is not None:
trg_slf_attn_bias = fluid.layers.cast(x=trg_slf_attn_bias, dtype=self._dtype)
if trg_src_words_attn_bias is not None:
trg_src_words_attn_bias = fluid.layers.cast(x=trg_src_words_attn_bias, dtype=self._dtype)
if trg_src_sents_attn_bias is not None:
trg_src_sents_attn_bias = fluid.layers.cast(x=trg_src_sents_attn_bias, dtype=self._dtype)
if graph_attn_bias is not None:
graph_attn_bias = fluid.layers.cast(x=graph_attn_bias, dtype=self._dtype)
res = namedtuple('results', ['emb_out', 'trg_slf_attn_bias', 'trg_src_words_attn_bias',
'trg_src_sents_attn_bias', 'graph_attn_bias'])
return res(emb_out=emb_out, trg_slf_attn_bias=trg_slf_attn_bias,
trg_src_words_attn_bias=trg_src_words_attn_bias,
trg_src_sents_attn_bias=trg_src_sents_attn_bias,
graph_attn_bias=graph_attn_bias)
    def encode(self, enc_input):
        """Encoding the source input

        Runs the token-level transformer encoder over each paragraph, then
        the paragraph-level graph encoder over the resulting memories.

        Args:
            enc_input: tuple of (src_word, src_word_pos, src_sen_pos,
                src_words_slf_attn_bias, src_sents_slf_attn_bias,
                graph_attn_bias).

        Returns:
            (enc_words_out, enc_sents_out): word-level memories reshaped to
            (batch_size, n_blocks, n_tokens, emb_dim) and sentence-level
            memories of shape (batch_size, n_block, emb_dim).
        """
        src_word, src_word_pos, src_sen_pos, src_words_slf_attn_bias, \
            src_sents_slf_attn_bias, graph_attn_bias = enc_input

        # Embed tokens + positions and cast biases to the compute dtype
        enc_res = self._gen_enc_input(src_word, src_word_pos, src_sen_pos, src_words_slf_attn_bias,
                                      src_sents_slf_attn_bias, graph_attn_bias)
        emb_out, src_words_slf_attn_bias, src_sents_slf_attn_bias, graph_attn_bias = \
            enc_res.emb_out, enc_res.word_slf_attn_bias, enc_res.sen_slf_attn_bias, enc_res.graph_attn_bias

        # Fold the paragraph dimension into the batch for the token encoder:
        # (batch_size*n_blocks, n_tokens, emb_dim)
        emb_out = layers.reshape(emb_out, shape=[-1, self.max_para_len, self._emb_size])
        # (batch_size*n_block, n_head, n_tokens, n_tokens)
        src_words_slf_attn_bias = layers.reshape(src_words_slf_attn_bias,
                                                 shape=[-1, self._n_head, self.max_para_len, self.max_para_len])

        # the token-level transformer encoder
        # (batch_size*n_blocks, n_tokens, emb_dim)
        enc_words_out = transformer_encoder(
            enc_input=emb_out,
            attn_bias=src_words_slf_attn_bias,
            n_layer=self._enc_word_layer,
            n_head=self._n_head,
            d_key=self._emb_size // self._n_head,
            d_value=self._emb_size // self._n_head,
            d_model=self._emb_size,
            d_inner_hid=self._emb_size * 4,
            prepostprocess_dropout=self._prepostprocess_dropout,
            attention_dropout=self._attention_dropout,
            relu_dropout=self._prepostprocess_dropout,
            hidden_act=self._hidden_act,
            preprocess_cmd=self._preprocess_command,
            postprocess_cmd=self._postprocess_command,
            param_initializer=self._param_initializer,
            name='transformer_encoder',
            # post-processing is deferred: the graph encoder consumes the raw
            # outputs, and pre_process_layer is applied below ("post_encoder")
            with_post_process=False
        )

        # the paragraph-level graph encoder
        # (batch_size, n_block, emb_dim)
        enc_sents_out = graph_encoder(
            enc_words_output=enc_words_out,  # (batch_size*n_blocks, n_tokens, emb_dim)
            src_words_slf_attn_bias=src_words_slf_attn_bias,  # (batch_size*max_nblock, n_head, max_ntoken, max_ntoken)
            src_sents_slf_attn_bias=src_sents_slf_attn_bias,  # (batch_size, n_head, max_nblock, max_nblock)
            graph_attn_bias=graph_attn_bias,  # (batch_size, n_head, max_nblock, max_nblock)
            pos_win=self.pos_win,
            graph_layers=self._enc_graph_layer,
            n_head=self._n_head,
            d_key=self._emb_size // self._n_head,
            d_value=self._emb_size // self._n_head,
            d_model=self._emb_size,
            d_inner_hid=self._emb_size * 4,
            prepostprocess_dropout=self._prepostprocess_dropout,
            attention_dropout=self._attention_dropout,
            relu_dropout=self._prepostprocess_dropout,
            hidden_act=self._hidden_act,
            # n_block=self.max_para_num,
            preprocess_cmd=self._preprocess_command,
            postprocess_cmd=self._postprocess_command,
            param_initializer=self._param_initializer,
            name='graph_encoder')

        # Apply the deferred post-processing to the word-level memories
        enc_words_out = pre_process_layer(
            enc_words_out, self._preprocess_command, self._prepostprocess_dropout, name="post_encoder")
        # Restore the paragraph dimension for the decoder's word memories
        enc_words_out = layers.reshape(enc_words_out,
                                       shape=[-1, self.max_para_num, self.max_para_len, self._emb_size])

        return enc_words_out, enc_sents_out
def decode(self, dec_input, enc_words_output, enc_sents_output, caches=None, gather_idx=None):
"""Decoding to generate output text"""
trg_word, trg_pos, trg_slf_attn_bias, trg_src_words_attn_bias, \
trg_src_sents_attn_bias, graph_attn_bias = dec_input
dec_res = self._gen_dec_input(trg_word, trg_pos, trg_slf_attn_bias, trg_src_words_attn_bias,
trg_src_sents_attn_bias, graph_attn_bias)
emb_out, trg_slf_attn_bias, trg_src_words_attn_bias, trg_src_sents_attn_bias, graph_attn_bias = \
dec_res.emb_out, dec_res.trg_slf_attn_bias, dec_res.trg_src_words_attn_bias, \
dec_res.trg_src_sents_attn_bias, dec_res.graph_attn_bias
# (batch_size, tgt_len, emb_dim)
dec_output = graph_decoder(
dec_input=emb_out, # (batch_size, tgt_len, emb_dim)
enc_words_output=enc_words_output, # (batch_size, n_blocks, n_tokens, emb_dim)
enc_sents_output=enc_sents_output, # (batch_size, n_blocks, emb_dim)
dec_slf_attn_bias=trg_slf_attn_bias, # (batch_size, n_head, tgt_len, tgt_len)
dec_enc_words_attn_bias=trg_src_words_attn_bias, # (batch_size, n_blocks, n_head, tgt_len, n_tokens)
dec_enc_sents_attn_bias=trg_src_sents_attn_bias, # (batch_size, n_head, tgt_len, n_blocks)
graph_attn_bias=graph_attn_bias, # (batch_size, n_head, n_blocks, n_blocks)
pos_win=self.pos_win,
n_layer=self._dec_n_layer,
n_head=self._n_head,
d_key=self._emb_size // self._n_head,
d_value=self._emb_size // self._n_head,
d_model=self._emb_size,
d_inner_hid=self._emb_size * 4,
prepostprocess_dropout=self._prepostprocess_dropout,
attention_dropout=self._attention_dropout,
relu_dropout=self._prepostprocess_dropout,
hidden_act=self._hidden_act,
preprocess_cmd=self._preprocess_command,
postprocess_cmd=self._postprocess_command,
param_initializer=self._param_initializer,
caches=caches,
gather_idx=gather_idx,
name='graph_decoder')
# Reshape to 2D tensor to use GEMM instead of BatchedGEMM
# (batch_size*tgt_len, emb_dim)
dec_output = layers.reshape(dec_output, shape=[-1, self._emb_size], inplace=True)
if self._dtype is "float16":
dec_output = fluid.layers.cast(x=dec_output, dtype=self._emb_dtype)
if self._weight_sharing:
out = layers.matmul(
x=dec_output,
y=fluid.default_main_program().global_block().var(
self._word_emb_name),
transpose_y=True)
bias = layers.create_parameter(shape=[self.voc_size],
dtype=self._emb_dtype,
attr=fluid.ParamAttr(
name='generator.bias',
initializer=fluid.initializer.Constant(value=0.0)),
is_bias=True)
predict = layers.elementwise_add(x=out, y=bias, axis=-1)
else:
predict = layers.fc(input=dec_output,
size=self.voc_size,
param_attr=fluid.ParamAttr(
name="generator.w",
initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
bias_attr=fluid.ParamAttr(
name='generator.bias',
initializer=fluid.initializer.Constant(value=0.0)))
return predict
def build_model(self, enc_input, dec_input, tgt_label, label_weights):
"""Build the model with source encoding and target decoding"""
enc_word_output, enc_sen_output = self.encode(enc_input)
dec_output = self.decode(dec_input, enc_word_output, enc_sen_output)
predict_token_idx = layers.argmax(dec_output, axis=-1)
correct_token_idx = layers.cast(layers.equal(tgt_label,
layers.reshape(predict_token_idx, shape=[-1, 1])),
dtype='float32')
weighted_correct = layers.elementwise_mul(x=correct_token_idx, y=label_weights, axis=0)
sum_correct = layers.reduce_sum(weighted_correct)
sum_correct.stop_gradient = True
# Padding index do not contribute to the total loss. The weights is used to
# cancel padding index in calculating the loss.
if self._label_smooth_eps:
# TODO: use fluid.input.one_hot after softmax_with_cross_entropy removing
# the enforcement that the last dimension of label must be 1.
tgt_label = layers.label_smooth(label=layers.one_hot(input=tgt_label,
depth=self.voc_size),
epsilon=self._label_smooth_eps)
cost = layers.softmax_with_cross_entropy(
logits=dec_output,
label=tgt_label,
soft_label=True if self._label_smooth_eps else False)
weighted_cost = layers.elementwise_mul(x=cost, y=label_weights, axis=0)
sum_cost = layers.reduce_sum(weighted_cost)
token_num = layers.reduce_sum(label_weights)
token_num.stop_gradient = True
avg_cost = sum_cost / token_num
graph_vars = {
"loss": avg_cost,
"sum_correct": sum_correct,
"token_num": token_num,
}
for k, v in graph_vars.items():
v.persistable = True
return graph_vars
def create_model(self, pyreader_name, is_prediction=False):
"""Create the network"""
if is_prediction:
return self.fast_decode(pyreader_name)
pyreader = fluid.layers.py_reader(
capacity=50,
shapes=[[-1, self.max_para_num, self.max_para_len], # src_word
[-1, self.max_para_num, self.max_para_len], # src_word_pos
[-1, self.max_para_num], # src_sent_pos
[-1, self.max_para_num, self.max_para_len], # src_words_slf_attn_bias
[-1, self.max_para_num], # src_sents_slf_attn_bias
[-1, self.max_para_num, self.max_para_num], # graph_attn_bias
[-1, self.max_tgt_len], # trg_word
[-1, self.max_tgt_len], # trg_pos
[-1, self.max_tgt_len, self.max_tgt_len], # trg_slf_attn_bias
[-1, 1], # tgt_label
[-1, 1]], # label_weights
dtypes=['int64', 'int64', 'int64', 'float32', 'float32', 'float32',
'int64', 'int64', 'float32',
'int64', 'float32'],
lod_levels=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
name=pyreader_name,
use_double_buffer=True)
(src_word, src_word_pos, src_sent_pos, src_words_slf_attn_bias, src_sents_slf_attn_bias,
graph_attn_bias, trg_word, trg_pos, trg_slf_attn_bias, tgt_label, label_weights) = \
fluid.layers.read_file(pyreader)
src_words_slf_attn_bias = layers.expand(layers.unsqueeze(src_words_slf_attn_bias, axes=[2, 3]),
expand_times=[1, 1, self._n_head, self.max_para_len, 1])
src_words_slf_attn_bias.stop_gradient = True
src_sents_slf_attn_bias = layers.expand(layers.unsqueeze(src_sents_slf_attn_bias, axes=[1, 2]),
expand_times=[1, self._n_head, self.max_para_num, 1])
src_sents_slf_attn_bias.stop_gradient = True
graph_attn_bias = layers.expand(layers.unsqueeze(graph_attn_bias, axes=[1]),
expand_times=[1, self._n_head, 1, 1])
graph_attn_bias.stop_gradient = True
trg_slf_attn_bias = layers.expand(layers.unsqueeze(trg_slf_attn_bias, axes=[1]),
expand_times=[1, self._n_head, 1, 1])
trg_slf_attn_bias.stop_gradient = True
tgt_src_words_attn_bias = layers.expand(layers.slice(src_words_slf_attn_bias, axes=[3],
starts=[0], ends=[1]),
expand_times=[1, 1, 1, self.max_tgt_len, 1])
tgt_src_words_attn_bias.stop_gradient = True
tgt_src_sents_attn_bias = layers.expand(layers.slice(src_sents_slf_attn_bias, | |
# reponame: ykwang1/PypeIt
""" Routines related to MasterFrames"""
from __future__ import (print_function, absolute_import, division, unicode_literals)
import numpy as np
import os
# import yaml
import json
from astropy.io import fits
from astropy import units
import linetools.utils
from pypeit.par import pypeitpar
from pypeit import msgs
from pypeit import debugger
def set_master_dir(redux_path, spectrograph, par):
    """
    Set the master directory auto-magically

    Args:
        redux_path: str or None
          Root of the reduction outputs; defaults to the current directory
        spectrograph: Spectrograph or None
          If given, its name is appended to the directory
        par: ParSet or None
          Must provide a 'caldir' entry; otherwise defaults are used

    Returns:
        master_dir : str
          Path of the MasterFrame directory
    """
    # Fall back to default calibration parameters when no usable parameter
    # set (i.e. one providing 'caldir') was handed in.  This flattens the
    # original nested if/else into a single guard.
    if par is None or 'caldir' not in par.keys():
        tmppar = pypeitpar.CalibrationsPar()
    else:
        tmppar = par
    # Redux path
    if redux_path is None:
        redux_path = os.getcwd()
    master_dir = os.path.join(redux_path, tmppar['caldir'])
    # Spectrograph
    if spectrograph is not None:
        master_dir += '_' + spectrograph.spectrograph
    # Return
    return master_dir
def master_name(ftype, setup, mdir):
    """ Default filenames for MasterFrames

    Parameters
    ----------
    ftype : str
      Frame type
    setup : str
      Setup name
    mdir : str, optional
      Master directory

    Returns
    -------
    msname : str
    """
    # sensfunc splits the setup string into its first character and last two
    if ftype == 'sensfunc':
        return '{:s}/MasterSensFunc_{:s}_{:s}.fits'.format(mdir, setup[0], setup[-2:])
    # All other frame types follow a single "Master<Type>_<setup>" template
    templates = dict(bias='MasterBias_{:s}.fits',
                     badpix='MasterBadPix_{:s}.fits',
                     trace='MasterTrace_{:s}',  # root only; FITS+JSON are generated
                     pinhole='MasterPinhole_{:s}.fits',
                     pixelflat='MasterPixelFlat_{:s}.fits',
                     illumflat='MasterIllumFlat_{:s}.fits',
                     arc='MasterArc_{:s}.fits',
                     wave='MasterWave_{:s}.fits',
                     wv_calib='MasterWaveCalib_{:s}.json',
                     tilts='MasterTilts_{:s}.fits')
    return '{:s}/{:s}'.format(mdir, templates[ftype].format(setup))
'''
def load_master_frame(slf, mftype, det=None):
"""
Mainly a wrapper on core_load_master_frame
This method will be deprecated by load methods in the MasterFrame classes
Parameters
----------
slf
mftype
det
Returns
-------
"""
# TODO -- This method will be deprecated by load methods in the MasterFrame classes
# Presently there are only 4 calls to this (tilts, mswave, wavecalib)
# Were MasterFrames even desired?
if (settings.argflag['reduce']['masters']['reuse']) or (settings.argflag['reduce']['masters']['force']):
ret, head, _ = core_load_master_frame(mftype, slf.setup,
settings.argflag['run']['directory']['master']+'_'+settings.argflag['run']['spectrograph'],
force=settings.argflag['reduce']['masters']['force'])
else:
return None
if ret is None:
return None
elif mftype == 'arc':
slf._transpose = head['transp']
if slf._transpose: # Need to setup for flipping
settings.argflag['trace']['dispersion']['direction'] = 1
else:
settings.argflag['trace']['dispersion']['direction'] = 0
elif mftype == 'trace':
Tslits = ret[0]
Tslits._make_pixel_arrays()
#
slf.SetFrame(slf._lordloc, Tslits.lcen, det)
slf.SetFrame(slf._rordloc, Tslits.rcen, det)
slf.SetFrame(slf._pixcen, Tslits.pixcen, det)
slf.SetFrame(slf._pixwid, Tslits.pixwid, det)
slf.SetFrame(slf._lordpix, Tslits.lordpix, det)
slf.SetFrame(slf._rordpix, Tslits.rordpix, det)
slf.SetFrame(slf._slitpix, Tslits.slitpix, det)
# Mask -- It is assumed that all slits loaded are ok
slf._maskslits[det-1] = np.array([False] * slf._lordloc[det-1].shape[1])
# We only want to send back the mstrace image (for now)
# This should change when slf is Refactored
ret = Tslits.mstrace
# Append as loaded
settings.argflag['reduce']['masters']['loaded'].append(mftype+slf.setup)
return ret
'''
def load_master_frame(mftype, setup, mdir, force=False):
    """ If a MasterFrame exists, load it

    Parameters
    ----------
    mftype : str
      Frame type (e.g. 'bias', 'tilts')
    setup : str
    mdir : str
    force : bool, optional
      Passed through to the low-level loader

    Returns
    -------
    msfile : ndarray or dict or None
    head : Header or None
    file_list : list (or None)
      Typically the files used the generate the master frame (may be incomplete or None)
    """
    # Resolve the canonical filename, then defer to the low-level loader
    filename = master_name(mftype, setup, mdir)
    frame, header, files = _load(filename, exten=0, frametype=mftype, force=force)

    # Nothing on disk (or unreadable): warn and hand back empty results
    if frame is None:
        msgs.warn("No Master frame found of type {:s}: {:s}".format(mftype, filename))
        return None, None, None

    return frame, header, files
def _load(name, exten=0, frametype='<None>', force=False):
    """
    Low level load method for master frames

    Should mainly be called by load_master_frame

    Parameters
    ----------
    name : str
      Name of the master calibration file to be loaded
    exten : int, optional
      FITS extension to read for generic image frames
    frametype : str, optional
      The type of master calibration frame being loaded.
      This keyword is only used for terminal print out.
    force : bool, optional
      Crash out if the file does not exist!

    Returns
    -------
    frame : ndarray or dict or None
      The data from the master calibration frame
    head : Header (or None)
    file_list : list (or None)
      Typically the files used to generate the master frame (may be incomplete or None)
    """
    # Check to see if file exists
    if not os.path.isfile(name):
        msgs.warn("Master frame does not exist: {:s}".format(name))
        if force:
            msgs.error("Crashing out because reduce-masters-force=True:"+msgs.newline()+name)
        return None, None, None
    #
    if frametype == 'wv_calib':
        # Handled by the MasterFrame class itself
        msgs.error('Load from the class not this method')
    elif frametype == 'sensfunc':
        msgs.info("Loading a pre-existing master calibration frame of type: {:}".format(frametype) + " from filename: {:}".format(name))
        hdu = fits.open(name)
        head = hdu[0].header
        tbl = hdu['SENSFUNC'].data
        sens_dict = {}
        sens_dict['wave'] = tbl['WAVE']
        sens_dict['sensfunc'] = tbl['SENSFUNC']
        for key in ['wave_min','wave_max','exptime','airmass','std_file','std_ra','std_dec','std_name','cal_file']:
            try:
                sens_dict[key] = head[key.upper()]
            except KeyError:
                # Optional header card absent from this file; skip it.
                # (FIX: was a bare `except:`, which also hid real errors.)
                pass
        return sens_dict, head, [name]
    elif frametype == 'trace':
        # Handled by the TraceSlits class itself
        msgs.error('Load from the class not this method')
    elif frametype == 'tilts':
        msgs.info("Loading a pre-existing master calibration frame of type: {:}".format(frametype) + " from filename: {:}".format(name))
        hdu = fits.open(name)
        head0 = hdu[0].header
        tilts = hdu[0].data
        head1 = hdu[1].header
        coeffs = hdu[1].data
        tilts_dict = {'tilts':tilts,'coeffs':coeffs,'func2D': head1['FUNC2D']}  # This is the tilts_dict
        return tilts_dict, head0, [name]
    else:
        msgs.info("Loading a pre-existing master calibration frame of type: {:}".format(frametype) + " from filename: {:}".format(name))
        hdu = fits.open(name)
        head0 = hdu[0].header
        # FIX: np.float is a deprecated alias removed in NumPy >= 1.24;
        # the builtin float gives the same float64 result.
        data = hdu[exten].data.astype(float)
        # List of files used to generate the Master frame (e.g. raw file frames)
        file_list = []
        for key in head0:
            if 'FRAME' in key:
                file_list.append(head0[key])
        return data, head0, file_list
'''
def save_masters(slf, det, mftype='all'):
""" Save Master Frames
THIS WILL BE DEPRECATED BIT BY BIT
Parameters
----------
slf
mftype : str
'all' -- Save them all
"""
# TODO - Deprecate
setup = slf.setup
transpose = bool(settings.argflag['trace']['dispersion']['direction'])
# Bias
if (mftype == 'bias'):
msgs.error("Should not get here anymore. Save the bias in the BiasFrame class")
# Bad Pixel
if (mftype in ['badpix', 'all']) and ('badpix'+setup not in settings.argflag['reduce']['masters']['loaded']):
msgs.error("Should not get here anymore. Save the trace in the TraceSlits class")
#save_master(slf, slf._bpix[det-1],
# filename=master_name('badpix', setup),
# frametype='badpix')
# Trace
if (mftype in ['trace', 'all']) and ('trace'+setup not in settings.argflag['reduce']['masters']['loaded']):
msgs.error("Should not get here anymore. Save the trace in the TraceSlits class")
# Pixel Flat
if (mftype in ['pixelflat', 'all']) and ('pixelflat'+setup not in settings.argflag['reduce']['masters']['loaded']):
msgs.error("Should not get here anymore. Save the trace in the TraceSlits class")
#save_master(slf, slf._mspixelflatnrm[det-1],
# filename=master_name('normpixelflat', setup),
# frametype='normpixelflat')
# Pinhole Flat
if (mftype in ['pinhole', 'all']) and ('pinhole'+setup not in settings.argflag['reduce']['masters']['loaded']):
save_master(slf, slf._mspinhole[det-1],
filename=master_name('pinhole', setup),
frametype='pinhole')
# Arc
if (mftype in ['arc', 'all']) and ('arc'+setup not in settings.argflag['reduce']['masters']['loaded']):
msgs.error("Should not get here anymore. Save the arc in the ArcImage class")
# Wavelength image
if (mftype in ['wave', 'all']) and ('wave'+setup not in settings.argflag['reduce']['masters']['loaded']):
save_master(slf, slf._mswave[det-1],
filename=master_name('wave', setup),
frametype='wave')
if (mftype in ['wv_calib', 'all']) and ('wv_calib'+setup not in settings.argflag['reduce']['masters']['loaded']):
msgs.error("Should not get here anymore. Save the arc in the ArcImage class")
# Wavelength fit
#gddict = linetools.utils.jsonify(slf._wvcalib[det-1])
#json_file = master_name('wv_calib', setup)
#if gddict is not None:
# linetools.utils.savejson(json_file, gddict, easy_to_read=True, overwrite=True)
#else:
# msgs.warn("The master wavelength solution has not been saved")
# Tilts
if (mftype in ['tilts', 'all']) and ('tilts'+setup not in settings.argflag['reduce']['masters']['loaded']):
msgs.error("Should not get here anymore. Save the arc in the ArcImage class")
#save_master(slf, slf._tilts[det-1],
# filename=master_name('tilts', setup),
# frametype='tilts')
# Spatial slit profile
if (mftype in ['slitprof', 'all']) and ('slitprof'+setup not in settings.argflag['reduce']['masters']['loaded']):
save_master(slf, slf._slitprof[det - 1],
filename=master_name('slitprof', setup),
frametype='slit profile')
'''
'''
def save_master(slf, data, filename="temp.fits", frametype="<None>", ind=[],
extensions=None, keywds=None, names=None):
""" Wrapper to core_save_master
Will be Deprecated
Parameters
----------
slf
data
filename
frametype
ind
extensions
keywds
names
Returns
-------
"""
if len(ind) > 0:
raw_files=slf._fitsdict['filename']
else:
raw_files=None
core_save_master(data, filename=filename, frametype=frametype,
extensions=extensions, keywds=keywds, names=names,
raw_files=raw_files)
'''
def save_master(data, filename="temp.fits", frametype="<None>",
extensions=None, keywds=None, names=None, raw_files=None,
overwrite=True):
""" Core function to write a MasterFrame image
More sophisticated MasterFrame objects may be written by their own Class, e.g. TraceSlits
Parameters
----------
data : ndarray
filename : str (optional)
frametype : str (optional)
extensions : list, optional
Additional data images to write
names : list, optional
Names of the extensions
keywds : Additional keywords for the Header
raw_files : list or ndarray
Names of the raw files used to generate the image
Returns
-------
"""
# Check for existing
if os.path.exists(filename) and (not overwrite):
msgs.warn("This file already exists. Use overwrite=True to overwrite it")
return
#
msgs.info("Saving master {0:s} frame as:".format(frametype)+msgs.newline()+filename)
if frametype == 'wv_calib':
# Wavelength fit(s)
gddict = linetools.utils.jsonify(data)
linetools.utils.savejson(filename, gddict, easy_to_read=True, overwrite=True)
else: # 2D Image
hdu = fits.PrimaryHDU(data)
hlist = [hdu]
# Extensions
if extensions is not None:
for kk,exten in enumerate(extensions):
hdu = | |
break
for x in range( start+max_pull, self._exists+1, max_pull ):
if x in self.seqno_cache:
un = self.seqno_cache[x][0]
sn = x
break
if un is None:
sn = self._last_exists
un = self._uid_next - 1
s = ( self._exists / max_pull ) * max_pull
for x in range( s, s0, -max_pull ):
if x in self.seqno_cache:
un = self.seqno_cache[x][0]
sn = x
break
if un <= u0 or sn == s0:
if seqno==0:
seqno = 1
return seqno, 4*max_pull - 1
cn = sn - s0
rn = un - u0
self._imap.log( "Adapting for range %d (%d) to %d (%d), across %d (%d)" % ( u0, s0, un, sn, rn, cn ) )
holes = rn-cn
if holes < 0:
self._imap.log( "Holes is negative? UIDNEXT is buggy." )
self._imap.alert( "Holes is negative? UIDNEXT is buggy." )
import sys
sys.exit()
holes = 0
max_num = 2*(holes+1)
if max_num > cn:
max_num = cn
self._imap.log( "Max is %d, with range %d, count %d and holes %d." % ( max_num, rn, cn, holes ) )
foo = ( rn * max_pull * 3 ) / max_num
self._imap.log( "Foo is %s" % `foo` )
to = ( int( foo ) / max_pull ) * max_pull - 1
self._imap.log( "To is %s" % `to` )
#if to >= ( max_pull * 15 ):
# to = max_pull * 15 - 1
fr = ( s0 / max_pull ) * max_pull
self._imap.log( "Initial start is %d" % fr )
while fr + to < seqno:
fr += max_pull
self._imap.log( "Post coverage check, now %d" % fr )
if fr==0:
fr = 1
return fr, to
if seqno==0:
seqno = 1
return seqno, max_pull * 3 - 1
def logical_start_position(self):
    """Return the index of the first non-recent message, refreshing UIDVALIDITY first."""
    self.uidvalidity()
    return len(self) - self._recent
def have_rights(self, what):
    """Delegate an ACL rights query to the mailbox-info object."""
    return self._mi.have_rights(what)
def mbox_info(self):
    """Return the cached mailbox-info object for this mailbox."""
    return self._mi
def register_notify(self, who):
    """Track *who* weakly so it can receive change notifications without being kept alive."""
    self._notifies.append(weakref.ref(who))
def delete_notify(self, who):
    """Unregister *who* as a listener, dropping dead weakrefs along the way."""
    survivors = []
    for ref in self._notifies:
        target = ref()
        if target is not None and target is not who:
            survivors.append(ref)
    self._notifies = survivors
def notify(self):
    """Deliver queued modifications to live listeners.

    Returns True when at least one listener was told; False when we are
    waiting, nothing is queued, or no listener is still alive.
    """
    if self._waiting or not self._mods:
        return False
    pending, self._mods = self._mods, []
    delivered = False
    for ref in self._notifies:
        target = ref()
        if target is None:
            continue
        delivered = True
        target.notify_change(self, pending)
    return delivered
def detail_save_real(self):
    """Persist mailbox-level state into detail_cache, then sync all caches.

    Without CONDSTORE support, neutral sentinel values are written so that
    detail_restore() always finds every key. If writing fails, the detail
    cache is reset and the save is retried once via recursion.
    """
    try:
        self.detail_cache['uid_validity'] = self._uid_validity
        self.detail_cache['uid_next'] = self._uid_next
        self.detail_cache['last_exists'] = self._last_exists
        self.detail_cache['exists'] = self._exists
        if self._condstore_real:
            self.detail_cache['highest_mod_seq'] = self._highest_modseq
            self.detail_cache['last_modseq_seen'] = self._last_modseq_seen
            self.detail_cache['highwater_uid'] = self._highwater_uid
            self.detail_cache['highwater_modseq'] = self._highwater_modseq
        else:
            # No CONDSTORE: store neutral defaults instead of live values.
            self.detail_cache['highest_mod_seq'] = '0'
            self.detail_cache['last_modseq_seen'] = '0'
            self.detail_cache['highwater_uid'] = 0
            self.detail_cache['highwater_modseq'] = '0'
        self.detail_cache['witnessed_expunges'] = self._witnessed_expunges
        self.detail_cache['flags'] = self._flags
        self.detail_cache['permflags'] = self._perm_flags
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed. Reset the cache and retry once.
        self.detail_cache_start(True)
        self.detail_save_real()
    self.seqno_cache.sync()
    self.cache.sync()
    self.mid_cache.sync()
    self.detail_cache.sync()
def detail_save(self):
    """Deliberate no-op: persistence actually happens via detail_save_real()."""
    pass
def detail_restore(self):
    """Load mailbox-level state from detail_cache.

    On any failure (typically a missing key in a fresh cache) the detail
    cache is reset and all state falls back to blank defaults.
    """
    try:
        self._uid_validity = self.detail_cache['uid_validity']
        self._uid_next = self.detail_cache['uid_next']
        self._last_exists = self.detail_cache['last_exists']
        self._exists = self.detail_cache['exists']
        self._highest_modseq = self.detail_cache['highest_mod_seq']
        self._last_modseq_seen = self.detail_cache['last_modseq_seen']
        self._witnessed_expunges = self.detail_cache['witnessed_expunges']
        self._flags = self.detail_cache['flags']
        self._perm_flags = self.detail_cache['permflags']
        self._highwater_uid = self.detail_cache['highwater_uid']
        self._highwater_modseq = self.detail_cache['highwater_modseq']
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed. Any partial assignments above are
        # overwritten by the defaults below.
        self.detail_cache_start(True)
        self._uid_validity = None
        self._uid_next = None
        self._last_exists = None
        self._exists = None
        self._highest_modseq = '0'
        self._last_modseq_seen = '0'
        self._highwater_uid = 0
        self._highwater_modseq = '0'
def gen_cache_start( self, cache, persist, name, reset=False, double=False ):
    """(Re)open one on-disk cache file, falling back to an in-memory dummy.

    Parameters: `cache`/`persist` are the current cache object and whether it
    was disk-backed; `name` is the file prefix inside self.cachedir;
    `reset` deletes existing files first; `double` selects the two-level
    cache variant. Returns a (new_cache, persist) tuple.
    """
    # Best-effort close of the old cache; failures are deliberately ignored.
    if cache is not None:
        try:
            if persist:
                cache.close();
        except:
            pass
        try:
            cache = None
        except:
            pass
    # Default to a throwaway in-memory cache in case nothing can be opened.
    if double:
        ncache = infotrope.cache.dummy_double()
    else:
        ncache = infotrope.cache.dummy()
    persist = False
    if self._imap.cache_root is not None:
        if reset:
            # Remove every file whose name starts with this cache's prefix.
            for x in os.listdir( self.cachedir ):
                if x.find(name)==0:
                    os.remove( os.path.join( self.cachedir, x ) )
        try:
            if double:
                ncache = infotrope.cache.open_double( os.path.join( self.cachedir, name ), int )
            else:
                ncache = infotrope.cache.open( os.path.join( self.cachedir, name ) )
            persist = True
        except:
            if reset:
                # Already resetting: give up and keep the in-memory dummy.
                if double:
                    ncache = infotrope.cache.dummy_double()
                else:
                    ncache = infotrope.cache.dummy()
            else:
                # First failure: retry once from scratch with reset=True.
                # NOTE(review): the recursive call's return value is discarded,
                # so the retried cache never reaches the caller — looks like it
                # should be `return self.gen_cache_start(...)`; confirm.
                self.gen_cache_start( ncache, persist, name, reset=True, double=double )
    return ( ncache, persist )
def cache_start(self, reset=False):
    """(Re)open the per-message fetch-data cache and wrap it for keyed access."""
    self.cache_real, self.cache_persist = self.gen_cache_start(
        self.cache_real, self.cache_persist, 'cache', reset, double=True)
    self.cache = cache_wrapper(self, self.cache_real)
def detail_cache_start(self, reset=False):
    """(Re)open the mailbox-detail cache."""
    self.detail_cache, self.detail_cache_persist = self.gen_cache_start(
        self.detail_cache, self.detail_cache_persist, 'detail', reset)
def seqno_cache_start(self, reset=False):
    """(Re)open the seqno-to-UID block cache, wrapped by seqno_cache_thing."""
    self.seqno_cache_real, self.seqno_cache_persist = self.gen_cache_start(
        self.seqno_cache_real, self.seqno_cache_persist, 'seqno', reset)
    self.seqno_cache = seqno_cache_thing(self.seqno_cache_real)
def mid_cache_start(self, reset=False):
    """(Re)open the message-id cache."""
    self.mid_cache, self.mid_cache_persist = self.gen_cache_start(
        self.mid_cache, self.mid_cache_persist, 'mid', reset)
def server(self):
    """Return the owning IMAP connection object."""
    return self._imap
def path(self):
    """Return this mailbox's path on the server."""
    return self._path
def uri(self):
    """Build this mailbox's URL, pinned to the current UIDVALIDITY."""
    if self.proxy_uri_stub:
        base = self.proxy_uri_stub
    else:
        base = str(self._imap.mbox_info(self._path).uri())
    return infotrope.url.URL(base + ';UIDVALIDITY=' + self._uid_validity)
def master_uri(self):
    """Alias for uri(): this mailbox is its own master."""
    return self.uri()
def copy( self, msg, topath ):
    """Copy one or more messages to another mailbox via UID COPY.

    msg may be a message object, an int UID, a list mixing both, or anything
    coercible to int. When the server advertises UIDPLUS and answers with a
    COPYUID response code (RFC 4315), locally cached fetch data is mirrored
    onto the destination mailbox's new UIDs as well.
    """
    # Normalise the argument into a flat list of integer UIDs.
    if isinstance(msg,message):
        uids = [int(msg.uid())]
    elif isinstance(msg,int):
        uids = [msg]
    elif isinstance(msg,list):
        uids = []
        for m in msg:
            if isinstance(m,message):
                uids.append(int(m.uid()))
            elif isinstance(m,int):
                uids.append(m)
            else:
                uids.append(int(m))
    else:
        uids = [int(msg)]
    self.uidvalidity()
    t,r,s = self._imap.send( 'UID COPY', ','.join(self._make_msg_set(uids)), astring(topath), mbox=self.path() )
    if r is None:
        # Command still in flight; block until the tagged response arrives.
        t,r,s = self._imap.wait( t )
    if r.lower()!='ok':
        raise infotrope.base.connection.exception(s)
    if isinstance( s[0], list ):
        # Response code shape: [COPYUID, <uidvalidity>, <src-set>, <dst-set>]
        if s[0][0].upper()=='COPYUID' and self._imap.have_capability('UIDPLUS'):
            dst_m = self._imap.mailbox( topath, open=False )
            if s[0][1]==dst_m._uid_validity:
                dst_uids = self._imap.decompose_set(s[0][3],nosort=True)
                if len(uids)!=len(dst_uids):
                    print "COPYUID Length Mismatch!"
                    return
                uids = self._imap.decompose_set(s[0][2],nosort=True)
                for i in range(len(uids)):
                    # Mirror cached fetch data onto the destination UIDs.
                    self.copy_cache( uids[i], dst_m, dst_uids[i] )
def copy_cache(self, uid, dst_m, dst_uid):
    """Replicate cached fetch items for uid onto dst_uid in dst_m, then sync dst_m."""
    for _u, key, value in self.cache.items(uid):
        # MODSEQ values are mailbox-specific and must not be transferred;
        # never_cache lists items that are not cached at all.
        if 'MODSEQ' in key or key in never_cache:
            continue
        dst_m.set_cache(dst_uid, key, value)
    dst_m.sync()
def have_cached(self, uid, item):
    """Return True when (uid, item) is already present in the fetch cache."""
    return (uid, item) in self.cache
def copy_cache_old(self, uid, dst_m, dst_uid):
    """Legacy cache copy: probe a fixed list of fetch items plus all body parts."""
    candidates = ['ENVELOPE', 'BODYSTRUCTURE', 'INTERNALDATE', 'RFC822.SIZE',
                  'RFC822', 'RFC822.HEADER', 'RFC822.TEXT', 'BODY']
    candidates.extend(self[uid].parts().all_parts())
    for name in candidates:
        cached = self.check_in_cache(uid, name)
        if cached is None:
            continue
        dst_m.set_cache(dst_uid, cached, self.get_from_cache(uid, cached))
def seqno( self, seqno, testonly=False ):
    ''' Translate a sequence number for this mailbox into a UID.

    Returns None when logged out, when seqno is out of the 1.._exists range,
    or when the mapping cannot be determined. With testonly=True this never
    triggers seqno_prime (no server round-trip) on a cache miss.
    '''
    if self._imap.logged_out:
        return None
    self.uidvalidity()
    if seqno > self._exists:
        return None
    if seqno <= 0:
        return None
    # Mappings are cached in fixed-size blocks keyed by the block's starting
    # seqno. NOTE: Python 2 integer division intended here.
    start = ( seqno / max_pull ) * max_pull
    block = self.seqno_cache.get(start,[])
    if len(block)<=(seqno-start):
        if testonly:
            return None
        # Cache miss: ask the server to fill this area, then retry once.
        self.seqno_prime( seqno )
        block = self.seqno_cache.get(start,[])
        if len(block)<=(seqno-start):
            self._imap.log("Block at %d (length %d) too short to have seqno %d" % ( start, len(block), seqno ))
            return None
    return block[seqno-start]
def check_seqno(self, seqno):
    """Non-priming seqno lookup: return the UID only if it is already cached."""
    return self.seqno(seqno, testonly=True)
def seqno_remove( self, seqno ):
    ''' Find and remove a specific seqno, rearranging seqno map to suit. '''
    # self.uidvalidity() Don't call this, we're definately selected.
    self._imap.log("Removing seqno %d" % (seqno))
    # Block containing this seqno. NOTE: Python 2 integer division intended.
    start = ( seqno / max_pull ) * max_pull
    block = self.seqno_cache.get(start)
    if block:
        if len(block) > (seqno - start):
            # Mutate a copy so the cache store sees a fresh value.
            block = block[:]
            del block[seqno - start]
            self.seqno_cache[start] = block
            self._imap.log("Removed seqno")
    # Every block from `start` onwards shifts left by one entry: trim the
    # head of each later block and pull in the first entry of the block after.
    for chunk in range(start, self._exists+max_pull, max_pull):
        block = self.seqno_cache.get(chunk)
        if block:
            block = block[:]
            if chunk > start:
                self._imap.log("Trim block %d" % (chunk))
                block = block[1:]
            nextblock = self.seqno_cache.get(chunk+max_pull)
            if nextblock:
                block.append(nextblock[0])
                self._imap.log("Append from next for %d" % (chunk))
            self.seqno_cache[chunk] = block
def seqno_add( self, seqno, uid ):
    """Record a new seqno->UID mapping, but only when it extends a cached block.

    Appends to the containing block if the block ends exactly one short of
    seqno; otherwise only block-start or seqno==1 cases create a new block.
    """
    self._imap.log("Adding seqno %d as %d" % (seqno, uid))
    # NOTE: Python 2 integer division intended.
    start = ( seqno / max_pull ) * max_pull
    if start in self.seqno_cache:
        if len( self.seqno_cache[start] ) == ( seqno - start ):
            # Re-assign after append so persistent caches see the update.
            tmp = self.seqno_cache[start]
            tmp.append( uid )
            self.seqno_cache[start] = tmp
    elif start == seqno:
        self.seqno_cache[start] = [uid]
    elif seqno == 1:
        # Block 0 gets a placeholder for seqno 0 so indices line up.
        self.seqno_cache[0] = [0,uid]
def seqno_prime(self, seqno, then=None):
    """Ensure UIDVALIDITY is current, then fill the seqno cache around seqno."""
    self.uidvalidity()
    self.seqno_prime_real(seqno, then=then)
def seqno_prime_real( self, seqno, new_exists=None, then=None ):
try:
self._imap.log("SEARCH for %s" % ( seqno ))
search_override = new_exists
self._seqno_search = [ x for x in self._seqno_search if x.response is None ]
for x in self._seqno_search:
if x.seqno_start <= seqno <= x.seqno_end:
self._imap.log( "SEARCH in progress for %d (%s)" % ( seqno, x ) )
if then is None:
self._imap.wait( x )
else:
x.oncomplete( then )
return
use_esearch = False
if new_exists is None:
use_esearch = True
search_override = self._exists
start = | |
Add nodes to the TensorFlow graph.
if begin_mask is None:
begin_mask = 0
begin_mask = _execute.make_int(begin_mask, "begin_mask")
if end_mask is None:
end_mask = 0
end_mask = _execute.make_int(end_mask, "end_mask")
if ellipsis_mask is None:
ellipsis_mask = 0
ellipsis_mask = _execute.make_int(ellipsis_mask, "ellipsis_mask")
if new_axis_mask is None:
new_axis_mask = 0
new_axis_mask = _execute.make_int(new_axis_mask, "new_axis_mask")
if shrink_axis_mask is None:
shrink_axis_mask = 0
shrink_axis_mask = _execute.make_int(shrink_axis_mask, "shrink_axis_mask")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"StridedSliceGrad", shape=shape, begin=begin, end=end,
strides=strides, dy=dy, begin_mask=begin_mask,
end_mask=end_mask, ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "Index",
_op._get_attr_type("Index"), "begin_mask",
_op._get_attr_int("begin_mask"), "end_mask",
_op._get_attr_int("end_mask"), "ellipsis_mask",
_op._get_attr_int("ellipsis_mask"), "new_axis_mask",
_op._get_attr_int("new_axis_mask"), "shrink_axis_mask",
_op._get_attr_int("shrink_axis_mask"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"StridedSliceGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
StridedSliceGrad = tf_export("raw_ops.StridedSliceGrad")(_ops.to_raw_op(strided_slice_grad))
def strided_slice_grad_eager_fallback(shape, begin, end, strides, dy, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask, name, ctx):
  """Eager fallback for StridedSliceGrad when the fast-path executor fails.

  Normalises the mask attributes, resolves the T/Index dtypes, then runs the
  op directly through _execute.execute.
  """
  # Default each mask attribute to 0 and coerce it to an int attr value.
  if begin_mask is None:
    begin_mask = 0
  begin_mask = _execute.make_int(begin_mask, "begin_mask")
  if end_mask is None:
    end_mask = 0
  end_mask = _execute.make_int(end_mask, "end_mask")
  if ellipsis_mask is None:
    ellipsis_mask = 0
  ellipsis_mask = _execute.make_int(ellipsis_mask, "ellipsis_mask")
  if new_axis_mask is None:
    new_axis_mask = 0
  new_axis_mask = _execute.make_int(new_axis_mask, "new_axis_mask")
  if shrink_axis_mask is None:
    shrink_axis_mask = 0
  shrink_axis_mask = _execute.make_int(shrink_axis_mask, "shrink_axis_mask")
  # `dy` fixes T; shape/begin/end/strides must share the Index dtype.
  _attr_T, (dy,) = _execute.args_to_matching_eager([dy], ctx)
  _attr_Index, _inputs_Index = _execute.args_to_matching_eager([shape, begin, end, strides], ctx)
  (shape, begin, end, strides) = _inputs_Index
  _inputs_flat = [shape, begin, end, strides, dy]
  _attrs = ("T", _attr_T, "Index", _attr_Index, "begin_mask", begin_mask,
  "end_mask", end_mask, "ellipsis_mask", ellipsis_mask, "new_axis_mask",
  new_axis_mask, "shrink_axis_mask", shrink_axis_mask)
  _result = _execute.execute(b"StridedSliceGrad", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "StridedSliceGrad", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
@_dispatch.add_dispatch_list
@tf_export('tensor_scatter_nd_add', v1=['tensor_scatter_nd_add', 'tensor_scatter_add'])
@deprecated_endpoints('tensor_scatter_add')
def tensor_scatter_add(tensor, indices, updates, name=None):
  r"""Adds sparse `updates` to an existing tensor according to `indices`.

  This operation creates a new tensor by adding sparse `updates` to the passed
  in `tensor`.
  This operation is very similar to `tf.scatter_nd_add`, except that the updates
  are added onto an existing tensor (as opposed to a variable). If the memory
  for the existing tensor cannot be re-used, a copy is made and updated.

  `indices` is an integer tensor containing indices into a new tensor of shape
  `tensor.shape`. The last dimension of `indices` can be at most the rank of
  `tensor.shape`:

      indices.shape[-1] <= tensor.shape.rank

  The last dimension of `indices` corresponds to indices into elements
  (if `indices.shape[-1] = tensor.shape.rank`) or slices
  (if `indices.shape[-1] < tensor.shape.rank`) along dimension
  `indices.shape[-1]` of `tensor.shape`. `updates` is a tensor with shape

      indices.shape[:-1] + tensor.shape[indices.shape[-1]:]

  The simplest form of tensor_scatter_add is to add individual elements to a
  tensor by index. For example, say we want to add 4 elements in a rank-1
  tensor with 8 elements.

  In Python, this scatter add operation would look like this:

  ```python
  indices = tf.constant([[4], [3], [1], [7]])
  updates = tf.constant([9, 10, 11, 12])
  tensor = tf.ones([8], dtype=tf.int32)
  updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
  print(updated)
  ```

  The resulting tensor would look like this:

      [1, 12, 1, 11, 10, 1, 1, 13]

  We can also, insert entire slices of a higher rank tensor all at once. For
  example, if we wanted to insert two slices in the first dimension of a
  rank-3 tensor with two matrices of new values.

  In Python, this scatter add operation would look like this:

  ```python
  indices = tf.constant([[0], [2]])
  updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
                          [7, 7, 7, 7], [8, 8, 8, 8]],
                         [[5, 5, 5, 5], [6, 6, 6, 6],
                          [7, 7, 7, 7], [8, 8, 8, 8]]])
  tensor = tf.ones([4, 4, 4],dtype=tf.int32)
  updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
  print(updated)
  ```

  The resulting tensor would look like this:

      [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
       [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
       [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
       [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]

  Note that on CPU, if an out of bound index is found, an error is returned.
  On GPU, if an out of bound index is found, the index is ignored.

  Args:
    tensor: A `Tensor`. Tensor to copy/update.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Index tensor.
    updates: A `Tensor`. Must have the same type as `tensor`.
      Updates to scatter into output.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `tensor`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C++ fast path, then the Python eager fallback,
    # then any registered dispatchers before re-raising.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx._context_handle, tld.device_name, "TensorScatterAdd", name,
          tld.op_callbacks, tensor, indices, updates)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return tensor_scatter_add_eager_fallback(
          tensor, indices, updates, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # Let registered dispatchers handle non-Tensor inputs.
      result = _dispatch.dispatch(
            tensor_scatter_add, (), dict(tensor=tensor, indices=indices,
                                         updates=updates, name=name)
          )
      if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return result
      raise
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorScatterAdd", tensor=tensor, indices=indices, updates=updates,
                            name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          tensor_scatter_add, (), dict(tensor=tensor, indices=indices,
                                       updates=updates, name=name)
        )
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "Tindices",
              _op._get_attr_type("Tindices"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorScatterAdd", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
TensorScatterAdd = tf_export("raw_ops.TensorScatterAdd")(_ops.to_raw_op(tensor_scatter_add))
def tensor_scatter_add_eager_fallback(tensor, indices, updates, name, ctx):
  """Eager fallback for TensorScatterAdd when the fast-path executor fails."""
  # `tensor` and `updates` must share dtype T; `indices` supplies Tindices.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([tensor, updates], ctx)
  (tensor, updates) = _inputs_T
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx)
  _inputs_flat = [tensor, indices, updates]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
  _result = _execute.execute(b"TensorScatterAdd", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorScatterAdd", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
@_dispatch.add_dispatch_list
@tf_export('tensor_scatter_nd_max')
def tensor_scatter_max(tensor, indices, updates, name=None):
  r"""Dispatches the `TensorScatterMax` op on (`tensor`, `indices`, `updates`).

  Generated wrapper exported as `tf.tensor_scatter_nd_max`. Tries the eager
  fast path, then the eager fallback, then registered dispatchers, and in
  graph mode adds a `TensorScatterMax` node to the graph.

  Args:
    tensor: A `Tensor`. Tensor to update.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Index tensor.
    updates: A `Tensor`. Must have the same type as `tensor`.
      Updates to scatter into output.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `tensor`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: C++ fast path first, then Python fallback, then dispatchers.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx._context_handle, tld.device_name, "TensorScatterMax", name,
          tld.op_callbacks, tensor, indices, updates)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return tensor_scatter_max_eager_fallback(
          tensor, indices, updates, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      result = _dispatch.dispatch(
            tensor_scatter_max, (), dict(tensor=tensor, indices=indices,
                                         updates=updates, name=name)
          )
      if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return result
      raise
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorScatterMax", tensor=tensor, indices=indices, updates=updates,
                            name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          tensor_scatter_max, (), dict(tensor=tensor, indices=indices,
                                       updates=updates, name=name)
        )
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "Tindices",
              _op._get_attr_type("Tindices"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorScatterMax", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
TensorScatterMax = tf_export("raw_ops.TensorScatterMax")(_ops.to_raw_op(tensor_scatter_max))
def tensor_scatter_max_eager_fallback(tensor, indices, updates, name, ctx):
  """Eager fallback for TensorScatterMax when the fast-path executor fails."""
  # `tensor` and `updates` must share dtype T; `indices` supplies Tindices.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([tensor, updates], ctx)
  (tensor, updates) = _inputs_T
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx)
  _inputs_flat = [tensor, indices, updates]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
  _result = _execute.execute(b"TensorScatterMax", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorScatterMax", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
@_dispatch.add_dispatch_list
@tf_export('tensor_scatter_nd_min')
def tensor_scatter_min(tensor, indices, updates, name=None):
r"""TODO: add doc.
Args:
tensor: A `Tensor`. Tensor to update.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
Index tensor.
updates: A `Tensor`. Must have the same type as `tensor`.
Updates to scatter into output.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `tensor`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "TensorScatterMin", name,
tld.op_callbacks, tensor, indices, updates)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return tensor_scatter_min_eager_fallback(
tensor, indices, updates, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
tensor_scatter_min, (), dict(tensor=tensor, indices=indices,
updates=updates, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
# | |
against known Juju API types.
if changes_ is not None and not isinstance(changes_, (bytes, str, list)):
raise Exception("Expected changes_ to be a Sequence, received: {}".format(type(changes_)))
if error_ is not None and not isinstance(error_, (dict, Error)):
raise Exception("Expected error_ to be a Error, received: {}".format(type(error_)))
if watcher_id_ is not None and not isinstance(watcher_id_, (bytes, str)):
raise Exception("Expected watcher_id_ to be a str, received: {}".format(type(watcher_id_)))
self.changes = changes_
self.error = error_
self.watcher_id = watcher_id_
self.unknown_fields = unknown_fields
class StringsWatchResults(Type):
    """Bulk container for StringsWatchResult entries."""

    _toSchema = {'results': 'results'}
    _toPy = {'results': 'results'}

    def __init__(self, results=None, **unknown_fields):
        '''
        results : typing.Sequence[~StringsWatchResult]
        '''
        parsed = [StringsWatchResult.from_json(item) for item in results or []]
        # Guard retained from the generated template (parsed is always a list).
        if parsed is not None and not isinstance(parsed, (bytes, str, list)):
            raise Exception("Expected results_ to be a Sequence, received: {}".format(type(parsed)))
        self.results = parsed
        self.unknown_fields = unknown_fields
class Subnet(Type):
    """Juju API wire type describing one network subnet."""

    _toSchema = {'cidr': 'cidr', 'life': 'life', 'provider_id': 'provider-id', 'provider_network_id': 'provider-network-id', 'provider_space_id': 'provider-space-id', 'space_tag': 'space-tag', 'status': 'status', 'vlan_tag': 'vlan-tag', 'zones': 'zones'}
    _toPy = {'cidr': 'cidr', 'life': 'life', 'provider-id': 'provider_id', 'provider-network-id': 'provider_network_id', 'provider-space-id': 'provider_space_id', 'space-tag': 'space_tag', 'status': 'status', 'vlan-tag': 'vlan_tag', 'zones': 'zones'}

    def __init__(self, cidr=None, life=None, provider_id=None, provider_network_id=None, provider_space_id=None, space_tag=None, status=None, vlan_tag=None, zones=None, **unknown_fields):
        '''
        cidr : str
        life : str
        provider_id : str
        provider_network_id : str
        provider_space_id : str
        space_tag : str
        status : str
        vlan_tag : int
        zones : typing.Sequence[str]
        '''
        # One shared check for all plain-string fields; the formatted
        # messages are identical to the per-field originals.
        string_fields = (
            ('cidr_', cidr), ('life_', life), ('provider_id_', provider_id),
            ('provider_network_id_', provider_network_id),
            ('provider_space_id_', provider_space_id),
            ('space_tag_', space_tag), ('status_', status),
        )
        for label, value in string_fields:
            if value is not None and not isinstance(value, (bytes, str)):
                raise Exception("Expected {} to be a str, received: {}".format(label, type(value)))
        if vlan_tag is not None and not isinstance(vlan_tag, int):
            raise Exception("Expected vlan_tag_ to be a int, received: {}".format(type(vlan_tag)))
        if zones is not None and not isinstance(zones, (bytes, str, list)):
            raise Exception("Expected zones_ to be a Sequence, received: {}".format(type(zones)))
        self.cidr = cidr
        self.life = life
        self.provider_id = provider_id
        self.provider_network_id = provider_network_id
        self.provider_space_id = provider_space_id
        self.space_tag = space_tag
        self.status = status
        self.vlan_tag = vlan_tag
        self.zones = zones
        self.unknown_fields = unknown_fields
class SubnetsFilters(Type):
    """Juju API wire type holding optional subnet listing filters."""

    _toSchema = {'space_tag': 'space-tag', 'zone': 'zone'}
    _toPy = {'space-tag': 'space_tag', 'zone': 'zone'}

    def __init__(self, space_tag=None, zone=None, **unknown_fields):
        '''
        space_tag : str
        zone : str
        '''
        for label, value in (('space_tag_', space_tag), ('zone_', zone)):
            if value is not None and not isinstance(value, (bytes, str)):
                raise Exception("Expected {} to be a str, received: {}".format(label, type(value)))
        self.space_tag = space_tag
        self.zone = zone
        self.unknown_fields = unknown_fields
class TaggedCredential(Type):
    """Juju API wire type pairing a cloud credential with its tag."""

    _toSchema = {'credential': 'credential', 'tag': 'tag'}
    _toPy = {'credential': 'credential', 'tag': 'tag'}

    def __init__(self, credential=None, tag=None, **unknown_fields):
        '''
        credential : CloudCredential
        tag : str
        '''
        parsed = CloudCredential.from_json(credential) if credential else None
        if parsed is not None and not isinstance(parsed, (dict, CloudCredential)):
            raise Exception("Expected credential_ to be a CloudCredential, received: {}".format(type(parsed)))
        if tag is not None and not isinstance(tag, (bytes, str)):
            raise Exception("Expected tag_ to be a str, received: {}".format(type(tag)))
        self.credential = parsed
        self.tag = tag
        self.unknown_fields = unknown_fields
class TaggedCredentials(Type):
    """Bulk container for TaggedCredential entries."""

    _toSchema = {'credentials': 'credentials'}
    _toPy = {'credentials': 'credentials'}

    def __init__(self, credentials=None, **unknown_fields):
        '''
        credentials : typing.Sequence[~TaggedCredential]
        '''
        parsed = [TaggedCredential.from_json(item) for item in credentials or []]
        # Guard retained from the generated template (parsed is always a list).
        if parsed is not None and not isinstance(parsed, (bytes, str, list)):
            raise Exception("Expected credentials_ to be a Sequence, received: {}".format(type(parsed)))
        self.credentials = parsed
        self.unknown_fields = unknown_fields
class TokenResult(Type):
    """Juju API wire type: a token or the error that prevented issuing one."""

    _toSchema = {'error': 'error', 'token': 'token'}
    _toPy = {'error': 'error', 'token': 'token'}

    def __init__(self, error=None, token=None, **unknown_fields):
        '''
        error : Error
        token : str
        '''
        parsed_error = Error.from_json(error) if error else None
        if parsed_error is not None and not isinstance(parsed_error, (dict, Error)):
            raise Exception("Expected error_ to be a Error, received: {}".format(type(parsed_error)))
        if token is not None and not isinstance(token, (bytes, str)):
            raise Exception("Expected token_ to be a str, received: {}".format(type(token)))
        self.error = parsed_error
        self.token = token
        self.unknown_fields = unknown_fields
class TokenResults(Type):
    """Bulk container for TokenResult entries."""

    _toSchema = {'results': 'results'}
    _toPy = {'results': 'results'}

    def __init__(self, results=None, **unknown_fields):
        '''
        results : typing.Sequence[~TokenResult]
        '''
        parsed = [TokenResult.from_json(item) for item in results or []]
        # Guard retained from the generated template (parsed is always a list).
        if parsed is not None and not isinstance(parsed, (bytes, str, list)):
            raise Exception("Expected results_ to be a Sequence, received: {}".format(type(parsed)))
        self.results = parsed
        self.unknown_fields = unknown_fields
class Tools(Type):
    """Juju API wire type describing a downloadable agent-tools binary."""

    _toSchema = {'sha256': 'sha256', 'size': 'size', 'url': 'url', 'version': 'version'}
    _toPy = {'sha256': 'sha256', 'size': 'size', 'url': 'url', 'version': 'version'}

    def __init__(self, sha256=None, size=None, url=None, version=None, **unknown_fields):
        '''
        sha256 : str
        size : int
        url : str
        version : Binary
        '''
        parsed_version = Binary.from_json(version) if version else None
        if sha256 is not None and not isinstance(sha256, (bytes, str)):
            raise Exception("Expected sha256_ to be a str, received: {}".format(type(sha256)))
        if size is not None and not isinstance(size, int):
            raise Exception("Expected size_ to be a int, received: {}".format(type(size)))
        if url is not None and not isinstance(url, (bytes, str)):
            raise Exception("Expected url_ to be a str, received: {}".format(type(url)))
        if parsed_version is not None and not isinstance(parsed_version, (dict, Binary)):
            raise Exception("Expected version_ to be a Binary, received: {}".format(type(parsed_version)))
        self.sha256 = sha256
        self.size = size
        self.url = url
        self.version = parsed_version
        self.unknown_fields = unknown_fields
class ToolsResult(Type):
    """Juju API wire type: agent-tools lookup result (or error)."""

    _toSchema = {'disable_ssl_hostname_verification': 'disable-ssl-hostname-verification', 'error': 'error', 'tools': 'tools'}
    _toPy = {'disable-ssl-hostname-verification': 'disable_ssl_hostname_verification', 'error': 'error', 'tools': 'tools'}

    def __init__(self, disable_ssl_hostname_verification=None, error=None, tools=None, **unknown_fields):
        '''
        disable_ssl_hostname_verification : bool
        error : Error
        tools : typing.Sequence[~Tools]
        '''
        parsed_error = Error.from_json(error) if error else None
        parsed_tools = [Tools.from_json(item) for item in tools or []]
        if disable_ssl_hostname_verification is not None and not isinstance(disable_ssl_hostname_verification, bool):
            raise Exception("Expected disable_ssl_hostname_verification_ to be a bool, received: {}".format(type(disable_ssl_hostname_verification)))
        if parsed_error is not None and not isinstance(parsed_error, (dict, Error)):
            raise Exception("Expected error_ to be a Error, received: {}".format(type(parsed_error)))
        if parsed_tools is not None and not isinstance(parsed_tools, (bytes, str, list)):
            raise Exception("Expected tools_ to be a Sequence, received: {}".format(type(parsed_tools)))
        self.disable_ssl_hostname_verification = disable_ssl_hostname_verification
        self.error = parsed_error
        self.tools = parsed_tools
        self.unknown_fields = unknown_fields
class ToolsResults(Type):
    _toSchema = {'results': 'results'}
    _toPy = {'results': 'results'}

    def __init__(self, results=None, **unknown_fields):
        '''
        results : typing.Sequence[~ToolsResult]
        '''
        # Decode every wire entry; an absent sequence becomes an empty list.
        decoded = [ToolsResult.from_json(item) for item in (results or [])]
        # Validate arguments against known Juju API types.
        if decoded is not None and not isinstance(decoded, (bytes, str, list)):
            raise Exception("Expected results_ to be a Sequence, received: {}".format(type(decoded)))
        self.results = decoded
        self.unknown_fields = unknown_fields
class TrackArgs(Type):
    _toSchema = {'payloads': 'payloads'}
    _toPy = {'payloads': 'payloads'}

    def __init__(self, payloads=None, **unknown_fields):
        '''
        payloads : typing.Sequence[~Payload]
        '''
        # Decode every wire entry; an absent sequence becomes an empty list.
        decoded = [Payload.from_json(item) for item in (payloads or [])]
        # Validate arguments against known Juju API types.
        if decoded is not None and not isinstance(decoded, (bytes, str, list)):
            raise Exception("Expected payloads_ to be a Sequence, received: {}".format(type(decoded)))
        self.payloads = decoded
        self.unknown_fields = unknown_fields
class TrackPayloadArgs(Type):
_toSchema = {'payloads': 'payloads'}
_toPy = {'payloads': 'payloads'}
def __init__(self, payloads=None, **unknown_fields):
'''
payloads : typing.Sequence[~Payload]
'''
payloads_ = [Payload.from_json(o) for o in payloads or []]
| |
TensorLayerX
>>> import tensorlayerx as tlx
>>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
>>> transform = tlx.vision.transforms.RandomCrop(size=50, padding=10, pad_if_needed=False, fill=0, padding_mode='constant')
>>> image = transform(image)
>>> print(image)
>>> image shape : (70,70,3)
"""
def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode='constant'):
self.size = size
self.padding = padding
self.pad_if_needed = pad_if_needed
self.fill = fill
self.padding_mode = padding_mode
def __call__(self, image):
if self.padding_mode not in ['constant', 'reflect', 'symmetric']:
raise TypeError("Padding mode should be 'constant', 'reflect', or 'symmetric'.")
return random_crop(
image,
size=self.size,
padding=self.padding,
pad_if_needed=self.pad_if_needed,
fill=self.fill,
padding_mode=self.padding_mode,
)
class RandomResizedCrop(object):
    """Crop a random region of the image and resize it to ``size``.

    Parameters
    ----------
    size : int or sequence
        Desired output size. An int gives a square (size, size) crop; a
        one-element sequence is read as (size[0], size[0]).
    scale : tuple of float
        Area range of the crop, relative to the original image.
    ratio : tuple of float
        Aspect-ratio range of the crop before resizing.
    interpolation : str
        'nearest', 'bilinear' or 'bicubic'. Default 'bilinear'.

    Examples
    ----------
    With TensorLayerX
    >>> import tensorlayerx as tlx
    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> image = tlx.vision.transforms.RandomResizedCrop(size=(100, 100))(image)
    >>> print(image)
    >>> image shape : (100,100,3)
    """

    def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation='bilinear'):
        self.size = size
        self.scale, self.ratio = scale, ratio
        self.interpolation = interpolation

    def __call__(self, image):
        # Sampling, cropping and resizing are delegated to the functional helper.
        return random_resized_crop(image, self.size, self.scale, self.ratio, self.interpolation)
class RandomFlipVertical(object):
    """Flip the image vertically with probability ``prob``.

    Parameters
    ----------
    prob : float
        Chance of the image being flipped. Default 0.5.

    Examples
    ----------
    With TensorLayerX
    >>> import tensorlayerx as tlx
    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> image = tlx.vision.transforms.RandomFlipVertical(prob=0.5)(image)
    >>> print(image)
    """

    def __init__(self, prob=0.5):
        self.prob = prob

    def __call__(self, image):
        # The coin flip happens inside the functional helper.
        return random_vflip(image, self.prob)
class RandomFlipHorizontal(object):
    """Flip the image horizontally with probability ``prob``.

    Parameters
    ----------
    prob : float
        Chance of the image being flipped. Default 0.5.

    Examples
    ----------
    With TensorLayerX
    >>> import tensorlayerx as tlx
    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> image = tlx.vision.transforms.RandomFlipHorizontal(prob=0.5)(image)
    >>> print(image)
    """

    def __init__(self, prob=0.5):
        self.prob = prob

    def __call__(self, image):
        # The coin flip happens inside the functional helper.
        return random_hflip(image, self.prob)
class RandomRotation(object):
    """Rotate the image by an angle drawn at random from ``degrees``.

    Parameters
    ----------
    degrees : number or sequence
        A number d means the range (-d, +d); a sequence means
        (degrees[0], degrees[1]).
    interpolation : str
        'nearest' or 'bilinear'. Default 'bilinear'.
    expand : bool
        If True, enlarge the output so the whole rotated image fits; otherwise
        keep the input size. Assumes rotation around the center, no translation.
    center : sequence or None
        Rotation center (x, y), origin at the upper-left corner. None means
        the image center.
    fill : number or sequence
        Fill value for the area outside the rotated image. Default 0.

    Examples
    ----------
    With TensorLayerX
    >>> import tensorlayerx as tlx
    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> image = tlx.vision.transforms.RandomRotation(degrees=30)(image)
    >>> print(image)
    """

    def __init__(self, degrees, interpolation='bilinear', expand=False, center=None, fill=0):
        # Reject unsupported interpolation modes up front.
        if interpolation not in ('nearest', 'bilinear'):
            raise ValueError("Interpolation only support {'nearest', 'bilinear'} .")
        self.degrees = degrees
        self.interpolation = interpolation
        self.expand = expand
        self.center = center
        self.fill = fill

    def __call__(self, image):
        return random_rotation(image, self.degrees, self.interpolation, self.expand, self.center, self.fill)
class RandomShear(object):
    """Shear the image by a randomly drawn angle.

    Parameters
    ----------
    shear : number or sequence
        A number s means an x-axis shear in (-s, +s); a 2-sequence means an
        x-axis shear in (shear[0], shear[1]); a 4-sequence adds a y-axis shear
        in (shear[2], shear[3]).
    interpolation : str
        'nearest' or 'bilinear'. Default 'bilinear'.
    fill : number or sequence
        Fill value for the area outside the sheared image. Default 0.

    Examples
    ----------
    With TensorLayerX
    >>> import tensorlayerx as tlx
    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> image = tlx.vision.transforms.RandomShear(shear=30)(image)
    >>> print(image)
    """

    def __init__(self, shear, interpolation='bilinear', fill=0):
        # Reject unsupported interpolation modes up front.
        if interpolation not in ('nearest', 'bilinear'):
            raise ValueError("Interpolation only support {'nearest', 'bilinear'} .")
        self.shear = shear
        self.interpolation = interpolation
        self.fill = fill

    def __call__(self, image):
        return random_shear(image, self.shear, self.interpolation, self.fill)
class RandomShift(object):
    """Translate the image by a random offset.

    Parameters
    ----------
    shift : list or tuple
        Maximum absolute fractions (a, b): the horizontal offset is sampled
        from -img_width * a < dx < img_width * a, the vertical offset from
        -img_height * b < dy < img_height * b.
    interpolation : str
        'nearest' or 'bilinear'. Default 'bilinear'.
    fill : number or sequence
        Fill value for the area outside the shifted image. Default 0.

    Examples
    ----------
    With TensorLayerX
    >>> import tensorlayerx as tlx
    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> image = tlx.vision.transforms.RandomShift(shift=(0.2, 0.2))(image)
    >>> print(image)
    """

    def __init__(self, shift, interpolation='bilinear', fill=0):
        # Reject unsupported interpolation modes up front.
        if interpolation not in ('nearest', 'bilinear'):
            raise ValueError("Interpolation only support {'nearest', 'bilinear'} .")
        self.shift = shift
        self.interpolation = interpolation
        self.fill = fill

    def __call__(self, image):
        return random_shift(image, self.shift, self.interpolation, self.fill)
class RandomZoom(object):
    """Zoom the image by a randomly sampled scale factor.

    Parameters
    ----------
    zoom : list or tuple
        Interval (a, b); the scale is sampled with a <= scale <= b.
    interpolation : str
        'nearest' or 'bilinear'. Default 'bilinear'.
    fill : number or sequence
        Fill value for the area outside the zoomed image. Default 0.

    Examples
    ----------
    With TensorLayerX
    >>> import tensorlayerx as tlx
    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> image = tlx.vision.transforms.RandomZoom(zoom=(0.2, 0.5))(image)
    >>> print(image)
    """

    def __init__(self, zoom, interpolation='bilinear', fill=0):
        # Reject unsupported interpolation modes up front.
        if interpolation not in ('nearest', 'bilinear'):
            raise ValueError("Interpolation only support {'nearest', 'bilinear'} .")
        self.zoom = zoom
        self.interpolation = interpolation
        self.fill = fill

    def __call__(self, image):
        return random_zoom(image, self.zoom, self.interpolation, self.fill)
class RandomAffine(object):
"""Random affine transformation of the image keeping center invariant.
Parameters
----------
degrees: number or sequnence
- Range of degrees to select from.
- If degrees is a number, the range of degrees will be (-degrees, +degrees).
- If degrees is a sequence, the range of degrees will (degrees[0], degrees[1]).
- Set to 0 to deactivate rotations.
shift: sequence or None
- Maximum absolute fraction for horizontal and vertical translations.
- shift=(a, b), then horizontal shift is randomly sampled in the range -img_width * a < dx < img_width * a.
- vertical shift is randomly sampled in the range -img_height * b < dy < img_height * b.
- Will not shift by default.
shear: number or sequnence or None
- Range of degrees to select from.
- If degrees is a number, a shear parallel to the x axis in the range (-shear, +shear) will be applied.
- If shear is a sequence of 2 values a shear parallel to the x axis in the range (shear[0], shear[1]) will be applied.
- If shear is a sequence of 4 values, a x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3]) will be applied.
- Will not apply shear by default.
| |
# -*- coding: UTF-8 -*-
# From <NAME>'s iSRb tool
from warnings import warn
import numpy as np
#from skimage.measure import profile_line # toolbox implements its own line profile measurement routine
from scipy.optimize import curve_fit, minimize
from scipy.signal import find_peaks, peak_widths
from time import time
import copy
class OrderError(Exception):
    '''Raised on inappropriate execution order. Correct order is:
    __init__(), profile(), calc_dips(), interpolate()'''
    # The inherited Exception.__init__ already stores the message; the
    # original override only forwarded to super() and is not needed.
    pass
class ResultError(Exception):
    '''Raised on bad calculation results, if unable to continue'''
    # The inherited Exception.__init__ already stores the message; the
    # original override only forwarded to super() and is not needed.
    pass
class Interpolation(object):
'''Manage Interpolation
The interpolation process is separated into 4 parts:
__init__() - open image
profile() - measure profile in image
calc_dips() - calculate dips
interpolate() - calculate isrb from dips
Each part (except _init__) can be re-executed to estimate suitable
variables. The Interpolation object saves parameters additional to
the parameters needed for further calculations. They simplify
finding fitting parameters for the calculation.
'''
def __init__(self, im, pixelsize, SOD=1000, SDD=1000, wire_length = 15,
wire_spacing = [0.8, 0.63, 0.5, 0.4, 0.32, 0.25, 0.2, 0.16, 0.13, 0.1, 0.08, 0.063, 0.05]):
'''Create the Interpolation instance with the image representing
array and other necessary parameters.
Parameters
----------
im : array-like, shape (n, m)
The array representing the gray/intensity scale image
(background is assumed to have high intensity, bright
gray scales).
pixelsize : scalar
The pixelsize of the image in milimeters. Only square
pixels are possible.
SOD : float, optional
Source Object Distance of the scan setup in milimeters.
SDD : float, optional
Source Detector Distance of the scan setup in milimeters.
wire_length : scalar
Length of the wires in mm
wire_spacing : array-like, shape (n, )
Spacing that the wire pairs hold, acending (matches the
wire diameter)
'''
# calculating the scaling of the duplex wires
if SDD == 0:
raise ValueError('SDD cannot be 0')
if SOD == 0:
raise ValueError('SOD cannot be 0')
self.scale = SDD/SOD
if self.scale < 1:
raise ValueError('SOD cannot be larger than SDD')
# initializing required variables
self.im = np.asarray(im, dtype = np.float64)
self.pxsize = float(pixelsize)
# the space inbetween that the wire pairs hold and the wire length
self.wire_spacing = np.asarray(wire_spacing)
self.wire_len = wire_length
# variables that will later be set
self.measure = None # ndarray
self.dips = None # ndarray
self.dip20 = None # scalar
self.criticalIndex = None # Index of last wire pair with dip > 20%.
# variable not necessary to pass between functions but useful to investigate
self.coords = None # tuple of profile line properties
self.peaks = None # tuples of ndarray, shape (2, x)
self.max_peaks = None
self.despike = None
self.bg = None
self.dipsoi = None
self.inter = None
# Interpolation fit results:
self.a = 0
self.b = 0
self.c = 0
return
    def quadratic(x, a, b, c):
        '''Quadratic function a*x**2 + b*x + c.

        NOTE(review): defined without ``self`` or ``@staticmethod``; it is
        captured as the default ``bg_func`` of ``calc_dips`` at class-body
        time and should be called as a plain function, not on an instance.
        '''
        return a*x**2 + b*x + c
    def inverted_quadratic(y, a, b, c):
        '''Solve the quadratic equation a*x**2 + b*x + c = y for x.

        Returns
        -------
        tuple
            Both possible solutions for x (np.sqrt yields NaN when the
            discriminant is negative, i.e. no real solution exists).

        NOTE(review): like ``quadratic``, defined without ``self``; call it
        as a plain function.
        '''
        return (-b/(2*a) + np.sqrt((b/a)**2 /4 - (c-y)/a),
                -b/(2*a) - np.sqrt((b/a)**2 /4 - (c-y)/a))
    def _bivariance(self, phi, rho, start, def_kwargs):
        '''Calculate the variance of the vertically calculated variance
        of the region of interest.

        Objective function for the profile-angle optimization in profile().

        Parameters
        ----------
        phi : scalar
            Angle between the profile line and the horizontal plane
        rho : scalar
            Length of the profile line (pixel coordinates)
        start : array-like, shape (2, )
            Pixel coordinate of the profile line start
        def_kwargs : dict
            Options specifying the profile line (mutated: 'reduce_func'
            is overwritten — pass a copy if the dict is reused)
        Returns
        -------
        bivar : scalar
            variance of vertical variance
        '''
        # scipy minimization passes arrays
        phi = phi[0]
        # Aggregate across the line width with the variance instead of the mean.
        def_kwargs['reduce_func'] = np.var
        stop = np.array(start) + rho * np.array((np.cos(phi), np.sin(phi)))
        # matrix indices differ from cartesian coordinates, hence the [::-1]
        var = profile_line(self.im, start[::-1], stop[::-1], **def_kwargs)
        bivar = np.var(var)
        return bivar
def profile(self, start, stop, polar_coord = False, optimize = False, rel_width = 0.6):
'''Measure intensity along a profile line (area) in the loaded image.
Parameters
----------
start : array-like, shape (2, )
Pixel coordinate of the profile line start.
stop : array-like, shape (2, )
Coordinate of the profile line stop. Look at polar_coord for more info.
polar_coord : bool, optional
True: 'stop' is treated as polar coordinate (rho, phi), relative to 'start'.
Angle in degree, counter-clockwise.
False: 'stop' is treated as cartesian coordinate (x, y)
optimize : bool, optional
If True, optimize the angle (phi) of 'stop' (regardless of coordinate type).
Optimize by minimizig the variance of vertical variance of the region of
interest with the Powell's Method.
rel_width : scalar, optional
Relative portion of the wire length used for measuring. values ranging
from 0.3 to 0.6 are recommended.
Returns
-------
None
Write the intensity profile along the scan line to
self.measure : ndarray, shape (n, )
on execution
General Settings
----------------
- linewidth = width * wire_length
- bi-quadratic filtering for off-pixel coordinates
- nearest filter for coordinates outside of the image
- arithmetic mean for aggregation of pixels perpendicular to the line
Notes
-----
The possibility of non-square pixels:
The transformation between pixel-coordinates and cartesian
distance-coordinates is not a conform map, if the pixels are not squares.
Therefore the measured pixels perpendicular to the profile line in the pixel
coordinates would not be perpendicular in distance coordinates.
The measurement is based on skimage.measure.profile_line(). For
further informations, check out
https://scikit-image.org/docs/stable/api/skimage.measure.html#skimage.measure.profile_line'''
try:
rel_width = float(rel_width)
except (TypeError, ValueError):
raise TypeError("'width' must be float type.")
if rel_width < 0.3 or rel_width > 0.6:
warn("Expected 0.3 <= 'width' <= 0.6 but width = {0:.2f}".format(rel_width))
def_kwargs = {'linewidth' : int(np.rint(self.scale * self.wire_len/self.pxsize * rel_width)),
'order' : 2,
'mode' : 'nearest'}
if optimize:
print('optimizing')
t = time()
if polar_coord:
rho, phi = stop[0], -np.deg2rad(stop[1])
else:
rho = np.linalg.norm(stop-start)
# geometric scalar product
phi = np.arccos((stop[0]-start[0])/rho)
# minimization
sol = minimize(self.bivariance, phi, method='Powell', args=(rho, start, def_kwargs.copy()))
phi = sol['x']
print('optimizing done in {:.6f}ms \n'.format((time()-t)*1000))
print('phi = {:.6f}, bivar = {:.6f}, rho = {:.6f} \n'.format(-np.rad2deg(phi), sol['fun'], rho))
if polar_coord:
stop = start + rho*np.array((np.cos(phi), np.sin(phi)))
stop = np.asarray(np.rint(stop), dtype=int)
else:
if polar_coord:
rho, phi = stop[0], -np.deg2rad(stop[1])
stop = start + rho*np.array((np.cos(phi), np.sin(phi)))
stop = np.asarray(np.rint(stop), dtype=int)
else:
pass
self.coords = (start, stop, def_kwargs['linewidth'])
# saving variables
# matrix indeces differ from cartesian coordinates
measure = profile_line(self.im, start[::-1], stop[::-1], **def_kwargs)
self.measure = measure
self.ind = np.arange(0, self.measure.size)
return
def calc_dips(self, bg_func = quadratic, prominence=10, height=None, width=None, rel_height=0.9):
'''Calculate the dips in the profile.
Find downwards orientated peaks (lower peaks, minimum peaks).
Calculate the widths from the prominence data.
Order peaks into pairs.
Estimate background values.
Calculate dips from pairs.
Parameters
----------
bg_func : callable, optional
f(x, *args) -> y
Scalar function that approximates the background values of the
measurement. 'args' will be calculated by regression.
prominence : float, optional
Peak prominence of the lower peaks to find. The absolute difference
between the peak and its contour line.
Reference in 'scipy.signal.peak_prominence()'.
height : float, optional
Peak height of the lower peaks to find (peaks lying above this value
will be excluded).
Reference in 'scipy.signal.find_peaks()'.
width : float, optional
Peak width of the lower peaks to find.
Reference in 'scipy.signal.peak_width()'.
rel_height : float, optional
Relative height to measure the width of the peaks at.
Reference in 'scipy.signal.peak_widths()'.
Returns
-------
None
Write found dips to self.dips : ndarray, shape (m, )
Write found minima to self.min : array-like, shape (2, n)
Write found background values to self.bg : array-like, shape (2, o)'''
if self.measure is None:
raise OrderError("no profile was measured")
if height is not None:
# self.measure is negated and so is the height
height = -height
# finding minimum peaks
peaks, prop = find_peaks(-self.measure, prominence=prominence, height=height, | |
<filename>src/hapPyTango/CosNotifyComm_skel/__init__.py
""" Module: IDL:omg.org/CosNotifyComm:1.0
Automagically generated by:-
The ORB called Fnorb v1.1.Return.of.Fnorb
"""
_FNORB_ID = "IDL:omg.org/CosNotifyComm:1.0"
# Fnorb modules.
import Fnorb.orb.CORBA
import Fnorb.orb.TypeManager
import Fnorb.orb.Util
class NotifyPublish_skel(Fnorb.orb.CORBA.Object_skel):
    """ Interface: IDL:omg.org/CosNotifyComm/NotifyPublish:1.0 """
    _FNORB_ID = "IDL:omg.org/CosNotifyComm/NotifyPublish:1.0"
    def _skel_offer_change(self, server_request):
        """ Operation: IDL:omg.org/CosNotifyComm/NotifyPublish/offer_change:1.0 """
        # Typecodes for 'in' and 'inout' parameters.
        inputs = []
        inputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotification/EventTypeSeq:1.0"))
        inputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotification/EventTypeSeq:1.0"))
        # Typecodes for the result, 'inout' and 'out' parameters.
        outputs = []
        # Typecodes for user exceptions.
        exceptions = []
        exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyComm/InvalidEventType:1.0"))
        # Initialise the server request object.
        server_request.initialise(inputs, outputs, exceptions)
        # Unmarshal the arguments to the request.
        arguments = server_request.arguments()
        # Invoke the implementation.  Star-unpacking replaces the Python 2-only
        # builtin apply() and is equivalent on both Python 2 and 3.
        results = self.offer_change(*arguments)
        # Create the reply.
        server_request.results(results)
        return
class NotifySubscribe_skel(Fnorb.orb.CORBA.Object_skel):
    """ Interface: IDL:omg.org/CosNotifyComm/NotifySubscribe:1.0 """
    _FNORB_ID = "IDL:omg.org/CosNotifyComm/NotifySubscribe:1.0"
    def _skel_subscription_change(self, server_request):
        """ Operation: IDL:omg.org/CosNotifyComm/NotifySubscribe/subscription_change:1.0 """
        # Typecodes for 'in' and 'inout' parameters.
        inputs = []
        inputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotification/EventTypeSeq:1.0"))
        inputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotification/EventTypeSeq:1.0"))
        # Typecodes for the result, 'inout' and 'out' parameters.
        outputs = []
        # Typecodes for user exceptions.
        exceptions = []
        exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyComm/InvalidEventType:1.0"))
        # Initialise the server request object.
        server_request.initialise(inputs, outputs, exceptions)
        # Unmarshal the arguments to the request.
        arguments = server_request.arguments()
        # Invoke the implementation.  Star-unpacking replaces the Python 2-only
        # builtin apply() and is equivalent on both Python 2 and 3.
        results = self.subscription_change(*arguments)
        # Create the reply.
        server_request.results(results)
        return
# Import base interface packages.
import CosEventComm_skel
class PushConsumer_skel(Fnorb.orb.CORBA.Object_skel, NotifyPublish_skel, CosEventComm_skel.PushConsumer_skel):
    """ Interface: IDL:omg.org/CosNotifyComm/PushConsumer:1.0 """
    # No operations of its own; everything is inherited from the base skeletons.
    _FNORB_ID = "IDL:omg.org/CosNotifyComm/PushConsumer:1.0"
# Import base interface packages.
import CosEventComm_skel
class PullConsumer_skel(Fnorb.orb.CORBA.Object_skel, NotifyPublish_skel, CosEventComm_skel.PullConsumer_skel):
    """ Interface: IDL:omg.org/CosNotifyComm/PullConsumer:1.0 """
    # No operations of its own; everything is inherited from the base skeletons.
    _FNORB_ID = "IDL:omg.org/CosNotifyComm/PullConsumer:1.0"
# Import base interface packages.
import CosEventComm_skel
class PullSupplier_skel(Fnorb.orb.CORBA.Object_skel, NotifySubscribe_skel, CosEventComm_skel.PullSupplier_skel):
    """ Interface: IDL:omg.org/CosNotifyComm/PullSupplier:1.0 """
    # No operations of its own; everything is inherited from the base skeletons.
    _FNORB_ID = "IDL:omg.org/CosNotifyComm/PullSupplier:1.0"
# Import base interface packages.
import CosEventComm_skel
class PushSupplier_skel(Fnorb.orb.CORBA.Object_skel, NotifySubscribe_skel, CosEventComm_skel.PushSupplier_skel):
    """ Interface: IDL:omg.org/CosNotifyComm/PushSupplier:1.0 """
    # No operations of its own; everything is inherited from the base skeletons.
    _FNORB_ID = "IDL:omg.org/CosNotifyComm/PushSupplier:1.0"
class StructuredPushConsumer_skel(Fnorb.orb.CORBA.Object_skel, NotifyPublish_skel):
    """ Interface: IDL:omg.org/CosNotifyComm/StructuredPushConsumer:1.0 """
    _FNORB_ID = "IDL:omg.org/CosNotifyComm/StructuredPushConsumer:1.0"
    def _skel_push_structured_event(self, server_request):
        """ Operation: IDL:omg.org/CosNotifyComm/StructuredPushConsumer/push_structured_event:1.0 """
        # Typecodes for 'in' and 'inout' parameters.
        inputs = []
        inputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotification/StructuredEvent:1.0"))
        # Typecodes for the result, 'inout' and 'out' parameters.
        outputs = []
        # Typecodes for user exceptions.
        exceptions = []
        exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventComm/Disconnected:1.0"))
        # Initialise the server request object.
        server_request.initialise(inputs, outputs, exceptions)
        # Unmarshal the arguments to the request.
        arguments = server_request.arguments()
        # Invoke the implementation.  Star-unpacking replaces the Python 2-only
        # builtin apply() and is equivalent on both Python 2 and 3.
        results = self.push_structured_event(*arguments)
        # Create the reply.
        server_request.results(results)
        return
    def _skel_disconnect_structured_push_consumer(self, server_request):
        """ Operation: IDL:omg.org/CosNotifyComm/StructuredPushConsumer/disconnect_structured_push_consumer:1.0 """
        # Typecodes for 'in' and 'inout' parameters.
        inputs = []
        # Typecodes for the result, 'inout' and 'out' parameters.
        outputs = []
        # Typecodes for user exceptions.
        exceptions = []
        # Initialise the server request object.
        server_request.initialise(inputs, outputs, exceptions)
        # This operation has no arguments.
        arguments = ()
        # Invoke the implementation (apply() replaced by star-unpacking).
        results = self.disconnect_structured_push_consumer(*arguments)
        # Create the reply.
        server_request.results(results)
        return
class StructuredPullConsumer_skel(Fnorb.orb.CORBA.Object_skel, NotifyPublish_skel):
    """ Interface: IDL:omg.org/CosNotifyComm/StructuredPullConsumer:1.0 """
    _FNORB_ID = "IDL:omg.org/CosNotifyComm/StructuredPullConsumer:1.0"
    def _skel_disconnect_structured_pull_consumer(self, server_request):
        """ Operation: IDL:omg.org/CosNotifyComm/StructuredPullConsumer/disconnect_structured_pull_consumer:1.0 """
        # Typecodes for 'in' and 'inout' parameters.
        inputs = []
        # Typecodes for the result, 'inout' and 'out' parameters.
        outputs = []
        # Typecodes for user exceptions.
        exceptions = []
        # Initialise the server request object.
        server_request.initialise(inputs, outputs, exceptions)
        # This operation has no arguments.
        arguments = ()
        # Invoke the implementation.  Star-unpacking replaces the Python 2-only
        # builtin apply() and is equivalent on both Python 2 and 3.
        results = self.disconnect_structured_pull_consumer(*arguments)
        # Create the reply.
        server_request.results(results)
        return
class StructuredPullSupplier_skel(Fnorb.orb.CORBA.Object_skel, NotifySubscribe_skel):
    """ Interface: IDL:omg.org/CosNotifyComm/StructuredPullSupplier:1.0 """
    _FNORB_ID = "IDL:omg.org/CosNotifyComm/StructuredPullSupplier:1.0"
    def _skel_pull_structured_event(self, server_request):
        """ Operation: IDL:omg.org/CosNotifyComm/StructuredPullSupplier/pull_structured_event:1.0 """
        # Typecodes for 'in' and 'inout' parameters.
        inputs = []
        # Typecodes for the result, 'inout' and 'out' parameters.
        outputs = []
        outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotification/StructuredEvent:1.0"))
        # Typecodes for user exceptions.
        exceptions = []
        exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventComm/Disconnected:1.0"))
        # Initialise the server request object.
        server_request.initialise(inputs, outputs, exceptions)
        # This operation has no arguments.
        arguments = ()
        # Invoke the implementation.  Star-unpacking replaces the Python 2-only
        # builtin apply() and is equivalent on both Python 2 and 3.
        results = self.pull_structured_event(*arguments)
        # Create the reply.
        server_request.results(results)
        return
    def _skel_try_pull_structured_event(self, server_request):
        """ Operation: IDL:omg.org/CosNotifyComm/StructuredPullSupplier/try_pull_structured_event:1.0 """
        # Typecodes for 'in' and 'inout' parameters.
        inputs = []
        # Typecodes for the result, 'inout' and 'out' parameters.
        outputs = []
        outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotification/StructuredEvent:1.0"))
        outputs.append(Fnorb.orb.CORBA.TC_boolean)
        # Typecodes for user exceptions.
        exceptions = []
        exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventComm/Disconnected:1.0"))
        # Initialise the server request object.
        server_request.initialise(inputs, outputs, exceptions)
        # This operation has no arguments.
        arguments = ()
        # Invoke the implementation (apply() replaced by star-unpacking).
        results = self.try_pull_structured_event(*arguments)
        # Create the reply.
        server_request.results(results)
        return
    def _skel_disconnect_structured_pull_supplier(self, server_request):
        """ Operation: IDL:omg.org/CosNotifyComm/StructuredPullSupplier/disconnect_structured_pull_supplier:1.0 """
        # Typecodes for 'in' and 'inout' parameters.
        inputs = []
        # Typecodes for the result, 'inout' and 'out' parameters.
        outputs = []
        # Typecodes for user exceptions.
        exceptions = []
        # Initialise the server request object.
        server_request.initialise(inputs, outputs, exceptions)
        # This operation has no arguments.
        arguments = ()
        # Invoke the implementation (apply() replaced by star-unpacking).
        results = self.disconnect_structured_pull_supplier(*arguments)
        # Create the reply.
        server_request.results(results)
        return
class StructuredPushSupplier_skel(Fnorb.orb.CORBA.Object_skel, NotifySubscribe_skel):
    """ Interface: IDL:omg.org/CosNotifyComm/StructuredPushSupplier:1.0 """
    _FNORB_ID = "IDL:omg.org/CosNotifyComm/StructuredPushSupplier:1.0"
    def _skel_disconnect_structured_push_supplier(self, server_request):
        """ Operation: IDL:omg.org/CosNotifyComm/StructuredPushSupplier/disconnect_structured_push_supplier:1.0 """
        # Typecodes for 'in' and 'inout' parameters.
        inputs = []
        # Typecodes for the result, 'inout' and 'out' parameters.
        outputs = []
        # Typecodes for user exceptions.
        exceptions = []
        # Initialise the server request object.
        server_request.initialise(inputs, outputs, exceptions)
        # This operation has no arguments.
        arguments = ()
        # Invoke the implementation.  Star-unpacking replaces the Python 2-only
        # builtin apply() and is equivalent on both Python 2 and 3.
        results = self.disconnect_structured_push_supplier(*arguments)
        # Create the reply.
        server_request.results(results)
        return
class SequencePushConsumer_skel(Fnorb.orb.CORBA.Object_skel, NotifyPublish_skel):
    """ Interface: IDL:omg.org/CosNotifyComm/SequencePushConsumer:1.0 """
    _FNORB_ID = "IDL:omg.org/CosNotifyComm/SequencePushConsumer:1.0"
    def _skel_push_structured_events(self, server_request):
        """ Operation: IDL:omg.org/CosNotifyComm/SequencePushConsumer/push_structured_events:1.0 """
        # Typecodes for 'in' and 'inout' parameters.
        inputs = []
        inputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotification/EventBatch:1.0"))
        # Typecodes for the result, 'inout' and 'out' parameters.
        outputs = []
        # Typecodes for user exceptions.
        exceptions = []
        exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventComm/Disconnected:1.0"))
        # Initialise the server request object.
        server_request.initialise(inputs, outputs, exceptions)
        # Unmarshal the arguments to the request.
        arguments = server_request.arguments()
        # Invoke the implementation.  Star-unpacking replaces the Python 2-only
        # builtin apply() and is equivalent on both Python 2 and 3.
        results = self.push_structured_events(*arguments)
        # Create the reply.
        server_request.results(results)
        return
    def _skel_disconnect_sequence_push_consumer(self, server_request):
        """ Operation: IDL:omg.org/CosNotifyComm/SequencePushConsumer/disconnect_sequence_push_consumer:1.0 """
        # Typecodes for 'in' and 'inout' parameters.
        inputs = []
        # Typecodes for the result, 'inout' and 'out' parameters.
        outputs = []
        # Typecodes for user exceptions.
        exceptions = []
        # Initialise the server request object.
        server_request.initialise(inputs, outputs, exceptions)
        # This operation has no arguments.
        arguments = ()
        # Invoke the implementation (apply() replaced by star-unpacking).
        results = self.disconnect_sequence_push_consumer(*arguments)
        # Create the reply.
        server_request.results(results)
        return
class SequencePullConsumer_skel(Fnorb.orb.CORBA.Object_skel, NotifyPublish_skel):
    """ Interface: IDL:omg.org/CosNotifyComm/SequencePullConsumer:1.0 """

    _FNORB_ID = "IDL:omg.org/CosNotifyComm/SequencePullConsumer:1.0"

    def _skel_disconnect_sequence_pull_consumer(self, server_request):
        """ Operation: IDL:omg.org/CosNotifyComm/SequencePullConsumer/disconnect_sequence_pull_consumer:1.0

        Dispatch stub for the no-argument disconnect operation.
        """
        # Typecodes for 'in' and 'inout' parameters.
        inputs = []
        # Typecodes for the result, 'inout' and 'out' parameters.
        outputs = []
        # Typecodes for user exceptions.
        exceptions = []
        # Initialise the server request object.
        server_request.initialise(inputs, outputs, exceptions)
        # This operation has no arguments.
        arguments = ()
        # Invoke the implementation.  Direct unpacking replaces the
        # deprecated apply() builtin (removed in Python 3).
        results = self.disconnect_sequence_pull_consumer(*arguments)
        # Create the reply.
        server_request.results(results)
        return
class SequencePullSupplier_skel(Fnorb.orb.CORBA.Object_skel, NotifySubscribe_skel):
    """ Interface: IDL:omg.org/CosNotifyComm/SequencePullSupplier:1.0 """

    _FNORB_ID = "IDL:omg.org/CosNotifyComm/SequencePullSupplier:1.0"

    def _skel_pull_structured_events(self, server_request):
        """ Operation: IDL:omg.org/CosNotifyComm/SequencePullSupplier/pull_structured_events:1.0

        Dispatch stub: takes a long (max batch size), returns an EventBatch.
        May surface CosEventComm::Disconnected.
        """
        # Typecodes for 'in' and 'inout' parameters.
        inputs = []
        inputs.append(Fnorb.orb.CORBA.TC_long)
        # Typecodes for the result, 'inout' and 'out' parameters.
        outputs = []
        outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotification/EventBatch:1.0"))
        # Typecodes for user exceptions.
        exceptions = []
        exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventComm/Disconnected:1.0"))
        # Initialise the server request object.
        server_request.initialise(inputs, outputs, exceptions)
        # Unmarshal the arguments to the request.
        arguments = server_request.arguments()
        # Invoke the implementation.  Direct unpacking replaces the
        # deprecated apply() builtin (removed in Python 3).
        results = self.pull_structured_events(*arguments)
        # Create the reply.
        server_request.results(results)
        return

    def _skel_try_pull_structured_events(self, server_request):
        """ Operation: IDL:omg.org/CosNotifyComm/SequencePullSupplier/try_pull_structured_events:1.0

        Dispatch stub: non-blocking variant; returns an EventBatch plus a
        boolean 'has_event' out parameter.
        """
        # Typecodes for 'in' and 'inout' parameters.
        inputs = []
        inputs.append(Fnorb.orb.CORBA.TC_long)
        # Typecodes for the result, 'inout' and 'out' parameters.
        outputs = []
        outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotification/EventBatch:1.0"))
        outputs.append(Fnorb.orb.CORBA.TC_boolean)
        # Typecodes for user exceptions.
        exceptions = []
        exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventComm/Disconnected:1.0"))
        # Initialise the server request object.
        server_request.initialise(inputs, outputs, exceptions)
        # Unmarshal the arguments to the request.
        arguments = server_request.arguments()
        # Invoke the implementation (deprecated apply() replaced by *args).
        results = self.try_pull_structured_events(*arguments)
        # Create the reply.
        server_request.results(results)
        return

    def _skel_disconnect_sequence_pull_supplier(self, server_request):
        """ Operation: IDL:omg.org/CosNotifyComm/SequencePullSupplier/disconnect_sequence_pull_supplier:1.0

        Dispatch stub for the no-argument disconnect operation.
        """
        # Typecodes for 'in' and 'inout' parameters.
        inputs = []
        # Typecodes for the result, 'inout' and 'out' parameters.
        outputs = []
        # Typecodes for user exceptions.
        exceptions = []
        # Initialise the server request object.
        server_request.initialise(inputs, outputs, exceptions)
        # This operation has no arguments.
        arguments = ()
        # Invoke the implementation (deprecated apply() replaced by *args).
        results = self.disconnect_sequence_pull_supplier(*arguments)
        # Create the reply.
        server_request.results(results)
        return
class SequencePushSupplier_skel(Fnorb.orb.CORBA.Object_skel, NotifySubscribe_skel):
""" Interface: IDL:omg.org/CosNotifyComm/SequencePushSupplier:1.0 """
_FNORB_ID = "IDL:omg.org/CosNotifyComm/SequencePushSupplier:1.0"
def _skel_disconnect_sequence_push_supplier(self, server_request):
""" Operation: IDL:omg.org/CosNotifyComm/SequencePushSupplier/disconnect_sequence_push_supplier:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# This operation | |
<gh_stars>1-10
import cStringIO
import hashlib
import MySQLdb
import os
import random
import signal
import sys
import threading
import time
import string
CHARS = string.letters + string.digits
def sha1(x):
  """Return the hex SHA-1 digest of str(x).

  The string is encoded to UTF-8 bytes before hashing: hashlib.sha1()
  rejects unicode str on Python 3, and for the ASCII-only messages this
  script generates the encoding is a byte-for-byte no-op on Python 2.
  """
  return hashlib.sha1(str(x).encode('utf-8')).hexdigest()
# Should be deterministic given an idx
def get_msg(do_blob, idx):
  """Return a deterministic pseudo-random message for row *idx*.

  Seeds the module-level RNG with idx, so repeated calls with the same
  arguments always produce the same message.  Length is 1-24000 chars
  when do_blob is truthy, otherwise 1-255.  Half the time the message is
  incompressible random characters; the other half it is a single
  character repeated (compresses well).

  xrange was replaced by range for Python 3 compatibility; on Python 2
  the join consumes the sequence identically.
  """
  random.seed(idx)
  if do_blob:
    blob_length = random.randint(1, 24000)
  else:
    blob_length = random.randint(1, 255)
  if random.randint(1, 2) == 1:
    # Blob that cannot be compressed (compresses to ~85% of original size).
    return ''.join(random.choice(CHARS) for _ in range(blob_length))
  else:
    # Blob that can be compressed: one character repeated.
    return random.choice(CHARS) * blob_length
class PopulateWorker(threading.Thread):
  """Thread that inserts rows [start_id, end_id) into t1, committing every
  100 rows.  Starts itself from __init__; any exception is recorded in
  self.exception and in the `errors` table.  Logs to
  /<LG_TMP_DIR>/populate-<i>.log."""
  global LG_TMP_DIR
  def __init__(self, con, start_id, end_id, i):
    # con: dedicated MySQLdb connection, owned (and closed) by this thread.
    # start_id/end_id: half-open row-id range to insert.
    # i: worker index, used for the log file name.
    threading.Thread.__init__(self)
    self.con = con
    con.autocommit(False)
    self.log = open('/%s/populate-%d.log' % (LG_TMP_DIR, i), 'a')
    self.num = i
    self.start_id = start_id
    self.end_id = end_id
    self.exception = None
    self.start_time = time.time()
    # Thread starts immediately; callers only need to join().
    self.start()
  def run(self):
    # Top-level harness: record any failure both in self.exception (for the
    # parent to inspect after join()) and in the `errors` table.
    try:
      self.runme()
      print >> self.log, "ok"
    except Exception, e:
      self.exception = e
      try:
        cursor = self.con.cursor()
        # NOTE(review): the exception text is interpolated unescaped; a
        # message containing a quote would break this INSERT — confirm.
        cursor.execute("INSERT INTO errors VALUES('%s')" % e)
      except MySQLdb.Error, e2:
        print >> self.log, "caught while inserting error (%s)" % e2
      print >> self.log, "caught (%s)" % e
    finally:
      self.finish()
  def finish(self):
    # Always runs: flush timing to the log, commit any pending rows and
    # release this thread's connection.
    print >> self.log, "total time: %.2f s" % (time.time() - self.start_time)
    self.log.close()
    self.con.commit()
    self.con.close()
  def runme(self):
    # Insert the assigned id range; ids are stored 1-based (i+1).
    # NOTE(review): reads the module-global do_blob set in __main__ —
    # confirm it is always defined before workers start.
    print >> self.log, "populate thread-%d started" % self.num
    cur = self.con.cursor()
    stmt = None
    for i in xrange(self.start_id, self.end_id):
      msg = get_msg(do_blob, i)
      stmt = """
      INSERT INTO t1(id,msg_prefix,msg,msg_length,msg_checksum) VALUES (%d,'%s','%s',%d,'%s')
      """ % (i+1, msg[0:255], msg, len(msg), sha1(msg))
      cur.execute(stmt)
      # Commit in batches of 100 to bound transaction size.
      if i % 100 == 0:
        self.con.commit()
def populate_table(con, num_records_before, do_blob, log):
  """Fill t1 with num_records_before rows using 10 PopulateWorker threads.

  Each worker gets its own connection and an equal slice of the id range;
  the remainder left by the integer division is inserted on the caller's
  connection.  Returns True when every worker finished cleanly, False if
  any worker recorded an exception.
  """
  con.autocommit(False)
  cur = con.cursor()
  stmt = None
  workers = []
  # Integer division: each worker inserts N rows, the main thread inserts
  # the remaining num_records_before - 10*N rows below.
  N = num_records_before / 10
  start_id = 0
  for i in xrange(10):
    w = PopulateWorker(MySQLdb.connect(user=user, host=host, port=port, db=db),
                       start_id, start_id + N, i)
    start_id += N
    workers.append(w)
  # Insert the remainder on the caller's connection (ids are 1-based).
  for i in xrange(start_id, num_records_before):
    msg = get_msg(do_blob, i)
    stmt = """
    INSERT INTO t1(id,msg_prefix,msg,msg_length,msg_checksum) VALUES (%d,'%s','%s',%d,'%s')
    """ % (i+1, msg[0:255], msg, len(msg), sha1(msg))
    cur.execute(stmt)
  con.commit()
  # BUG FIX: the original returned False on the first failed worker without
  # joining the remaining threads, leaving them running past the return.
  # Join every worker first, then report the aggregate result.
  ok = True
  for w in workers:
    w.join()
    if w.exception:
      print >>log, "populate thread %d threw an exception" % w.num
      ok = False
  return ok
def get_update(msg, idx):
  """Return an UPDATE statement refreshing row *idx* with *msg* and its
  derived prefix, length and checksum columns.  Values are interpolated
  unescaped; safe only because get_msg() emits alphanumerics exclusively."""
  template = """
  UPDATE t1 SET msg_prefix='%s',msg='%s',msg_length=%d,msg_checksum='%s' WHERE id=%d"""
  return template % (msg[:255], msg, len(msg), sha1(msg), idx)
def get_insert_on_dup(msg, idx):
  """Return an upsert for row *idx*: INSERT ... ON DUPLICATE KEY UPDATE,
  so the statement succeeds whether or not the row already exists."""
  template = """
  INSERT INTO t1 (msg_prefix,msg,msg_length,msg_checksum,id) VALUES ('%s','%s',%d,'%s',%d)
  ON DUPLICATE KEY UPDATE
  msg_prefix=VALUES(msg_prefix),
  msg=VALUES(msg),
  msg_length=VALUES(msg_length),
  msg_checksum=VALUES(msg_checksum),
  id=VALUES(id)"""
  return template % (msg[:255], msg, len(msg), sha1(msg), idx)
def get_insert(msg, idx):
  """Return a plain INSERT for row *idx*; fails with a duplicate-key error
  if the row already exists (contrast get_insert_on_dup)."""
  template = """
  INSERT INTO t1 (msg_prefix,msg,msg_length,msg_checksum,id) VALUES ('%s','%s',%d,'%s',%d)"""
  return template % (msg[:255], msg, len(msg), sha1(msg), idx)
def get_insert_null(msg):
  """Return an INSERT with a NULL id, letting the table's auto-increment
  (or NULL handling) pick the key instead of the caller."""
  template = """
  INSERT INTO t1 (msg_prefix,msg,msg_length,msg_checksum,id) VALUES ('%s','%s',%d,'%s',NULL)"""
  return template % (msg[:255], msg, len(msg), sha1(msg))
class ChecksumWorker(threading.Thread):
global LG_TMP_DIR
def __init__(self, con, checksum):
threading.Thread.__init__(self)
self.con = con
con.autocommit(False)
self.log = open('/%s/worker-checksum.log' % LG_TMP_DIR, 'a')
self.checksum = checksum
print >> self.log, "given checksum=%d" % checksum
self.start()
def run(self):
try:
self.runme()
print >> self.log, "ok"
except Exception, e:
try:
cursor = self.con.cursor()
cursor.execute("INSERT INTO errors VALUES('%s')" % e)
con.commit()
except MySQLdb.Error, e2:
print >> self.log, "caught while inserting error (%s)" % e2
print >> self.log, "caught (%s)" % e
finally:
self.finish()
def finish(self):
print >> self.log, "total time: %.2f s" % (time.time() - self.start_time)
self.log.close()
self.con.close()
def runme(self):
print >> self.log, "checksum thread started"
self.start_time = time.time()
cur = self.con.cursor()
cur.execute("SET SESSION innodb_lra_size=16")
cur.execute("CHECKSUM TABLE t1")
checksum = cur.fetchone()[1]
self.con.commit()
if checksum != self.checksum:
print >> self.log, "checksums do not match. given checksum=%d, calculated checksum=%d" % (self.checksum, checksum)
self.checksum = checksum
else:
print >> self.log, "checksums match! (both are %d)" % checksum
class Worker(threading.Thread):
  """Random-load thread: performs a seeded mix of point/secondary-index
  SELECTs, INSERT/UPDATE/upsert/DELETE statements and commit/rollback
  decisions against t1, validating each row it reads.  Starts itself from
  __init__ and logs to /<LG_TMP_DIR>/worker<xid>.log.  Per-thread RNG is
  seeded with xid * server_pid so runs are reproducible per server."""
  global LG_TMP_DIR
  def __init__(self, num_xactions, xid, con, server_pid, do_blob, max_id, fake_changes, secondary_checks):
    # num_xactions: iteration budget; 0/None means run until the server dies.
    # con: dedicated MySQLdb connection, owned by this thread.
    # max_id: upper bound for randomly chosen row ids.
    threading.Thread.__init__(self)
    self.do_blob = do_blob
    self.xid = xid
    con.autocommit(False)
    self.con = con
    self.num_xactions = num_xactions
    cur = self.con.cursor()
    self.rand = random.Random()
    # Deterministic per (worker, server-run) pair.
    self.rand.seed(xid * server_pid)
    self.loop_num = 0
    self.max_id = max_id
    # Operation counters, reported by finish().
    self.num_primary_select = 0
    self.num_secondary_select = 0
    self.num_secondary_only_select = 0
    self.num_inserts = 0
    self.num_deletes = 0
    self.num_updates = 0
    self.time_spent = 0
    self.log = open('/%s/worker%02d.log' % (LG_TMP_DIR, self.xid), 'a')
    if fake_changes:
      # Apply changes without persisting them (InnoDB fake-changes mode).
      cur.execute("SET innodb_fake_changes=1")
    self.secondary_checks = secondary_checks
    # Thread starts immediately; callers only need to join().
    self.start()
  def finish(self):
    # Dump counters and timing; does not close the connection (the final
    # commit happens at the end of runme()).
    print >> self.log, "loop_num:%d, total time: %.2f s" % (
      self.loop_num, time.time() - self.start_time + self.time_spent)
    print >> self.log, "num_primary_select=%d,num_secondary_select=%d,num_secondary_only_select=%d" %\
      (self.num_primary_select, self.num_secondary_select, self.num_secondary_only_select)
    print >> self.log, "num_inserts=%d,num_updates=%d,num_deletes=%d,time_spent=%d" %\
      (self.num_inserts, self.num_updates, self.num_deletes, self.time_spent)
    self.log.close()
  def validate_msg(self, msg_prefix, msg, msg_length, msg_checksum, idx):
    # Cross-check the stored prefix/length/checksum columns against the
    # stored msg; on any mismatch, record the details and raise so run()
    # reports the corruption.
    prefix_match = msg_prefix == msg[0:255]
    checksum = sha1(msg)
    checksum_match = checksum == msg_checksum
    len_match = len(msg) == msg_length
    if not prefix_match or not checksum_match or not len_match:
      errmsg = "id(%d), length(%s,%d,%d), checksum(%s,%s,%s) prefix(%s,%s,%s)" % (
        idx,
        len_match, len(msg), msg_length,
        checksum_match, checksum, msg_checksum,
        prefix_match, msg_prefix, msg[0:255])
      print >> self.log, errmsg
      cursor = self.con.cursor()
      cursor.execute("INSERT INTO errors VALUES('%s')" % errmsg)
      cursor.execute("COMMIT")
      raise Exception('validate_msg failed')
    else:
      print >> self.log, "Validated for length(%d) and id(%d)" % (msg_length, idx)
  # Check to see if the idx is in the first column of res_array.
  def check_exists(self, res_array, idx):
    for res in res_array:
      if res[0] == idx:
        return True
    return False
  def run(self):
    # Top-level harness: record any failure in the `errors` table and log.
    try:
      self.runme()
      print >> self.log, "ok, with do_blob %s" % self.do_blob
    except Exception, e:
      try:
        cursor = self.con.cursor()
        cursor.execute("INSERT INTO errors VALUES('%s')" % e)
        cursor.execute("COMMIT")
      except MySQLdb.Error, e2:
        print >> self.log, "caught while inserting error (%s)" % e2
      print >> self.log, "caught (%s)" % e
    finally:
      self.finish()
  def runme(self):
    # Main load loop; every random draw comes from self.rand so the
    # operation sequence is reproducible for a given seed.
    self.start_time = time.time()
    cur = self.con.cursor()
    print >> self.log, "thread %d started, run from %d to %d" % (
      self.xid, self.loop_num, self.num_xactions)
    while not self.num_xactions or (self.loop_num < self.num_xactions):
      idx = self.rand.randint(0, self.max_id)
      # 3-in-4 chance of write being an insert/update; 0 means DELETE.
      insert_or_update = self.rand.randint(0, 3)
      self.loop_num += 1
      # Randomly toggle innodb_prefix_index_cluster_optimization ~5% of the time.
      if self.rand.randint(0, 20) == 0:
        cur.execute("SET GLOBAL innodb_prefix_index_cluster_optimization=1-@@innodb_prefix_index_cluster_optimization")
      try:
        stmt = None
        msg = get_msg(self.do_blob, idx)
        # Query primary key 70%, secondary key lookup 20%, secondary key only 10%.
        r = self.rand.randint(1, 10)
        if r <= 7:
          cur.execute("SELECT msg_prefix,msg,msg_length,msg_checksum FROM t1 WHERE id=%d" % idx)
          res = cur.fetchone()
          self.num_primary_select += 1
        elif r <= 9:
          cur.execute("SELECT msg_prefix,msg,msg_length,msg_checksum FROM t1 WHERE msg_prefix='%s'" % msg[0:255])
          res = cur.fetchone()
          self.num_secondary_select += 1
        # Query only the secondary index.
        else:
          cur.execute("SELECT id, msg_prefix FROM t1 WHERE msg_prefix='%s'" % msg[0:255])
          res = cur.fetchall()
          self.num_secondary_only_select += 1
          # Have to continue to next iteration since we aren't fetching other data.
          continue
        if res:
          self.validate_msg(res[0], res[1], res[2], res[3], idx)
        # Tracks whether the write puts idx into the secondary index,
        # for the optional post-write existence check below.
        insert_with_index = False
        if insert_or_update:
          if res:
            if self.rand.randint(0, 1):
              stmt = get_update(msg, idx)
            else:
              stmt = get_insert_on_dup(msg, idx)
              insert_with_index = True
            self.num_updates += 1
          else:
            r = self.rand.randint(0, 2)
            if r == 0:
              stmt = get_insert(msg, idx)
              insert_with_index = True
            elif r == 1:
              stmt = get_insert_on_dup(msg, idx)
              insert_with_index = True
            else:
              # NULL id: the chosen idx is not the inserted key, so no
              # secondary-index existence check applies.
              stmt = get_insert_null(msg)
            self.num_inserts += 1
        else:
          stmt = "DELETE FROM t1 WHERE id=%d" % idx
          self.num_deletes += 1
        query_result = cur.execute(stmt)
        # 10% probability of checking to see the key exists in secondary index.
        if self.secondary_checks and self.rand.randint(1, 10) == 1:
          cur.execute("SELECT id, msg_prefix FROM t1 WHERE msg_prefix='%s'" % msg[0:255])
          res_array = cur.fetchall()
          if insert_or_update:
            if insert_with_index:
              if not self.check_exists(res_array, idx):
                print >> self.log, "Error: Inserted row doesn't exist in secondary index"
                raise Exception("Error: Inserted row doesn't exist in secondary index")
          else:
            if self.check_exists(res_array, idx):
              print >> self.log, "Error: Deleted row still exists in secondary index"
              raise Exception("Error: Deleted row still exists in secondary index")
        if (self.loop_num % 100) == 0:
          print >> self.log, "Thread %d loop_num %d: result %d: %s" % (self.xid,
                                                        self.loop_num, query_result,
                                                        stmt)
        # 30% commit, 10% rollback, 60% don't end the trx.
        r = self.rand.randint(1,10)
        if r < 4:
          self.con.commit()
        elif r == 4:
          self.con.rollback()
      except MySQLdb.Error, e:
        if e.args[0] == 2006: # server is killed
          print >> self.log, "mysqld down, transaction %d" % self.xid
          return
        else:
          print >> self.log, "mysql error for stmt(%s) %s" % (stmt, e)
    # Final commit of any open transaction; best-effort since the server
    # may already be gone.
    try:
      self.con.commit()
    except Exception, e:
      print >> self.log, "commit error %s" % e
if __name__ == '__main__':
global LG_TMP_DIR
pid_file = sys.argv[1]
kill_db_after = int(sys.argv[2])
num_records_before = int(sys.argv[3])
num_workers = int(sys.argv[4])
num_xactions_per_worker = int(sys.argv[5])
user = sys.argv[6]
host = sys.argv[7]
port = int(sys.argv[8])
db = sys.argv[9]
do_blob = int(sys.argv[10])
max_id = int(sys.argv[11])
LG_TMP_DIR = sys.argv[12]
fake_changes = int(sys.argv[13])
checksum = int(sys.argv[14])
secondary_checks = int(sys.argv[15])
checksum_worker = None
workers = []
server_pid = int(open(pid_file).read())
log = open('/%s/main.log' % LG_TMP_DIR, 'a')
# print "kill_db_after = ",kill_db_after," num_records_before = ", | |
<filename>src/outpost/django/research/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-11-07 14:08
from __future__ import unicode_literals
from django.db import migrations
from django.conf import settings
class Migration(migrations.Migration):
initial = True
dependencies = [("base", "0001_initial")]
ops = [
(
"""
CREATE SCHEMA IF NOT EXISTS research;
""",
"""
DROP SCHEMA IF EXISTS "research";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."forschung_programm" (
FORSCHUNG_PROGRAMM_ID numeric,
FORSCHUNG_PROGRAMM_NAME varchar,
AKTIV_JN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'FORSCHUNG_PROGRAMM_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."forschung_programm";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."forschung_art" (
FORSCHUNG_ART_ID numeric,
FORSCHUNG_ART_DE varchar,
FORSCHUNG_ART_EN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'FORSCHUNG_ART_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."forschung_art";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."geldgeber" (
GELDGEBER_ID numeric,
GELDGEBER_DE varchar,
GELDGEBER_EN varchar,
STRASSE varchar,
ORT varchar,
POSTLEITZAHL varchar,
LAND_ID numeric,
URL varchar,
GELDGEBER_TYP_ID numeric
)
SERVER sqlalchemy OPTIONS (
tablename 'GELDGEBER',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."geldgeber";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."geldgeber_typ" (
GELDGEBER_TYP_ID numeric,
GELDGEBER_TYP_DE varchar,
GELDGEBER_TYP_EN varchar,
GELDGEBER_TYP_KURZ varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'GELDGEBER_TYP_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."geldgeber_typ";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."land" (
LAND_ID numeric,
LAND_DE varchar,
LAND_EN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'LAND_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."land";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."org_partner_projektfunktion" (
ORG_PARTNER_PROJEKTFUNKTION_ID numeric,
ORG_PARTNER_PROJEKTFUNKTION_DE varchar,
ORG_PARTNER_PROJEKTFUNKTION_EN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'ORG_PARTNER_PROJEKTFUNKTION_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."org_partner_projektfunktion";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."projekt_typ" (
PROJEKT_TYP_ID numeric,
PROJEKT_TYP_DE varchar,
PROJEKT_TYP_EN varchar,
PROJEKT_TYP_KURZ_DE varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'PROJEKT_TYP_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."projekt_typ";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."projekt" (
PROJEKT_ID numeric,
ORGEINHEIT_ID numeric,
PROJEKT_TYP_ID numeric,
KURZBEZEICHNUNG varchar,
PROJEKTTITEL_DE varchar,
PROJEKTTITEL_EN varchar,
ORG_PARTNER_PROJEKTFUNKTION_ID numeric,
PROJEKTLEITER_ID numeric,
KONTAKTPERSON_ID numeric,
PROJEKT_STATUS_ID numeric,
PROJEKT_URL varchar,
ABSTRACT_DE varchar,
ABSTRACT_EN varchar,
PROJEKTBEGINN_GEPLANT timestamp,
PROJEKTBEGINN_EFFEKTIV timestamp,
PROJEKTENDE_GEPLANT timestamp,
PROJEKTENDE_EFFEKTIV timestamp,
VERGABE_ART_ID numeric,
FORSCHUNG_ART_ID numeric,
VERANSTALTUNG_ART_ID numeric,
STUDIE_ART_ID numeric,
SPRACHE_ID numeric,
STAMMDATEN_UEBERTRAGUNG timestamp,
FORSCHUNG_PROGRAMM_ID numeric,
FORSCHUNG_SUBPROGRAMM varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'PROJEKT',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."projekt";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."projekt_geldgeber" (
PROJEKT_ID numeric,
GELDGEBER_ID numeric,
HAUPTGELDGEBER_JA_NEIN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'PROJEKT_GELDGEBER',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."projekt_geldgeber";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."projekt_status" (
PROJEKT_STATUS_ID numeric,
PROJEKT_STATUS varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'PROJEKT_STATUS_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."projekt_status";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."sprache" (
SPRACHE_ID numeric,
SPRACHE_DE varchar,
SPRACHE_EN varchar,
SPRACHE_EN_KURZ varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'SPRACHE_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."sprache";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."studie_art" (
STUDIE_ART_ID numeric,
STUDIE_ART_DE varchar,
STUDIE_ART_EN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'STUDIE_ART_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."studie_art";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."veranstaltung_art" (
VERANSTALTUNG_ART_ID numeric,
VERANSTALTUNG_ART_DE varchar,
VERANSTALTUNG_ART_EN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'VERANSTALTUNG_ART_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."veranstaltung_art";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."vergabe_art" (
VERGABE_ART_ID numeric,
VERGABE_ART_DE varchar,
VERGABE_ART_EN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'VERGABE_ART_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."vergabe_art";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."person_publikation" (
MEDONLINE_PERSON_ID numeric,
PUBLIKATION_ID numeric
)
SERVER sqlalchemy OPTIONS (
tablename 'PERSON_PUBLIKATION',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."person_publikation";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."orgeinheit_publikation" (
PUBLIKATION_ID numeric,
MEDONLINE_ID numeric,
PUBLIKATION_AUTORENSCHAFT_ID numeric,
ZUORDNUNGSDATUM timestamp
)
SERVER sqlalchemy OPTIONS (
tablename 'ORGEINHEIT_PUBLIKATION',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."orgeinheit_publikation";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."publikation_typ" (
PUBLIKATION_TYP_ID numeric,
PUBLIKATION_TYP_DE varchar,
PUBLIKATION_TYP_EN varchar,
SORTIERUNG_ID numeric
)
SERVER sqlalchemy OPTIONS (
tablename 'PUBLIKATION_TYP_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."publikation_typ";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."publikation_dokumenttyp" (
PUBLIKATION_DOKUMENTTYP_ID numeric,
PUBLIKATION_DOKUMENTTYP_DE varchar,
PUBLIKATION_DOKUMENTTYP_EN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'PUBLIKATION_DOKUMENTTYP_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."publikation_dokumenttyp";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."publikation" (
PUBLIKATION_ID varchar,
TITEL varchar,
AUTOR varchar,
JAHR numeric,
QUELLE varchar,
PUBLIKATION_TYP_ID numeric,
PUBLIKATION_DOKUMENTTYP_ID numeric,
SCI_ID varchar,
PUBMED_ID varchar,
DOI varchar,
PMC_ID varchar,
ABSTRACT bytea,
IMPACT_FAKTOR_NORM_MAX numeric
)
SERVER sqlalchemy OPTIONS (
tablename 'PUBLIKATION',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."publikation";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."klassifikation_oestat_2012" (
KLASSIFIKATION_OESTAT_ID numeric,
KLASSIFIKATION_OESTAT_DE varchar,
KLASSIFIKATION_OESTAT_EN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'KLASSIFIKATION_OESTAT_2012_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."klassifikation_oestat_2012";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."person_fachkenntnis" (
PERSON_FACHKENNTNIS_ID numeric,
MEDONLINE_PERSON_ID numeric,
FACHKENNTNIS_DE varchar,
FACHKENNTNIS_EN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'PERSON_FACHKENNTNIS',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."person_fachkenntnis";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."person_kenntnis" (
PERSON_KENNTNIS_ID numeric,
MEDONLINE_PERSON_ID numeric,
KENNTNIS_DE varchar,
KENNTNIS_EN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'PERSON_KENNTNIS',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."person_kenntnis";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."person_klass_oestat_2012" (
KLASSIFIKATION_OESTAT_ID numeric,
MEDONLINE_PERSON_ID numeric
)
SERVER sqlalchemy OPTIONS (
tablename 'PERSON_KLASS_OESTAT_2012',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."person_klass_oestat_2012";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."person_weiterbildung" (
PERSON_WEITERBILDUNG_ID numeric,
MEDONLINE_PERSON_ID numeric,
PERSON_WEITERBILDUNG_DE varchar,
PERSON_WEITERBILDUNG_EN varchar,
JAHR varchar,
JAHR_BIS varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'PERSON_WEITERBILDUNG',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."person_weiterbildung";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."publikation_autorenschaft" (
PUBLIKATION_AUTORENSCHAFT_ID numeric,
PUBLIKATION_AUTORENSCHAFT_DE varchar,
PUBLIKATION_AUTORENSCHAFT_EN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'PUBLIKATION_AUTORENSCHAFT_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."publikation_autorenschaft";
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_classification" AS SELECT
KLASSIFIKATION_OESTAT_ID::integer AS id,
hstore(
ARRAY['de', 'en'],
ARRAY[KLASSIFIKATION_OESTAT_DE, KLASSIFIKATION_OESTAT_EN]
) AS name
FROM
"research"."klassifikation_oestat_2012"
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_classification";
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_classification_person" AS SELECT
KLASSIFIKATION_OESTAT_ID::integer AS classification_id,
MEDONLINE_PERSON_ID::integer AS person_id
FROM
"research"."person_klass_oestat_2012"
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_classification_person";
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_expertise" AS SELECT
PERSON_FACHKENNTNIS_ID::integer AS id,
MEDONLINE_PERSON_ID::integer AS person_id,
hstore(
ARRAY['de', 'en'],
ARRAY[FACHKENNTNIS_DE, FACHKENNTNIS_EN]
) AS name
FROM
"research"."person_fachkenntnis"
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_expertise";
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_knowledge" AS SELECT
PERSON_KENNTNIS_ID::integer AS id,
MEDONLINE_PERSON_ID::integer AS person_id,
hstore(
ARRAY['de', 'en'],
ARRAY[KENNTNIS_DE, KENNTNIS_EN]
) AS name
FROM
"research"."person_kenntnis"
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_knowledge";
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_education" AS SELECT
PERSON_WEITERBILDUNG_ID::integer AS id,
MEDONLINE_PERSON_ID::integer AS person_id,
hstore(
ARRAY['de', 'en'],
ARRAY[PERSON_WEITERBILDUNG_DE, PERSON_WEITERBILDUNG_EN]
) AS name,
JAHR AS from,
JAHR_BIS AS to
FROM
"research"."person_weiterbildung"
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_education";
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_publicationauthorship" AS SELECT
PUBLIKATION_AUTORENSCHAFT_ID::integer AS id,
hstore(
ARRAY['de', 'en'],
ARRAY[PUBLIKATION_AUTORENSCHAFT_DE, PUBLIKATION_AUTORENSCHAFT_EN]
) AS name
FROM
"research"."publikation_autorenschaft"
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_publicationauthorship";
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_program" AS SELECT
FORSCHUNG_PROGRAMM_ID::integer AS id,
FORSCHUNG_PROGRAMM_NAME AS name,
COALESCE((LOWER(AKTIV_JN) = 'n'), FALSE)::boolean AS active
FROM
"research"."forschung_programm"
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_program";
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_projectresearch" AS SELECT
FORSCHUNG_ART_ID::integer AS id,
FORSCHUNG_ART_DE AS name
FROM
"research"."forschung_art"
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_projectresearch";
""",
),
(
"""
CREATE UNIQUE INDEX research_projectresearch_id_idx ON "public"."research_projectresearch" ("id");
""",
"""
DROP | |
for x in range(z):#Writes second part
if x == 5:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(CrashBomberReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif posB[y][0] == "Cutman":
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(RollingCutterReceived)
RollingCutterReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 7:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(RollingCutterReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif posB[y][0] == "Gutsman":
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(SuperArmReceived)
SuperArmReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 5:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(SuperArmReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif posB[y][0] == "Iceman":
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(IceSlasherReceived)
IceSlasherReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 3:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(IceSlasherReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif posB[y][0] == "Bombman":
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(HyperBombReceived)
HyperBombReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 5:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(HyperBombReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif posB[y][0] == "Fireman":
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(FireStormReceived)
FireStormReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 4:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(FireStormReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif posB[y][0] == "Elecman":
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(ThunderBeamReceived)
ThunderBeamReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 7:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(ThunderBeamReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
# BUG FIX: the Elecman branch compared GiveJet against `cy` (undefined name,
# NameError at runtime); every sibling boss branch compares against `y`.
if GiveJet == y:
    for x in range(13):  # If they are supposed to give Item 2, write its text
        ROM.write(Item2text[x])
        Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif posB[y][0] == "Sparkman":
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(SparkShotReceived)
SparkShotReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 5:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(SparkShotReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == | |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
from tqdm import tnrange, tqdm_notebook
import collections
import math
import numpy as np
from collections import defaultdict
import threading
import networkx as nx
import pandas as pd
import operator
def graph_dict():
    '''
    Build the adjacency dictionary of the graph from "wiki-topcats-reduced.txt".

    Each row of the file is "node\titsadjacentnode": the first field is a
    source node, the second the target of one of its outgoing edges.  A node
    can have many edges, so each key maps to a list of successors.

    Returns:
        defaultdict(list): source node -> list of adjacent (target) nodes.
    '''
    graph = defaultdict(list)
    with open("wiki-topcats-reduced.txt") as f:
        for line in f:
            parts = line.strip().split("\t")
            # defaultdict creates the list on first access, so the original
            # `if parts[0] in graph.keys()` membership check was redundant.
            graph[parts[0]].append(parts[1])
    return graph
def create_graph_and_dict():
    '''
    Build the directed graph with networkx and the category dictionary.

    The graph edges come from "wiki-topcats-reduced.txt".  Each node then
    receives a 'Category' attribute listing every sufficiently large
    (> 3500 members) category it belongs to, read from
    "wiki-topcats-categories.txt".

    Returns:
        (nx.DiGraph, dict): the graph and category name -> member-node list.
    '''
    G = nx.DiGraph()
    with open("wiki-topcats-reduced.txt") as f:
        for line in f:
            parts = line.strip().split("\t")
            G.add_node(parts[0])
            G.add_edge(parts[0], parts[1])
    # Adding attribute 'Category' to each node of the graph.
    # NOTE(review): G.node was removed in networkx >= 2.4 (now G.nodes);
    # kept as-is to match the networkx version this code was written for.
    for i in G:
        G.node[i]['Category'] = []
    category_dict = defaultdict()
    with open("wiki-topcats-categories.txt", "r") as f:
        # PERF FIX: membership tests against G.nodes() (a list in networkx 1.x)
        # were O(n) each; a set makes each `el in node_set` O(1).
        node_set = set(G.nodes())
        for line in f:
            cat_list = line.strip().split(";")
            category = cat_list[0][9:]  # drops a fixed 9-char prefix (presumably "Category:") -- TODO confirm against the file
            members = cat_list[1].strip().split(" ")
            if len(members) > 3500:
                # Keep only category members that exist in the reduced graph.
                category_dict[category] = [el for el in members if el in node_set]
    # Assign the list of categories to each node.
    for cat in category_dict:
        for e in category_dict[cat]:
            if e in G:
                G.node[e]['Category'].append(cat)
    return G, category_dict
# In our Algorithm for each root which is in the input category we go through graph, and each time we check each node attributes
#if it is belonging to the categories that we are looking for and at the same time it doesn't belong to input category.
#Therefore , each time the function is called , the nodes in the path of the roots are checked for 4 category
def distance_graph(G, C0, C1, category_dict):
    '''
    Compute the median BFS distance from category C0 to each category in C1.

    For (at most) 2000 source nodes of C0 a breadth-first search is run over
    G; whenever a visited node belongs to one of the categories in C1 (and
    not to C0) its BFS depth is recorded for that category.  Target nodes
    never reached from a source count as math.inf, so each category's sample
    size is len(category_dict[C1[k]]) * len(sources).

    Returns:
        list of (category, median_distance) tuples, one per entry of C1.
    '''
    sources = category_dict[C0][0:2000]
    # One distance list per target category -- replaces the original
    # eval('shortest_paths_%d') indirection, which was fragile and slow.
    shortest_paths = [list() for _ in C1]
    for i in tnrange(len(sources)):
        root = sources[i]
        seen = set([root])
        queue = collections.deque([(root, 0)])
        while queue:
            vertex, depth = queue.popleft()
            # NOTE(review): G.node was removed in networkx >= 2.4 (now G.nodes).
            cats = G.node[vertex]['Category']
            if C0 not in cats:
                for k in range(len(C1)):
                    if C1[k] in cats:
                        # BUG FIX: record this vertex's own BFS depth.  The
                        # original appended the loop-carried `step` variable,
                        # which is off by one for every node after the first
                        # one popped at a given BFS level.
                        shortest_paths[k].append(depth)
                        break  # the original if/elif chain matched one category only
            for neighbor in G[vertex]:
                if neighbor not in seen:
                    seen.add(neighbor)
                    queue.append((neighbor, depth + 1))
    # Pad with inf for every (source, target-node) pair that was never reached.
    for k in range(len(C1)):
        missing = len(category_dict[C1[k]]) * len(sources) - len(shortest_paths[k])
        if missing > 0:
            shortest_paths[k].extend([math.inf] * missing)
    return [(C1[k], np.median(np.array(sorted(shortest_paths[k])))) for k in range(len(C1))]
#@autojit
def distance_graph2(G, C0, C1, category_dict):
    '''
    Same computation as distance_graph, run for the remaining categories
    (here C1 holds three categories instead of four).

    BUG FIXES vs the original: a comma was missing between the 2nd and 3rd
    tuples of the returned list (so the 2nd tuple was *called*, raising
    TypeError), and the recorded distance now uses the popped vertex's own
    BFS depth instead of the stale loop-carried `step` variable.

    Returns:
        list of (category, median_distance) tuples, one per entry of C1.
    '''
    sources = category_dict[C0][0:2000]
    shortest_paths = [list() for _ in C1]
    for i in tnrange(len(sources)):
        root = sources[i]
        seen = set([root])
        queue = collections.deque([(root, 0)])
        while queue:
            vertex, depth = queue.popleft()
            cats = G.node[vertex]['Category']
            if C0 not in cats:
                for k in range(len(C1)):
                    if C1[k] in cats:
                        shortest_paths[k].append(depth)
                        break
            for neighbor in G[vertex]:
                if neighbor not in seen:
                    seen.add(neighbor)
                    queue.append((neighbor, depth + 1))
    # Unreached (source, target-node) pairs count as infinite distance.
    for k in range(len(C1)):
        missing = len(category_dict[C1[k]]) * len(sources) - len(shortest_paths[k])
        if missing > 0:
            shortest_paths[k].extend([math.inf] * missing)
    return [(C1[k], np.median(np.array(sorted(shortest_paths[k])))) for k in range(len(C1))]
def steps(G,category_dict):
'''
This method is been created for computing the subgraph.
At first, we re-assign for each node only one category: exactly the category to which it belongs and that is the closest category to the input category ( C0 ).
After this, we initialize each node of the original graph with a new attribute: 'Score'.
Hence, we compute the subgraph for the list of nodes present in the input category, as first nodes in the subgraph and separately from the others.
And then we iterate through each following category that is present into the distance ranking.
For each category we create the subgraph from all the nodes (also the previouses),
but the iteration for give scores is done only with the nodes of the category considered;
For each of these nodes we checked in_edges and the nodes related if:
- the nodes haven't still had the score.
- if the nodes belong to that category as well.
- if they already have been assigned the score
In this way we could assign scores to each node of each category
'''
dfg=pd.read_csv('ranking_table.csv')
for e in G:
| |
<gh_stars>1-10
from django.http import HttpResponse
from django.template import loader
from django.http import JsonResponse
from django.core import serializers
import json
import sys
import io
from contextlib import redirect_stdout
import OmniDB_app.include.Spartacus as Spartacus
import OmniDB_app.include.Spartacus.Database as Database
import OmniDB_app.include.Spartacus.Utils as Utils
import OmniDB_app.include.OmniDatabase as OmniDatabase
from OmniDB_app.include.Session import Session
from datetime import datetime
from RestrictedPython import compile_restricted
from RestrictedPython.Guards import safe_builtins, full_write_guard, \
guarded_iter_unpack_sequence, guarded_unpack_sequence
from RestrictedPython.Utilities import utility_builtins
from RestrictedPython.Eval import default_guarded_getitem
from RestrictedPython.Eval import RestrictionCapableEval
#load plugins to retrieve list of monitoring units of all loaded plugins
from OmniDB_app.views.plugins import monitoring_units
def get_monitor_nodes(request):
    """Return all monitored nodes registered in pmon_nodes as JSON.

    Response payload: ``v_data`` is a list of node dicts on success or the
    error text on failure; ``v_error_id`` 1 flags a missing OmniDB session.
    """
    payload = {'v_data': '', 'v_error': False, 'v_error_id': -1}
    # Reject requests that carry no authenticated OmniDB session.
    if not request.session.get('omnidb_session'):
        payload['v_error'] = True
        payload['v_error_id'] = 1
        return JsonResponse(payload)
    session = request.session.get('omnidb_session')
    payload['v_data'] = []
    try:
        nodes = session.v_omnidb_database.v_connection.Query('''
            select node_id, node_name, dbt_st_name, server, port, service, user
            from pmon_nodes
        ''')
        payload['v_data'] = [
            {
                'v_id': row['node_id'],
                'v_name': row['node_name'],
                'v_technology': row['dbt_st_name'],
                'v_server': row['server'],
                'v_port': row['port'],
                'v_service': row['service'],
                'v_user': row['user'],
            }
            for row in nodes.Rows
        ]
    except Exception as exc:
        payload['v_data'] = str(exc)
        payload['v_error'] = True
    return JsonResponse(payload)
def get_monitor_unit_list(request):
    """Return the grid rows listing all monitoring units available for the tab's DBMS.

    Combines plugin-provided units (from ``monitoring_units``) with units stored
    in the ``mon_units`` table (built-in rows plus the current user's custom ones).
    ``p_mode`` 0 adds action-icon HTML and the refresh interval column; any other
    mode returns a reduced 3-column row. Responds with
    ``{'id_list': [...], 'data': [...]}`` in ``v_data``, or the error text on failure.
    """
    v_return = {}
    v_return['v_data'] = ''
    v_return['v_error'] = False
    v_return['v_error_id'] = -1
    #Invalid session
    if not request.session.get('omnidb_session'):
        v_return['v_error'] = True
        v_return['v_error_id'] = 1
        return JsonResponse(v_return)
    v_session = request.session.get('omnidb_session')
    json_object = json.loads(request.POST.get('data', None))
    v_database_index = json_object['p_database_index']
    v_tab_id = json_object['p_tab_id']
    v_mode = json_object['p_mode']
    # Connection bound to the requesting browser tab; its v_db_type selects
    # which units apply.
    v_database = v_session.v_tab_connections[v_tab_id]
    # Built-in units (user_id null) and this user's custom units for the DBMS.
    v_query = '''
        select unit_id,
               title,
               case type
                   when 'chart' then 'Chart'
                   when 'chart_append' then 'Chart (Append)'
                   when 'grid' then 'Grid'
                   when 'graph' then 'Graph'
               end type,
               user_id,
               interval
        from mon_units
        where dbt_st_name = '{0}'
        and (user_id is null or user_id = {1})
        order by user_id desc, type
    '''.format(v_database.v_db_type,v_session.v_user_id)
    v_return['v_data'] = []
    v_data = []
    v_id_list = []
    try:
        #plugins units
        for mon_unit in monitoring_units:
            if mon_unit['dbms'] == v_database.v_db_type:
                # NOTE(review): icon tooltip says 'Edit' but the handler
                # includes the unit -- looks like a copy-paste; confirm in UI.
                v_actions = '''
                    <i title='Edit' class='fas fa-check-circle action-grid action-check' onclick='includeMonitorUnit({0},"{1}")'></i>
                '''.format(mon_unit['id'],mon_unit['plugin_name'])
                # Map the internal type tag to its display label.
                if mon_unit['type'] == 'chart':
                    v_type = 'Chart'
                elif mon_unit['type'] == 'chart_append':
                    v_type = 'Chart (Append)'
                elif mon_unit['type'] == 'grid':
                    v_type = 'Grid'
                elif mon_unit['type'] == 'graph':
                    v_type = 'Graph'
                if v_mode==0:
                    v_data.append([v_actions,mon_unit['title'],v_type,mon_unit['interval']])
                else:
                    v_data.append([mon_unit['plugin_name'],mon_unit['title'],v_type])
                v_id_list.append(mon_unit['id'])
        # Database-stored units (labels already mapped by the SQL CASE above).
        v_units = v_session.v_omnidb_database.v_connection.Query(v_query)
        for v_unit in v_units.Rows:
            v_actions = '''
                <i title='Edit' class='fas fa-check-circle action-grid action-check' onclick='includeMonitorUnit({0})'></i>
            '''.format(v_unit['unit_id'])
            #custom unit, add edit and delete actions
            if v_unit['user_id']!=None:
                v_actions += '''
                    <i title='Edit' class='fas fa-edit action-grid action-edit-monitor' onclick='editMonitorUnit({0})'></i>
                    <i title='Delete' class='fas fa-times action-grid action-close' onclick='deleteMonitorUnit({0})'></i>
                '''.format(v_unit['unit_id'])
            if v_mode==0:
                v_data.append([v_actions,v_unit['title'],v_unit['type'],v_unit['interval']])
            else:
                v_data.append(['',v_unit['title'],v_unit['type']])
            v_id_list.append(v_unit['unit_id'])
        v_return['v_data'] = { 'id_list': v_id_list, 'data': v_data }
    except Exception as exc:
        v_return['v_data'] = str(exc)
        v_return['v_error'] = True
        return JsonResponse(v_return)
    return JsonResponse(v_return)
def get_monitor_unit_details(request):
    """Fetch title, type, interval and both scripts for one monitoring unit.

    Reads ``p_unit_id`` from the POSTed JSON and returns the matching
    ``mon_units`` row as a dict in ``v_data``; errors are reported with
    ``v_error`` set and the exception text in ``v_data``.
    """
    payload = {'v_data': '', 'v_error': False, 'v_error_id': -1}
    # No OmniDB session -> error id 1.
    if not request.session.get('omnidb_session'):
        payload['v_error'] = True
        payload['v_error_id'] = 1
        return JsonResponse(payload)
    session = request.session.get('omnidb_session')
    unit_id = json.loads(request.POST.get('data', None))['p_unit_id']
    query = '''
        select title,
               type,
               interval,
               coalesce(script_chart,'') as script_chart,
               coalesce(script_data,'') as script_data
        from mon_units
        where unit_id = {0}
    '''.format(unit_id)
    try:
        row = session.v_omnidb_database.v_connection.Query(query).Rows[0]
        payload['v_data'] = {
            'title': row['title'],
            'type': row['type'],
            'interval': row['interval'],
            'script_chart': row['script_chart'],
            'script_data': row['script_data'],
        }
    except Exception as exc:
        payload['v_data'] = str(exc)
        payload['v_error'] = True
    return JsonResponse(payload)
def get_monitor_units(request):
    """Return the units saved for the current user/connection, seeding defaults first.

    Looks up ``units_users_connections`` for the tab's connection. If the user
    has no saved units yet, the DBMS defaults (``mon_units.is_default = 1``)
    and default plugin units are inserted inside a transaction and the
    function recurses once to return the freshly saved list.
    """
    v_return = {}
    v_return['v_data'] = ''
    v_return['v_error'] = False
    v_return['v_error_id'] = -1
    #Invalid session
    if not request.session.get('omnidb_session'):
        v_return['v_error'] = True
        v_return['v_error_id'] = 1
        return JsonResponse(v_return)
    v_session = request.session.get('omnidb_session')
    json_object = json.loads(request.POST.get('data', None))
    v_database_index = json_object['p_database_index']
    v_tab_id = json_object['p_tab_id']
    v_database = v_session.v_tab_connections[v_tab_id]
    v_return['v_data'] = []
    #saving units for current user/connection if there are none
    # First branch of the union: DB-defined units; second: plugin units
    # (plugin_name <> ''), whose titles are resolved from monitoring_units.
    v_query = '''
        select *
        from (
        select uuc.uuc_id,mu.unit_id, mu.title, uuc.interval, uuc.plugin_name
        from mon_units mu,units_users_connections uuc
        where mu.dbt_st_name = '{0}'
        and uuc.unit_id = mu.unit_id
        and uuc.user_id = {1}
        and uuc.conn_id = {2}
        and uuc.plugin_name = ''
        union all
        select uuc.uuc_id,uuc.unit_id, '' as title, uuc.interval, uuc.plugin_name
        from units_users_connections uuc
        where uuc.user_id = {1}
        and uuc.conn_id = {2}
        and uuc.plugin_name <> '')
        order by uuc_id desc;
    '''.format(v_database.v_db_type,v_session.v_user_id,v_database.v_conn_id)
    try:
        v_count = 0  # NOTE(review): never read afterwards -- appears to be dead code
        v_existing_units = v_session.v_omnidb_database.v_connection.Query(v_query)
        v_existing_data = []
        for v_unit in v_existing_units.Rows:
            if v_unit['plugin_name']=='':
                v_existing_data.append(
                    {
                        'uuc_id': v_unit['uuc_id'],
                        'unit_id': v_unit['unit_id'],
                        'title': v_unit['title'],
                        'interval': v_unit['interval'],
                        'plugin_name': v_unit['plugin_name'],
                    }
                )
            else:
                #search plugin data
                unit_data = None  # NOTE(review): assigned but unused below
                for mon_unit in monitoring_units:
                    # Match on id + plugin + DBMS; silently skipped if the
                    # plugin providing this saved unit is no longer loaded.
                    if mon_unit['id'] == v_unit['unit_id'] and mon_unit['plugin_name'] == v_unit['plugin_name'] and mon_unit['dbms'] == v_database.v_db_type:
                        v_existing_data.append(
                            {
                                'uuc_id': v_unit['uuc_id'],
                                'unit_id': v_unit['unit_id'],
                                'title': mon_unit['title'],
                                'interval': v_unit['interval'],
                                'plugin_name': v_unit['plugin_name'],
                            }
                        )
                        break
        #save default units
        if len(v_existing_data) == 0:
            v_query = '''
                select mu.unit_id, mu.interval
                from mon_units mu
                where mu.dbt_st_name = '{0}'
                and mu.is_default = 1
            '''.format(v_database.v_db_type)
            v_units = v_session.v_omnidb_database.v_connection.Query(v_query)
            v_session.v_omnidb_database.v_connection.Open();
            v_session.v_omnidb_database.v_connection.Execute('BEGIN TRANSACTION;');
            for v_unit in v_units.Rows:
                # max(uuc_id)+1 emulates an autoincrement key.
                v_session.v_omnidb_database.v_connection.Execute('''
                    insert into units_users_connections values
                    ((select coalesce(max(uuc_id), 0) + 1 from units_users_connections),
                    {0},
                    {1},
                    {2},
                    {3},
                    '');
                '''.format(v_unit['unit_id'],v_session.v_user_id,v_database.v_conn_id,v_unit['interval']));
            #Saving default plugin units
            for mon_unit in monitoring_units:
                if mon_unit['default'] == True and mon_unit['dbms'] == v_database.v_db_type:
                    v_session.v_omnidb_database.v_connection.Execute('''
                        insert into units_users_connections values
                        ((select coalesce(max(uuc_id), 0) + 1 from units_users_connections),
                        {0},
                        {1},
                        {2},
                        {3},
                        '{4}');
                    '''.format(mon_unit['id'],v_session.v_user_id,v_database.v_conn_id,mon_unit['interval'],mon_unit['plugin_name']));
            v_session.v_omnidb_database.v_connection.Execute('COMMIT;');
            v_session.v_omnidb_database.v_connection.Close();
            # Recurse exactly once: the insert above guarantees the next call
            # takes the non-empty branch.
            return get_monitor_units(request)
        else:
            for v_unit in v_existing_data:
                v_unit_data = {
                    'v_saved_id': v_unit['uuc_id'],
                    'v_id': v_unit['unit_id'],
                    'v_title': v_unit['title'],
                    'v_plugin_name': v_unit['plugin_name'],
                    'v_interval': v_unit['interval']
                }
                v_return['v_data'].append(v_unit_data)
    except Exception as exc:
        v_return['v_data'] = str(exc)
        v_return['v_error'] = True
        return JsonResponse(v_return)
    return JsonResponse(v_return)
def get_monitor_unit_template(request):
    """Return the scripts/type/interval template for one unit.

    DB-defined units (empty ``p_unit_plugin_name``) are read from
    ``mon_units``; plugin units are looked up in ``monitoring_units``.
    """
    v_return = {}
    v_return['v_data'] = ''
    v_return['v_error'] = False
    v_return['v_error_id'] = -1
    #Invalid session
    if not request.session.get('omnidb_session'):
        v_return['v_error'] = True
        v_return['v_error_id'] = 1
        return JsonResponse(v_return)
    v_session = request.session.get('omnidb_session')
    json_object = json.loads(request.POST.get('data', None))
    v_unit_id = json_object['p_unit_id']
    v_unit_plugin_name = json_object['p_unit_plugin_name']
    if v_unit_plugin_name=='':
        v_query = '''
            select coalesce(script_chart,'') as script_chart, coalesce(script_data,'') as script_data, type, interval
            from mon_units where unit_id = '{0}'
        '''.format(v_unit_id)
        v_return['v_data'] = ''
        try:
            # Rows[0] raises IndexError for an unknown id -> reported via v_error.
            v_return['v_data'] = v_session.v_omnidb_database.v_connection.Query(v_query).Rows[0]
        except Exception as exc:
            v_return['v_data'] = str(exc)
            v_return['v_error'] = True
            return JsonResponse(v_return)
    else:
        #search plugin data
        # NOTE(review): if no plugin unit matches, v_data silently stays ''
        # with v_error False -- the caller cannot distinguish "not found".
        for mon_unit in monitoring_units:
            if mon_unit['id'] == v_unit_id and mon_unit['plugin_name'] == v_unit_plugin_name:
                unit_data = mon_unit
                v_return['v_data'] = {
                    'interval': unit_data['interval'],
                    'script_chart': unit_data['script_chart'],
                    'script_data': unit_data['script_data'],
                    'type': unit_data['type']
                }
                break
    return JsonResponse(v_return)
def save_monitor_unit(request):
    """Create or update a custom monitoring unit (a ``mon_units`` row).

    POSTed JSON carries the unit fields (``p_unit_id`` empty/None means
    create). On success ``v_data`` holds the unit id; on failure ``v_error``
    is set with the message in ``v_data``.
    """
    v_return = {}
    v_return['v_data'] = ''
    v_return['v_error'] = False
    v_return['v_error_id'] = -1
    #Invalid session
    if not request.session.get('omnidb_session'):
        v_return['v_error'] = True
        v_return['v_error_id'] = 1
        return JsonResponse(v_return)
    v_session = request.session.get('omnidb_session')
    json_object = json.loads(request.POST.get('data', None))
    v_unit_id = json_object['p_unit_id']
    v_unit_name = json_object['p_unit_name']
    v_unit_type = json_object['p_unit_type']
    v_unit_interval = json_object['p_unit_interval']
    v_unit_script_chart = json_object['p_unit_script_chart']
    v_unit_script_data = json_object['p_unit_script_data']
    v_database_index = json_object['p_database_index']
    v_tab_id = json_object['p_tab_id']
    v_database = v_session.v_tab_connections[v_tab_id]
    #Check database prompt timeout
    v_timeout = v_session.DatabaseReachPasswordTimeout(int(v_database_index))
    if v_timeout['timeout']:
        v_return['v_data'] = {'password_timeout': True, 'message': v_timeout['message'] }
        v_return['v_error'] = True
        return JsonResponse(v_return)
    if v_unit_interval==None:
        v_unit_interval = 30
    # BUG FIX: the title was interpolated into the SQL without escaping, so a
    # name containing a single quote broke the statement (and was injectable).
    # Escape it exactly like the two scripts are escaped below.
    # NOTE(review): string-built SQL remains injection-prone overall
    # (v_unit_type, v_unit_interval); parameterized queries would be the
    # proper fix if the Spartacus layer supports them.
    v_unit_name_esc = v_unit_name.replace("'", "''")
    try:
        #new unit
        if not v_unit_id:
            v_session.v_omnidb_database.v_connection.Open()
            v_session.v_omnidb_database.v_connection.Execute('''
                insert into mon_units values (
                (select coalesce(max(unit_id), 0) + 1 from mon_units),'{0}','{1}','{2}','{3}','{4}',0,{5},{6})
            '''.format(v_database.v_db_type,v_unit_script_chart.replace("'","''"),v_unit_script_data.replace("'","''"),v_unit_type,v_unit_name_esc,v_session.v_user_id,v_unit_interval))
            # max(unit_id) right after the insert recovers the generated key.
            v_inserted_id = v_session.v_omnidb_database.v_connection.ExecuteScalar('''
                select coalesce(max(unit_id), 0) from mon_units
            ''')
            v_session.v_omnidb_database.v_connection.Close()
            v_return['v_data'] = v_inserted_id
        #existing unit
        else:
            v_return['v_data'] = v_unit_id
            v_session.v_omnidb_database.v_connection.Execute('''
                update mon_units
                set dbt_st_name = '{0}',
                    script_chart = '{1}',
                    script_data = '{2}',
                    type = '{3}',
                    title = '{4}',
                    interval = {5}
                where unit_id = {6}
            '''.format(v_database.v_db_type,v_unit_script_chart.replace("'", "''"),v_unit_script_data.replace("'", "''"),v_unit_type,v_unit_name_esc,v_unit_interval,v_unit_id))
    except Exception as exc:
        v_return['v_data'] = {'password_timeout': True, 'message': str(exc) }
        v_return['v_error'] = True
        return JsonResponse(v_return)
    return JsonResponse(v_return)
def delete_monitor_unit(request):
    """Delete a custom monitoring unit and its saved per-connection entries.

    Removes the ``mon_units`` row for ``p_unit_id`` plus every non-plugin
    reference to it in ``units_users_connections``.
    """
    payload = {'v_data': '', 'v_error': False, 'v_error_id': -1}
    # No OmniDB session -> error id 1.
    if not request.session.get('omnidb_session'):
        payload['v_error'] = True
        payload['v_error_id'] = 1
        return JsonResponse(payload)
    session = request.session.get('omnidb_session')
    unit_id = json.loads(request.POST.get('data', None))['p_unit_id']
    try:
        connection = session.v_omnidb_database.v_connection
        connection.Execute('''
            delete from mon_units
            where unit_id = {0}
        '''.format(unit_id))
        connection.Execute('''
            delete from units_users_connections
            where unit_id = {0}
            and plugin_name = ''
        '''.format(unit_id))
    except Exception as exc:
        payload['v_data'] = str(exc)
        payload['v_error'] = True
    return JsonResponse(payload)
def remove_saved_monitor_unit(request):
    """Remove one saved unit (a ``units_users_connections`` row) by its uuc_id."""
    payload = {'v_data': '', 'v_error': False, 'v_error_id': -1}
    # No OmniDB session -> error id 1.
    if not request.session.get('omnidb_session'):
        payload['v_error'] = True
        payload['v_error_id'] = 1
        return JsonResponse(payload)
    session = request.session.get('omnidb_session')
    saved_id = json.loads(request.POST.get('data', None))['p_saved_id']
    try:
        session.v_omnidb_database.v_connection.Execute('''
            delete from units_users_connections
            where uuc_id = {0}
        '''.format(saved_id))
    except Exception as exc:
        payload['v_data'] = str(exc)
        payload['v_error'] = True
    return JsonResponse(payload)
def update_saved_monitor_unit_interval(request):
    """Change the refresh interval of one saved unit identified by uuc_id."""
    payload = {'v_data': '', 'v_error': False, 'v_error_id': -1}
    # No OmniDB session -> error id 1.
    if not request.session.get('omnidb_session'):
        payload['v_error'] = True
        payload['v_error_id'] = 1
        return JsonResponse(payload)
    session = request.session.get('omnidb_session')
    body = json.loads(request.POST.get('data', None))
    saved_id = body['p_saved_id']
    interval = body['p_interval']
    try:
        session.v_omnidb_database.v_connection.Execute('''
            update units_users_connections
            set interval = {0}
            where uuc_id = {1}
        '''.format(interval, saved_id))
    except Exception as exc:
        payload['v_data'] = str(exc)
        payload['v_error'] = True
    return JsonResponse(payload)
def refresh_monitor_units(request):
v_return = {}
v_return['v_data'] = | |
import time
import uuid
import neat
import uuid
import numpy as np
import vrep.vrep as vrep
import threading
import pickle
from datetime import datetime, timedelta
from vision.tracker import get_marker_object
from robot.vrep_robot import VrepRobot
from utility.path_tracking import follow_path, transform_pos_angle, create_grid
from utility.util_functions import scale, euclidean_distance, \
f_wheel_center, f_straight_movements, \
f_obstacle_dist, scale, scale_thymio_sensors, \
normalize_0_1, f_t_obstacle_avoidance, thymio_position, \
flatten_dict, calc_behavioral_features, save_debug_data
try:
from robot.evolved_robot import EvolvedRobot
except ImportError as error:
print(error.__class__.__name__ + ": " + 'DBus works only on linux!')
from multiprocessing import current_process
from vrep.control_env import get_object_handle, get_pose, set_pose
import schedule
def eval_genomes_hardware(individual, settings, genomes, config, generation):
    """Evaluation function to evaluate NEAT genomes on Thymio robot.

    Delegates each (id, genome) pair to the single-genome hardware
    evaluator; the return value is discarded (fitness is recorded on the
    genome by the evaluator itself).
    """
    for _, genome in genomes:
        eval_genome_hardware(individual, settings, genome, model=None,
                             config=config, generation=generation)
def eval_genomes_simulation(individual, settings, genomes, config, generation):
    """Evaluation function to evaluate NEAT genomes in VREP simulator.

    Runs each genome through the single-genome simulator evaluator
    (positional signature: individual, settings, model=None, config,
    generation, genome); results are discarded.
    """
    for _, genome in genomes:
        eval_genome_simulation(individual, settings, None, config,
                               generation, genome)
def post_eval_genome(individual, settings, genome, model=None, config=None, generation=None):
    """Post evaluation of controllers using NEAT.

    Only used for testing controllers evolved using NEAT.

    :individual: `VrepRobot` or `EvolvedRobot` (Thymio)
    :genome: controller
    :config: NN configuration

    Returns the individual on success, ``None`` when the simulation fails
    to start/stop or the individual type is not recognised.
    """
    print('Postevaluation of {0} started!'.format(type(individual).__name__))
    # neural network initialization: NEAT FeedForwardNetwork or a model
    # exposing .predict() (branched on below by type name)
    network = init_network(genome, config, model)
    if type(individual).__name__ == 'VrepRobot':
        individual.v_chromosome = genome
        individual.id = genome.key
        # Enable the synchronous mode so the simulation only advances on an
        # explicit trigger from this client.
        vrep.simxSynchronous(settings.client_id, True)
        if (vrep.simxStartSimulation(settings.client_id, vrep.simx_opmode_oneshot) == -1):
            return
        # collision detection initialization
        # NOTE(review): uses individual.client_id here but settings.client_id
        # everywhere else in this branch — confirm both refer to the same
        # V-REP connection.
        collision_handle, collision = init_collision(individual.client_id)
        dt = 0.05  # simulation step assumed to be 50 ms — TODO confirm scene dt
        runtime = 0
        steps = 0
        # Run until collision or the configured wall-clock budget is spent.
        while not collision and settings.run_time > runtime:
            # The first simulation step waits for a trigger before being executed
            vrep.simxSynchronousTrigger(settings.client_id)
            _, collision = vrep.simxReadCollision(
                settings.client_id, collision_handle, vrep.simx_opmode_buffer)
            # refresh the simulated proximity-sensor readings
            individual.v_neuro_loop()
            # input data to the neural network
            # NOTE(review): if the network type matches neither branch,
            # net_output is unbound and the next line raises NameError.
            if type(network).__name__ == 'FeedForwardNetwork':
                net_output = network.activate(
                    individual.v_norm_sensor_activation)
            if type(network).__name__ == 'Sequential':
                net_output = network.predict(
                    (individual.v_norm_sensor_activation).reshape((1, 7)))[0]
            # scale network outputs to the [-2, 2] wheel-speed range (thymio)
            scaled_output = np.array(
                [scale(xi, -2.0, 2.0) for xi in net_output])
            # set motor wheel speeds
            individual.v_set_motors(*list(scaled_output))
            # After this call, the first simulation step is finished
            vrep.simxGetPingTime(settings.client_id)
            runtime += dt
            steps += 1
        # Before closing the connection to V-REP, make sure that the last
        # command sent out had time to arrive.
        vrep.simxGetPingTime(settings.client_id)
        if (vrep.simxStopSimulation(settings.client_id, settings.op_mode) == -1):
            return
        return individual
    elif type(individual).__name__ == 'EvolvedRobot':
        # Physical Thymio driven via camera markers; simulator mirrors pose.
        init_position = np.array([0.19, 0.22])  # start position in metres — TODO confirm units
        # Build Scene and get the obstacles grid
        obstacle_grid = build_scene(
            settings.config_scene, settings, individual.client_id)
        individual.chromosome = genome
        individual.id = genome.key
        # current Thymio pose from the overhead vision tracker
        t_xy, t_angle = thymio_position()
        if (vrep.simxStartSimulation(settings.client_id, vrep.simx_opmode_oneshot) == -1):
            print('Failed to start the simulation\n')
            return
        # update position and orientation of the robot in vrep
        position, orientation = transform_pos_angle(
            t_xy, t_angle)
        individual.v_set_pos_angle(position, orientation)
        # collision detection initialization
        collision_handle, collision = init_collision(individual.client_id)
        now = datetime.now()
        # time-bounded loop; NOTE(review): `collision` is read each pass but
        # never checked, so a collision does not stop this loop — confirm
        # this is intentional for post-evaluation.
        while datetime.now() - now < timedelta(seconds=settings.run_time):
            t_xy, t_angle = thymio_position()
            # update position and orientation of the robot in vrep
            position, orientation = transform_pos_angle(
                t_xy, t_angle)
            individual.v_set_pos_angle(position, orientation)
            _, collision = vrep.simxReadCollision(
                settings.client_id, collision_handle, vrep.simx_opmode_buffer)
            # read proximity sensors data
            individual.t_read_prox()
            # input data to the neural network
            if type(network).__name__ == 'FeedForwardNetwork':
                net_output = network.activate(individual.n_t_sensor_activation)
            if type(network).__name__ == 'Sequential':
                net_output = network.predict(
                    (individual.n_t_sensor_activation).reshape((1, 7)))[0]
            # scale outputs to hardware wheel-speed range [-200, 200]
            scaled_output = np.array([scale(xi, -200, 200)
                                      for xi in net_output])
            # set thymio wheel speeds
            individual.t_set_motors(*list(scaled_output))
        individual.t_stop()
        # drive the robot back to its start position for the next run
        follow_path(
            individual,
            init_position,
            get_marker_object,
            vrep,
            settings.client_id,
            grid=obstacle_grid,
            log_time=settings.logtime_data
        )
        if (vrep.simxStopSimulation(settings.client_id, settings.op_mode) == -1):
            print('Failed to stop the simulation')
            print('Program ended')
            return
        return individual
    else:
        # unsupported individual type
        return None
def eval_genome_hardware(individual, settings, genome, model=None, config=None, generation=None):
    """Evaluation function for a single genome encoded with NEAT or DEAP.

    Runs the controller on the physical Thymio (pose tracked by camera
    marker 7 and mirrored into V-REP), aggregates an obstacle-avoidance
    fitness over time, records behavioral features, and writes results
    back onto ``genome``. Returns the scalar fitness, or ``None`` when the
    simulation fails to start/stop.
    """
    robot_m = get_marker_object(7)
    # busy-wait until the vision tracker actually sees the robot marker
    while robot_m.realxy() is None:
        # obtain goal marker position
        robot_m = get_marker_object(7)
    init_position = np.array([0.19, 0.22])  # start position — units presumably metres, TODO confirm
    # Build Scene and get the obstacles grid
    obstacle_grid = build_scene(
        settings.config_scene, settings, individual.client_id)
    # individual reset
    individual.n_t_sensor_activation = np.array([])
    individual.chromosome = genome
    individual.id = genome.key
    # simulation specific props
    # NOTE(review): this local list shadows the imported thymio_position()
    # function within this scope; the schedule callback appends pose samples
    # to it every 2 s.
    thymio_position = [np.array([0.19, 0.22])]
    schedule.every(2).seconds.do(thymio_get_position_every_2s, thymio_position)
    collision = False
    scaled_output = np.array([])
    fitness_agg = np.array([])
    dt = 0.05  # nominal control-loop period (50 ms) used for runtime accounting
    runtime = 0
    steps = 0
    # Behavioral Features #1 and #2
    wheel_speeds = []
    sensor_activations = []
    # neural network initialization
    network = init_network(genome, config, model)
    # get robot marker
    robot_m = get_marker_object(7)
    if robot_m.realxy() is not None:
        # update current position of the robot
        robot_current_position = robot_m.realxy()[:2]
        # update position and orientation of the robot in vrep
        pos, orientation = transform_pos_angle(
            robot_current_position, robot_m.orientation())
        individual.v_set_pos_angle(pos, orientation)
    if (vrep.simxStartSimulation(individual.client_id, vrep.simx_opmode_oneshot) == -1):
        print('Failed to start the simulation\n')
        return
    # collision detection initialization
    collision_handle, collision = init_collision(individual.client_id)
    # areas detection initialization (arena regions used as behavioral feature #3)
    areas_name = ('area0', 'area1', 'area2')
    # each entry: (name, returnCode, handle) — handle[2] below is the V-REP handle
    areas_handle = [(area,) + vrep.simxGetCollisionHandle(
        individual.client_id, area, vrep.simx_opmode_blocking) for area in areas_name]
    # first streaming read primes V-REP's data stream; result discarded
    _ = [(handle[0],) + vrep.simxReadCollision(
        individual.client_id, handle[2], vrep.simx_opmode_streaming) for handle in areas_handle]
    areas_counter = dict([(area, dict(count=0, percentage=0.0, total=0))
                          for area in areas_name])
    now = datetime.now()
    # while datetime.now() - now < timedelta(seconds=genome.sim_time):
    while not collision and datetime.now() - now < timedelta(seconds=genome.sim_time):
        schedule.run_pending()
        # get robot marker
        robot_m = get_marker_object(7)
        if robot_m.realxy() is not None:
            # update current position of the robot
            robot_current_position = robot_m.realxy()[:2]
            # update position and orientation of the robot in vrep
            pos, orientation = transform_pos_angle(
                robot_current_position, robot_m.orientation())
            individual.v_set_pos_angle(pos, orientation)
        _, collision = vrep.simxReadCollision(
            individual.client_id, collision_handle, vrep.simx_opmode_buffer)
        # Behavioral Feature #3: count loop iterations spent in each area
        areas = [(handle[0],) + vrep.simxReadCollision(
            individual.client_id, handle[2], vrep.simx_opmode_streaming) for handle in areas_handle]
        for area, _, detected in areas:
            if detected:
                areas_counter.get(area).update(
                    count=areas_counter.get(area)['count']+1)
        # read proximity sensors data
        individual.t_read_prox()
        # input data to the neural network
        # NOTE(review): if the network type matches neither branch,
        # net_output from the previous iteration (or nothing) is reused.
        if type(network).__name__ == 'FeedForwardNetwork':
            net_output = network.activate(individual.n_t_sensor_activation)
        if type(network).__name__ == 'Sequential':
            net_output = network.predict(
                (individual.n_t_sensor_activation).reshape((1, 7)))[0]
        # scale network outputs to the hardware wheel-speed range [-200, 200]
        scaled_output = np.array([scale(xi, -200, 200)
                                  for xi in net_output])
        # Collect behavioral feature data (binarised sensor activations)
        wheel_speeds.append(net_output)
        sensor_activations.append(
            list(map(lambda x: 1 if x > 0.0 else 0, individual.n_t_sensor_activation)))
        # set thymio wheel speeds
        individual.t_set_motors(*list(scaled_output))
        runtime += dt
        steps += 1
        # every 10 seconds the robot is in the same position given a threshold stop the simulation
        # if round(runtime, 2) % 10.0 == 0.0:
        #     print(euclidean_distance(thymio_position[0], thymio_position[-1]))
        #     if (euclidean_distance(thymio_position[0], thymio_position[-1])) < .09:
        #         collision = True
        #
        # fitness_t at time stamp (obstacle-avoidance terms)
        (
            fitness_t,
            wheel_center,
            straight_movements,
            obstacles_distance
        ) = f_t_obstacle_avoidance(
            scaled_output, individual.n_t_sensor_activation, 'thymio')
        fitness_agg = np.append(fitness_agg, fitness_t)
        # dump individual data
        # NOTE(review): robot_current_position may be unbound here if the
        # marker was never seen — relies on the tracker delivering a fix.
        if settings.debug:
            save_debug_data(
                settings.path,
                individual.id,
                individual.t_sensor_activation,
                individual.n_t_sensor_activation,
                net_output,
                scaled_output,
                wheel_center,
                straight_movements,
                obstacles_distance,
                fitness_t,
                'THYMIO',
                robot_current_position
            )
    individual.t_stop()
    # calculate the fitness (time-normalised sum of per-step terms)
    fitness = np.sum(fitness_agg)/settings.run_time
    schedule.clear()
    print('Transfered to thymio genome_id: {} fitness: {:.4f} runtime: {:.2f} s steps: {}'.format(
        individual.id, fitness, runtime, steps))
    # DEAP individuals carry their own generation number
    if type(genome).__name__ == 'Individual':
        generation = genome.gen
    behavioral_features = calc_behavioral_features(
        areas_counter,
        wheel_speeds,
        sensor_activations,
        settings.path,
        genome.key,
        generation,
        'THYMIO'
    )
    if settings.debug:
        print('behavioral_features: {0}\n pos_sample: {1}\n'.format(
            behavioral_features, thymio_position))
    # write results back onto the genome (DEAP 'Individual' vs NEAT 'DefaultGenome')
    if type(genome).__name__ == 'Individual':
        genome.features = behavioral_features
        genome.task_fitness = fitness
        genome.position = thymio_position
        genome.evaluation = 'THYMIO'
        genome.weights = network.get_weights()
    if type(genome).__name__ == 'DefaultGenome':
        genome.features = behavioral_features
        genome.fitness = fitness
        genome.position = thymio_position
    # drive the robot back to the start position for the next evaluation
    follow_path(
        individual,
        init_position,
        get_marker_object,
        vrep,
        individual.client_id,
        grid=obstacle_grid,
        log_time=settings.logtime_data
    )
    if (vrep.simxStopSimulation(individual.client_id, settings.op_mode) == -1):
        print('Failed to stop the simulation')
        print('Program ended')
        return
    time.sleep(1)
    return fitness
def eval_genome_simulation(individual, settings, model, config, generation, genome):
"""Evaluation function for multiobjective optimization NSGA-II.
:individual: robotic controller (VREP)
:settings: simulation specific settings
:model: Keras model Feedforward NN
:genome: weights of the NN encoded that are being optimized
:return: (fitness, transferability)
fitness - task dependent fitness value V * (1 - sqr(delta v)) * (1 - max(S_activation))
transferability - measure the distance betweeen simulation and real behavior
"""
# reset the individual
individual.v_reset_init()
individual.chromosome = genome
individual.id = genome.key
# Behavioral Features #1 and #2
wheel_speeds = []
sensor_activations = []
position = []
# evaluation specific props
collision = False
scaled_output = np.array([], ndmin=2)
fitness_agg = np.array([], ndmin=2)
# setting time step to 50 ms (miliseconds)
dt = 0.05
runtime = 0
steps = 0
network = init_network(genome, config, model)
# Enable the synchronous mode
vrep.simxSynchronous(individual.client_id, True)
# start the simulation
if (vrep.simxStartSimulation(individual.client_id, vrep.simx_opmode_oneshot) == -1):
return
# collistion detection initialization
collision_handle, collision = init_collision(individual.client_id)
# areas detection initlaization
areas_name = ('area0', 'area1', 'area2')
areas_handle = [(area,) + vrep.simxGetCollisionHandle(
individual.client_id, area, vrep.simx_opmode_blocking) for area in areas_name]
_ = [(handle[0],) + vrep.simxReadCollision(
individual.client_id, handle[2], vrep.simx_opmode_streaming) for handle in areas_handle]
areas_counter = dict([(area, | |
#!/usr/bin/python
"""
dockECR: open consensus docking and ranking protocol for virtual screening of small molecules
Authors: <NAME>, <NAME>, <NAME> and <NAME>
Please also cite:
<NAME>., <NAME>., <NAME>., & <NAME>. (2019).
Exponential consensus ranking improves the outcome in docking and receptor ensemble docking.
Scientific reports, 9(1), 1-14.
Third-party tools required (put them in the auxiliar/software folder):
AutoDock Vina: http://vina.scripps.edu/download.html - stand-alone tools: vina
Smina: https://sourceforge.net/projects/smina/files/ - stand-alone tools: smina.static
LeDock: http://www.lephar.com/download.htm - stand-alone tools: ledock_linux_x86
rDock: https://sourceforge.net/projects/rdock/files/ - stand-alone tools: rbcavity, rbdock and sdsort
MGL Tools: http://mgltools.scripps.edu/downloads - stand-alone tools: pythonsh
Additional tools:
Open Babel: https://sourceforge.net/projects/openbabel/
"""
########################################################################################
# Authorship
########################################################################################
__credits__ = ["<NAME>","<NAME>","<NAME>","<NAME>"]
__license__ = "MIT"
__version__ = "1.0"
########################################################################################
# Modules to import
########################################################################################
import argparse
import subprocess
import operator
import os
import math
import yaml
import multiprocessing
from auxiliar import calculate_rmsd
########################################################################################
# Preparation functions
########################################################################################
def prepare_pdbqt(target, ligand):
    """Convert the target and ligand PDB files to PDBQT via MGL Tools.

    Runs ``prepare_receptor4.py`` (stripping waters) and
    ``prepare_ligand4.py`` through the bundled ``pythonsh`` interpreter.
    Output files ``<target>.pdbqt`` and ``<ligand>.pdbqt`` land in the
    current working directory.
    """
    receptor_cmd = (
        "./auxiliar/software/pythonsh auxiliar/prepare_receptor4.py "
        "-r target/{tar}.pdb -o {tar}.pdbqt -U waters"
    ).format(tar=target)
    ligand_cmd = (
        "./auxiliar/software/pythonsh auxiliar/prepare_ligand4.py "
        "-l ligands/{lig}.pdb -U '' -B -o {lig}.pdbqt"
    ).format(lig=ligand)
    for command in (receptor_cmd, ligand_cmd):
        os.system(command)
########################################################################################
def prepare_rdock_cavity(target, ligand, center_x, center_y, center_z, size_x, size_y, size_z):
    """Generate the rDock cavity definition for *target*.

    Docks one reference ligand with AutoDock Vina (single pose, minimal
    exhaustiveness — only a rough seed pose is needed), converts the best
    pose and the receptor into the formats rDock expects, then runs
    ``rbcavity`` on ``config/rdock.prm``. Temporary files are removed.
    """
    # PDBQT inputs required by the vina reference run
    prepare_pdbqt(target, ligand)
    # Minimal vina config: 1 CPU, exhaustiveness 1, a single pose
    with open("config/config_vina_{}.txt".format(target), "w") as config:
        config.writelines([
            "center_x={}\n".format(center_x),
            "center_y={}\n".format(center_y),
            "center_z={}\n".format(center_z),
            "size_x={}\n".format(size_x),
            "size_y={}\n".format(size_y),
            "size_z={}\n".format(size_z),
            "cpu=1\n",
            "exhaustiveness=1\n",
            "num_modes = 1\n",
        ])
    os.system("./auxiliar/software/vina --receptor {}.pdbqt --ligand {}.pdbqt --log score.log --out out.pdbqt --config config/config_vina_{}.txt".format(target, ligand, target))
    # Split the multi-model output and keep only the first (best) pose
    os.system("csplit out.pdbqt /MODEL/ {*}; mv xx01 ligand.pdbqt")
    # Convert pose and receptor to the formats rbcavity consumes
    os.system("babel -ipdbqt ligand.pdbqt -osdf ligand.sd")
    os.system("babel -ipdb target/{}.pdb -omol2 receptor.mol2".format(target))
    os.system("cp auxiliar/rdock.prm config")
    os.system("./auxiliar/software/rbcavity -was -d -r config/rdock.prm")
    # Clean up the intermediates
    os.system("rm xx* score.log *.pdbqt")
########################################################################################
# Docking functions
########################################################################################
########################################################################################
def run_vina(target, ligand, center_x, center_y, center_z, size_x, size_y, size_z):
    """Dock *ligand* against *target* with AutoDock Vina and archive the poses.

    The box parameters are unused here — vina reads them from the
    pre-generated ``config/config_vina_<target>.txt``; they are kept in the
    signature for symmetry with the other ``run_*`` helpers.
    """
    print("Docking with vina between target {} and ligand {} ...".format(target, ligand))
    dock_cmd = (
        "./auxiliar/software/vina --receptor {}.pdbqt --ligand {}.pdbqt "
        "--log score_{}.log --out out_{}.pdbqt --config config/config_vina_{}.txt"
    ).format(target, ligand, ligand, ligand, target)
    os.system(dock_cmd)
    # Archive the multi-pose output under results/vina/
    os.system("mv out_{}.pdbqt results/vina/{}_{}_out.pdbqt".format(ligand, target, ligand))
########################################################################################
def run_smina(target, ligand, center_x, center_y, center_z, size_x, size_y, size_z):
    """Dock *ligand* against *target* with Smina and archive the poses.

    The box parameters are unused here — smina reads them from the
    pre-generated ``config/config_smina_<target>.txt``; they are kept in
    the signature for symmetry with the other ``run_*`` helpers.
    """
    print("Docking with smina between target {} and ligand {} ...".format(target, ligand))
    dock_cmd = (
        "./auxiliar/software/smina.static --receptor {}.pdbqt --ligand {}.pdbqt "
        "--log score_{}.log --out out_{}.pdbqt --config config/config_smina_{}.txt"
    ).format(target, ligand, ligand, ligand, target)
    os.system(dock_cmd)
    # Archive the multi-pose output under results/smina/
    os.system("mv out_{}.pdbqt results/smina/{}_{}_out.pdbqt".format(ligand, target, ligand))
########################################################################################
def run_rdock(target, ligand):
    """Dock *ligand* against *target* with rDock and archive the sorted poses.

    Converts the ligand to SDF, generates 10 poses, sorts them by
    ``SCORE.INTER`` with ``sdsort`` and stores the sorted file under
    ``results/rdock/``. Intermediate cleanup is handled by the caller.
    """
    # rDock takes SDF input, so convert the ligand first
    os.system("babel -ipdb ligands/{}.pdb -osdf {}.sd".format(ligand, ligand))
    print("Docking with rdock between target {} and ligand {} ...".format(target, ligand))
    os.system("./auxiliar/software/rbdock -i {}.sd -o {}_{} -r rdock.prm -p dock.prm -n 10".format(ligand, target, ligand))
    # Sort poses by intermolecular score (best first)
    os.system("./auxiliar/software/sdsort -n -f'SCORE.INTER' {}_{}.sd > {}_{}.sorted".format(target, ligand, target, ligand))
    os.system("mv {}_{}.sorted results/rdock/{}_{}_out.sd".format(target, ligand, target, ligand))
########################################################################################
def run_ledock(target, ligand, xmin, xmax, ymin, ymax, zmin, zmax):
    """Dock *ligand* against *target* with LeDock and archive the poses.

    LeDock consumes mol2 ligands listed in a list file plus a text config
    with the box extents; both are generated per ligand so parallel jobs
    don't clobber each other's files.
    """
    os.system("babel -ipdb ligands/{}.pdb -omol2 {}.mol2".format(ligand, ligand))
    # Per-ligand receptor copy, again to keep parallel jobs independent
    os.system("cp target/{}.pdb {}_{}.pdb".format(target, target, ligand))
    # Ligand list file (LeDock batches ligands from a list)
    with open("mol-list_{}".format(ligand), "w") as ligands_file:
        ligands_file.write("{}.mol2".format(ligand))
    # LeDock config: receptor, RMSD clustering cutoff, box, pose count, list
    config_lines = [
        "Receptor\n",
        "{}_{}.pdb\n".format(target, ligand),
        "RMSD\n",
        "1.0\n",
        "Binding pocket\n",
        "{} {}\n".format(xmin, xmax),
        "{} {}\n".format(ymin, ymax),
        "{} {}\n".format(zmin, zmax),
        "Number of binding poses\n",
        "10\n",
        "Ligands list\n",
        "mol-list_{}\n".format(ligand),
        "END",
    ]
    with open("config_ledock_{}_{}.txt".format(target, ligand), "w") as config:
        config.writelines(config_lines)
    print("Docking with ledock between target {} and ligand {} ...".format(target, ligand))
    os.system("auxiliar/software/ledock_linux_x86 config_ledock_{}_{}.txt".format(target, ligand))
    # Archive the .dok pose file under results/ledock/
    os.system("mv {}.dok results/ledock/{}_{}_out.dok".format(ligand, target, ligand))
########################################################################################
# Ranking functions
########################################################################################
def generate_complex(target, ligand, software):
    """Normalise the docked ligand PDB and stash it for ranking.

    Reads ``ligand.pdb`` from the working directory, relabels the ligand
    chain to B, renumbers atom names so they are unique, upper-cases Cl,
    and copies the fixed file to ``temp_ranking/<ligand>_<software>.pdb``.
    ATOM records are preferred; HETATM records are used as fallback.
    """
    # NOTE(review): the doubled braces below are literal — this string is
    # never .format()ed, so awk receives '{{print $4}}' (a nested block,
    # which awk executes the same as '{print $4}').
    bash = "grep ATOM ligand.pdb | awk '{{print $4}}' | head -n1 | tail -n1"
    ligand_code = subprocess.check_output(['bash', '-c', bash]).strip().decode("utf-8")
    if ligand_code:
        os.system("grep ATOM ligand.pdb | sed 's/{} /{} B/g' > ligand_fixed.pdb".format(ligand_code, ligand_code))
    else:
        # No ATOM records — fall back to HETATM
        bash = "grep HETATM ligand.pdb | awk '{{print $4}}' | head -n1 | tail -n1"
        ligand_code = subprocess.check_output(['bash', '-c', bash]).strip().decode("utf-8")
        os.system("grep HETATM ligand.pdb | sed 's/{} /{} B/g' > ligand_fixed.pdb".format(ligand_code, ligand_code))
    bash = "wc -l ligand_fixed.pdb | awk '{print $1}'"
    total_lines = int(subprocess.check_output(['bash', '-c', bash]).strip().decode("utf-8"))
    # Make atom names unique by suffixing each with its line index.
    # NOTE(review): the original branched on i < 10 but both branches ran
    # the identical command, so the branch was collapsed.
    for i in range(1, total_lines + 1):
        bash = "head -n {} ligand_fixed.pdb | tail -n 1 | awk '{{print $3}}'".format(i)
        atom = subprocess.check_output(['bash', '-c', bash]).strip().decode("utf-8")
        os.system("head -n {} ligand_fixed.pdb | tail -n 1 | sed 's/{} /{}{} /g' >> ligand_fixed_ref.pdb".format(i, atom, atom, i))
    os.system("sed -i 's/Cl/CL/g' ligand_fixed_ref.pdb")
    # Store the fixed ligand for the ranking stage
    os.system("mv ligand_fixed_ref.pdb ligand_fixed.pdb")
    os.system("cp ligand_fixed.pdb temp_ranking/{}_{}.pdb".format(ligand, software))
########################################################################################
def score_vina(target, ligand):
    """Parse the best vina score for (*target*, *ligand*) and stash the pose.

    Splits the archived multi-pose PDBQT, reads the affinity from the
    ``REMARK VINA`` line of the first model, converts the pose to PDB and
    hands it to ``generate_complex``. Returns the score as float.
    """
    # Keep only the first (best) model of the archived output
    os.system("csplit -s results/vina/{}_{}_out.pdbqt /MODEL/ {{*}}; mv xx01 ligand.pdbqt".format(target, ligand))
    score_cmd = "grep VINA ligand.pdbqt | awk '{print $4}'"
    raw_score = subprocess.check_output(['bash', '-c', score_cmd]).strip().decode("utf-8")
    score_v = float(raw_score)
    # Convert the pose for complex generation, then clean up
    os.system("babel -ipdbqt ligand.pdbqt -opdb ligand.pdb")
    generate_complex(target, ligand, "vina")
    os.system("rm xx* *.pdb *.pdbqt")
    return score_v
########################################################################################
def score_smina(target, ligand):
    """Parse the best smina score for (*target*, *ligand*) and stash the pose.

    Splits the archived multi-pose PDBQT, reads ``minimizedAffinity`` from
    the first model, converts the pose to PDB and hands it to
    ``generate_complex``. Returns the score as float.
    """
    # Keep only the first (best) model of the archived output
    os.system("csplit -s results/smina/{}_{}_out.pdbqt /MODEL/ {{*}}; mv xx01 ligand.pdbqt".format(target, ligand))
    score_cmd = "grep minimizedAffinity ligand.pdbqt | awk '{print $3}'"
    raw_score = subprocess.check_output(['bash', '-c', score_cmd]).strip().decode("utf-8")
    score_s = float(raw_score)
    # Convert the pose for complex generation, then clean up
    os.system("babel -ipdbqt ligand.pdbqt -opdb ligand.pdb")
    generate_complex(target, ligand, "smina")
    os.system("rm xx* *.pdb *.pdbqt")
    return score_s
########################################################################################
def score_ledock(target, ligand):
    """Parse the best LeDock score for (*target*, *ligand*) and stash the pose.

    Splits the archived ``.dok`` file on REMARK sections (the best pose is
    the second chunk), reads the score from its ``Cluster`` line, converts
    ATOM records to HETATM and hands the pose to ``generate_complex``.
    Returns the score as float.
    """
    # Best pose is the second csplit chunk (xx02)
    os.system("csplit -s results/ledock/{}_{}_out.dok /REMARK/ {{*}}; mv xx02 ligand.pdb".format(target, ligand))
    score_cmd = "grep Cluster ligand.pdb | awk '{print $8}'"
    raw_score = subprocess.check_output(['bash', '-c', score_cmd]).strip().decode("utf-8")
    score_l = float(raw_score)
    # Strip REMARK lines and relabel ATOM records as HETATM
    os.system("grep -v REMARK ligand.pdb | sed 's/ATOM  /HETATM/g' > ligand_mod.pdb".replace("ATOM  /", "ATOM  /") if False else "grep -v REMARK ligand.pdb | sed 's/ATOM  /HETATM/g' > ligand_mod.pdb")
    os.system("mv ligand_mod.pdb ligand.pdb")
    generate_complex(target, ligand, "ledock")
    os.system("rm xx* *.pdb")
    return score_l
########################################################################################
def score_rdock(target, ligand):
    """Parse the best rDock score for (*target*, *ligand*) and stash the pose.

    Converts the sorted SD file to PDB, splits out the first (best) model,
    reads its ``SCORE`` value from the SD record, and hands the pose to
    ``generate_complex``. Returns the score as float.
    """
    # Convert the whole sorted pose file to PDB, then keep the first model
    os.system("babel -isdf results/rdock/{}_{}_out.sd -opdb {}_{}_out.pdb".format(target, ligand, target, ligand))
    os.system("csplit -s {}_{}_out.pdb /MODEL/ {{*}}; mv xx01 ligand.pdb".format(target, ligand))
    # Extract the SCORE record of the best pose from the SD file
    os.system("csplit -s results/rdock/{}_{}_out.sd /SCORE/ {{*}}; mv xx01 score.txt".format(target, ligand))
    score_cmd = "head -n 2 score.txt | tail -n 1"
    raw_score = subprocess.check_output(['bash', '-c', score_cmd]).strip().decode("utf-8")
    score_r = float(raw_score)
    generate_complex(target, ligand, "rdock")
    os.system("rm xx* *.pdb score.txt")
    return score_r
########################################################################################
# Main execution
########################################################################################
if __name__ == '__main__':
# Script arguments
parser = argparse.ArgumentParser(description='dockECR: open consensus docking and ranking protocol')
parser.add_argument('-l', dest='list_ligands', action='store',required=True,
help='File with the list of ligands names available in the ligand folder')
parser.add_argument('-s', dest='list_software', action='store',required=True,
help='File with the list of software that will be included in the consensus')
parser.add_argument('-m', dest='mode', action='store',required=True,
help='Mode the script will be run. Options: (i) docking, (ii) ranking')
parser.add_argument('-t', dest='list_targets', action='store',required=True,
help='List with the name of the PDB structures used as targets')
#####################################################################################
# Assignment of parameters
#####################################################################################
args = parser.parse_args()
# Map the main parameters
list_software=[x.strip() for x in open(args.list_software)] # List of software
list_ligands=[x.strip() for x in open(args.list_ligands)] # List of ligands
list_targets=[x.strip() for x in open(args.list_targets)] # List of ligands
mode=args.mode # Mode: docking or ranking
####################################################################################
# Run docking protocols
####################################################################################
if mode=="docking":
# Create required folders
os.system("mkdir -p config results/vina results/smina results/ledock results/rdock")
# Iterate over the targets
for target in list_targets:
try:
config_file=[x.strip() for x in open("config_{}.txt".format(target))]
for line in config_file:
fields=line.split()
if fields[0]=="center_x:": center_x=float(fields[1])
if fields[0]=="center_y:": center_y=float(fields[1])
if fields[0]=="center_z:": center_z=float(fields[1])
if fields[0]=="size_x:": size_x=int(fields[1])
if fields[0]=="size_y:": size_y=int(fields[1])
if fields[0]=="size_z:": size_z=int(fields[1])
except:
print("A config file with the center and size of the box for target {} is necessary to run the protocol. Also avoid blank lines. Exiting ...".format(target))
exit()
# Size parameter for ledock
xmin=center_x-(size_x/2); xmax=center_x+(size_x/2)
ymin=center_y-(size_y/2); ymax=center_y+(size_y/2)
zmin=center_z-(size_z/2); zmax=center_z+(size_z/2)
# Iterate over the software list
for software in list_software:
if software == "vina":
# Create config file
config=open("config/config_vina_{}.txt".format(target),"w")
config.write("center_x={}\n".format(center_x))
config.write("center_y={}\n".format(center_y))
config.write("center_z={}\n".format(center_z))
config.write("size_x={}\n".format(size_x))
config.write("size_y={}\n".format(size_y))
config.write("size_z={}\n".format(size_z))
config.write("cpu=1\n")
config.write("exhaustiveness=1\n")
config.write("num_modes = 10\n")
config.close()
pool=multiprocessing.Pool()
for ligand in list_ligands:
prepare_pdbqt(target,ligand)
# Create parallel jobs
pool.apply_async(run_vina, args=(target,ligand,center_x,center_y,center_z,size_x,size_y,size_z,))
pool.close()
pool.join()
# Delete all temporal files
os.system("rm *.pdbqt score_*.log")
if software == "smina":
# Create config file
config=open("config/config_smina_{}.txt".format(target),"w")
config.write("center_x={}\n".format(center_x))
config.write("center_y={}\n".format(center_y))
config.write("center_z={}\n".format(center_z))
config.write("size_x={}\n".format(size_x))
config.write("size_y={}\n".format(size_y))
config.write("size_z={}\n".format(size_z))
config.write("cpu = 1\n")
config.write("exhaustiveness=1\n")
config.write("num_modes = 10\n")
config.write("scoring = vinardo\n")
config.close()
pool=multiprocessing.Pool()
for ligand in list_ligands:
prepare_pdbqt(target,ligand)
# Create parallel jobs
pool.apply_async(run_smina, args=(target,ligand,center_x,center_y,center_z,size_x,size_y,size_z,))
pool.close()
pool.join()
# Delete all temporal files
os.system("rm *.pdbqt score_*.log")
if software == "ledock":
pool=multiprocessing.Pool()
for ligand in list_ligands:
# Create parallel jobs
pool.apply_async(run_ledock, args=(target,ligand,xmin,xmax,ymin,ymax,zmin,zmax,))
pool.close()
pool.join()
# Delete all temporal files
os.system("rm *.pdb *.mol2 mol-list_* config_ledock_*")
if software == "rdock":
pool=multiprocessing.Pool()
for i,ligand in enumerate(list_ligands):
if i==0:
prepare_rdock_cavity(target,ligand,center_x,center_y,center_z,size_x,size_y,size_z)
os.system("babel -ipdb target/{}.pdb -omol2 receptor.mol2".format(target))
os.system("cp config/rdock* .")
os.system("cp auxiliar/dock.prm .")
run_rdock(target,ligand)
os.system("cp config/rdock* .")
os.system("cp auxiliar/dock.prm .")
else:
# Create parallel jobs
pool.apply_async(run_rdock, args=(target,ligand,))
pool.close()
pool.join()
# Delete all temporal files
os.system("rm *.sd *.mol2 rdock* dock.prm")
####################################################################################
# Run ranking protocols
####################################################################################
if mode=="ranking":
# Create required folders
os.system("mkdir ranks")
sigma=float(len(list_ligands))*0.05
# Create general dictionaries for the final ranking
ecr_vina={}
ecr_smina={}
ecr_ledock={}
ecr_rdock={}
ecr_rmsd={}
ecr_rmsd_ecr={}
# Iterate over the list of targets
for target in list_targets:
os.system("mkdir temp_ranking")
for software in list_software:
if software == "vina":
ranked_ligands_vina={}
for ligand in list_ligands:
score_v=score_vina(target,ligand)
ranked_ligands_vina[ligand]=score_v
print(ranked_ligands_vina)
sorted_x = sorted(ranked_ligands_vina.items(), key=operator.itemgetter(1))
rank_file_vina=open("ranks/rank_vina_{}.txt".format(target),"w")
ecr_vina[target]={}
for j,element in enumerate(sorted_x):
rank_file_vina.write("{}\t{}\t{}\n".format(j+1,element[0],element[1]))
ecr_vina[target][element[0]]=j+1
rank_file_vina.close()
if software == "smina":
ranked_ligands_smina={}
for ligand in list_ligands:
score_s=score_smina(target,ligand)
ranked_ligands_smina[ligand]=score_s
print(ranked_ligands_smina)
sorted_x = sorted(ranked_ligands_smina.items(), key=operator.itemgetter(1))
rank_file_smina=open("ranks/rank_smina_{}.txt".format(target),"w")
ecr_smina[target]={}
for j,element in enumerate(sorted_x):
rank_file_smina.write("{}\t{}\t{}\n".format(j+1,element[0],element[1]))
ecr_smina[target][element[0]]=j+1
rank_file_smina.close()
if software == "ledock":
ranked_ligands_ledock={}
for ligand in list_ligands:
score_l=score_ledock(target,ligand)
ranked_ligands_ledock[ligand]=score_l
print(ranked_ligands_ledock)
sorted_x = sorted(ranked_ligands_ledock.items(), key=operator.itemgetter(1))
rank_file_ledock=open("ranks/rank_ledock_{}.txt".format(target),"w")
ecr_ledock[target]={}
for j,element in enumerate(sorted_x):
rank_file_ledock.write("{}\t{}\t{}\n".format(j+1,element[0],element[1]))
ecr_ledock[target][element[0]]=j+1
rank_file_ledock.close()
if software == "rdock":
ranked_ligands_rdock={}
for ligand in list_ligands:
score_r=score_rdock(target,ligand)
ranked_ligands_rdock[ligand]=score_r
print(ranked_ligands_rdock)
sorted_x = sorted(ranked_ligands_rdock.items(), key=operator.itemgetter(1))
rank_file_rdock=open("ranks/rank_rdock_{}.txt".format(target),"w")
ecr_rdock[target]={}
for j,element in enumerate(sorted_x):
rank_file_rdock.write("{}\t{}\t{}\n".format(j+1,element[0],element[1]))
ecr_rdock[target][element[0]]=j+1
rank_file_rdock.close()
# Calculate full RMSD
ranked_ligands_rmsd={}
for | |
str]] = (),
) -> operation.Operation:
r"""Creates a new DnsAuthorization in a given project and
location.
.. code-block:: python
from google.cloud import certificate_manager_v1
def sample_create_dns_authorization():
# Create a client
client = certificate_manager_v1.CertificateManagerClient()
# Initialize request argument(s)
dns_authorization = certificate_manager_v1.DnsAuthorization()
dns_authorization.domain = "domain_value"
request = certificate_manager_v1.CreateDnsAuthorizationRequest(
parent="parent_value",
dns_authorization_id="dns_authorization_id_value",
dns_authorization=dns_authorization,
)
# Make the request
operation = client.create_dns_authorization(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.certificate_manager_v1.types.CreateDnsAuthorizationRequest, dict]):
The request object. Request for the
`CreateDnsAuthorization` method.
parent (str):
Required. The parent resource of the dns authorization.
Must be in the format ``projects/*/locations/*``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
dns_authorization (google.cloud.certificate_manager_v1.types.DnsAuthorization):
Required. A definition of the dns
authorization to create.
This corresponds to the ``dns_authorization`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
dns_authorization_id (str):
Required. A user-provided name of the
dns authorization.
This corresponds to the ``dns_authorization_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.certificate_manager_v1.types.DnsAuthorization` A DnsAuthorization resource describes a way to perform domain authorization
for certificate issuance.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, dns_authorization, dns_authorization_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a certificate_manager.CreateDnsAuthorizationRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, certificate_manager.CreateDnsAuthorizationRequest):
request = certificate_manager.CreateDnsAuthorizationRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if dns_authorization is not None:
request.dns_authorization = dns_authorization
if dns_authorization_id is not None:
request.dns_authorization_id = dns_authorization_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_dns_authorization]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
certificate_manager.DnsAuthorization,
metadata_type=certificate_manager.OperationMetadata,
)
# Done; return the response.
return response
def update_dns_authorization(
    self,
    request: Union[certificate_manager.UpdateDnsAuthorizationRequest, dict] = None,
    *,
    dns_authorization: certificate_manager.DnsAuthorization = None,
    update_mask: field_mask_pb2.FieldMask = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
    r"""Updates a DnsAuthorization.

    Args:
        request (Union[google.cloud.certificate_manager_v1.types.UpdateDnsAuthorizationRequest, dict]):
            Full request for the ``UpdateDnsAuthorization`` method.
            Mutually exclusive with the flattened field arguments below.
        dns_authorization (google.cloud.certificate_manager_v1.types.DnsAuthorization):
            Required. Definition of the dns authorization to update.
            Maps to ``request.dns_authorization``; must not be combined
            with ``request``.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. The fields of the resource to update. For the
            ``FieldMask`` definition, see
            https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask.
            Maps to ``request.update_mask``; must not be combined with
            ``request``.
        retry (google.api_core.retry.Retry): Designation of which errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Additional strings sent along
            with the request as gRPC metadata.

    Returns:
        google.api_core.operation.Operation:
            A long-running operation whose eventual result is a
            :class:`google.cloud.certificate_manager_v1.types.DnsAuthorization`.
    """
    # A full request object and flattened field arguments are mutually
    # exclusive; reject mixed usage up front.
    flattened_given = any([dns_authorization, update_mask])
    if request is not None and flattened_given:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce a dict (or None) into the typed request. When the caller
    # already passed the proto type we reuse it as-is: no flattened
    # arguments were given, so nothing below mutates caller state.
    if not isinstance(request, certificate_manager.UpdateDnsAuthorizationRequest):
        request = certificate_manager.UpdateDnsAuthorizationRequest(request)
    # Copy any flattened arguments onto the request.
    for field_name, value in (
        ("dns_authorization", dns_authorization),
        ("update_mask", update_mask),
    ):
        if value is not None:
            setattr(request, field_name, value)
    # The pre-wrapped RPC carries the configured retry/timeout defaults
    # and friendly error translation.
    rpc = self._transport._wrapped_methods[self._transport.update_dns_authorization]
    # The resource name must travel in the gRPC routing header.
    routing_metadata = gapic_v1.routing_header.to_grpc_metadata(
        (("dns_authorization.name", request.dns_authorization.name),)
    )
    raw_response = rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=tuple(metadata) + (routing_metadata,),
    )
    # Expose the LRO as an operation future resolving to DnsAuthorization.
    return operation.from_gapic(
        raw_response,
        self._transport.operations_client,
        certificate_manager.DnsAuthorization,
        metadata_type=certificate_manager.OperationMetadata,
    )
def delete_dns_authorization(
self,
request: Union[certificate_manager.DeleteDnsAuthorizationRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Deletes a single DnsAuthorization.
.. code-block:: python
from google.cloud import certificate_manager_v1
def sample_delete_dns_authorization():
# Create a client
client = certificate_manager_v1.CertificateManagerClient()
# Initialize request argument(s)
request = certificate_manager_v1.DeleteDnsAuthorizationRequest(
name="name_value",
)
# Make the request
operation = client.delete_dns_authorization(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.certificate_manager_v1.types.DeleteDnsAuthorizationRequest, dict]):
The request object. Request for the
`DeleteDnsAuthorization` method.
name (str):
Required. A name of the dns authorization to delete.
Must be in the format
``projects/*/locations/*/dnsAuthorizations/*``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a certificate_manager.DeleteDnsAuthorizationRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, certificate_manager.DeleteDnsAuthorizationRequest):
request = certificate_manager.DeleteDnsAuthorizationRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_dns_authorization]
# Certain fields should be provided within the metadata | |
from numpy import random
from model import *
from data import *
from custom_metrics import *
from pre_processing_data import *
from pathlib import Path
import sys
import argparse
import os
import os.path
import glob
import traceback
import math
from datetime import datetime
import pytz
import tensorflow as tf
from tensorflow.keras.callbacks import *
class UnetHelper:
    """U-Net training/testing helper.

    Holds configuration defaults (overridable via set_arguments), CLI
    dispatch, data-generator construction and assorted file utilities.
    """
    # training vars
    model = None          # compiled Keras model, set by get_model()
    batch_size = 1
    steps_per_epoch = 25
    epochs = 5
    # image sizes
    target_size = (752, 1008)      # (height, width) fed to the data generators
    input_shape = (752, 1008, 3)   # RGB input tensor shape for the network
    # paths
    base_folder = '/Users/charles/Downloads/mestrado/hedychium_coronarium/all_splits/hedychium_coronarium/'
    train_folder = base_folder + "train/"
    augmentation_folder = train_folder + "aug/"   # where augmented samples are dumped when --g 1
    validation_folder = base_folder + "val/"
    test_folder = base_folder + "test/"
    image_folder = "images"   # sub-folder holding input photos
    label_folder = "masks"    # sub-folder holding ground-truth masks
    patience = 5              # early-stopping patience, in epochs
    tz = pytz.timezone("Brazil/East")
    start_time = datetime.now(tz=tz)
    # Run identifier embedded in checkpoint filenames; refreshed by get_folder_name().
    path = datetime.now().strftime("%Y.%m.%d_%H%M%S")
    my_train_gene = None
    my_validation_gene = None
    flag_multi_class = False
    early_stopping_monitor = "val_auc"
    early_stopping_monitor_mode = "max"
    class_weight = None
    model_monitor = "val_auc"        # metric watched by ModelCheckpoint
    model_monitor_mode = "max"
    validation_steps = 10
    use_numpy = False   # when True, feed training from .npy files instead of generators
    learning_rate = 1e-4
    momentum = 0.90     # only relevant when use_sgd is True
    use_sgd = False
    use_euclidean = True
    check_train_class_weights = False
    use_augmentation = True
    use_splits = False
def main(self, args):
    """Dispatch execution according to the operation selector ``args.t``.

    Unknown selector values are silently ignored, matching the original
    if/elif behaviour.
    """
    actions = {
        -2: lambda: self.playground(),
        -1: lambda: self.show_arguments(),
        0: lambda: self.train(args),
        1: lambda: self.test(args),
        2: lambda: self.show_summary(args),
        3: lambda: self.show_summary(args),
        4: lambda: self.get_fbetaScore(args.b, args.p, args.r),
        5: lambda: PreProcessingData.crop_images_in_tiles(
            self.base_folder,
            self.train_folder,
            self.validation_folder,
            self.test_folder,
            self.image_folder,
            self.label_folder,
            256,
            256,
            threshold=20,
            force_delete=False,
        ),
        6: lambda: PreProcessingData.crop_all_images_in_tiles(
            self.base_folder,
            self.image_folder,
            self.label_folder,
            2000,
            1500,
            threshold=5,
            force_delete=False,
            validate_class_to_discard=True,
            move_ignored_to_test=False,
        ),
        7: lambda: PreProcessingData.get_train_class_weights(
            '../../datasets/all', use_splits=self.use_splits
        ),
        8: lambda: self.compare_result(),
        9: lambda: PreProcessingData.copy_exif_from_image_to_mask(
            self.base_folder, self.image_folder, self.label_folder
        ),
    }
    action = actions.get(args.t)
    if action is not None:
        action()
def playground(self):
    """Ad-hoc visual sanity check for the custom distance-transform loss.

    For up to 11 training images: loads the photo, its ground-truth mask
    and a randomly corrupted copy of the mask (a "fake prediction"),
    computes the inverted-distance-transform (TDI) loss terms, and shows
    a 3x3 contact sheet of the intermediate arrays.
    """
    import PIL
    from PIL import Image
    qtd, imgs = self.get_files_count(self.train_folder + self.image_folder)
    for i, file_name in enumerate(imgs):
        img = self.load_img("images", file_name, False)
        y_true = self.load_img("masks", file_name)
        # generate_fake_img mutates the freshly loaded mask in place.
        y_pred_fake = self.generate_fake_img(self.load_img("masks", file_name))
        tdi = CustomMetricsAndLosses.transformada_distancia_invertida(y_true, y_pred_fake)
        # Keep a reference to the raw TDI array before `tdi` is rebound below.
        tdi_img = tdi
        tdi = (tdi / 255).astype('float32')
        # Per-pixel MSE between the fake prediction and the ground truth.
        mse = np.mean(np.square(y_pred_fake - y_true), axis=-1).astype('float32')
        tdi = np.mean(tdi[:,:], axis=-1)
        # Combined loss map: MSE plus the alpha-weighted TDI penalty.
        loss = mse + (CustomMetricsAndLosses.alpha * tdi)
        print(f'loss: {loss}')
        # Binarize the TDI map for display.
        tdi[tdi>0.5] = 255
        tdi[tdi<=0.5] = 0
        tdi = tdi.astype('uint8')
        # Replicate the loss map into 3 channels, then binarize for display.
        loss2 = np.zeros( loss.shape + (3,) )
        loss2[:,:,0] = loss
        loss2[:,:,1] = loss
        loss2[:,:,2] = loss
        loss2[loss2>0.5] = 255
        loss2[loss2<=0.5] = 0
        loss2 = loss2.astype('uint8')
        # assumes tiles are 256x256 - TODO confirm against target_size
        w = 256
        h = 256
        # Contact sheet: row 1 = image | truth | fake prediction,
        # row 2 = raw TDI | binarized TDI | loss, row 3 = binarized loss.
        composed_img = Image.new('RGB', (w*3, h*3), color="gray")
        composed_img.paste(Image.fromarray(img, 'RGB'), (0, 0))
        composed_img.paste(Image.fromarray(y_true, 'RGB'), (w, 0))
        composed_img.paste(Image.fromarray(y_pred_fake, 'RGB'), (w*2, 0))
        composed_img.paste(Image.fromarray(self.norm_img_in_black_white(tdi_img), 'RGB'), (0, h))
        composed_img.paste(Image.fromarray(self.norm_img_in_black_white(tdi), 'L'), (w, h))
        composed_img.paste(Image.fromarray(self.norm_img_in_black_white(loss), 'L'), (w*2, h))
        composed_img.paste(Image.fromarray(self.norm_img_in_black_white(loss2), 'RGB'), (0, h*2))
        composed_img.show()
        # Only preview the first handful of files.
        if(i>10):
            break
def load_img(self, file_type, file_name, norm_img_bw = True):
    """Read an image from the train folder; optionally binarize it to 0/255."""
    full_path = os.path.join(self.train_folder, file_type, file_name)
    loaded = io.imread(full_path, as_gray=False)
    return self.norm_img_in_black_white(loaded) if norm_img_bw else loaded
def norm_img_in_black_white(self, img, rgb=True):
    """Threshold *img* at 127.5 in place: values > 127.5 become 255, the rest 0.

    Returns the uint8 result; with rgb=False only the first channel is
    returned (a 2-D array). NOTE: mutates the caller's array, as before.
    """
    binary = img  # alias: thresholding is applied in place on purpose
    bright = binary > 127.5
    binary[bright] = 255
    binary[~bright] = 0
    if not rgb:
        return binary[:,:,0].astype('uint8')  # single channel, 2 dims
    return binary.astype('uint8')
def generate_fake_img(self, img):
    """Corrupt ~5% of a 256x256 image by painting random pixels a random gray.

    Mutates and returns *img*. The per-iteration draw order (x, y, color)
    is unchanged from the original, so seeded runs produce the same image.
    """
    from random import randrange
    # 3250 / (256*256) ~= 5% of the pixels
    for i in range(0, 3250):
        x = randrange(256) # 256x256px
        y = randrange(256) # 256x256px
        color = randrange(255) # 0 - 255
        # Assigning a scalar to img[x][y] broadcasts across all channels;
        # the original repeated this statement three times redundantly.
        img[x][y] = color
    return img
def generate_fake_predict(self, mask):
    """Build a noisy fake prediction from *mask*.

    Randomly scales the mask, hard-thresholds at 127.5, then reduces the
    channels by mean and maps the per-pixel result onto {0, 255}.
    """
    noisy = np.random.rand(256, 256, 3) * mask
    high = noisy >= 127.5
    noisy[high] = 255
    noisy[~high] = 0
    channel_mean = np.mean(noisy, axis=2)
    return np.abs(channel_mean > 0.5) * 255
def show_arguments(self):
    """Print the current configuration, one setting per line."""
    # Label strings are preserved byte-for-byte from the original
    # (some deliberately lack the trailing colon/space).
    settings = (
        ("batch_size: ", self.batch_size),
        ("target_size: ", self.target_size),
        ("input_shape: ", self.input_shape),
        ("steps_per_epoch: ", self.steps_per_epoch),
        ("epochs: ", self.epochs),
        ("base_folder: ", self.base_folder),
        ("train_folder: ", self.train_folder),
        ("validation_folder: ", self.validation_folder),
        ("augmentation_folder: ", self.augmentation_folder),
        ("test_folder: ", self.test_folder),
        ("image_folder: ", self.image_folder),
        ("label_folder: ", self.label_folder),
        ("patience: ", self.patience),
        ("flag_multi_class: ", self.flag_multi_class),
        ("early_stopping_monitor: ", self.early_stopping_monitor),
        ("early_stopping_monitor_mode: ", self.early_stopping_monitor_mode),
        ("class_weight: ", self.class_weight),
        ("model_monitor: ", self.model_monitor),
        ("model_monitor_mode: ", self.model_monitor_mode),
        ("validation_steps: ", self.validation_steps),
        ("use_numpy: ", self.use_numpy),
        ("learning_rate: ", self.learning_rate),
        ("momentum:", self.momentum),
        ("use_sgd:", self.use_sgd),
        ("use_euclidean:", self.use_euclidean),
        ("check_train_class_weights", self.check_train_class_weights),
        ("use_augmentation", self.use_augmentation),
        ("use_splits", self.use_splits),
    )
    for label, value in settings:
        print(label, value)
def set_arguments(
    self,
    batch_size=2,
    steps_per_epoch=50,
    epochs=15,
    target_size=(256, 256),
    input_shape=(256, 256, 3),
    base_folder="../hedychium_coronarium/",
    image_folder="images",
    label_folder="masks",
    patience=5,
    flag_multi_class=False,
    early_stopping_monitor="val_mean_iou",
    early_stopping_monitor_mode ="auto",
    model_monitor = "val_binary_accuracy",
    model_monitor_mode = "auto",
    class_weights = None,
    validation_steps=200,
    use_numpy = False,
    learning_rate = 1e-4,
    momentum = 0.90,
    use_sgd = False,
    use_euclidean = False,
    check_train_class_weights = False,
    use_augmentation = False,
    use_splits = False,
):
    """Override the class-level configuration on this instance.

    Note: unlike the class defaults, the data folders are derived from
    *base_folder* using the ``*_splits/`` layout.
    """
    self.batch_size = batch_size
    self.steps_per_epoch = steps_per_epoch
    self.epochs = epochs
    self.target_size = target_size
    self.input_shape = input_shape
    self.base_folder = base_folder
    self.train_folder = base_folder + "train_splits/"
    self.augmentation_folder = self.train_folder + "aug/"
    self.validation_folder = base_folder + "val_splits/"
    self.test_folder = base_folder + "test_splits/"
    self.image_folder = image_folder
    self.label_folder = label_folder
    self.patience = patience
    self.flag_multi_class = flag_multi_class
    self.early_stopping_monitor = early_stopping_monitor
    self.early_stopping_monitor_mode = early_stopping_monitor_mode
    self.model_monitor = model_monitor
    self.model_monitor_mode = model_monitor_mode
    # Bug fix: the attribute consulted elsewhere in the class (e.g. by
    # show_arguments) is `class_weight`; the old code only set
    # `class_weights`, so the value passed here was silently ignored.
    # Both names are kept for backward compatibility.
    self.class_weight = class_weights
    self.class_weights = class_weights
    self.validation_steps = validation_steps
    self.use_numpy = use_numpy
    self.learning_rate = learning_rate
    self.momentum = momentum
    self.use_sgd = use_sgd
    self.use_euclidean = use_euclidean
    self.check_train_class_weights = check_train_class_weights
    self.use_augmentation = use_augmentation
    self.use_splits = use_splits
def get_folder_name(self, base_path):
    """Refresh self.path with a fresh timestamp and ensure *base_path* exists.

    Returns *base_path* unchanged.
    """
    self.path = datetime.now().strftime("%Y%m%d_%H%M%S")
    Path(base_path).mkdir(parents=True, exist_ok=True)
    return base_path
def get_files_count(
    self,
    path,
    ext=".JPG"
):
    """Return (count, file names) of files directly under *path* matching *ext*.

    Only the base names (no directory part) are returned.
    """
    matches = glob.glob(path + "/*" + ext)
    # os.path.basename is robust to duplicated separators and also works
    # with Windows-style paths, unlike the previous approach of indexing
    # the "/"-split components.
    names = [os.path.basename(item) for item in matches]
    return len(names), names
def get_some_weight(
    self,
    path='train_weights/',
    ext='.hdf5'
):
    """Return the checkpoint file in *path* whose embedded date/time is newest.

    Filenames are expected to carry date and time as the third- and
    second-to-last '_'-separated tokens (e.g. ..._YYYYmmdd_HHMMSS_unet.hdf5).
    """
    files = glob.glob(path + "/*" + ext)
    total = len(files)
    newest_stamp = 0
    newest_file = None
    for i, item in enumerate(files):
        print(f'{i}/{total-1} - {item}')
        tokens = item.replace('/', '_').split('_')
        # Concatenate date + time tokens into one sortable integer.
        stamp = int(tokens[-3] + tokens[-2])
        if newest_file is None or stamp > int(newest_stamp):
            newest_stamp = stamp
            newest_file = item
    return newest_file
def arguments(
    self,
):
    """Parse and validate the command-line arguments.

    Exits the process with a usage message when validation fails.
    """
    parser = argparse.ArgumentParser(description="Informe os parametros:")
    parser.add_argument(
        "--t",
        default=0,
        type=int,
        help="Informe o tipo '--t -1' parametros, '--t 0' treino, '--t 1' teste, '--t 2' sumario', '--t 3' avaliacao, '--t 4' f-beta-score",
    )
    parser.add_argument(
        "--g",
        default=0,
        type=int,
        help="Gerar arquivos '--g 0' para nao gerar arquivos ou '--g 1' para gerar",
    )
    parser.add_argument(
        "--q",
        default=0,
        type=int,
        help="Quantidade de arquivos para teste '--q 0' para nao gerar arquivos ou '--q 1' para gerar",
    )
    parser.add_argument(
        "--n",
        default=None,
        type=str,
        help="Informe o nome do arquivo de pesos para executar o teste ou ler o sumario",
    )
    parser.add_argument(
        "--b",
        default=None,
        type=float,
        help="Informe o beta para calcular o f-beta score",
    )
    parser.add_argument(
        "--p",
        default=None,
        type=float,
        help="Informe o precision para calcular o f-beta score",
    )
    parser.add_argument(
        "--r",
        default=None,
        type=float,
        help="Informe o recall para calcular o f-beta score",
    )
    args = parser.parse_args()
    # Bug fix: main() dispatches t values from -2 (playground) through 9
    # (pre-processing utilities); the old check only accepted
    # {-1, 0, 1, 2, 4} and wrongly rejected -2, 3 and 5..9.
    if args.t not in {-2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9}:
        print(
            "Tipo invalido! Informe o tipo corretamente: --t -1' parametros, '--t 0' para treino, '--t 1' para teste', '--t 2' para exibir o sumario, '--t 4' f-beta-score"
        )
        sys.exit()
    if args.g != 0 and args.g != 1:
        print(
            "Parametro para a geracao de arquivos invalido! Informe corretamente: '--g 0' para nao gerar arquivos ou '--g 1' para gerar"
        )
        sys.exit()
    # Reading a summary requires an explicit weights file.
    if (args.t == 2) and not args.n:
        print("Parametro invalido! Informe corretamente: '--n [file_name]'")
        sys.exit()
    # The f-beta score needs beta, precision and recall.
    if (args.t == 4) and (not args.b or not args.p or not args.r):
        print(
            "Parametro invalido! Informe corretamente: '--b [beta], --p [precision], --r [recall]'"
        )
        sys.exit()
    return args
def generate_my_gen(self, args):
    """Build and return the (train, validation) data sources.

    When self.use_numpy is False, returns two streaming Keras-style
    generators (with optional augmentation); otherwise returns generators
    backed by pre-saved .npy files.

    NOTE(review): this reads args.flag_multi_class and args.g, but the
    arguments() parser only defines g (not flag_multi_class), while the
    class has a flag_multi_class attribute - confirm callers supply an
    args object carrying flag_multi_class.
    """
    data_gen_args = dict()
    if(self.use_augmentation):
        data_gen_args = dict(
            zoom_range = [0.90, 1.10], # TODO: tune
            brightness_range = [0.90, 1.10], # TODO: tune
            shear_range = 0.05,
            #width_shift_range=[-0.2, 0.2],
            #height_shift_range=[-0.2, 0.2],
            horizontal_flip=True, # TODO: consider removing
            vertical_flip=True, # TODO: consider removing
            fill_mode="wrap"# TODO: consider removing
        )
    else:
        print('without augmentation')
    # Persist augmented samples to disk only when --g 1 was passed.
    save_to_dir = None
    if args.g != 0:
        save_to_dir = self.get_folder_name(self.augmentation_folder)
    if (not self.use_numpy):
        self.my_train_gene = Data.data_generator(
            self.batch_size,
            self.train_folder,
            self.image_folder,
            self.label_folder,
            data_gen_args,
            flag_multi_class=args.flag_multi_class,
            target_size=self.target_size,
            image_color_mode="rgb",
            save_to_dir=save_to_dir
        )
        self.my_validation_gene = Data.data_generator(
            self.batch_size,
            self.validation_folder,
            self.image_folder,
            self.label_folder,
            data_gen_args,
            flag_multi_class=args.flag_multi_class,
            target_size=self.target_size,
            image_color_mode="rgb",
            save_to_dir=save_to_dir)
        return (self.my_train_gene, self.my_validation_gene)
    else:
        # Numpy-file based pipeline (no on-the-fly augmentation options here).
        self.my_train_gene_npy = Data.gene_data_npy(self.train_folder, flag_multi_class=args.flag_multi_class)
        self.my_validation_gene_npy = Data.gene_data_npy(self.validation_folder, flag_multi_class=args.flag_multi_class)
        return (self.my_train_gene_npy, self.my_validation_gene_npy)
def train(self, args, generator_train = None, generator_val = None):
# define TensorBoard directory and TensorBoard callback
tb_cb = self.create_tensor_board_callback()
# just for debugging
#tf.config.run_functions_eagerly(True)
try:
self.show_execution_time(original_msg="Starting now...", write_in_file=True)
model = self.get_model(pretrained_weights=args.n)
if(not generator_train or not generator_val):
(generator_train, generator_val) = self.generate_my_gen(args)
earlystopper = EarlyStopping(
patience=self.patience,
verbose=1,
monitor=self.early_stopping_monitor,
mode=self.early_stopping_monitor_mode,
)
model_checkpoint = ModelCheckpoint(
f"train_weights/{self.path}_unet.hdf5",
monitor=self.model_monitor,
mode=self.model_monitor_mode,
verbose=1,
save_best_only=True,
save_weights_only=False
)
| |
s.isSignal(s.schedule[5])
return 1
def testIsCheckpoint():
    """Schedule 17 must expose exactly one checkpoint, at position 4."""
    tr = os.environ.get("THRILLE_ROOT")
    assert tr is not None
    td = os.path.join(tr, "tests", "fwdrev")
    assert os.path.exists(td)
    test17 = os.path.join(td, "17.sched")
    assert os.path.exists(test17)
    s = Schedule(test17)
    assert s.getNumberOfCheckpoints() == 1
    # Only index 4 is a checkpoint; every other entry is not.
    for idx in range(7):
        flag = s.isCheckpoint(s.schedule[idx])
        if idx == 4:
            assert flag
        else:
            assert not flag
    return 1
def testConsolidateForward():
    """consolidateFrontierForward on schedules 05 and 09.

    Frontiers 0 and 3 (05.sched) and 5 (09.sched) must produce the expected
    reordering; every other frontier yields an empty Schedule.
    """
    tr = os.environ.get("THRILLE_ROOT")
    assert tr is not None
    td = os.path.join(tr, "tests", "fwdrev")
    #sched = [2, 0 ,0, 1, 1, 1, 0]
    assert os.path.exists(td)
    test5 = os.path.join(td, "05.sched")
    assert os.path.exists(test5)

    def check_success(sched_file, frontier, order, chosen):
        # Runs one successful consolidation and returns (source, result)
        # so the caller can compare addr fields against the source.
        src = Schedule(sched_file)
        out = src.consolidateFrontierForward(frontier)
        assert out != Schedule()
        assert out.getThreadOrder() == order
        assert len(out.schedule) == len(chosen)
        for idx, tid in enumerate(chosen):
            assert out.schedule[idx].chosen == tid
        return src, out

    src, out = check_success(test5, 0, ["0", "2", "1", "0"],
                             ["0", "0", "2", "1", "1", "1", "0"])
    assert out.schedule[2].addr == src.schedule[3].addr
    assert out.schedule[3].addr == src.schedule[1].addr

    for frontier in (1, 2):
        assert Schedule(test5).consolidateFrontierForward(frontier) == Schedule()

    src, out = check_success(test5, 3, ["0", "2", "0", "1"],
                             ["2", "0", "0", "0", "1", "1", "1"])
    assert out.schedule[6].addr == src.schedule[5].addr
    assert out.schedule[4].addr == "0"

    for frontier in (4, 5, 6):
        assert Schedule(test5).consolidateFrontierForward(frontier) == Schedule()

    assert os.path.exists(td)
    test9 = os.path.join(td, "09.sched")
    assert os.path.exists(test9)
    src, out = check_success(test9, 5, ["0", "2", "0", "1", "3", "1", "0"],
                             ["2", "0", "0", "1", "3", "3", "1", "0"])
    assert out.schedule[6].addr == src.schedule[7].addr
    assert out.schedule[7].addr == src.schedule[6].addr
    return 1
def testConsolidateReverse():
    """consolidateFrontierBackward on schedule 09, frontier walking 7 -> -1.

    Frontiers 7, 6, 5 succeed with the expected reorderings; frontiers
    4..0 are rejected (empty Schedule); a negative frontier must raise.
    """
    tr = os.environ.get("THRILLE_ROOT")
    assert tr is not None
    td = os.path.join(tr, "tests", "fwdrev")
    assert os.path.exists(td)
    test9 = os.path.join(td, "09.sched")
    assert os.path.exists(test9)
    s = Schedule(test9)
    length = s.getScheduleLength() - 1
    assert length == 7

    # (thread order, chosen per slot, addr checks as (result_idx, source_idx)),
    # consumed at frontiers 7, 6, 5 in that order on the SAME Schedule object.
    expectations = [
        (["0", "2", "1", "3", "1", "3", "0"],
         ["2", "1", "3", "1", "3", "0", "0", "0"],
         [(5, 7), (1, 1)]),
        (["0", "2", "0", "1", "3", "0"],
         ["2", "0", "0", "1", "1", "3", "3", "0"],
         [(5, 6), (4, 4)]),
        (["0", "2", "0", "3", "1", "3", "0"],
         ["2", "0", "0", "3", "1", "1", "3", "0"],
         [(4, 5), (3, 3)]),
    ]
    for order, chosen, addr_pairs in expectations:
        newsched = s.consolidateFrontierBackward(length)
        assert newsched != Schedule()
        assert newsched.getScheduleLength() == s.getScheduleLength()
        assert newsched.getThreadOrder() == order
        assert newsched.getScheduleLength() == 8
        for idx, tid in enumerate(chosen):
            assert newsched.schedule[idx].chosen == tid
        for result_idx, source_idx in addr_pairs:
            assert newsched.schedule[result_idx].addr == s.schedule[source_idx].addr
        length -= 1

    # Frontiers 4 down to 0 cannot be consolidated backward.
    while length >= 0:
        assert s.consolidateFrontierBackward(length) == Schedule()
        length -= 1

    # A negative frontier (-1 here) must raise.
    raised = False
    try:
        s.consolidateFrontierBackward(length)
    except BaseException:
        raised = True
    assert raised
    return 1
def testBlockExtend():
tr = os.environ.get("THRILLE_ROOT")
assert tr is not None
td = os.path.join(tr, "tests", "fwdrev")
assert os.path.exists(td)
test14 = os.path.join(td, "14.sched")
assert os.path.exists(test14)
#frontier = 0
#start : 0,2,0,0,1,3,1,3,0
#end : 0,0,0,0,1,3,1,3,0
s = Schedule(test14)
length = 0
newsched = s.blockExtend(length)
assert newsched != Schedule()
assert newsched.getScheduleLength() == s.getScheduleLength()
assert newsched.getThreadOrder() == ["0", "1", "3", "1", "3", "0"]
assert newsched.getScheduleLength() == 8
assert newsched.schedule[0].chosen == "0"
assert newsched.schedule[1].chosen == "0"
assert newsched.schedule[2].chosen == "0"
assert newsched.schedule[3].chosen == "1"
assert newsched.schedule[4].chosen == "3"
assert newsched.schedule[5].chosen == "1"
assert newsched.schedule[6].chosen == "3"
assert newsched.schedule[7].chosen == "0"
#frontier = 1
#start : 0,2,0,0,1,3,1,3,0
#end : 0,2,2,2,1,3,1,3,0
length += 1
newsched = s.blockExtend(length)
assert newsched != Schedule()
assert newsched.getScheduleLength() == s.getScheduleLength()
assert newsched.getThreadOrder() == ["0", "2", "1", "3", "1", "3", "0"]
assert newsched.getScheduleLength() == 8
assert newsched.schedule[0].chosen == "2"
assert newsched.schedule[1].chosen == "2"
assert newsched.schedule[2].chosen == "2"
assert newsched.schedule[3].chosen == "1"
assert newsched.schedule[4].chosen == "3"
assert newsched.schedule[5].chosen == "1"
assert newsched.schedule[6].chosen == "3"
assert newsched.schedule[7].chosen == "0"
#frontier = 2
#start : 0,2,0,0,1,3,1,3,0
#end : 0,2,0,0,1,3,1,3,0
length += 1
newsched = s.blockExtend(length)
assert newsched == Schedule()
#frontier = 3
#start : 0,2,0,0,1,3,1,3,0
#end : 0,2,0,0,0,3,1,3,0
length += 1
newsched = s.blockExtend(length)
assert newsched != Schedule()
assert newsched.getScheduleLength() == s.getScheduleLength()
assert newsched.getThreadOrder() == ["0", "2", "0", "3", "1", "3", "0"]
assert newsched.getScheduleLength() == 8
assert newsched.schedule[0].chosen == "2"
assert newsched.schedule[1].chosen == "0"
assert newsched.schedule[2].chosen == "0"
assert newsched.schedule[3].chosen == "0"
assert newsched.schedule[4].chosen == "3"
assert newsched.schedule[5].chosen == "1"
assert newsched.schedule[6].chosen == "3"
assert newsched.schedule[7].chosen == "0"
#frontier = 4
#start : 0,2,0,0,1,3,1,3,0
#end : 0,2,0,0,1,3,1,3,0
length += 1
newsched = s.blockExtend(length)
assert newsched == Schedule()
#frontier = 5
#start : 0,2,0,0,1,3,1,3,0
#end : 0,2,0,0,1,3,3,3,0
length += 1
newsched = s.blockExtend(length)
assert newsched != Schedule()
assert newsched.getScheduleLength() == s.getScheduleLength()
assert newsched.getThreadOrder() == ["0", "2", "0", "1", "3", "0"]
assert newsched.getScheduleLength() == 8
assert newsched.schedule[0].chosen == "2"
assert newsched.schedule[1].chosen == "0"
assert newsched.schedule[2].chosen == "0"
assert newsched.schedule[3].chosen == "1"
assert newsched.schedule[4].chosen == "3"
assert newsched.schedule[5].chosen == "3"
assert newsched.schedule[6].chosen == "3"
assert newsched.schedule[7].chosen == "0"
#frontier = 6
#start : 0,2,0,0,1,3,1,3,0
#end : 0,2,0,0,1,3,1,1,0
length += 1
newsched = s.blockExtend(length)
assert newsched != Schedule()
assert newsched.getScheduleLength() == s.getScheduleLength()
assert newsched.getThreadOrder() == ["0", "2", "0", "1", "3", "1", "0"]
assert newsched.getScheduleLength() == 8
assert newsched.schedule[0].chosen == "2"
assert newsched.schedule[1].chosen == "0"
assert newsched.schedule[2].chosen == "0"
assert newsched.schedule[3].chosen == "1"
assert newsched.schedule[4].chosen == "3"
assert newsched.schedule[5].chosen == "1"
assert newsched.schedule[6].chosen == "1"
assert newsched.schedule[7].chosen == "0"
#frontier = 7
#start : 0,2,0,0,1,3,1,3,0
#end : 0,2,0,0,1,3,1,3,3
length += 1
newsched = s.blockExtend(length)
assert newsched != Schedule()
assert newsched.getScheduleLength() == s.getScheduleLength()
assert newsched.getThreadOrder() == ["0", "2", "0", "1", "3", "1", "3"]
assert newsched.getScheduleLength() == 8
assert newsched.schedule[0].chosen == "2"
assert newsched.schedule[1].chosen == "0"
assert newsched.schedule[2].chosen == "0"
assert newsched.schedule[3].chosen == "1"
assert newsched.schedule[4].chosen == "3"
assert newsched.schedule[5].chosen == "1"
assert newsched.schedule[6].chosen == "3"
assert newsched.schedule[7].chosen == "3"
#frontier = 8
#start : 0,2,0,0,1,3,1,3,0
#end : 0,2,0,0,1,3,1,3,0
length += 1
try:
newsched = s.blockExtend(length)
except:
pass
else:
assert False
return 1
# NOTE: Python list slicing produces shallow copies; the testFailure* cases
# below verify that schedules returned by the API are deep copies.
def testFailureOne():
    """Regression: consolidateFrontierBackward must return a deep copy.

    Mutating an entry of the returned schedule must not leak back into the
    source schedule (guards against shallow list-slice copies).
    """
    thrille_root = os.environ.get("THRILLE_ROOT")
    assert thrille_root is not None
    test_dir = os.path.join(thrille_root, "tests", "fwdrev")
    assert os.path.exists(test_dir)
    sched_path = os.path.join(test_dir, "09.sched")
    assert os.path.exists(sched_path)
    source = Schedule(sched_path)
    frontier = source.getScheduleLength() - 1
    assert frontier == 7
    result = source.consolidateFrontierBackward(frontier)
    assert result != Schedule()
    # Mutate the copy and confirm the original is untouched.
    assert source.schedule[0].caller != "5"
    result.schedule[0].caller = "5"
    assert result.schedule[0].caller == "5"
    assert source.schedule[0].caller != "5"
    return 1
def testFailureTwo():
    """Regression: consolidateFrontierForward must return a deep copy.

    Same shallow-copy guard as testFailureOne, exercised on the forward
    consolidation path.
    """
    thrille_root = os.environ.get("THRILLE_ROOT")
    assert thrille_root is not None
    test_dir = os.path.join(thrille_root, "tests", "fwdrev")
    assert os.path.exists(test_dir)
    sched_path = os.path.join(test_dir, "05.sched")
    assert os.path.exists(sched_path)
    source = Schedule(sched_path)
    result = source.consolidateFrontierForward(3)
    assert result != Schedule()
    # Mutate the copy and confirm the original is untouched.
    assert source.schedule[0].caller != "banana"
    result.schedule[0].caller = "banana"
    assert result.schedule[0].caller == "banana"
    assert source.schedule[0].caller != "banana"
    return 1
def testFailureThree():
tr = os.environ.get("THRILLE_ROOT")
assert tr is not None
td = os.path.join(tr, "tests", "fwdrev")
test5 = os.path.join(td, "05.sched")
assert os.path.exists(test5)
s = Schedule(test5)
tmp = s.removeFinalBlockOfThread("1")
assert tmp != Schedule()
assert s.schedule[0].addr != "shallow_copy_fail"
tmp.schedule[0].addr = "shallow_copy_fail"
assert tmp.schedule[0].addr == "shallow_copy_fail"
assert s.schedule[0].addr != "shallow_copy_fail"
return | |
by timeOfDayIsUTC.
timeOfDayIsUTC - The time zone or zones of the timeOfDay parameter. When
set to false, which is the default value, the timeOfDay
parameter refers to the time zone or zones in which the
facilities are located. Therefore, the start or end times
of the service areas are staggered by time zone.
travelDirection - Options for traveling to or from the facility. The
default is defined in the network analysis layer.
returnZ - If true, Z values will be included in saPolygons and saPolylines
geometry if the network dataset is Z-aware.
"""
if not self.layerType == "esriNAServerServiceAreaLayer":
raise ValueError("The solveServiceArea operation is supported on a network "
"layer of Service Area type only")
url = self._url + "/solveServiceArea"
params = {
"f" : "json",
"facilities": facilities
}
if not barriers is None:
params['barriers'] = barriers
if not polylineBarriers is None:
params['polylineBarriers'] = polylineBarriers
if not polygonBarriers is None:
params['polygonBarriers'] = polygonBarriers
if not travelMode is None:
params['travelMode'] = travelMode
if not attributeParameterValues is None:
params['attributeParameterValues'] = attributeParameterValues
if not defaultBreaks is None:
params['defaultBreaks'] = defaultBreaks
if not excludeSourcesFromPolygons is None:
params['excludeSourcesFromPolygons'] = excludeSourcesFromPolygons
if not mergeSimilarPolygonRanges is None:
params['mergeSimilarPolygonRanges'] = mergeSimilarPolygonRanges
if not outputLines is None:
params['outputLines'] = outputLines
if not outputPolygons is None:
params['outputPolygons'] = outputPolygons
if not overlapLines is None:
params['overlapLines'] = overlapLines
if not overlapPolygons is None:
params['overlapPolygons'] = overlapPolygons
if not splitLinesAtBreaks is None:
params['splitLinesAtBreaks'] = splitLinesAtBreaks
if not splitPolygonsAtBreaks is None:
params['splitPolygonsAtBreaks'] = splitPolygonsAtBreaks
if not trimOuterPolygon is None:
params['trimOuterPolygon'] = trimOuterPolygon
if not trimPolygonDistance is None:
params['trimPolygonDistance'] = trimPolygonDistance
if not trimPolygonDistanceUnits is None:
params['trimPolygonDistanceUnits'] = trimPolygonDistanceUnits
if not returnFacilities is None:
params['returnFacilities'] = returnFacilities
if not returnBarriers is None:
params['returnBarriers'] = returnBarriers
if not returnPolylineBarriers is None:
params['returnPolylineBarriers'] = returnPolylineBarriers
if not returnPolygonBarriers is None:
params['returnPolygonBarriers'] = returnPolygonBarriers
if not outSR is None:
params['outSR'] = outSR
if not accumulateAttributeNames is None:
params['accumulateAttributeNames'] = accumulateAttributeNames
if not impedanceAttributeName is None:
params['impedanceAttributeName'] = impedanceAttributeName
if not restrictionAttributeNames is None:
params['restrictionAttributeNames'] = restrictionAttributeNames
if not restrictUTurns is None:
params['restrictUTurns'] = restrictUTurns
if not outputGeometryPrecision is None:
params['outputGeometryPrecision'] = outputGeometryPrecision
if not outputGeometryPrecisionUnits is None:
params['outputGeometryPrecisionUnits'] = outputGeometryPrecisionUnits
if not useHierarchy is None:
params['useHierarchy'] = useHierarchy
if not timeOfDay is None:
params['timeOfDay'] = timeOfDay
if not timeOfDayIsUTC is None:
params['timeOfDayIsUTC'] = timeOfDayIsUTC
if not travelDirection is None:
params['travelDirection'] = travelDirection
if not returnZ is None:
params['returnZ'] = returnZ
if method.lower() == "post":
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
else:
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
########################################################################
class ClosestFacilityNetworkLayer(NetworkLayer):
"""
The Closest Facility Network Layer which has common properties of Network
Layer as well as some attributes unique to Closest Facility Network Layer
only.
"""
#specific to Closest Facility
_directionsLanguage = None
_directionsLengthUnits = None
_outputLineType = None
_timeOfDayIsUTC = None
_travelDirection = None
_defaultCutoffValue = None
_facilityCount = None
_directionsTimeAttribute = None
_timeOfDayUsage = None
_directionsSupportedLanguages = None
_directionsStyleNames = None
_timeOfDay = None
#----------------------------------------------------------------------
    def __init__(self, url, securityHandler=None,
                 proxy_url=None, proxy_port=None,
                 initialize=False):
        """Initialize the closest-facility layer from its REST endpoint URL.

        NOTE(review): securityHandler, proxy_url, proxy_port and initialize
        are accepted but never forwarded — only url reaches
        NetworkLayer.__init__.  __init() later reads self._securityHandler,
        self._proxy_url and self._proxy_port, so those are presumably set by
        the base class — TODO confirm against NetworkLayer's signature.
        """
        NetworkLayer.__init__(self, url)
#----------------------------------------------------------------------
def __init(self):
""" initializes all the properties """
params = {
"f" : "json"
}
# TODO handle spaces in the url, 'Closest Facility' should be 'Closest+Facility'
self._url = self._url.replace(' ','+')
json_dict = self._get(url=self._url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.items():
if k in attributes:
setattr(self, "_"+ k, json_dict[k])
else:
print (k, " - attribute not implemented in ClosestFacilityNetworkLayer.")
del k,v
#----------------------------------------------------------------------
@property
def directionsLanguage(self):
if self._directionsLanguage is None:
self.__init()
return self._directionsLanguage
#----------------------------------------------------------------------
@property
def directionsLengthUnits(self):
if self._directionsLengthUnits is None:
self.__init()
return self._directionsLengthUnits
#----------------------------------------------------------------------
@property
def outputLineType(self):
if self._outputLineType is None:
self.__init()
return self._outputLineType
#----------------------------------------------------------------------
@property
def timeOfDayIsUTC(self):
if self._timeOfDayIsUTC is None:
self.__init()
return self._timeOfDayIsUTC
#----------------------------------------------------------------------
@property
def travelDirection(self):
if self._travelDirection is None:
self.__init()
return self._travelDirection
#----------------------------------------------------------------------
@property
def defaultCutoffValue(self):
if self._defaultCutoffValue is None:
self.__init()
return self._defaultCutoffValue
#----------------------------------------------------------------------
@property
def facilityCount(self):
if self._facilityCount is None:
self.__init()
return self._facilityCount
#----------------------------------------------------------------------
@property
def directionsTimeAttribute(self):
if self._directionsTimeAttribute is None:
self.__init()
return self._directionsTimeAttribute
#----------------------------------------------------------------------
@property
def timeOfDayUsage(self):
if self._timeOfDayUsage is None:
self.__init()
return self._timeOfDayUsage
#----------------------------------------------------------------------
@property
def directionsSupportedLanguages(self):
if self._directionsSupportedLanguages is None:
self.__init()
return self._directionsSupportedLanguages
#----------------------------------------------------------------------
@property
def directionsStyleNames(self):
if self._directionsStyleNames is None:
self.__init()
return self._directionsStyleNames
#----------------------------------------------------------------------
@property
def timeOfDay(self):
if self._timeOfDay is None:
self.__init()
return self._timeOfDay
#----------------------------------------------------------------------
def solveClosestFacility(self,incidents,facilities,method="POST",
barriers=None,
polylineBarriers=None,
polygonBarriers=None,
travelMode=None,
attributeParameterValues=None,
returnDirections=None,
directionsLanguage=None,
directionsStyleName=None,
directionsLengthUnits=None,
directionsTimeAttributeName=None,
returnCFRoutes=True,
returnFacilities=False,
returnIncidents=False,
returnBarriers=False,
returnPolylineBarriers=False,
returnPolygonBarriers=False,
outputLines=None,
defaultCutoff=None,
defaultTargetFacilityCount=None,
travelDirection=None,
outSR=None,
accumulateAttributeNames=None,
impedanceAttributeName=None,
restrictionAttributeNames=None,
restrictUTurns=None,
useHierarchy=True,
outputGeometryPrecision=None,
outputGeometryPrecisionUnits=None,
timeOfDay=None,
timeOfDayIsUTC=None,
timeOfDayUsage=None,
returnZ=False):
"""The solve operation is performed on a network layer resource of
type closest facility (layerType is esriNAServerClosestFacilityLayer).
You can provide arguments to the solve route operation as query
parameters.
Inputs:
facilities - The set of facilities loaded as network locations
during analysis. Facilities can be specified using
a simple comma / semi-colon based syntax or as a
JSON structure. If facilities are not specified,
preloaded facilities from the map document are used
in the analysis.
incidents - The set of incidents loaded as network locations
during analysis. Incidents can be specified using
a simple comma / semi-colon based syntax or as a
JSON structure. If incidents are not specified,
preloaded incidents from the map document are used
in the analysis.
barriers - The set of barriers loaded as network locations during
analysis. Barriers can be specified using a simple comma
/ semi-colon based syntax or as a JSON structure. If
barriers are not specified, preloaded barriers from the
map document are used in the analysis. If an empty json
object is passed ('{}') preloaded barriers are ignored.
polylineBarriers - The set of polyline barriers loaded as network
locations during analysis. If polyline barriers
are not specified, preloaded polyline barriers
from the map document are used in the analysis.
If an empty json object is passed ('{}')
preloaded polyline barriers are ignored.
polygonBarriers - The set of polygon barriers loaded as network
locations during analysis. If polygon barriers
are not specified, preloaded polygon barriers
from the map document are used in the analysis.
If an empty json object is passed ('{}') preloaded
polygon barriers are ignored.
travelMode - Travel modes provide override values that help you
quickly and consistently model a vehicle or mode of
transportation. The chosen travel mode must be
preconfigured on the network dataset that the routing
service references.
attributeParameterValues - A set of attribute parameter values that
can be parameterized to determine which
network elements can be used by a vehicle.
returnDirections - If true, directions will be generated and returned
with the analysis results. Default is true.
directionsLanguage - The language to be used when computing directions.
The default is as defined in the network layer. The
list of supported languages can be found in REST
layer description.
directionsOutputType - Defines content, verbosity of returned
directions. The default is esriDOTStandard.
Values: esriDOTComplete | esriDOTCompleteNoEvents
| esriDOTInstructionsOnly | esriDOTStandard |
esriDOTSummaryOnly
directionsStyleName - The style to be used when returning the directions.
The default is as defined in the network layer. The
list of supported styles can be found in REST
layer description.
directionsLengthUnits - The length units to use when computing directions.
The default is as defined in the network layer.
Values: esriNAUFeet | esriNAUKilometers |
esriNAUMeters | esriNAUMiles |
esriNAUNauticalMiles | esriNAUYards |
esriNAUUnknown
directionsTimeAttributeName - The name of network attribute to use for
the drive time when computing directions.
The default is as defined in the network
layer.
returnCFRoutes - If true, closest facilities routes will be returned
with the analysis results. Default is true.
returnFacilities - If true, facilities will be returned with the
analysis results. Default is false.
returnIncidents - If true, incidents will be returned with the
analysis results. Default is false.
returnBarriers - If true, barriers will be returned with the analysis
results. Default is false.
returnPolylineBarriers | |
Evolve a one dimensional MPS, MPO or PMPS state using tMPS as described in
the module's documentation.
The initial state, Hamiltonians and certain parameters are required. The
output is a list of times and a list of the evolved states at these times.
Those states might be subsystems of the whole evolved system,
which allows for the user to keep memory consumption small by
focusing on the subsystems of interest.
.. todo::
Raise exception if hamiltonians are not of the right dimension
.. todo::
Implement tracking of compression errors.
.. todo::
Get variable compression to work (might involve changing mpnum).
Args:
state (mpnum.MPArray):
The state to be evolved in time. The state has to be an MPS, MPO or
PMPS, depending on which method is chosen
hamiltonians (list):
Either a list containing the Hamiltonian acting on every single site
and the Hamiltonian acting on every two adjacents sites, like
``[H_i, H_ij]``, or a list containing a list of Hamiltonians acting
on the single sites and a list of Hamiltonians acting on each two
adjacent sites, like ``[[h1, h2, h3, ...], [h12, h23, h34, ...]]``
num_trotter_slices (int):
Number of Trotter slices to be used for evolution over time equal to
the largest t in ts.
method (str):
Which method to use. Either 'mps', 'mpo' or 'pmps'.
trotter_order (int):
Order of Trotter-Suzuki decomposition to be used. Currently only 2
and 4 are implemented
ts (list[float]):
The times for which the evolution should be computed and the state
of the full system or a subsystem returned (i.e. it's reduced
density matrix). The algorithm will calculate the
evolution using the given number of Trotter steps for the largest
number in ts. On the way there it will store the evolved states for
smaller times. NB: Beware of memory overload since len(t) number of
mpnum.MPArrays will be stored
trotter_compr (dict):
Compression parameters used in the iterations of Trotter (in the
form required by :func:`mpnum.MPArray.compress`. If unsure, look at
https://github.com/dseuss/mpnum/blob/master/examples/mpnum_intro.ipynb .)
If omitted, some default compression will be used that will
probably work but might lead to problems. See
:func:`_set_compr_params` for more information.
compr (dict):
Parameters for the compression which is executed on every MPA during
the calculations, except for the Trotter calculation, where
trotter_compr is used. compr = dict(method='svd', rank=10) would for
example ensure that the ranks of any MPA never exceed 10 during all
of the calculations. An accepted relative error for the
compression can be provided in addition to or instead of ranks,
which would lead to e.g.
compr = dict(method='svd', rank=10, relerr=1e-12).
If omitted, some default compression will be used that will
probably work but might lead to problems. See
:func:`_set_compr_params` for more information.
subsystems (list):
A list defining for which subsystem the reduced density matrix or
whether the full state should be returned for a time in ``ts``.
This can be a list of the length same as that of ``ts`` looking
like ``[[a1, b1], [a2, b2], ...]`` or just a list like ``[a, b]``.
In the first case the respective subsystem for every entry in ts
will be returned, in the second case the same subsystem will be
returned for all entries in ``ts``. ``[a, b]`` will lead to a
return of the reduced density matrix of the sites from ``a`` up to,
but not including, ``b``. For example ``[0, 3]`` if the reduced
density matrix of the first three sites should be returned. A time
can occur twice in ``ts`` and then different subsystems to be
returned can be defined for that same time. If this parameter is
omitted, the full system will be returned for every time in ``ts``.
v (int):
Level of verbose output. 0 means no output, 1 means that some
basic output showing the progress of calculations is produced. 2
will in addition show the bond dimensions of the state after every
couple of iterations, 3 will show bond dimensions after every
Trotter iteration.
Returns:
list[list[float], list[list[int]], list[mpnum.MPArray]]:
A list with five items: (i) The list of times for which the density
matrices have been computed (ii) The list indicating which
subsystems of the system are returned at the respective time of the
first list (iii) The list of density matrices as MPO or PMPS as
mpnum.MPArray, depending on the input "method". If that was MPS, the
full states will still be MPSs, the reduced ones will be MPOs.
"""
if compr is None: compr, _ = _set_compr_params()
if trotter_compr is None: _, trotter_compr = _set_compr_params()
state.compress(**compr)
state = normalize(state, method)
if len(state) < 3:
raise ValueError("State has too few sites")
if (np.array(ts) == 0).all():
raise ValueError(
"No time evolution requested by the user. Check your input 't'")
if subsystems == None:
subsystems = [0, len(state)]
step_numbers, tau = _times_to_steps(ts, num_trotter_slices)
subsystems = _get_subsystems_list(subsystems, len(step_numbers))
us = _trotter_slice(hamiltonians=hamiltonians, tau=tau,
num_sites=len(state), trotter_order=trotter_order,
compr=compr)
if v != 0:
print("Time evolution operator for Trotter slice calculated, "
"starting "
"Trotter iterations...")
return _time_evolution(state, us, step_numbers, subsystems, tau, method,
trotter_compr, v)
def _time_evolution(state, us, step_numbers, subsystems, tau, method,
                    trotter_compr, v):
    """
    Implements time-evolution via Trotter-Suzuki decomposition.

    Args:
        state (mpnum.MPArray):
            The state to be evolved in time
        us (list[mpnum.MPArray]):
            List of ordered operator exponentials for a single Trotter slice
        step_numbers (list[int]):
            List of time steps as generated by :func:`_times_to_steps`
        subsystems (list[list[int]]):
            Sites for which the subsystem states should be returned at the
            respective times
        tau (float):
            Duration of one Trotter slice. As defined in :func:`_times_to_steps`
        method (str):
            Which method to use. Either 'mps', 'mpo' or 'pmps'.
        trotter_compr (dict):
            Compression parameters used in the iterations of Trotter-Suzuki
            decomposition.
        v (int):
            Level of verbose output. 0 means no output, 1 means that some
            basic output showing the progress of calculations is produced. 2
            will in addition show the bond dimensions of the state after every
            couple of iterations, 3 will show bond dimensions after every
            Trotter iteration.

    Returns:
        tuple[list[float], list[list[int]], list[mpnum.MPArray]]:
            Three items: (i) the list of times for which states have been
            computed, (ii) the list indicating which subsystem of the system
            is returned at the respective time, (iii) the list of evolved
            states (MPO or PMPS depending on ``method``; for MPS input the
            full states stay MPSs while reduced ones become MPOs).
            Compression and Trotter error lists are tracked internally but
            currently not returned (see the commented-out return tail).
    """
    # A step number may be requested several times (different subsystems for
    # the same time); Counter tells how many outputs to emit per step.
    c = Counter(step_numbers)
    times = []
    states = []
    compr_errors = []  # accumulated, but not returned at present
    trot_errors = []   # accumulated, but not returned at present
    var_compression = False
    if trotter_compr['method'] == 'var':
        var_compression = True
    accumulated_overlap = 1
    accumulated_trotter_error = 0
    for i in range(max(step_numbers) + 1):
        # Emit the state for every request registered at step i, before
        # applying the next Trotter slice.
        for j in range(c[i]):
            _append(times, states, compr_errors, trot_errors, tau, i, j,
                    step_numbers, subsystems, state, accumulated_overlap,
                    accumulated_trotter_error, method)
        # Apply one full Trotter slice from the left, compressing after each
        # operator exponential.
        for u in us:
            if var_compression:
                # Variational compression needs a starting MPA; seed it with
                # a copy of the current state.
                trotter_compr['startmpa'] = mp.MPArray.copy(state)
            state = mp.dot(u, state)
            accumulated_overlap *= state.compress(**trotter_compr)
        if method == 'mpo':
            # Density operators evolve as U rho U^dagger: apply the
            # conjugate-transposed slice from the right as well.
            for u in us:
                if var_compression:
                    trotter_compr['startmpa'] = mp.MPArray.copy(state)
                state = mp.dot(state, u.T.conj())
                accumulated_overlap *= state.compress(**trotter_compr)
        state = normalize(state, method)
        # Per-slice error estimate; presumably for 2nd-order Trotter
        # (error ~ tau^3 per slice) — TODO confirm for trotter_order=4.
        accumulated_trotter_error += tau ** 3
        # Progress is printed at perfect-square iteration counts (1, 4, 9, ...)
        # to keep output sparse on long runs.
        if (v == 1 or v == 2) and np.sqrt(i + 1) % 1 == 0 and i < \
                step_numbers[-1]:
            print(str(i + 1) + " Trotter iterations finished...")
            if v == 2:
                print("Ranks: " + str(state.ranks))
        if v == 3 and i < step_numbers[-1]:
            print(str(i + 1) + " Trotter iterations finished...")
            print("Ranks: " + str(state.ranks))
    if v != 0:
        print("Done with time evolution")
    return times, subsystems, states  # , compr_errors, trot_errors
def _append(times, states, compr_errors, trot_errors, tau, i, j, step_numbers,
subsystems, state, accumulated_overlap,
accumulated_trotter_error, method):
"""
Function to append time evolved state etc to output of
:func:`_time_evolution`
Args:
times (list[float]):
List containing the times to which the states are evolved
states (list[mpnum.MPArray]):
List containing the evolved states
compr_errors (list[float]):
List containing the respective compression errors
trot_errors (list[float]):
List containing the respective Trotter errors
tau (float):
The time of one Trotter | |
<gh_stars>1-10
import pandas as pd
import os
import datetime
import numpy as np
import statsmodels.formula.api as sml
import QUANTAXIS as QA
import datetime
import numpy as np
import statsmodels.formula.api as sml
from QAStrategy.qastockbase import QAStrategyStockBase
import matplotlib.pyplot as plt
import scipy.stats as scs
import matplotlib.mlab as mlab
from easyquant.indicator.base import *
import json
from easyquant import MongoIo
import statsmodels.api as sm
from multiprocessing import Process, Pool, cpu_count, Manager
from concurrent.futures import ProcessPoolExecutor,ThreadPoolExecutor,as_completed
# Shared worker plumbing: a thread pool for concurrent fetches plus
# Manager-backed dicts so data can cross process boundaries.
executor = ThreadPoolExecutor(max_workers=cpu_count() * 2)
mongo = MongoIo()  # shared MongoDB accessor
databuf_mongo = Manager().dict()    # input shards, keyed by worker index (see tdx_func_mp)
databuf_tdxfunc = Manager().dict()  # per-shard results written by do_tdx_func
pool_size = cpu_count()  # number of process-pool workers / input shards
# print("pool size=%d" % pool_size)
def tdx_base_func(data, code_list=None):
    """Derive buy/sell flags from EMA-slope crossovers, then simulate trades.

    Args:
        data: OHLC DataFrame for a single security (date/code MultiIndex
            assumed by the downstream simulation — TODO confirm).
        code_list: unused; kept for signature compatibility with callers.

    Returns:
        The DataFrame produced by :func:`do_buy_sell_fun`, carrying the
        added ``bflg``/``sflg``/``ret`` columns plus the simulation output.
    """
    close = data.close
    fast = SLOPE(EMA(close, 3), 3)  # short-horizon trend slope
    slow = SLOPE(EMA(close, 7), 7)  # longer-horizon trend slope
    # Buy setup: fast sat below slow on 4 of the last 5 bars and has now
    # caught up to (or crossed above) it.
    buy_setup = IFAND(COUNT(fast < slow, 5) == 4, fast >= slow, 1, 0)
    # Sell signal: the mirrored condition.
    sell_flag = IFAND(COUNT(fast >= slow, 5) == 4, fast < slow, 1, 0)
    # "Diamond": an actual upward cross accompanied by a >3% daily gain.
    diamond = IFAND(CROSS(fast, slow), close / REF(close, 1) > 1.03, 1, 0)
    buy_flag = IFAND(buy_setup, diamond, 1, 0)

    data = data.copy()
    data['bflg'] = buy_flag
    data['sflg'] = sell_flag
    data['ret'] = data.close.pct_change(1)  # daily return
    return do_buy_sell_fun(data)
def do_tdx_func(key):
    """Worker: run tdx_func on input shard *key* and store the result.

    Reads from the shared ``databuf_mongo`` dict and writes the processed
    frame into ``databuf_tdxfunc`` under the same key.
    """
    shard = databuf_mongo[key]
    databuf_tdxfunc[key] = tdx_func(shard)
def tdx_func(datam, code_list=None):
    """Run tdx_base_func per security code and concatenate the results.

    Args:
        datam: DataFrame with a (date, code) MultiIndex covering many codes.
        code_list: codes to process; defaults to every code level in the index.

    Returns:
        Sorted concatenation of the per-code result frames (empty DataFrame
        when there are no codes).
    """
    start_t = datetime.datetime.now()
    print("begin-tdx_func:", start_t)
    if code_list is None:
        code_list = datam.index.levels[1]
    # Collect per-code frames and concatenate once: DataFrame.append was
    # removed in pandas 2.0 and repeated appends are quadratic anyway.
    frames = []
    for code in code_list:
        data = datam.query("code=='%s'" % code)
        frames.append(tdx_base_func(data))
    dataR = pd.concat(frames) if frames else pd.DataFrame()
    end_t = datetime.datetime.now()
    print(end_t, 'tdx_func spent:{}'.format((end_t - start_t)))
    return dataR.sort_index()
def tdx_func_mp():
    """Multiprocess driver: run do_tdx_func over each pre-sharded input slot.

    Expects ``databuf_mongo[0..pool_size-1]`` to be populated beforehand;
    fans the work out over a process pool, then collects the per-shard
    results from ``databuf_tdxfunc`` into one sorted DataFrame.

    Returns:
        The concatenated, index-sorted result frame.
    """
    start_t = datetime.datetime.now()
    print("begin-tdx_func_mp :", start_t)
    pool = Pool(cpu_count())
    for i in range(pool_size):
        pool.apply_async(do_tdx_func, args=(i, ))
    pool.close()
    pool.join()
    # Concatenate once instead of repeated DataFrame.append (removed in
    # pandas 2.0, and quadratic).
    frames = [databuf_tdxfunc[i] for i in range(pool_size)]
    dataR = pd.concat(frames) if frames else pd.DataFrame()
    # Bug fix: sort_index() returns a new frame; the original called it and
    # discarded the result, returning the data unsorted.
    dataR = dataR.sort_index()
    end_t = datetime.datetime.now()
    print(end_t, 'tdx_func_mp spent:{}'.format((end_t - start_t)))
    return dataR
def buy_sell_fun(datam, code, S1=1.0, S2=0.8):
    """Select a single code from the multi-code frame and simulate trades.

    Args:
        datam: DataFrame with a (date, code) MultiIndex.
        code: security code to extract.
        S1, S2: unused here; forwarded-style defaults kept for compatibility.

    Returns:
        The simulation DataFrame from :func:`do_buy_sell_fun`.
    """
    single = datam.query("code=='%s'" % code).copy()
    return do_buy_sell_fun(single)
def do_buy_sell_fun_nm(data, S1=1.0, S2=0.8):
    """
    Slope-indicator trade simulation (no stop-loss / take-profit variant).

    Walks the bars once: opens a position on the bar after a buy flag
    (``bflg``) using that bar's open as entry price, and closes on a sell
    flag (``sflg``).  Mutates *data* in place, adding ``flag`` (1 buy /
    -1 sell), ``position`` (1 while holding), ``hold_price`` (entry price)
    and ``nav`` (cumulative strategy return) columns, and returns it.

    NOTE(review): S1/S2 are never read; ``sflg``/``hdays`` locals are
    written but never consumed in this variant — TODO confirm intent.
    """
    # price = datam.query("code=='%s'" % code)
    # # data = price.copy()
    # data = price.copy()
    data['flag'] = 0  # trade marker: 1 = buy, -1 = sell
    data['position'] = 0  # holding marker: 1 while in a position
    data['hold_price'] = 0  # entry price while holding
    # Column positions resolved once for fast .iat access inside the loop.
    bflag = data.columns.get_loc('bflg')
    sflag = data.columns.get_loc('sflg')
    # beta = data.columns.get_loc('beta')
    flag = data.columns.get_loc('flag')
    position_col = data.columns.get_loc('position')
    close_col = data.columns.get_loc('close')
    high_col = data.columns.get_loc('high')
    open_col = data.columns.get_loc('open')
    hold_price_col = data.columns.get_loc('hold_price')
    position = 0  # currently holding? 1 = yes, 0 = no
    sflg = 0
    hdays = 0
    for i in range(1, data.shape[0] - 1):
        # Count days held (reset when flat); currently unused downstream.
        if position > 0:
            hdays = hdays + 1
        else:
            hdays = 0
        # Open: buy flag while flat — mark this bar and the next, enter at
        # the next bar's open.
        if data.iat[i, bflag] > 0 and position == 0:
            data.iat[i, flag] = 1
            data.iat[i, position_col] = 1
            data.iat[i + 1, flag] = 1
            data.iat[i + 1, position_col] = 1
            data.iat[i + 1, hold_price_col] = data.iat[i+1, open_col]
            position = 1
            print("buy : date=%s code=%s price=%.2f" % (data.iloc[i+1].name[0], data.iloc[i+1].name[1], data.iloc[i+1].close))
            # hdays = 0
        # Close: sell flag while holding — flat from the next bar on.
        # NOTE(review): cprice/hold_price are computed but unused here, and
        # unlike the buy branch no -1 flag is written at i+1 — TODO confirm.
        elif data.iat[i, sflag] > 0 and position == 1:
            cprice = data.iat[i, close_col]
            # oprice = data.iat[i, open_col]
            hold_price = data.iat[i, hold_price_col]
            data.iat[i, flag] = -1
            data.iat[i + 1, position_col] = 0
            data.iat[i + 1, hold_price_col] = 0
            position = 0
            print("sell : date=%s code=%s price=%.2f" % (data.iloc[i].name[0], data.iloc[i].name[1], data.iloc[i].close))
            sflg = 0
        # Hold: carry the position state forward to the next bar.
        else:
            data.iat[i + 1, position_col] = data.iat[i, position_col]
            data.iat[i + 1, hold_price_col] = data.iat[i, hold_price_col]
    # Strategy equity curve: compound daily returns only while in position.
    data['nav'] = (1+data.close.pct_change(1).fillna(0) * data.position).cumprod() - 1
    return data
def do_buy_sell_fun(data, S1=1.0, S2=0.8):
"""
斜率指标交易策略标准分策略
"""
# price = datam.query("code=='%s'" % code)
# # data = price.copy()
# data = price.copy()
data['flag'] = 0 # 买卖标记
data['position'] = 0 # 持仓标记
data['hold_price'] = 0 # 持仓价格
bflag = data.columns.get_loc('bflg')
# beta = data.columns.get_loc('beta')
flag = data.columns.get_loc('flag')
position_col = data.columns.get_loc('position')
close_col = data.columns.get_loc('close')
high_col = data.columns.get_loc('high')
open_col = data.columns.get_loc('open')
hold_price_col = data.columns.get_loc('hold_price')
position = 0 # 是否持仓,持仓:1,不持仓:0
sflg = 0
hdays = 0
for i in range(1,data.shape[0] - 1):
# 开仓
if position > 0:
hdays = hdays + 1
else:
hdays = 0
if data.iat[i, bflag] > 0 and position == 0:
sflg = 0
if data.iat[i+1,open_col] < data.iat[i,close_col] * 1.05\
and data.iat[i+1,open_col] > data.iat[i,close_col]:
# if data.iat[i+1,open_col] > data.iat[i,close_col] * 1.07:
data.iat[i + 1, flag] = 1
data.iat[i + 1, position_col] = 1
data.iat[i + 1, hold_price_col] = data.iat[i+1, open_col]
position = 1
print("buy : date=%s code=%s price=%.2f" % (data.iloc[i+1].name[0], data.iloc[i+1].name[1], data.iloc[i+1].close))
# hdays = 0
else:
data.iat[i + 1, position_col] = data.iat[i, position_col]
data.iat[i + 1, hold_price_col] = data.iat[i, hold_price_col]
# pass
# 平仓
# elif data.iat[i, bflag] == S2 and position == 1:
elif data.iat[i, position_col] > 0 and position == 1:
cprice = data.iat[i, close_col]
# oprice = data.iat[i, open_col]
hold_price = data.iat[i, hold_price_col]
if cprice < hold_price * 0.95:
sflg = -1
elif cprice > hold_price * 1.1 and sflg <= 0:
sflg = 1
high_price = data.iat[i, high_col]
elif cprice > hold_price * 1.2 and sflg < 2:
sflg = 2
high_price = data.iat[i, high_col]
elif cprice > hold_price * 1.3 and sflg < 3:
sflg = 3
high_price = data.iat[i, high_col]
elif cprice > hold_price * 1.4 and sflg < 4:
sflg = 4
high_price = data.iat[i, high_col]
elif cprice > hold_price * 1.5 and sflg < 5:
sflg = 5
high_price = data.iat[i, high_col]
if sflg < 0:# or cprice > hprice * 1.2:
data.iat[i, flag] = -1
data.iat[i + 1, position_col] = 0
data.iat[i + 1, hold_price_col] = 0
position = 0
print("sell -5 : date=%s code=%s price=%.2f" % (data.iloc[i].name[0], data.iloc[i].name[1], data.iloc[i].close))
sflg = 0
elif sflg == 5 and high_price / cprice > 1.08:
data.iat[i, flag] = -1
data.iat[i + 1, position_col] = 0
data.iat[i + 1, hold_price_col] = 0
position = 0
print("sell 50 : date=%s code=%s price=%.2f" % (data.iloc[i].name[0], data.iloc[i].name[1], data.iloc[i].close))
sflg = 0
elif sflg == 4 and high_price / cprice > 1.07:
data.iat[i, flag] = -1
data.iat[i + 1, position_col] = 0
data.iat[i + 1, hold_price_col] = 0
position = 0
print("sell 40 : date=%s code=%s price=%.2f" % (data.iloc[i].name[0], data.iloc[i].name[1], data.iloc[i].close))
sflg = 0
elif sflg == 3 and high_price / cprice > 1.06:
data.iat[i, flag] = -1
data.iat[i + 1, position_col] = 0
data.iat[i + 1, hold_price_col] = 0
position = 0
print("sell 30 : date=%s code=%s price=%.2f" % (data.iloc[i].name[0], data.iloc[i].name[1], data.iloc[i].close))
sflg = 0
elif sflg == 2 and high_price / cprice > 1.05:
data.iat[i, flag] = -1
data.iat[i + 1, position_col] = 0
data.iat[i + 1, hold_price_col] = 0
position = 0
print("sell 20 : date=%s code=%s price=%.2f" % (data.iloc[i].name[0], data.iloc[i].name[1], data.iloc[i].close))
sflg = 0
elif sflg == 1 and high_price / cprice > 1.04:
data.iat[i, flag] = -1
data.iat[i + 1, position_col] = 0
data.iat[i + 1, hold_price_col] = 0
position = 0
print("sell 10 : date=%s code=%s price=%.2f" % (data.iloc[i].name[0], data.iloc[i].name[1], data.iloc[i].close))
sflg = 0
elif sflg == 0 and hdays > 3:
data.iat[i, flag] = -1
data.iat[i + 1, position_col] = 0
data.iat[i + 1, hold_price_col] = 0
position | |
# translate/translate_base.py
# coding: UTF-8
import re
import lxml.etree
import lxml.html
import html
import json
import os
import sys
import warnings
from collections import defaultdict
from docx import Document
from google.cloud.translate_v2 import Client
from openpyxl import load_workbook
from openpyxl.cell import Cell
from pptx import Presentation
from pptx.oxml import CT_TextLineBreak
# Path to the Google service-account credentials JSON; read eagerly so a
# missing GOOGLE_APPLICATION_CREDENTIALS fails at import time.
CREDS = os.environ["GOOGLE_APPLICATION_CREDENTIALS"]
# Nested map of docx run/font style attributes compared when deciding whether
# two adjacent runs share a style (consumed by TranslateBase.same_style_runs).
# A non-empty inner dict means "recurse into this attribute" (e.g. font.color.rgb).
DOCX_STYLE_PROPERTY = {
    'font': {
        'color': {
            'rgb': {}, 'theme_color': {}, 'type': {},
        },
        'name': {}, 'size': {}, 'bold': {}, 'italic': {}, 'underline': {}, 'strike': {},
        'double_strike': {}, 'subscript': {}, 'superscript': {}, 'no_proof': {}, 'hidden': {},
        'highlight_color': {}, 'math': {}, 'rtl': {}, 'outline': {}, 'emboss': {}, 'shadow': {}, 'imprint': {},
        'all_caps': {}, 'small_caps': {}, 'web_hidden': {}, 'complex_script': {}, 'cs_bold': {}, 'cs_italic': {},
        'snap_to_grid': {}, 'spec_vanish': {},
    },
}
# Per-target-language output settings: document language name ('lang'),
# optional replacement font ('font'), and literal post-translation text
# substitutions ('overwrite').
LANGUAGE_PROPERTIES = {
    'en': {'lang': 'English'},
    'fr': {'lang': 'French'},
    'ja': {
        'font': 'Hiragino Sans W3',
        # could also use 'MS Gothic', and for Windows could only find 'Hiragino Sans GB W3'
        'lang': 'Japanese',
        'overwrite': {
            ' / ': '/',  # what was this for?
        },
    }
}
# Paragraph styles whose text is left untranslated in .docx documents.
TRANSLATION_RULES = {
    'ignore_styles': ['Code', 'Normal-No spellcheck', 'Legal Text'],
}
class GoogleTranslate(object):
    """ Establish a Google Cloud Translate client to translate passages of text. """

    def __init__(self, creds, target_lang, source_lang=None, online=True, history=None, show=0):
        # creds: path to a service-account JSON file.
        # history: optional path to a JSON cache of previous translations;
        #          only honoured when `online` is true.
        # show: print the first `show` characters of each string sent out.
        self.client = Client.from_service_account_json(creds)
        self.target_lang = target_lang
        self.source_lang = source_lang
        self.online = online
        self.history = bool(history) and online
        self.history_file = history
        self.show = show
        self.prepare_translation()
        self.stats = defaultdict(dict)

    def prepare_translation(self):
        """ Reset stats; initialise translation dictionary using history if applicable and available. """
        self.cloud_requests = 0
        self.dummy_text = 0
        self.dict_hits = 0
        self.empty_strings = 0
        self.translated = {}
        if not (self.history and os.path.exists(self.history_file)):
            return
        try:
            with open(self.history_file, 'r') as f:
                self.translated = json.load(f)
        except (IOError, OSError):
            # History path is unreadable (e.g. it is a directory): warn and
            # redirect future saves to a file in the home directory.
            fallback_history_file = os.path.join(os.environ['HOME'], 'history_file.json')
            warnings.warn("'%s' is a directory - saving history in home directory as:\n '%s' " % (
                self.history_file, fallback_history_file
            ))
            self.history_file = fallback_history_file

    def request_translation(self, string):
        """ Submit one string to the Cloud Translation API. """
        return self.client.translate(string, target_language=self.target_lang, source_language=self.source_lang)

    def translate(self, string):
        """ Translate a single text element. """
        if not string:
            self.empty_strings += 1
            return string
        if string in self.translated:
            # Cache hit: no network round-trip needed.
            self.dict_hits += 1
            return u'{}'.format(self.translated[string])
        if self.show:
            print(f"{string[:self.show]}{' ...' if len(string) > self.show else ''}")
        if self.online:
            result = self.request_translation(string)['translatedText']
            # TODO: if not self.source_lang, look at ['detectedSourceLanguage']
            result = html.unescape(result)
            # Apply any literal post-translation overwrites for this language.
            overwrites = LANGUAGE_PROPERTIES.get(self.target_lang, {}).get('overwrite', {})
            for old, new in overwrites.items():
                result = result.replace(old, new)
            self.translated[string] = result
            self.cloud_requests += 1
        else:
            # Offline mode: substitute recognisable dummy text.
            self.translated[string] = "%s(%s)" % (self.target_lang, string)
            self.dummy_text += 1
        return u'{}'.format(self.translated[string])

    def multi_line(self, string):
        """ Translate a string line by line, preserving its line breaks. """
        return "\n".join(self.translate(line) for line in string.split('\n'))

    def save_history(self):
        """ Save the translation dictionary as a history file. """
        if not self.history:
            return
        try:
            with open(self.history_file, 'w') as f:
                json.dump(self.translated, f)
        except (IOError, OSError):
            # Destination not writeable: retry once against a fallback file
            # in the home directory.
            fallback_history_file = os.path.join(os.environ['HOME'], 'history_file.json')
            warnings.warn("'%s' is not writeable - saving history in home directory as:\n '%s' " % (
                self.history_file, fallback_history_file
            ))
            self.history_file = fallback_history_file
            with open(self.history_file, 'w') as f:
                json.dump(self.translated, f)

    def update_stats(self):
        """ Provide a set of summary stats on how the translation was done. """
        pair = '%s-%s' % (self.source_lang, self.target_lang)
        for counter in ('cloud_requests', 'dummy_text', 'dict_hits', 'empty_strings'):
            self.stats[pair][counter] = getattr(self, counter)
        self.stats[pair]['history'] = len(self.translated)
class TranslateBase(object):
    """ Build translation framework, independent of file format.

    Subclasses load a document in __init__, then call `execute` with their
    own `translate` method; paragraph/run handling is shared here.
    """
    def __init__(self, filepath, filename, translator, target=None, condense=False, cross_check=False):
        # translator: a GoogleTranslate-like object exposing translate /
        # multi_line / save_history / update_stats and language attributes.
        self.filepath = filepath
        self.source = filename
        self.filename, self.ext = filename.rsplit('.', 1)
        self.translator = translator
        self.condense = condense
        # Cross-checking (translating back) requires a known source language.
        self.cross_check = hasattr(self.translator, 'source_lang') and bool(self.translator.source_lang) and cross_check
        if cross_check and not bool(self.translator.source_lang):
            warnings.warn('Not possible to translate back: no source language was given')
        if not target:
            # Default output name: source name with the target language inserted.
            self.target = self.add_lang_to_filename(self.source)
        else:
            self.target = target
    def execute(self, translate_method):
        """ Method to combine translation and cross_check as in subclass.__init__ for .docx etc """
        translate_method()
        self.translator.save_history()
        self.translator.update_stats()
        if self.cross_check:
            # Translate the result back into the source language as a crude QA pass.
            self.swap_languages()
            translate_method()
            self.translator.save_history()
            self.translator.update_stats()
        # Google does not close the session: Connection='keep-alive'. The line below did not work.
        # self.translator.client._connection.http.close()
    def swap_languages(self):
        """ Swap languages and re-translate as a crude way of assessing the quality of the translation. """
        self.translator.target_lang, self.translator.source_lang = (
            self.translator.source_lang, self.translator.target_lang
        )
        if self.translator.history:
            self.translator.history_file = self.add_lang_to_filename(self.translator.history_file)
        # The previous output becomes the new input; derive a new output name.
        self.source, self.target = (self.target, self.add_lang_to_filename(self.target))
        self.translator.prepare_translation()
    def add_lang_to_filename(self, filename, lang=None):
        """ Insert language code into a filename (e.g. doc.docx -> doc_ja.docx). """
        if not lang and hasattr(self.translator, 'target_lang'):
            lang = self.translator.target_lang
        if '.' in filename:
            body, extn = filename.rsplit('.', 1)
        else:
            # No extension on this name: fall back to the source file's extension.
            body, extn = filename, self.ext
        return '%s_%s.%s' % (body, lang, extn)
    def set_language(self, document_object):
        """ Configure the language settings in the output document. """
        if self.translator.target_lang in LANGUAGE_PROPERTIES:
            if 'lang' in LANGUAGE_PROPERTIES[self.translator.target_lang]:
                document_object.core_properties.language = LANGUAGE_PROPERTIES[self.translator.target_lang]['lang']
            # Font swap applies only to non-pptx documents (presumably because
            # Presentation objects lack a "Normal" style — TODO confirm).
            if not isinstance(self, TranslatePptx) and 'font' in LANGUAGE_PROPERTIES[self.translator.target_lang]:
                document_object.styles["Normal"].font.name = LANGUAGE_PROPERTIES[self.translator.target_lang]['font']
                # TODO: [future] self.document.styles["Normal"] language = LANGUAGE_PROPERTIES[self.translator.target_lang]['lang']
                # (if/when supported by python-docx)
    def condense_runs(self, paragraph, brk_run=None):
        """ Merge adjacent same-styled runs so longer passages are translated whole.

        brk_run (pptx only): per-run flags; runs following a line break are
        never merged into the previous run.
        """
        if len(paragraph.runs) > 1:
            self._previous_run = paragraph.runs[0]
            for i, run in enumerate(paragraph.runs[1:]):
                if all((
                    self._previous_run.text,
                    run.text,
                    (not isinstance(self, TranslatePptx) or not brk_run[i + 1]),
                    self.same_style_runs(self._previous_run, run, paragraph, DOCX_STYLE_PROPERTY)
                )):
                    # Same style and no break between them: fold this run's
                    # text into the previous run and blank it out.
                    self._previous_run.text += run.text
                    run.text = ''
                else:
                    self._previous_run = run
    def translate_paragraphs(self, document_object):
        """ Perform the translation of each text element """
        for paragraph in document_object:
            # docx paragraphs are skipped when their style is on the ignore
            # list; pptx paragraphs are always translated.
            translate_style = any((
                isinstance(self, TranslatePptx),
                isinstance(self, TranslateDocx)
                and paragraph.style.name not in TRANSLATION_RULES['ignore_styles'],
            ))
            # break_runs is provided by TranslatePptx (defined outside this class).
            brk_run = self.break_runs(paragraph) if isinstance(self, TranslatePptx) else None
            if translate_style:
                if self.condense:
                    self.condense_runs(paragraph, brk_run=brk_run)
                for run in paragraph.runs:
                    if run.text: # and '\n' in run.text:
                        run.text = self.translator.multi_line(run.text)
    def same_style_runs(self, reference, comparison, para_style, attrs):
        """
        A method for comparing styles.
        Does not pick up any of the following:
        a. <w:proofErr w:type="spellStart"/> (desirable as a separate submission to avoid mis-translation)
        d. any differences other than the style attributes in DOCX_STYLE_PROPERTY
        (list based on @property in docx Run class)
        """
        for attr in attrs:
            if reference and comparison and hasattr(reference, attr) and hasattr(comparison, attr):
                sub_ref = getattr(reference, attr)
                sub_comp = getattr(comparison, attr)
                # if None, then need to look at the parent style
                if hasattr(para_style, attr):
                    par_comp = getattr(para_style, attr)
                    if sub_ref is None:
                        sub_ref = par_comp
                    if sub_comp is None:
                        sub_comp = par_comp
                else:
                    par_comp = None
                if attrs[attr]:
                    # Non-empty map means a nested attribute group (e.g. font.color): recurse.
                    if not self.same_style_runs(sub_ref, sub_comp, par_comp, attrs[attr]):
                        return False
                elif sub_ref != sub_comp:
                    return False
                else:
                    pass
            elif reference and comparison and hasattr(reference, attr) != hasattr(comparison, attr):
                # One side has the attribute and the other does not: styles differ.
                return False
            else:
                pass
        return True
class PreserveWhitespace(object):
    """ Capture the leading/trailing whitespace around a string's body so a
    translated replacement can be re-wrapped in the same whitespace. """
    # TODO: incorporate in other translators

    def __init__(self, string):
        stripped = re.escape(string.strip())
        found = re.search(f'(?P<body>{stripped})', string)
        self.body = found["body"] if found else ''
        first, last = found.span()
        self.leading_ws = string[:first]
        self.trailing_ws = string[last:]
        # Reassembles the original string: leading + body + trailing.
        self.full_text = self.body.join([self.leading_ws, self.trailing_ws])

    def replace(self, body):
        """ Wrap a replacement body in the whitespace captured from the original. """
        return body.join([self.leading_ws, self.trailing_ws])
class TranslateExcel(TranslateBase):
    """
    Translate text in an Excel (.xlsx) spreadsheet file
    Previously released version of openpyxl does not support preservation of embedded images.
    """
    # TODO: [future] check not removing embedded images, once this is available in openpyxl
    # TODO: [future] preserve rich text formatting within a cell if/when this is available in openpyxl
    # Note: MergeCell formatting is not working in openpyxl 2.5.5 but does work in development code

    def __init__(self, filepath, filename, translator, target=None, condense=False, cross_check=False):
        super().__init__(filepath, filename, translator,
                         target=target, condense=condense, cross_check=cross_check)
        self.wb = load_workbook(os.path.join(self.filepath, self.source))
        self.execute(self.translate)

    def translate(self):
        """ Translate every string-typed cell on every worksheet, then save. """
        for sheetname in self.wb.sheetnames:
            worksheet = self.wb[sheetname]
            for row in worksheet:
                for cell in row:
                    if not isinstance(cell, Cell):
                        continue
                    # data_type 's' marks a plain string cell.
                    if cell.value and cell.data_type == 's':
                        cell.value = self.translator.multi_line(cell.value)
        self.wb.save(os.path.join(self.filepath, self.target))
class TranslateDocx(TranslateBase):
    """ Translate text in a Word (.docx) document file """

    def __init__(self, filepath, filename, translator, target=None, condense=False, cross_check=False):
        super().__init__(filepath, filename, translator,
                         target=target, condense=condense, cross_check=cross_check)
        self.document = Document(os.path.join(self.filepath, self.source))
        self.execute(self.translate)

    def translate(self):
        """ Break the document up into text elements for translation """
        self.set_language(self.document)
        # Body paragraphs first, then the paragraphs of every table cell.
        self.translate_paragraphs(self.document.paragraphs)
        cell_paragraphs = (cell.paragraphs
                           for table in self.document.tables
                           for row in table.rows
                           for cell in row.cells)
        for paragraphs in cell_paragraphs:
            self.translate_paragraphs(paragraphs)
        # TODO: [future] translate headers/footers/text_boxes (if/when supported by python-docx)
        # for section in document.sections:
        #     # print(document.sections[section].footer)
        #     print(section._sectPr)
        self.document.save(os.path.join(self.filepath, self.target))
class TranslatePptx(TranslateBase):
""" Translate text in a PowerPoint (.pptx) presentation file """
    def __init__(self, filepath, filename, translator, target=None, condense=False, cross_check=False):
        # Load the presentation, then run the shared translate/cross-check
        # pipeline defined on TranslateBase.execute.
        super(TranslatePptx, self).__init__(filepath, filename, translator,
                                            target=target, condense=condense, cross_check=cross_check)
        self.prs = Presentation(os.path.join(self.filepath, self.source))
        self.execute(self.translate)
def translate(self):
""" Perform the translation of each text element """
self.set_language(self.prs)
for slide in self.prs.slides:
for shape in slide.shapes:
if shape.has_text_frame:
self.translate_paragraphs(shape.text_frame.paragraphs)
elif shape.has_table:
for row in shape.table.rows:
for cell in row.cells:
if hasattr(cell, 'text_frame'):
self.translate_paragraphs(cell.text_frame.paragraphs)
# TODO: [future] add SmartArt | |
20000000000000000000000),
("0x4c4e6f13fb5e3f70c3760262a03e317982691d10", 100000000000000000000),
("0x664e43119870af107a448db1278b044838ffcdaf", 400000000000000000000),
("0x8da1178f55d97772bb1d24111a404a4f8715b95d", 878149000000000000000),
("0x5e6e9747e162f8b45c656e0f6cae7a84bac80e4e", 2000000000000000000000),
("0xc7eac31abce6d5f1dea42202b6a674153db47a29", 591000000000000000000),
("0xd96711540e2e998343d4f590b6fc8fac3bb8b31d", 1758944000000000000000),
("0x9da4ec407077f4b9707b2d9d2ede5ea5282bf1df", 4000000000000000000000),
("0xf60c1b45f164b9580e20275a5c39e1d71e35f891", 2000000000000000000000),
("0xeb6394a7bfa4d28911d5a5b23e93f35e340c2294", 78000000000000000000),
("0xa89ac93b23370472daac337e9afdf642543f3e57", 10000000000000000000000),
("0xbb618e25221ad9a740b299ed1406bc3934b0b16d", 1000000000000000000000),
("0x817ac33bd8f847567372951f4a10d7a91ce3f430", 200015000000000000000),
("0xfe6a895b795cb4bf85903d3ce09c5aa43953d3bf", 3400000000000000000000),
("0x3673954399f6dfbe671818259bb278e2e92ee315", 200000000000000000000000),
("0xdf0ff1f3d27a8ec9fb8f6b0cb254a63bba8224a5", 4367636000000000000000),
("0xff12e49d8e06aa20f886293c0b98ed7eff788805", 4000000000000000000000),
("0x5aef16a226dd68071f2483e1da42598319f69b2c", 2000000000000000000000),
("0x0266ab1c6b0216230b9395443d5fa75e684568c6", 1000000000000000000000),
("0x14a7352066364404db50f0d0d78d754a22198ef4", 1880000000000000000000),
("0x444caf79b71338ee9aa7c733b02acaa7dc025948", 40000000000000000000),
("0x64e2de21200b1899c3a0c0653b5040136d0dc842", 20000000000000000000000),
("0x36e156610cd8ff64e780d89d0054385ca76755aa", 14000000000000000000000),
("0x0a6ebe723b6ed1f9a86a69ddda68dc47465c2b1b", 1185000000000000000000),
("0x38bf2a1f7a69de0e2546adb808b36335645da9ff", 2000320000000000000000),
("0x39f44663d92561091b82a70dcf593d754005973a", 199999000000000000000),
("0x24b9e6644f6ba4cde126270d81f6ab60f286dff4", 133700000000000000000),
("0x9b59eb213b1e7565e45047e04ea0374f10762d16", 2000000000000000000000),
("0x309544b6232c3dd737f945a03193d19b5f3f65b9", 1087440000000000000000),
("0xb28bb39f3466517cd46f979cf59653ee7d8f152e", 450000000000000000000),
("0x9da8e22ca10e67fea44e525e4751eeac36a31194", 260000000000000000000),
("0x4f8ae80238e60008557075ab6afe0a7f2e74d729", 100000000000000000000),
("0x74ed33acf43f35b98c9230b9e6642ecb5330839e", 681872000000000000000),
("0x22842ab830da509913f81dd1f04f10af9edd1c55", 2000000000000000000000),
("0xa8f37f0ab3a1d448a9e3ce40965f97a646083a34", 329800000000000000000),
("0x582b70669c97aab7d68148d8d4e90411e2810d56", 999972000000000000000),
("0xd5e55100fbd1956bbed2ca518d4b1fa376032b0b", 100000000000000000000),
("0xb7cc6b1acc32d8b295df68ed9d5e60b8f64cb67b", 300000000000000000000),
("0xe081ca1f4882db6043d5a9190703fde0ab3bf56d", 400000000000000000000),
("0xc02077449a134a7ad1ef7e4d927affeceeadb5ae", 18200000000000000000),
("0xe09fea755aee1a44c0a89f03b5deb762ba33006f", 1100070000000000000000),
("0xb3717731dad65132da792d876030e46ac227bb8a", 1000000000000000000000),
("0x157eb3d3113bd3b597714d3a954edd018982a5cb", 2000000000000000000000),
("0xdc57345b38e0f067c9a31d9deac5275a10949321", 200000000000000000000),
("0x40ea5044b204b23076b1a5803bf1d30c0f88871a", 14000000000000000000000),
("0x2bab0fbe28d58420b52036770a12f9952aea6911", 3820000000000000000000),
("0xadaa0e548c035affed64ca678a963fabe9a26bfd", 70000000000000000000),
("0xbb48eaf516ce2dec3e41feb4c679e4957641164f", 3820000000000000000000),
("<KEY>", 22000000000000000000000),
("0x03cb98d7acd817de9d886d22fab3f1b57d92a608", 1600000000000000000000),
("0xf88900db737955b1519b1a7d170a18864ce590eb", 18200000000000000000),
("0x757fa55446c460968bb74b5ebca96c4ef2c709c5", 1015200000000000000000),
("0xda855d53477f505ec4c8d5e8bb9180d38681119c", 5600000000000000000000),
("0xe41aea250b877d423a63ba2bce2f3a61c0248d56", 260000000000000000000),
("0x8262169b615870134eb4ac6c5f471c6bf2f789fc", 462500000000000000000),
("0x66b0c100c49149935d14c0dc202cce907cea1a3d", 1970000000000000000000),
("0x854c0c469c246b83b5d1b3eca443b39af5ee128a", 1600000000000000000000),
("0xeb6810691d1ae0d19e47bd22cebee0b3ba27f88a", 2499922000000000000000),
("0x24dcc24bd9c7210ceacfb30da98ae04a4d7b8ab9", 1000000000000000000000),
("0xe31b4eef184c24ab098e36c802714bd4743dd0d4", 200000000000000000000),
("0x99b8c824869de9ed24f3bff6854cb6dd45cc3f9f", 1880000000000000000000),
("0x2ae73a79aea0278533accf21070922b1613f8f32", 3097417000000000000000),
("0xddbd2b932c763ba5b1b7ae3b362eac3e8d40121a", 10000000000000000000000),
("0x1b4bbcb18165211b265b280716cb3f1f212176e8", 472325000000000000000),
("0xe177e0c201d335ba3956929c571588b51c5223ae", 2000000000000000000000),
("0x1945fe377fe6d4b71e3e791f6f17db243c9b8b0f", 2185500000000000000000),
("0x3e9b34a57f3375ae59c0a75e19c4b641228d9700", 17900000000000000000),
("0xa4d6c82eddae5947fbe9cdfbd548ae33d91a7191", 8000000000000000000000),
("0xbad4425e171c3e72975eb46ac0a015db315a5d8f", 2000000000000000000000),
("0xa2d2aa626b09d6d4e4b13f7ffc5a88bd7ad36742", 4639390000000000000000),
("0xb61c34fcacda701a5aa8702459deb0e4ae838df8", 35000000000000000000000),
("0x145e0600e2a927b2dd8d379356b45a2e7d51d3ae", 2545843000000000000000),
("0x8df339214b6ad1b24663ce716034749d6ef838d9", 11000000000000000000000),
("0x8fd9a5c33a7d9edce0997bdf77ab306424a11ea9", 2000000000000000000000),
("0x097da12cfc1f7c1a2464def08c29bed5e2f851e9", 20000000000000000000),
("0xddabf13c3c8ea4e3d73d78ec717afafa430e5479", 41600000000000000000000),
("0x9eeb07bd2b7890195e7d46bdf2071b6617514ddb", 2000000000000000000000),
("0x819af9a1c27332b1c369bbda1b3de1c6e933d640", 314308000000000000000),
("0xd7d2c6fca8ad1f75395210b57de5dfd673933909", 340000000000000000000),
("0xcdd5d881a7362c9070073bdfbc75e72453ac510e", 842000000000000000000),
("0xe9ac36376efa06109d40726307dd1a57e213eaa9", 194000000000000000000),
("0x1bea4df5122fafdeb3607eddda1ea4ffdb9abf2a", 346000000000000000000),
("0x3e5e93fb4c9c9d1246f8f247358e22c3c5d17b6a", 150000000000000000000),
("0x6c1ddd33c81966dc8621776071a4129482f2c65f", 40000000000000000000000),
("0x2ccb66494d0af689abf9483d365d782444e7dead", 1000000000000000000000),
("0x19571a2b8f81c6bcf66ab3a10083295617150003", 492500000000000000000),
("0x38ac664ee8e0795e4275cb852bcba6a479ad9c8d", 20000000000000000000),
("0xc4803bb407c762f90b7596e6fde194931e769590", 4000000000000000000000),
("0x93507e9e8119cbceda8ab087e7ecb071383d6981", 14000000000000000000000),
("0xb672734afcc224e2e609fc51d4f059732744c948", 295500000000000000000),
("0xfbbbebcfbe235e57dd2306ad1a9ec581c7f9f48f", 40000000000000000000),
("0x8c81410ea8354cc5c65c41be8bd5de733c0b111d", 9550000000000000000000),
("0x942c6b8c955bc0d88812678a236725b32739d947", 1550000000000000000000),
("0xd2e817738abf1fb486583f80c350318bed860c80", 240010000000000000000),
("0xbff5df769934b8943ca9137d0efef2fe6ebbb34e", 100000000000000000000),
("0x6c4e426e8dc005dfa3516cb8a680b02eea95ae8e", 1337000000000000000000),
("0xf645dd7c890093e8e4c8aa92a6bb353522d3dc98", 134000000000000000000),
("0x4bac846af4169f1d95431b341d8800b22180af1a", 20000000000000000000),
("0x0514954c3c2fb657f9a06f510ea22748f027cdd3", 400000000000000000000),
("0x163dca73d7d6ea3f3e6062322a8734180c0b78ef", 2941400000000000000000),
("0xfeaca2ac74624bf348dac9985143cfd652a4be55", 26148245000000000000000),
("0xfe80e9232deaff19baf99869883a4bdf0004e53c", 855680000000000000000),
("0x17108dab2c50f99de110e1b3b3b4cd82f5df28e7", 980000000000000000000),
("0x837a645dc95c49549f899c4e8bcf875324b2f57c", 600400000000000000000),
("0x762998e1d75227fced7a70be109a4c0b4ed86414", 20000000000000000000),
("0xc0a7e8435dff14c25577739db55c24d5bf57a3d9", 49250000000000000000000),
("0xaead88d689416b1c91f2364421375b7d3c70fb2e", 2000000000000000000000),
("0x9279b2228cec8f7b4dda3f320e9a0466c2f585ca", 5000000000000000000000),
("0x36726f3b885a24f92996da81625ec8ad16d8cbe6", 1543723000000000000000),
("0x3951e48e3c869e6b72a143b6a45068cdb9d466d0", 20000000000000000000),
("0xf5d61ac4ca95475e5b7bffd5f2f690b316759615", 31040000000000000000000),
("0x158a0d619253bf4432b5cd02c7b862f7c2b75636", 135733000000000000000),
("0xe56d431324c92911a1749df292709c14b77a65cd", 8200000000000000000000),
("0x9976947eff5f6ae5da08dd541192f378b428ff94", 8000000000000000000000),
("0x83210583c16a4e1e1dac84ebd37e3d0f7c57eba4", 2000000000000000000000),
("0xdcb64df43758c7cf974fa660484fbb718f8c67c1", 20000000000000000000000),
("0xd4205592844055b3c7a1f80cefe3b8eb509bcde7", 178973000000000000000),
("0xd0648a581b3508e135a2935d12c9657045d871ca", 8022000000000000000000),
("0xe7d17524d00bad82497c0f27156a647ff51d2792", 20000000000000000000),
("0x21582e99e502cbf3d3c23bdffb76e901ac6d56b2", 100000000000000000000),
("0xe61f280915c774a31d223cf80c069266e5adf19b", 880000000000000000000),
("0x03c91d92943603e752203e05340e566013b90045", 802200000000000000000),
("0x22561c5931143536309c17e832587b625c390b9a", 4000000000000000000000),
("0xe399c81a1d701b44f0b66f3399e66b275aaaf8c1", 1000000000000000000000),
("0x7f8dbce180ed9c563635aad2d97b4cbc428906d9", 2674000000000000000000),
("0x9f61beb46f5e853d0a8521c7446e68e34c7d0973", 560000000000000000000),
("0x6d3f2ba856ccbb0237fa7661156b14b013f21240", 1000000000000000000000),
("0x5f742e487e3ab81af2f94afdbe1b9b8f5ccc81bc", 2172412000000000000000),
("0xb600feab4aa96c537504d96057223141692c193a", 400000000000000000000),
("0xfab487500df20fb83ebed916791d561772adbebf", 1999980000000000000000),
("0xf8704c16d2fd5ba3a2c01d0eb20484e6ecfa3109", 200000000000000000000),
("0x3f1bc420c53c002c9e90037c44fe6a8ef4ddc962", 173000000000000000000),
("0x82e577b515cb2b0860aafe1ce09a59e09fe7d040", 600000000000000000000),
("0xbc999e385c5aebcac8d6f3f0d60d5aa725336d0d", 2000000000000000000000),
("0xe16ce35961cd74bd590d04c4ad4a1989e05691c6", 146000000000000000000),
("0xeb76424c0fd597d3e341a9642ad1ee118b2b579d", 4000000000000000000000),
("0xc440c7ca2f964b6972ef664a2261dde892619d9c", 20000000000000000000000),
("0x460d5355b2ceeb6e62107d81e51270b26bf45620", 2005500000000000000000),
("0xfcada300283f6bcc134a91456760b0d77de410e0", 2000000000000000000000),
("0xbe8d7f18adfe5d6cc775394989e1930c979d007d", 1000000000000000000000),
("0xa7f9220c8047826bd5d5183f4e676a6d77bfed36", 153368000000000000000),
("0x98d204f9085f8c8e7de23e589b64c6eff692cc63", 2000000000000000000000),
("0x5a2916b8d2e8cc12e207ab464d433e2370d823d9", 2000000000000000000000),
("0xc42d6aeb710e3a50bfb44d6c31092969a11aa7f3", 150052000000000000000),
("0x04ce45f600db18a9d0851b29d9393ebdaafe3dc5", 20000000000000000000),
("0x7a1370a742ec2687e761a19ac5a794329ee67404", 2999988000000000000000),
("0xda2ad58e77deddede2187646c465945a8dc3f641", 660000000000000000000),
("0xec58bc0d0c20d8f49465664153c5c196fe59e6be", 400000000000000000000),
("0xf8063af4cc1dd9619ab5d8bff3fcd1faa8488221", 2000000000000000000000),
("0xb9231eb26e5f9e4b4d288f03906704fab96c87d6", 19700000000000000000000),
("0x6e5c2d9b1c546a86eefd5d0a5120c9e4e730190e", 199600000000000000000),
("0xe49936a92a8ccf710eaac342bc454b9b14ebecb1", 2000000000000000000000),
("0x21dbdb817a0d8404c6bdd61504374e9c43c9210e", 9999917000000000000000),
("0x5cebe30b2a95f4aefda665651dc0cf7ef5758199", 18200000000000000000),
("0x597038ff91a0900cbbab488af483c790e6ec00a0", 10000000000000000000000),
("0x0fa5d8c5b3f294efd495ab69d768f81872508548", 2000000000000000000000),
("0xfeef3b6eabc94affd3310c1c4d0e65375e131119", 20000000000000000000),
("0x1ce81d31a7923022e125bf48a3e03693b98dc9dd", 2000000000000000000000),
("0x5887dc6a33dfed5ac1edefe35ef91a216231ac96", 250000000000000000000),
("0x4e8e47ae3b1ef50c9d54a38e14208c1abd3603c2", 2235000000000000000000),
("0xe845e387c4cbdf982280f6aa01c40e4be958ddb2", 25000000000000000000000),
("0x71d9494e50c5dd59c599dba3810ba1755e6537f0", 4000000000000000000000),
("0x6eb5578a6bb7c32153195b0d8020a6914852c059", 660000000000000000000000),
("0x543f8c674e2462d8d5daa0e80195a8708e11a29e", 63940000000000000000),
("0xa0459ef3693aacd1647cd5d8929839204cef53be", 1000000000000000000000),
("0xdda371e600d30688d4710e088e02fdf2b9524d5f", 6920000000000000000000),
("0xdd4dd6d36033b0636fcc8d0938609f4dd64f4a86", 60000000000000000000),
("0x3bd624b548cb659736907ed8aa3c0c705e24b575", 2000000000000000000000),
("0x414599092e879ae25372a84d735af5c4e510cd6d", 400000000000000000000),
("0x3d66cd4bd64d5c8c1b5eea281e106d1c5aad2373", 1951100000000000000000),
("0x5948bc3650ed519bf891a572679fd992f8780c57", 197000000000000000000),
("0x8b74a7cb1bb8c58fce267466a30358adaf527f61", 13620000000000000000000),
("0x3f10800282d1b7ddc78fa92d8230074e1bf6aeae", 4925000000000000000000),
("0x32dbb6716c54e83165829a4abb36757849b6e47d", 1000000000000000000000),
("0xe6b3ac3f5d4da5a8857d0b3f30fc4b2b692b77d7", 1460000000000000000000),
("0x052a58e035f1fe9cdd169bcf20970345d12b9c51", 1490000000000000000000),
("0x581bdf1bb276dbdd86aedcdb397a01efc0e00c5b", 1000000000000000000000),
("0x604e9477ebf4727c745bcabbedcb6ccf29994022", 1000060000000000000000),
("0x59b96deb8784885d8d3b4a166143cc435d2555a1", 1337000000000000000000),
("0x37d980a12ee3bf23cc5cdb63b4ae45691f74c837", 2000000000000000000000),
("0x3bfbd3847c17a61cf3f17b52f8eba1b960b3f39f", 3000000000000000000000),
("0x49c941e0e5018726b7290fc473b471d41dae80d1", 500000000000000000000),
("0xf26bcedce3feadcea3bc3e96eb1040dfd8ffe1a0", 775000000000000000000),
("0xd0944aa185a1337061ae20dc9dd96c83b2ba4602", 200000000000000000000),
("0x904caa429c619d940f8e6741826a0db692b19728", 1000000000000000000000),
("0xb95c9b10aa981cf4a67a71cc52c504dee8cf58bd", 4000000000000000000000),
("0x15874686b6733d10d703c9f9bec6c52eb8628d67", 2000000000000000000000),
("0x1374facd7b3f8d68649d60d4550ee69ff0484133", 269700000000000000000),
("0xb0e469c886593815b3495638595daef0665fae62", 1940000000000000000000),
("0x47ff6feb43212060bb1503d7a397fc08f4e70352", 2000000000000000000000),
("0xc60b04654e003b4683041f1cbd6bc38fda7cdbd6", 2000000000000000000000),
("0x3ecdb532e397579662b2a46141e78f8235936a5f", 66850000000000000000),
("0xb3a8c2cb7d358e5739941d945ba9045a023a8bbb", 1000000000000000000000),
("0x32ef5cdc671df5562a901aee5db716b9be76dcf6", 2000000000000000000000),
("0xc94110e71afe578aa218e4fc286403b0330ace8d", 2000000000000000000000),
("0x9b43dcb95fde318075a567f1e6b57617055ef9e8", 3940000000000000000000),
("0xefeea010756f81da4ba25b721787f058170befbd", 32470000000000000000),
("0xc88255eddcf521c6f81d97f5a42181c9073d4ef1", 290793000000000000000),
("0xdd47189a3e64397167f0620e484565b762bfbbf4", 1850000000000000000000),
("0x82f39b2758ae42277b86d69f75e628d958ebcab0", 40000000000000000000000),
("0xe37f5fdc6ec97d2f866a1cfd0d3a4da4387b22b5", 10000000000000000000000),
("0x62331df2a3cbee3520e911dea9f73e905f892505", 2000000000000000000000),
("0x8c5d16ed65e3ed7e8b96ca972bc86173e3500b03", 2000000000000000000000),
("0x8b9841862e77fbbe919470935583a93cf027e450", 2000054000000000000000),
("0xc8dd27f16bf22450f5771b9fe4ed4ffcb30936f4", 197000000000000000000),
("0xdec8a1a898f1b895d8301fe64ab3ad5de941f689", 787803000000000000000),
("0x61c4ee7c864c4d6b5e37ea1331c203739e826b2f", 30063000000000000000),
("0x3250e3e858c26adeccadf36a5663c22aa84c4170", 5000000000000000000000),
("0x299e0bca55e069de8504e89aca6eca21d38a9a5d", 55500000000000000000),
("0xd50f7fa03e389876d3908b60a537a6706304fb56", 100000000000000000000),
("0x69073269729e6414b26ec8dc0fd935c73b579f1e", 30000000000000000000000),
("0x14fcd1391e7d732f41766cdacd84fa1deb9ffdd2", 2000000000000000000000),
("0x823768746737ce6da312d53e54534e106f967cf3", 20000000000000000000),
("0x882f75708386653c80171d0663bfe30b017ed0ad", 2000000000000000000000),
("0xa25b086437fd2192d0a0f64f6ed044f38ef3da32", 335000000000000000000),
("0x5a9c8b69fc614d69564999b00dcb42db67f97e90", 3429227000000000000000),
("0xa2b701f9f5cdd09e4ba62baebae3a88257105885", 1000000000000000000000),
("0x5e7b8c54dc57b0402062719dee7ef5e37ea35d62", 2877224000000000000000),
("0x7ffabfbc390cbe43ce89188f0868b27dcb0f0cad", 6370000000000000000000),
("0xb5cdbc4115406f52e5aa85d0fea170d2979cc7ba", 1337000000000000000000),
("0x263814309de4e635cf585e0d365477fc40e66cf7", 146000000000000000000),
("0x24cff0e9336a9f80f9b1cb968caf6b1d1c4932a4", 200200000000000000000),
("0xd3a941c961e8ca8b1070f23c6d6d0d2a758a4444", 200000000000000000000),
("0xa97beb3a48c45f1528284cb6a95f7de453358ec6", 31000000000000000000000),
("0x4dd131c74a068a37c90aded4f309c2409f6478d3", 400008000000000000000),
("0x653675b842d7d8b461f722b4117cb81dac8e639d", 31000000000000000000),
("0x561be9299b3e6b3e63b79b09169d1a948ae6db01", 500000000000000000000),
("0xdc067ed3e12d711ed475f5156ef7e71a80d934b9", 9550000000000000000000),
("0x08d97eadfcb7b064e1ccd9c8979fbee5e77a9719", 266063000000000000000),
("0x6e4c2ab7db026939dbd3bc68384af660a61816b2", 167000000000000000000),
("0xbf4c73a7ede7b164fe072114843654e4d8781dde", 2000000000000000000000),
("0xf504943aaf16796e0b341bbcdf21d11cc586cdd1", 9000000000000000000000),
("0xea81ca8638540cd9d4d73d060f2cebf2241ffc3e", 1970000000000000000000),
("0x9944fee9d34a4a880023c78932c00b59d5c82a82", 750022000000000000000),
("0x12f460ae646cd2780fd35c50a6af4b9accfa85c6", 1000000000000000000000),
("0x4e232d53b3e6be8f895361d31c34d4762b12c82e", 1760000000000000000000),
("0x6bb2aca23fa1626d18efd6777fb97db02d8e0ae4", 40000000000000000000000),
("0xbc4e471560c99c8a2a4b1b1ad0c36aa6502b7c4b", 12000000000000000000000),
("0x2e2cbd7ad82547b4f5ff8b3ab56f942a6445a3b0", 200000000000000000000),
("0x21ecb2dfa65779c7592d041cd2105a81f4fd4e46", 1000000000000000000000),
("0x34318625818ec13f11835ae97353ce377d6f590a", 1520000000000000000000),
("0xa7ef35ce87eda6c28df248785815053ec97a5045", 4999998000000000000000),
("0x6a514e6242f6b68c137e97fea1e78eb555a7e5f7", 20000000000000000000),
("0x9340b5f678e45ee05eb708bb7abb6ec8f08f1b6b", 6000000000000000000000),
("0x43cc08d0732aa58adef7619bed46558ad7774173", 4443926000000000000000),
("0x12e9a4ad2ad57484dd700565bddb46423bd9bd31", 19999800000000000000000),
("0xebbeeb259184a6e01cccfc2207bbd883785ac90a", 619966000000000000000),
("0x704ab1150d5e10f5e3499508f0bf70650f028d4b", 4000000000000000000000),
("0xfc361105dd90f9ede566499d69e9130395f12ac8", 395000000000000000000000),
("0xc1b9a5704d351cfe983f79abeec3dbbbae3bb629", 20000000000000000000),
("0x66f50406eb1b11a946cab45927cca37470e5a208", 2000000000000000000000),
("0x53942e7949d6788bb780a7e8a0792781b1614b84", 15899600000000000000000),
("0x32ba9a7d0423e03a525fe2ebeb661d2085778bd8", 20000000000000000000000),
("0x11c0358aa6479de21866fe21071924b65e70f8b9", 36400000000000000000000),
("0x76cb9c8b69f4387675c48253e234cb7e0d74a426", 7396300000000000000000),
("0x9f5f44026b576a4adb41e95961561d41039ca391", 250000000000000000000),
("0x533a73a4a2228eee05c4ffd718bbf3f9c1b129a7", 6000000000000000000000),
("0xdcc52d8f8d9fc742a8b82767f0555387c563efff", 500000000000000000000),
("0xf456a75bb99655a7412ce97da081816dfdb2b1f2", 200000000000000000000),
("0xd0c101fd1f01c63f6b1d19bc920d9f932314b136", 20000000000000000000000),
("0xdabc225042a6592cfa13ebe54efa41040878a5a2", 259550000000000000000),
("0x38eec6e217f4d41aa920e424b9525197041cd4c6", 4428166000000000000000),
("0x8a247d186510809f71cffc4559471c3910858121", 1790000000000000000000),
("0x4f152b2fb8659d43776ebb1e81673aa84169be96", 2000000000000000000000),
("0xb4496ddb27799a222457d73979116728e8a1845b", 2610331000000000000000),
("0x4a4053b31d0ee5dbafb1d06bd7ac7ff3222c47d6", 1400000000000000000000),
("0x0f7bea4ef3f73ae0233df1e100718cbe29310bb0", 2000000000000000000000),
("0xc836e24a6fcf29943b3608e662290a215f6529ea", 292000000000000000000),
("0x1765361c2ec2f83616ce8363aae21025f2566f40", 5000000000000000000000),
("0xb6e6c3222b6b6f9be2875d2a89f127fb64100fe2", 8008000000000000000000),
("0x01bbc14f67af0639aab1441e6a08d4ce7162090f", 1309500000000000000000),
("0xaf2058c7282cf67c8c3cf930133c89617ce75d29", 6920000000000000000000),
("0x464d9c89cce484df000277198ed8075fa63572d1", 20000000000000000000),
("0x50cd97e9378b5cf18f173963236c9951ef7438a5", 1400000000000000000000),
("0xcb47bd30cfa8ec5468aaa6a94642ced9c819c8d4", 4000000000000000000000),
("0x6b10f8f8b3e3b60de90aa12d155f9ff5ffb22c50", 2000000000000000000000),
("0x09b7a988d13ff89186736f03fdf46175b53d16e0", 6000000000000000000000),
("0x5bfafe97b1dd1d712be86d41df79895345875a87", 500000000000000000000),
("0xa06cd1f396396c0a64464651d7c205efaf387ca3", 1999944000000000000000),
("0xfc0096b21e95acb8d619d176a4a1d8d529badbef", 384601000000000000000),
("0xa74444f90fbb54e56f3ac9b6cfccaa4819e4614a", 20000000000000000000),
("0x3c15b3511df6f0342e7348cc89af39a168b7730f", 1000000000000000000000),
("0x3d6ff82c9377059fb30d9215723f60c775c891fe", 250066000000000000000),
("0xa524a8cccc49518d170a328270a2f88133fbaf5d", 294500000000000000000),
("0x8a7a06be199a3a58019d846ac9cbd4d95dd757de", 3000200000000000000000),
("0xd744ac7e5310be696a63b003c40bd039370561c6", 1670000000000000000000),
("0xfe362688845fa244cc807e4b1130eb3741a8051e", 1000000000000000000000),
("0xb2d0360515f17daba90fcbac8205d569b915d6ac", 6000000000000000000000),
("0xc53594c7cfb2a08f284cc9d7a63bbdfc0b319732", 49200000000000000000000),
("0xb3c228731d186d2ded5b5fbe004c666c8e469b86", 29000000000000000000),
("0x63e414603e80d4e5a0f5c18774204642258208e4", 5000000000000000000000),
("0x826ce5790532e0548c6102a30d3eac836bd6388f", 18000000000000000000000),
("0xc5e812f76f15f2e1f2f9bc4823483c8804636f67", 73000000000000000000),
("0x116fef5e601642c918cb89160fc2293ba71da936", 802200000000000000000),
("0x08b84536b74c8c01543da88b84d78bb95747d822", 200000000000000000000),
("0x04a80afad53ef1f84165cfd852b0fdf1b1c24ba8", 58000000000000000000),
("0x2b0362633614bfcb583569438ecc4ea57b1d337e", 20000000000000000000000),
("0xe95179527deca5916ca9a38f215c1e9ce737b4c9", 10000000000000000000000),
("0x2c5df866666a194b26cebb407e4a1fd73e208d5e", 1000000000000000000000),
("0x529e824fa072582b4032683ac7eecc1c04b4cac1", 2000000000000000000000),
("0x78634371e17304cbf339b1452a4ce438dc764cce", 10000000000000000000000),
("0xe172dfc8f80cd1f8cd8539dc26082014f5a8e3e8", 3000000000000000000000),
("0xb07618328a901307a1b7a0d058fcd5786e9e72fe", 30239500000000000000000),
("0xb0571153db1c4ed7acaefe13ecdfdb72e7e4f06a", 80520000000000000000000),
("0xad910a23d6850613654af786337ad2a70868ac6d", 1999800000000000000000),
("0x4da5edc688b0cb62e1403d1700d9dcb99ffe3fd3", 2000000000000000000000),
("0xbe2471a67f6047918772d0e36839255ed9d691ae", 4000000000000000000000),
("0x28868324337e11ba106cb481da962f3a8453808d", 2000000000000000000000),
("0xd8f94579496725b5cb53d7985c989749aff849c0", 17000000000000000000000),
("0x4981c5ff66cc4e9680251fc4cd2ff907cb327865", 750000000000000000000),
("0xfd2872d19e57853cfa16effe93d0b1d47b4f93fb", 4000000000000000000000),
("0x63c8dfde0b8e01dadc2e748c824cc0369df090b3", 3880000000000000000000),
("0xc4dd048bfb840e2bc85cb53fcb75abc443c7e90f", 3716000000000000000000),
("0xf579714a45eb8f52c3d57bbdefd2c15b2e2f11df", 1560000000000000000000),
("0xcc7b0481cc32e6faef2386a07022bcb6d2c3b4fc", 3160000000000000000000),
("0xa0aa5f0201f04d3bbeb898132f7c11679466d901", 36600000000000000000),
("0xf3df63a97199933330383b3ed7570b96c4812334", 2000000000000000000000),
("0x42732d8ef49ffda04b19780fd3c18469fb374106", 425068000000000000000),
("0x6f92d6e4548c78996509ee684b2ee29ba3c532b4", 1000000000000000000000),
("0xfff4bad596633479a2a29f9a8b3f78eefd07e6ee", 100000000000000000000),
("0xac4460a76e6db2b9fcd152d9c7718d9ac6ed8c6f", 200000000000000000000),
("0x553b6b1c57050e88cf0c31067b8d4cd1ff80cb09", 400000000000000000000),
("0x84b6b6adbe2f5b3e2d682c66af1bc4905340c3ed", 619333000000000000000),
("0x9f4a7195ac7c151ca258cafda0cab083e049c602", 1537100000000000000000),
("0x2955c357fd8f75d5159a3dfa69c5b87a359dea8c", 2000000000000000000000),
("0x11d7844a471ef89a8d877555583ceebd1439ea26", 10098000000000000000000),
("0x34b454416e9fb4274e6addf853428a0198d62ee1", 407000000000000000000),
("0x308dd21cebe755126704b48c0f0dc234c60ba9b1", 200000000000000000000),
("0x381db4c8465df446a4ce15bf81d47e2f17c980bf", 32000000000000000000000),
("0x1abc4e253b080aeb437984ab05bca0979aa43e1c", 1000000000000000000000),
("0x53e35b12231f19c3fd774c88fec8cbeedf1408b2", 512000000000000000000),
("0x69e2e2e704307ccc5b5ca3f164fece2ea7b2e512", 7000000000000000000000),
("0x1914f1eb95d1277e93b6e61b668b7d77f13a11a1", 970000000000000000000),
("0x50e13023bd9ca96ad4c53fdfd410cb6b1f420bdf", 200000000000000000000),
("0x46224f32f4ece5c8867090d4409d55e50b18432d", 6000000000000000000000),
("0xff83855051ee8ffb70b4817dba3211ed2355869d", 400000000000000000000),
("0xfb39189af876e762c71d6c3e741893df226cedd6", 4000000000000000000000),
("0x9875623495a46cdbf259530ff838a1799ec38991", 2000000000000000000000),
("0xe1b39b88d9900dbc4a6cdc481e1060080a8aec3c", 2000000000000000000000),
("0x5baf6d749620803e8348af3710e5c4fbf20fc894", 5003680000000000000000),
("0x9c54e4ed479a856829c6bb42da9f0b692a75f728", 7520000000000000000000),
("0x486a6c8583a84484e3df43a123837f8c7e2317d0", 323378000000000000000),
("0xd235d15cb5eceebb61299e0e827fa82748911d89", 4000000000000000000000),
("0x47d792a756779aedf1343e8883a6619c6c281184", 2000000000000000000000),
("0x70c213488a020c3cfb39014ef5ba6404724bcaa3", 1940000000000000000000),
("0x133c490fa5bf7f372888e607d958fab7f955bae1", 1580000000000000000000),
("0xa9e194661aac704ee9dea043974e9692ded84a5d", 482400000000000000000),
("0xbc6b58364bf7f1951c309e0cba0595201cd73f9a", 1812400000000000000000),
("0x2309d34091445b3232590bd70f4f10025b2c9509", 10000000000000000000000),
("0xd89bc271b27ba3ab6962c94a559006ae38d5f56a", 2000000000000000000000),
("0xff0e2fec304207467e1e3307f64cbf30af8fd9cd", 2000000000000000000000),
("0xc0b0b7a8a6e1acdd05e47f94c09688aa16c7ad8d", 64234000000000000000),
("0xb66f92124b5e63035859e390628869dbdea9485e", 9850000000000000000000),
("0xa9e6e25e656b762558619f147a21985b8874edfe", 2000000000000000000000),
("0xa43e1947a9242b355561c30a829dfeeca2815af8", 3878255000000000000000),
("0x8b20ad3b94656dbdc0dd21a393d8a7d9e02138cb", 3000000000000000000000),
("0xaca2a838330b17302da731d30db48a04f0f207c1", 1337000000000000000000),
("0xfa60868aafd4ff4c5c57914b8ed58b425773dfa9", 8557400000000000000000),
("0x1848003c25bfd4aa90e7fcb5d7b16bcd0cffc0d8", 1000000000000000000000),
("0xb4b185d943ee2b58631e33dff5af6854c17993ac", 1000000000000000000000),
("0x7719888795ad745924c75760ddb1827dffd8cda8", 1999980000000000000000),
("0xccd521132d986cb96869842622a7dda26c3ed057", 2000000000000000000000),
("0x253e32b74ea4490ab92606fda0aa257bf23dcb8b", 10000000000000000000000),
("0x3712367e5e55a96d5a19168f6eb2bc7e9971f869", 1000000000000000000000),
("0x8f29a14a845ad458f2d108b568d813166bcdf477", 10000000000000000000000),
("0x51a8c2163602a32ee24cf4aa97fd9ea414516941", 62904000000000000000),
("0x61cea71fa464d62a07063f920b0cc917539733d8", 1670000000000000000000),
("0x6f81f3abb1f933b1df396b8e9cc723a89b7c9806", 280000000000000000000),
("0x61b1b8c012cd4c78f698e470f90256e6a30f48dd", 200000000000000000000),
("0x4f3f2c673069ac97c2023607152981f5cd6063a0", 600000000000000000000),
("0xe2efa5fca79538ce6068bf31d2c516d4d53c08e5", 131200000000000000000),
("0x2383c222e67e969190d3219ef14da37850e26c55", 2000000000000000000000),
("0xeac3af5784927fe9a598fc4eec38b8102f37bc58", 1000000000000000000000),
("0x4fe56ab3bae1b0a44433458333c4b05a248f8241", 2180000000000000000000),
("0xfe9cfc3bb293ddb285e625f3582f74a6b0a5a6cd", 1970000000000000000000),
("0xf48e1f13f6af4d84b371d7de4b273d03a263278e", 600000000000000000000),
("0x1ba9228d388727f389150ea03b73c82de8eb2e09", 7258000000000000000000),
("0x37a7a6ff4ea3d60ec307ca516a48d3053bb79cbb", 2000000000000000000000),
("0xe33840d8bca7da98a6f3d096d83de78b70b71ef8", 2000000000000000000000),
("0x8e7fd23848f4db07906a7d10c04b21803bb08227", 1000000000000000000000),
("0x07d4334ec385e8aa54eedaeadb30022f0cdfa4ab", 2629946000000000000000),
("0xd4b085fb086f3d0d68bf12926b1cc3142cae8770", 3700000000000000000000),
("0x5a87f034e6f68f4e74ffe60c64819436036cf7d7", 20000000000000000000),
("0xc00ab080b643e1c2bae363e0d195de2efffc1c44", 500000000000000000000),
("0x22f3c779dd79023ea92a78b65c1a1780f62d5c4a", 1970000000000000000000),
("0xc7d5c7054081e918ec687b5ab36e973d18132935", 182000000000000000000),
("0x9662ee021926682b31c5f200ce457abea76c6ce9", 670500000000000000000),
("0x116a09df66cb150e97578e297fb06e13040c893c", 2000000000000000000000),
("0xb7240af2af90b33c08ae9764103e35dce3638428", 8464547000000000000000),
("0xe8b28acda971725769db8f563d28666d41ddab6c", 10000000000000000000000),
("0x17d4918dfac15d77c47f9ed400a850190d64f151", 2000000000000000000000),
("0xc42250b0fe42e6b7dcd5c890a6f0c88f5f5fb574", 149800000000000000000),
("0x5da2a9a4c2c0a4a924cbe0a53ab9d0c627a1cfa0", 733202000000000000000),
("0x5869fb867d71f1387f863b698d09fdfb87c49b5c", 3666000000000000000000),
("0xd49a75bb933fca1fca9aa1303a64b6cb44ea30e1", 10000000000000000000000),
("0x76331e30796ce664b2700e0d4153700edc869777", 2000000000000000000000),
("0x8a5fb75793d043f1bcd43885e037bd30a528c927", 356500000000000000000),
("0xfc0ee6f7c2b3714ae9916c45566605b656f32441", 1760000000000000000000),
("0xbf50ce2e264b9fe2b06830617aedf502b2351b45", 1000000000000000000000),
("0x0f6000de1578619320aba5e392706b131fb1de6f", 499986000000000000000),
("0xc953f934c0eb2d0f144bdab00483fd8194865ce7", 2000000000000000000000),
("0x24fd9a6c874c2fab3ff36e9afbf8ce0d32c7de92", 1337000000000000000000),
("0xc6cd68ec35362c5ad84c82ad4edc232125912d99", 27750000000000000000000),
("0x2a67660a1368efcd626ef36b2b1b601980941c05", 133700000000000000000),
("0x9deb39027af877992b89f2ec4a1f822ecdf12693", 2000000000000000000000),
("0xc12f881fa112b8199ecbc73ec4185790e614a20f", 2000000000000000000000),
("0xd58a52e078a805596b0d56ea4ae1335af01c66eb", 267400000000000000000),
("0x4d7cfaa84cb33106800a8c802fb8aa463896c599", 1790000000000000000000),
("0x0ee391f03c765b11d69026fd1ab35395dc3802a0", 200000000000000000000),
("0xa192f06ab052d5fd7f94eea8318e827815fe677a", 131400000000000000000),
("0x8f0ab894bd3f4e697dbcfb859d497a9ba195994a", 39501652000000000000000),
("0x387eeafd6b4009deaf8bd5b85a72983a8dcc3487", 4000000000000000000000),
("0x03b0f17cd4469ddccfb7da697e82a91a5f9e7774", 20000000000000000000),
("0x11172b278ddd44eea2fdf4cb1d16962391c453d9", 935900000000000000000000),
("0x33d172ab075c51db1cd40a8ca8dbff0d93b843bb", 5727139000000000000000),
("0x909b5e763a39dcc795223d73a1dbb7d94ca75ac8", 2000000000000000000000),
("0x0ca12ab0b9666cf0cec6671a15292f2653476ab2", 210000600000000000000000),
("0x6b5ae7bf78ec75e90cb503c778ccd3b24b4f1aaf", 800000000000000000000),
("0xd9e3857efd1e202a441770a777a49dcc45e2e0d3", 223500000000000000000),
("0xd703c6a4f11d60194579d58c2766a7ef16c30a29", 2000000000000000000000),
("0x838bd565f99fde48053f7917fe333cf84ad548ab", 200000000000000000000),
("0x8168edce7f2961cf295b9fcd5a45c06cdeda6ef5", 200000000000000000000),
("0xde50868eb7e3c71937ec73fa89dd8b9ee10d45aa", 1000000000000000000000),
("0x087498c0464668f31150f4d3c4bcdda5221ba102", 20000000000000000000),
("0x613fab44b16bbe554d44afd178ab1d02f37aeaa5", 2000000000000000000000),
("0xe2ee691f237ee6529b6557f2fcdd3dcf0c59ec63", 5450048000000000000000),
("0xa9ed377b7d6ec25971c1a597a3b0f3bead57c98f", 400000000000000000000),
("0x175feeea2aa4e0efda12e1588d2f483290ede81a", 200000000000000000000),
("0xb51ddcb4dd4e8ae6be336dd9654971d9fec86b41", 421133000000000000000),
("0x92c0f573eccf62c54810ee6ba8d1f113542b301b", 3384000000000000000000),
("0xa109e18bb0a39c9ef82fa19597fc5ed8e9eb6d58", 1640000000000000000000),
("0xf74e6e145382b4db821fe0f2d98388f45609c69f", 100000000000000000000),
("0x378f37243f3ff0bef5e1dc85eb4308d9340c29f9", 2000200000000000000000),
("0x84e9949680bece6841b9a7e5250d08acd87d16cd", 200000000000000000000),
("0x882bd3a2e9d74110b24961c53777f22f1f46dc5d", 13370000000000000000000),
("0xacce01e0a70610dc70bb91e9926fa9957f372fba", 537000000000000000000),
("0xc5f687717246da8a200d20e5e9bcac60b67f3861", 28650000000000000000),
("0xe14617f6022501e97e7b3e2d8836aa61f0ff2dba", 200000000000000000000),
("0x076ee99d3548623a03b5f99859d2d785a1778d48", 200000000000000000000),
("0x2c424ee47f583cdce07ae318b6fad462381d4d2b", 4000000000000000000000),
("0xf98250730c4c61c57f129835f2680894794542f3", 4000000000000000000000),
("0xed1b24b6912d51b334ac0de6e771c7c0454695ea", 40000000000000000000),
("0xffd5170fd1a8118d558e7511e364b24906c4f6b3", 60085000000000000000),
("0xbf49c14898316567d8b709c2e50594b366c6d38c", 733202000000000000000),
("0x65ea26eabbe2f64ccccfe06829c25d4637520225", 700000000000000000000),
("0x5c5419565c3aad4e714e0739328e3521c98f05cc", 528000000000000000000),
("0xc53b50fd3b2b72bc6c430baf194a515585d3986d", 20000000000000000000),
("0x2b74c373d04bfb0fd60a18a01a88fbe84770e58c", 40000000000000000000),
("0xd97f4526dea9b163f8e8e33a6bcf92fb907de6ec", 284000000000000000000),
("0xa4a49f0bc8688cc9e6dc04e1e08d521026e65574", 200000000000000000000),
("0x575c00c2818210c28555a0ff29010289d3f82309", 10000000000000000000000),
("0x3f1233714f204de9de4ee96d073b368d8197989f", 38606000000000000000),
("0xf964d98d281730ba35b2e3a314796e7b42fedf67", 1543800000000000000000),
("0x1deec01abe5c0d952de9106c3dc30639d85005d6", 2000000000000000000000),
("0x12d60d65b7d9fc48840be5f891c745ce76ee501e", 21359400000000000000000),
("0x5c6136e218de0a61a137b2b3962d2a6112b809d7", 294273000000000000000),
("0xcd43258b7392a930839a51b2ef8ad23412f75a9f", 2000000000000000000000),
("0xdb3f258ab2a3c2cf339c4499f75a4bd1d3472e9e", 1500000000000000000000),
("0x0edd4b580ff10fe06c4a03116239ef96622bae35", 197000000000000000000),
("0x1d157c5876c5cad553c912caf6ce2d5277e05c73", 2000000000000000000000),
("0xcda1b886e3a795c9ba77914e0a2fe5676f0f5ccf", 106024000000000000000),
("0xf50cbafd397edd556c0678988cb2af5c2617e0a2", 716000000000000000000),
("0x327bb49e754f6fb4f733c6e06f3989b4f65d4bee", 20000000000000000000),
("0xc44bdec8c36c5c68baa2ddf1d431693229726c43", 100000000000000000000000),
("0x34e2849bea583ab0cc37975190f322b395055582", 7780340000000000000000),
("0x9221c9ce01232665741096ac07235903ad1fe2fc", 126489000000000000000),
("0xff3ded7a40d3aff0d7a8c45fa6136aa0433db457", 1999800000000000000000),
("0x10b5b34d1248fcf017f8c8ffc408ce899ceef92f", 267400000000000000000),
("0xf1a1f320407964fd3c8f2e2cc8a4580da94f01ea", 2000040000000000000000),
("0x6c800d4b49ba07250460f993b8cbe00b266a2553", 492500000000000000000),
("0xf827d56ed2d32720d4abf103d6d0ef4d3bcd559b", 26265000000000000000),
("0xffb9c7217e66743031eb377af65c77db7359dcda", 40000000000000000000),
("0x530319db0a8f93e5bb7d4dbf4816314fbed8361b", 2000000000000000000000),
("0x9c28a2c4086091cb5da226a657ce3248e8ea7b6f", 280000000000000000000),
("0xdb23a6fef1af7b581e772cf91882deb2516fc0a7", 200000000000000000000),
("0x6636d7ac637a48f61d38b14cfd4865d36d142805", 500000000000000000000),
("0xb3c260609b9df4095e6c5dff398eeb5e2df49985", 254030000000000000000),
("0x58e5c9e344c806650dacfc904d33edba5107b0de", 19100000000000000000),
("0x4f67396d2553f998785f704e07a639197dd1948d", 300080000000000000000),
("0x510d8159cc945768c7450790ba073ec0d9f89e30", 2560000000000000000000),
("0x593c48935beaff0fde19b04d309cd530a28e52ce", 4000000000000000000000),
("0xc27f4e08099d8cf39ee11601838ef9fc06d7fc41", 1790000000000000000000),
("0x07723e3c30e8b731ee456a291ee0e798b0204a77", 2000000000000000000000),
("0x0a652e2a8b77bd97a790d0e91361c98890dbb04e", 1000000000000000000000),
("0x671015b97670b10d5e583f3d62a61c1c79c5143f", 400000000000000000000),
("0x7cc24a6a958c20c7d1249660f7586226950b0d9a", 1970000000000000000000),
("0x6ef9e8c9b6217d56769af97dbb1c8e1b8be799d2", 182000000000000000000),
("0x5c4368918ace6409c79eca80cdaae4391d2b624e", 4000000000000000000000),
("0x043707071e2ae21eed977891dc79cd5d8ee1c2da", 2000000000000000000000),
("0x39bfd978689bec048fc776aa15247f5e1d7c39a2", 20000000000000000000000),
("0x05915d4e225a668162aee7d6c25fcfc6ed18db03", 66348000000000000000),
("0x3f551ba93cd54693c183fb9ad60d65e1609673c9", 2000000000000000000000),
("0xa8c0b02faf02cb5519dda884de7bbc8c88a2da81", 16700000000000000000),
("0xbd0c5cd799ebc48642ef97d74e8e429064fee492", 326000000000000000000),
("0x0a931b449ea8f12cdbd5e2c8cc76bad2c27c0639", 23031000000000000000),
("0x2ea5fee63f337a376e4b918ea82148f94d48a626", 1864242000000000000000),
("0xcc6c2df00e86eca40f21ffda1a67a1690f477c65", 3160000000000000000000),
("0xe5e37e19408f2cfbec83349dd48153a4a795a08f", 4200000000000000000000),
("0xf555a27bb1e2fd4e2cc784caee92939fc06e2fc9", 2000000000000000000000),
("0xdcf9719be87c6f46756db4891db9b611d2469c50", 1000000000000000000000),
("0x8e2f9034c9254719c38e50c9aa64305ed696df1e", 4728000000000000000000),
("0xa01f12d70f44aa7b113b285c22dcdb45873454a7", 18200000000000000000),
("0xbce40475d345b0712dee703d87cd7657fc7f3b62", 7750000000000000000000),
("0xbb19bf91cbad74cceb5f811db27e411bc2ea0656", 17600000000000000000),
("0xacc062702c59615d3444ef6214b8862b009a02ed", 1499936000000000000000),
("0x449ac4fbe383e36738855e364a57f471b2bfa131", 197000000000000000000000),
("0xad59a78eb9a74a7fbdaefafa82eada8475f07f95", 500000000000000000000),
("0x6b6577f3909a4d6de0f411522d4570386400345c", 1880000000000000000000),
("0x79bf2f7b6e328aaf26e0bb093fa22da29ef2f471", 1790000000000000000000),
("0x940f715140509ffabf974546fab39022a41952d2", 1400000000000000000000),
("0x1d572edd2d87ca271a6714c15a3b37761dcca005", 127674000000000000000),
("0xd78ecd25adc86bc2051d96f65364866b42a426b7", 3877300000000000000000),
("0xf9729d48282c9e87166d5eef2d01eda9dbf78821", 99981000000000000000),
("0x17762560e82a93b3f522e0e524adb8612c3a7470", 1000000000000000000000),
("0xd500e4d1c9824ba9f5b635cfa3a8c2c38bbd4ced", 400000000000000000000),
("0xa11effab6cf0f5972cffe4d56596e98968144a8f", 1670000000000000000000),
("0xf64ecf2117931c6d535a311e4ffeaef9d49405b8", 2674000000000000000000),
("0x229cc4711b62755ea296445ac3b77fc633821cf2", 39481000000000000000),
("0xfc989cb487bf1a7d17e4c1b7c4b7aafdda6b0a8d", 20000000000000000000),
("0xea8527febfa1ade29e26419329d393b940bbb7dc", 1999944000000000000000),
("0xbce13e22322acfb355cd21fd0df60cf93add26c6", 200000000000000000000),
("0x19ff244fcfe3d4fa2f4fd99f87e55bb315b81eb6", 200000000000000000000),
("0xd2581a55ce23ab10d8ad8c44378f59079bd6f658", 8800000000000000000000),
("0x4073fa49b87117cb908cf1ab512da754a932d477", 1970000000000000000000),
("0xb6a82933c9eadabd981e5d6d60a6818ff806e36b", 400000000000000000000),
("0xc79806032bc7d828f19ac6a640c68e3d820fa442", 20000000000000000000),
("0x577b2d073c590c50306f5b1195a4b2ba9ecda625", 373600000000000000000),
("0x7f13d760498d7193ca6859bc95c901386423d76c", 5000000000000000000000),
("0x416784af609630b070d49a8bcd12235c6428a408", 20000000000000000000000),
("0xfbe71622bcbd31c1a36976e7e5f670c07ffe16de", 400000000000000000000),
("0xa5698035391e67a49013c0002079593114feb353", 240000000000000000000),
("0xab2871e507c7be3965498e8fb462025a1a1c4264", 775000000000000000000),
("0x9c78fbb4df769ce2c156920cfedfda033a0e254a", 1970000000000000000000),
("0x95e6f93dac228bc7585a25735ac2d076cc3a4017", 6000000000000000000000),
("0x3c1f91f301f4b565bca24751aa1f761322709ddd", 1790000000000000000000),
("0xf77f9587ff7a2d7295f1f571c886bd33926a527c", 1999800000000000000000),
("0x755f587e5efff773a220726a13d0f2130d9f896b", 1000000000000000000000),
("0x8c6aa882ee322ca848578c06cb0fa911d3608305", 600000000000000000000),
("0x492cb5f861b187f9df21cd4485bed90b50ffe22d", 499928000000000000000),
("0x95a577dc2eb3ae6cb9dfc77af697d7efdfe89a01", 136000000000000000000),
("0x4173419d5c9f6329551dc4d3d0ceac1b701b869e", 88000000000000000000),
("0x456ae0aca48ebcfae166060250525f63965e760f", 300000000000000000000),
("0x81f8de2c283d5fd4afbda85dedf9760eabbbb572", 3000000000000000000000),
("0xcd0af3474e22f069ec3407870dd770443d5b12b0", 2626262000000000000000),
("0x283c2314283c92d4b064f0aef9bb5246a7007f39", 200000000000000000000),
("0x29b3f561ee7a6e25941e98a5325b78adc79785f3", 100000000000000000000),
("0xcd4306d7f6947ac1744d4e13b8ef32cb657e1c00", 499986000000000000000),
("0xd9ec2efe99ff5cf00d03a8317b92a24aef441f7e", 2000000000000000000000),
("0x83dbf8a12853b40ac61996f8bf1dc8fdbaddd329", 970000000000000000000),
("0x9d93fab6e22845f8f45a07496f11de71530debc7", 1998000000000000000000),
("0xfd204f4f4aba2525ba728afdf78792cbdeb735ae", 2000000000000000000000),
("0x99fad50038d0d9d4c3fbb4bce05606ecadcd5121", 2000000000000000000000),
("0xd206aaddb336d45e7972e93cb075471d15897b5d", 600000000000000000000),
("0x428a1ee0ed331d7952ccbe1c7974b2852bd1938a", 2208370000000000000000),
("0x690228e4bb12a8d4b5e0a797b0c5cf2a7509131e", 1880000000000000000000),
("0xfa3a1aa4488b351aa7560cf5ee630a2fd45c3222", 878850000000000000000),
("0x0372e852582e0934344a0fed2178304df25d4628", 20000000000000000000000),
("0x35ea2163a38cdf9a123f82a5ec00258dae0bc767", 4000000000000000000000),
("0xd1fed0aee6f5dfd7e25769254c3cfad15adeccaa", 730000000000000000000),
("0xc05b740620f173f16e52471dc38b9c514a0b1526", 140000000000000000000),
("0x87e3062b2321e9dfb0875ce3849c9b2e3522d50a", 10000000000000000000000),
("0x303fbaebbe46b35b6e5b74946a5f99bc1585cae7", 878148000000000000000),
("0xe7a8e471eafb798f4554cc6e526730fd56e62c7d", 1000000000000000000000),
("0xad7dd053859edff1cb6f9d2acbed6dd5e332426f", 1970000000000000000000),
("0xdc4345d6812e870ae90c568c67d2c567cfb4f03c", 6700000000000000000000),
("0xa6a08252c8595177cc2e60fc27593e2379c81fb1", 20055000000000000000),
("0xa9af21acbe482f8131896a228036ba51b19453c3", 49999000000000000000),
("0x86e3fe86e93da486b14266eadf056cbfa4d91443", 2000000000000000000000),
("0x744b03bba8582ae5498e2dc22d19949467ab53fc", 500000000000000000000),
("0xd3118ea3c83505a9d893bb67e2de142d537a3ee7", 20000000000000000000),
("0xb32f1c2689a5ce79f1bc970b31584f1bcf2283e7", 20000000000000000000),
("0x4828e4cbe34e1510afb72c2beeac8a4513eaebd9", 3940000000000000000000),
("0xb07bcc085ab3f729f24400416837b69936ba8873", 2000140000000000000000),
("0xbdc74873af922b9df474853b0fa7ff0bf8c82695", 3999000000000000000000),
("0x15ebd1c7cad2aff19275c657c4d808d010efa0f5", 200550000000000000000),
("0xcbc04b4d8b82caf670996f160c362940d66fcf1a", 6000000000000000000000),
("0x8197948121732e63d9c148194ecad46e30b749c8", 4000000000000000000000),
("0x69797bfb12c9bed682b91fbc593591d5e4023728", 10000000000000000000000),
("0xbe9b8c34b78ee947ff81472eda7af9d204bc8466", 150000000000000000000),
("0xdf3f57b8ee6434d047223def74b20f63f9e4f955", 250500000000000000000),
("0xa3ae1879007d801cb5f352716a4dd8ba2721de3d", 200000000000000000000000),
("0xcb4bb1c623ba28dc42bdaaa6e74e1d2aa1256c2a", 1999944000000000000000),
("0xe03c00d00388ecbf4f263d0ac778bb41a57a40d9", 1000072000000000000000),
("0xfc2c1f88961d019c3e9ea33009152e0693fbf88a", 8000000000000000000000),
("0x8599cbd5a6a9dcd4b966be387d69775da5e33c6f", 58180000000000000000000),
("0xb7a31a7c38f3db09322eae11d2272141ea229902", 2000000000000000000000),
("0x231a15acc199c89fa9cb22441cc70330bdcce617", 500000000000000000000),
("0x3fbed6e7e0ca9c84fbe9ebcf9d4ef9bb49428165", 2000000000000000000000),
("0x92cfd60188efdfb2f8c2e7b1698abb9526c1511f", 2000000000000000000000),
("0x5c936f3b9d22c403db5e730ff177d74eef42dbbf", 75000000000000000000),
("0x931fe712f64207a2fd5022728843548bfb8cbb05", 2000000000000000000000),
("0x08d54e83ad486a934cfaeae283a33efd227c0e99", 1039000000000000000000),
("0xa339a3d8ca280e27d2415b26d1fc793228b66043", 1013600000000000000000),
("0x581f34b523e5b41c09c87c298e299cbc0e29d066", 1131607000000000000000),
("0xcaaa68ee6cdf0d34454a769b0da148a1faaa1865", 7216000000000000000000),
("0x0838a7768d9c2aca8ba279adfee4b1f491e326f1", 200000000000000000000),
("0xdde77a4740ba08e7f73fbe3a1674912931742eeb", 19867021000000000000000),
("0xcbe810fe0fecc964474a1db97728bc87e973fcbd", 10000000000000000000000),
("0x86c28b5678af37d727ec05e4447790f15f71f2ea", 200000000000000000000),
("0xdd6c062193eac23d2fdbf997d5063a346bb3b470", 20000000000000000000),
("0x5975b9528f23af1f0e2ec08ac8ebaa786a2cb8e0", 345827000000000000000),
("0xe29d8ae452dcf3b6ac645e630409385551faae0a", 80276000000000000000),
("0x2fbc85798a583598b522166d6e9dda121d627dbc", 200000000000000000000),
("0x7a36aba5c31ea0ca7e277baa32ec46ce93cf7506", 20000000000000000000000),
("0xdbcbcd7a57ea9db2349b878af34b1ad642a7f1d1", 200000000000000000000),
("0x92aae59768eddff83cfe60bb512e730a05a161d7", 1708015000000000000000),
("0xa5e93b49ea7c509de7c44d6cfeddef5910deaaf2", 2000000000000000000000),
("0xe33d980220fab259af6a1f4b38cf0ef3c6e2ea1a", 2000000000000000000000),
("0x8ed0af11ff2870da0681004afe18b013f7bd3882", 4000000000000000000000),
("0xf23e5c633221a8f7363e65870c9f287424d2a960", 1380000000000000000000),
("0x96334bfe04fffa590213eab36514f338b864b736", 400000000000000000000),
("0xfa1f1971a775c3504fef5079f640c2c4bce7ac05", 2000000000000000000000),
("0xdf44c47fc303ac76e74f97194cca67b5bb3c023f", 591000000000000000000),
("0x4b74f5e58e2edf76daf70151964a0b8f1de0663c", 324020000000000000000),
("0xe38b91b35190b6d9deed021c30af094b953fdcaa", 33340000000000000000),
("0x6b38de841fad7f53fe02da115bd86aaf662466bd", 1730000000000000000000),
("0x11675a25554607a3b6c92a9ee8f36f75edd3e336", 159800000000000000000),
("0x0ba8705bf55cf219c0956b5e3fc01c4474a6cdc1", 94963000000000000000),
("0x0f05f120c89e9fbc93d4ab0c5e2b4a0df092b424", 30000000000000000000000),
("0xfdd1195f797d4f35717d15e6f9810a9a3ff55460", 18200000000000000000),
("0x63a61dc30a8e3b30a763c4213c801cbf98738178", 1000000000000000000000),
("0xe5bdf34f4ccc483e4ca530cc7cf2bb18febe92b3", 126260000000000000000),
("0xd6e09e98fe1300332104c1ca34fbfac554364ed9", 2000000000000000000000),
("0x5bd6862d517d4de4559d4eec0a06cad05e2f946e", 200000000000000000000),
("0x7294ec9da310bc6b4bbdf543b0ef45abfc3e1b4d", 22000000000000000000000),
("0xae34861d342253194ffc6652dfde51ab44cad3fe", 466215000000000000000),
("0xf50ae7fab4cfb5a646ee04ceadf9bf9dd5a8e540", 3999952000000000000000),
("0xdd2bdfa917c1f310e6fa35aa8af16939c233cd7d", 400000000000000000000),
("0xe0060462c47ff9679baef07159cae08c29f274a9", 2000000000000000000000),
("0xb7d12e84a2e4c4a6345af1dd1da9f2504a2a996e", 200000000000000000000),
("0xf5500178cb998f126417831a08c2d7abfff6ab5f", 1308923000000000000000),
("0xfd377a385272900cb436a3bb7962cdffe93f5dad", 2000000000000000000000),
("0xa4a83a0738799b971bf2de708c2ebf911ca79eb2", 600000000000000000000),
("0x52a5e4de4393eeccf0581ac11b52c683c76ea15d", 19999800000000000000000),
("0xb07fdeaff91d4460fe6cd0e8a1b0bd8d22a62e87", 5260000000000000000000),
("0x35f5860149e4bbc04b8ac5b272be55ad1aca58e0", 200000000000000000000),
("0xfb135eb15a8bac72b69915342a60bbc06b7e077c", 20000000000000000000000),
("0x02d4a30968a39e2b3498c3a6a4ed45c1c6646822", 2000000000000000000000),
("0xe44b7264dd836bee8e87970340ed2b9aed8ed0a5", 5772100000000000000000),
("0xe90a354cec04d69e5d96ddc0c5138d3d33150aa0", 499971000000000000000),
("0x693d83be09459ef8390b2e30d7f7c28de4b4284e", | |
result.shape[:2]
bottom = result[row-2:row, 0:col]
bordersize = 5
result_border = cv2.copyMakeBorder(
result,
top = bordersize,
bottom = bordersize,
left = bordersize,
right = bordersize,
borderType = cv2.BORDER_CONSTANT, value = [0,0,0] )
# same as above
row, col= result_clean.shape[:2]
bottom = result_clean[row-2:row, 0:col]
bordersize = 5
result_clean_border = cv2.copyMakeBorder(
result_clean,
top = bordersize,
bottom = bordersize,
left = bordersize,
right = bordersize,
borderType = cv2.BORDER_CONSTANT, value = [0,0,0] )
# vertically concatenate the matchesDrawn and clean images created before.
result_vertical_concat = np.concatenate(
(result_border, result_clean_border),
axis = 0)
# Take the image_destination and turn it into a Path object.
# Then add the image names to the new path.
# # TODO: For some reason it says the 'image_destination' object is
# a str type at this point in the program even though it is not.
# Look into why.
image_path = image_destination.joinpath(str(len(good_points)) +
"___" +
re.sub(".jpg", "", os.path.basename(primary_image.image_title)) +
"___" +
re.sub(".jpg", ".JPG", os.path.basename(secondary_image.image_title))
)
# Finally, write the finished image to the output folder.
cv2.imwrite(str(image_path), result_vertical_concat, [int(cv2.IMWRITE_JPEG_QUALITY), 80])
################################################################################
def score_boosting(primary_image, secondary_image, good_points, parameters):
    """Weight the raw match score (number of good keypoints) by image metadata.

    Boost hierarchy: same station+camera+date -> date_score; same
    station+camera -> camera_score; same station only -> station_score;
    different stations leave the raw score untouched.
    """
    score = len(good_points)
    if primary_image.station != secondary_image.station:
        # No metadata overlap at all: return the unboosted keypoint count.
        return score
    boost = parameters['score_boosting']
    if primary_image.camera != secondary_image.camera:
        return score * float(boost['station_score'])
    if primary_image.date == secondary_image.date:
        return score * float(boost['date_score'])
    return score * float(boost['camera_score'])
################################################################################
def make_lut_u():
    """Return a 1x256x3 uint8 BGR lookup table for visualizing the U channel."""
    ramp = np.arange(256, dtype=np.uint8)
    lut = np.stack([ramp, 255 - ramp, np.zeros_like(ramp)], axis=-1)
    return lut.reshape(1, 256, 3)
def make_lut_v():
    """Return a 1x256x3 uint8 BGR lookup table for visualizing the V channel."""
    ramp = np.arange(256, dtype=np.uint8)
    return np.dstack((np.zeros_like(ramp), 255 - ramp, ramp))
################################################################################
def variance_of_laplacian(image):
    """Focus measure: the variance of the image's Laplacian.

    A higher value generally indicates a sharper (more in-focus) image.
    """
    laplacian = cv2.Laplacian(image, cv2.CV_64F)
    return laplacian.var()
################################################################################
def histogram_equalization(image):
    """Convert a BGR image to grayscale and equalize its histogram.

    Returns the equalized single-channel image.
    """
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    equalized = cv2.equalizeHist(grayscale)
    print("done with equalization")
    return equalized
################################################################################
def edge_sharpening(image):
    """Sharpen *image* with a 3x3 high-boost convolution kernel."""
    sharpen_kernel = np.array([
        [-1, -1, -1],
        [-1,  9, -1],
        [-1, -1, -1],
    ])
    # -1 keeps the output depth identical to the input's.
    return cv2.filter2D(np.asarray(image), -1, sharpen_kernel)
################################################################################
def red(intensity):
    """Linearly rescale a red-channel intensity from [86, 230] onto [0, 255].

    Values outside the input range extrapolate linearly (the result may fall
    outside [0, 255]; callers are expected to clip if needed).

    Fix: the original added ``o_min`` to the scale factor instead of to the
    scaled value (misplaced parenthesis). Harmless while o_min == 0, but
    wrong for any other output offset.
    """
    i_min, i_max = 86, 230
    o_min, o_max = 0, 255
    return (intensity - i_min) * (o_max - o_min) / (i_max - i_min) + o_min
def green(intensity):
    """Linearly rescale a green-channel intensity from [90, 225] onto [0, 255].

    Values outside the input range extrapolate linearly.

    Fix: same misplaced parenthesis as ``red`` -- ``o_min`` now offsets the
    scaled value instead of the scale factor (identical output while
    o_min == 0).
    """
    i_min, i_max = 90, 225
    o_min, o_max = 0, 255
    return (intensity - i_min) * (o_max - o_min) / (i_max - i_min) + o_min
def blue(intensity):
    """Linearly rescale a blue-channel intensity from [100, 210] onto [0, 255].

    Values outside the input range extrapolate linearly.

    Fix: same misplaced parenthesis as ``red``/``green`` -- ``o_min`` now
    offsets the scaled value instead of the scale factor (identical output
    while o_min == 0).
    """
    i_min, i_max = 100, 210
    o_min, o_max = 0, 255
    return (intensity - i_min) * (o_max - o_min) / (i_max - i_min) + o_min
def filter_images(primary_image,image_source,edited_source):
    """Classify *primary_image* as day or night via its YUV U channel and,
    for night frames, archive the original and enhance the source file.

    For a "night" frame the untouched original is written into
    *edited_source* and a sharpened + histogram-equalized copy is written
    back over *image_source*.

    :param primary_image: BGR image array (as produced by cv2.imread)
    :param image_source: path of the source image file; assumes a
        backslash-separated path containing an 'images' directory
        -- TODO confirm (Windows-style paths only)
    :param edited_source: directory that receives the untouched original
    """
    # Work in YUV so the chroma (U) channel can be inspected directly.
    img_yuv = cv2.cvtColor(primary_image, cv2.COLOR_BGR2YUV)
    y, u, v = cv2.split(img_yuv)
    lut_u, lut_v = make_lut_u(), make_lut_v()
    y = cv2.cvtColor(y, cv2.COLOR_GRAY2BGR)
    u = cv2.cvtColor(u, cv2.COLOR_GRAY2BGR)
    v = cv2.cvtColor(v, cv2.COLOR_GRAY2BGR)
    # NOTE(review): `min`/`max` shadow builtins and appear unused below.
    min = 125
    max = 130
    i = 530
    #Only checking mid range of each photo for variance from YUV range
    while(i < 550):
        # U != 128 means the pixel has real chroma -> treated as a day image.
        if(u[i][1][1] < 128 or u[i][1][1]>128):
            #Blur score
            gray = cv2.cvtColor(primary_image, cv2.COLOR_BGR2GRAY)
            threshold = variance_of_laplacian(gray)
            # NOTE(review): a higher Laplacian variance normally means a
            # sharper image, so this message mapping looks inverted -- confirm.
            if(threshold < 1500):
                print("not blurry")
            else:
                print("blurry")
        else:
            # Neutral chroma (U == 128) -> grayscale night shot; enhance it.
            flag = 1;
            print("night")
            # img = Image.open(image_source)
            # multi = img.split()
            # red_band = multi[0].point(red)
            # green_band = multi[1].point(green)
            # blue_band = multi[2].point(blue)
            # normal_img = Image.merge("RGB",(red_band, green_band, blue_band))
            # #normal_img.show()
            # save_image = normal_img
            # save_image.save(image_source, "JPEG")
            #normal_img.save(image_source, "JPEG")
            save_image = np.copy(primary_image)
            # Derive the archive path from the path component following the
            # 'images' directory (assumes backslash separators -- TODO confirm).
            image_path = image_source
            image_path = image_path.split("\\")
            num = image_path.index('images')
            # Writing original image to folder and editing copy
            edited_source = edited_source + "\\" + image_path[num+1]
            cv2.imwrite(edited_source,primary_image)
            # Enhance the copy (sharpen then equalize) and overwrite the source.
            sharp_image = edge_sharpening(save_image)
            hist_image = histogram_equalization(sharp_image)
            cv2.imwrite(image_source,hist_image)
            #sharp_image = edge_sharpening(normal_img)
            # cv2.imshow("final image", np.asarray(hist_image))
            # cv2.waitKey(0)
            # A night frame only needs processing once -- stop scanning rows.
            break
        i += 1
################################################################################
def call_cluster(arr):
    """Normalize the descriptor matrix by its maximum and run Markov clustering."""
    peak = arr.max()
    normalized = arr / peak
    markov_cluster(normalized)
################################################################################
def match(primary_images, secondary_images, image_destination,
          start_i, score_matrix, write_threshold, parameters):
    'main function used for determining matches between two images.'
    'Finds the sift keypoints/descriptors and uses a KNN based matcher'
    'to filter out bad keypoints. Writes final output to score_matrix'
    # NOTE(review): only the first string above is the docstring; the other
    # two lines are no-op expression statements.
    # Begin loop on the primary imags to match. Due to multithreading of the
    # program this may not be the full set of images.
    descriptors = []        # every SIFT descriptor seen, across all primary images
    descriptors_MCL = []    # descriptors accumulated for Markov clustering below
    distances = []          # NOTE(review): never written -- appears unused
    clustering_counter = 0  # NOTE(review): never used after init
    primary_counter = 0
    count = 0
    for primary_count in range(len(primary_images)):
        primary_counter = primary_counter + 1
        num_desc = 0             # NOTE(review): unused
        countDesctoPrimary = 0
        new_arr = 0
        new_arr = np.asarray(new_arr)  # NOTE(review): immediately unused
        print("\t\tMatching: " + os.path.basename(primary_images[primary_count].image_title) + "\n")
        # create mask from template and place over image to reduce ROI
        mask_1 = cv2.imread(primary_images[primary_count].template_title, -1)
        mySift = cv2.xfeatures2d.SIFT_create()
        kp_1, desc_1 = mySift.detectAndCompute(primary_images[primary_count].image, mask_1)
        for i in desc_1:
            descriptors.append(i)
        # paramter setup and create nearest nieghbor matcher
        # (algorithm 0 == FLANN_INDEX_KDTREE)
        index_params = dict(algorithm = 0, trees = 5)
        search_params = dict()
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        count = count + 1
        # Begin nested loopfor the images to be matched to. This secondary loop
        # will always iterate over the full dataset of images.
        for secondary_count in range(len(secondary_images)):
            countDesctoPrimary = countDesctoPrimary + 1
            # check if same image; if not, go into sophisticated matching
            if primary_images[primary_count].image_title != secondary_images[secondary_count].image_title:
                # create mask from template
                mask_2 = cv2.imread(secondary_images[secondary_count].template_title, -1)
                kp_2, desc_2 = mySift.detectAndCompute(secondary_images[secondary_count].image, mask_2)
                # check for matches
                try:
                    # Check for similarities between pairs
                    matches = flann.knnMatch(desc_1, desc_2, k=2)
                    # Use Lowe's ratio test
                    good_points = []
                    for m, n in matches:
                        if m.distance < 0.7 * n.distance:
                            good_points.append(m)
                            # NOTE(review): appends the descriptor at index
                            # `primary_count` (the image counter), not one tied
                            # to this particular match -- looks suspicious; confirm.
                            descriptors_MCL.append(desc_1[primary_count])
                    # RANSAC
                    if (int(parameters['config']['ransac'])):
                        src_pts = np.float32([ kp_1[m.queryIdx].pt for m in good_points ]).reshape(-1,1,2)
                        dst_pts = np.float32([ kp_2[m.trainIdx].pt for m in good_points ]).reshape(-1,1,2)
                        # used to detect bad keypoints
                        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
                        matchesMask = mask.ravel().tolist()
                        # NOTE(review): numpy shape is (height, width), so `h`
                        # receives the width and `w` the height here -- confirm.
                        h,w = primary_images[primary_count].image.shape[1], primary_images[primary_count].image.shape[0]
                        pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
                        dst = cv2.perspectiveTransform(pts,M)
                    # take smallest number of keypoints between two images
                    number_keypoints = 0
                    if len(kp_1) <= len(kp_2):
                        number_keypoints = len(kp_1)
                    else:
                        number_keypoints = len(kp_2)
                    # score boosting
                    score = score_boosting(primary_images[primary_count],
                        secondary_images[secondary_count], good_points, parameters)
                    # add the number of good points to score_matrix. start_i is
                    # passed in as a paramter to ensure that the correct row of the
                    # score matrix is being written to. Give this index the number
                    # of 'good_points' from the output of the KNN matcher.
                    score_matrix[start_i + primary_count][secondary_count] = score
                    # only do image processing if number of good points
                    # exceeeds threshold
                    if len(good_points) > write_threshold:
                        write_matches(kp_1, kp_2, good_points,
                            primary_images[primary_count], secondary_images[secondary_count],
                            image_destination)
                except cv2.error as e:
                    # OpenCV failures for this pair are logged and skipped;
                    # the score_matrix entry keeps its initial value.
                    print('\n\t\tERROR: {0}\n'.format(e))
                    print("\t\tError matching: " + os.path.basename(primary_images[primary_count].image_title) +
                        " and " + os.path.basename(secondary_images[secondary_count].image_title) + "\n")
    # Post-processing: pack the accumulated descriptors into a square matrix
    # for Markov clustering.
    # NOTE(review): assumes descriptors_MCL is non-empty and rectangular;
    # np.resize repeats/truncates data, so this is a lossy reshape.
    final_array = np.asarray(descriptors_MCL)
    x,y = final_array.shape
    size = x-y
    one_array = np.ones((x,size))  # NOTE(review): raises if y > x (negative size)
    resize_amt = math.floor(math.sqrt(x*y))
    new_array = np.resize(final_array,(resize_amt,resize_amt))
    call_cluster(new_array)
    # Markov clustering with one filled array to square matrix
    mark_array = np.hstack((final_array, one_array))
    #markov_cluster(mark_array)
    return score_matrix
################################################################################
def slice_generator(sequence_length, n_blocks):
    """Yield (start, end) index pairs that split *sequence_length* items
    into *n_blocks* contiguous, near-equal chunks.

    End indexes are exclusive; consecutive pairs share their boundary, so the
    chunks tile the full range without gaps or overlap.
    """
    for block in range(1, n_blocks + 1):
        start = int(round((block - 1) * sequence_length / n_blocks))
        end = int(round(block * sequence_length / n_blocks))
        yield (start, end)
################################################################################
def match_multi(primary_images, image_destination, n_threads, write_threshold, parameters):
    """Fan `match` out over *n_threads* worker threads and collect the scores.

    Each worker handles a contiguous slice of the primary images but always
    compares against the full (deep-copied) secondary set. All workers write
    into disjoint rows of the shared score matrix, which is returned once
    every thread has joined.
    """
    # Workers compare against an independent copy of the image list.
    secondary_images = deepcopy(primary_images)
    total = len(primary_images)
    score_matrix = np.zeros(shape=(total, total))
    print("\tImages to pattern match: {0}\n".format(str(total)))
    # One thread per (start, end) slice of the primary images.
    workers = []
    for start_i, end_i in slice_generator(total, n_threads):
        worker = threading.Thread(
            target=match,
            args=(primary_images[start_i:end_i],
                  secondary_images,
                  image_destination,
                  start_i,
                  score_matrix,
                  write_threshold,
                  parameters))
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
    return score_matrix
################################################################################
def add_cat_ID(rec_list, cluster_path):
    """Attach a Cat ID to every record in *rec_list* from the cluster table.

    Reads the CSV at *cluster_path* (columns 'Image Name' and 'Cat ID') and
    matches rows to records by the basename of ``record.image_title``.

    Bug fixed: when an image was missing from the table, the original only
    printed a warning and then fell through, assigning either an undefined
    ``image_index`` (NameError on the first record) or the *previous*
    iteration's index (silently wrong Cat ID). Missing images are now skipped.

    :param rec_list: records exposing ``image_title`` and ``add_cat_ID(id)``
    :param cluster_path: path to the cluster table CSV
    :return: rec_list, with Cat IDs applied in place
    """
    import pandas as pd
    csv_file = pd.read_csv(cluster_path)
    image_names = list(csv_file['Image Name'])
    cat_ID_list = list(csv_file['Cat ID'])
    for record in rec_list:
        image = os.path.basename(record.image_title)
        try:
            image_index = image_names.index(image)
        except ValueError:
            print('\tSomething is wrong with cluster_table file. Image name is not present.')
            continue  # skip: no row for this image
        record.add_cat_ID(cat_ID_list[image_index])
    return rec_list
################################################################################
def crop(event, x, y, flags, param):
    """OpenCV mouse callback implementing click-and-drag ROI selection.

    On left-button press the first corner is recorded and dragging starts;
    on release the opposite corner is appended, a green rectangle is drawn
    onto *param* (the displayed image) and the window is refreshed.
    Communicates with the caller through the module-level globals
    ``ref_points`` (the two corners) and ``cropping`` (drag state).
    """
    global ref_points, cropping
    if event == cv2.EVENT_LBUTTONDOWN:
        # First corner: reset the point list and enter "dragging" state.
        ref_points = [(x, y)]
        cropping = True
    elif event == cv2.EVENT_LBUTTONUP:
        # Second corner: finish the selection and show it on screen.
        ref_points.append((x, y))
        cropping = False
        cv2.rectangle(param, ref_points[0], ref_points[1], (0, 255, 0), 2)
        cv2.imshow("image", param)
################################################################################
def manual_roi(rec_list, image_source):
| |
from builtins import str
from past.builtins import basestring
from builtins import object
from django.conf import settings
from django.core.exceptions import FieldError, ValidationError as DjangoValidationError
from django.template.loader import render_to_string
from django.db.models import Q
import re
from rest_framework import filters
from rest_framework.exceptions import ValidationError
from collections import OrderedDict
# Query-string parameters that the filter backends below never treat as
# filter fields (ordering, pagination, formatting, search, streaming).
DEFAULT_IGNORE = [
    settings.REST_FRAMEWORK['ORDERING_PARAM'],
    'fields',
    'page',
    'page_size',
    'format',
    'q',
    'stream',
]
class FilterableField(object):
    '''
    Describes a field that can be filtered on.
    name - the field name to use in the API.
    source - the field name in the model, in case it is different than the name.
    it can also be a callable f(field, orm_operator, value) --> Q object
    converter - an optional function to modify the given filter value before passing it to the queryset.
    datatype - the type of values that are expected for this field.
    advanced - if true, the filter will be used only by InfinidatFilter and not by SimpleFilter
    '''
    # Supported datatype identifiers.
    STRING = 'string'
    INTEGER = 'integer'
    FLOAT = 'float'
    BOOLEAN = 'boolean'
    DATETIME = 'datetime'
    CAPACITY = 'capacity'
    _datatypes = (STRING, INTEGER, FLOAT, BOOLEAN, DATETIME, CAPACITY)

    def __init__(self, name, source=None, converter=None, datatype=STRING, advanced=False):
        assert datatype in FilterableField._datatypes, 'Invalid datatype "%s"' % datatype
        # Capacity fields get the dedicated _convert_capacity parser, so a
        # custom converter would never be applied -- forbid the combination.
        assert not (datatype == FilterableField.CAPACITY and converter), 'Converter is not supported for capacity fields'
        self.name = name
        self.source = source or name
        # Default converter is the identity function.
        self.converter = converter or (lambda value: value)
        self.datatype = datatype
        self.advanced = advanced

    def __unicode__(self):
        return self.name

    def __str__(self):
        return self.name

    def __repr__(self):
        return '<FilterableField name=%s source=%s datatype=%s>' % (self.name, self.source, self.datatype)

    def convert(self, value):
        """Convert a raw filter value (or list of values) via this field's converter."""
        if self.datatype == FilterableField.CAPACITY:
            return _convert_capacity(value)
        if isinstance(value, list):
            return [self.converter(v) for v in value]
        else:
            return self.converter(value)

    def build_q(self, orm_operator, value):
        """Build a Django Q object filtering this field with the given ORM lookup."""
        value = self.convert(value)
        # A callable source implements custom Q construction itself.
        if hasattr(self.source, '__call__'):
            return self.source(self, orm_operator, value)
        return Q(**{self.source + '__' + orm_operator: value})

    @classmethod
    def for_model(cls, model_cls):
        '''
        Generate a list of filterable fields automatically for the given model class.
        Includes model fields of the following types: CharField, TextField, IntegerField, AutoField,
        DateField, DateTimeField, FloatField, DecimalField, BooleanField, NullBooleanField.
        '''
        from django.db.models.fields import CharField, TextField, IntegerField, AutoField, DateField, DateTimeField
        from django.db.models.fields import FloatField, DecimalField, BooleanField, NullBooleanField
        filterable_fields = []
        for field in model_cls._meta.get_fields():
            datatype = None
            if isinstance(field, CharField) or isinstance(field, TextField):
                datatype = FilterableField.STRING
            elif isinstance(field, IntegerField) or isinstance(field, AutoField):
                datatype = FilterableField.INTEGER
            elif isinstance(field, FloatField) or isinstance(field, DecimalField):
                datatype = FilterableField.FLOAT
            elif isinstance(field, BooleanField) or isinstance(field, NullBooleanField):
                datatype = FilterableField.BOOLEAN
            elif isinstance(field, DateField) or isinstance(field, DateTimeField):
                datatype = FilterableField.DATETIME
            # Model fields of any other type are silently skipped.
            if datatype:
                filterable_fields.append(cls(field.name, datatype=datatype))
        return filterable_fields
def _convert_capacity(values):
    '''
    Converts values such as "100 GB" or "1TiB" to bytes. Unitless values are
    interpreted as bytes. If there's a list of values and the last one has units,
    the units are applied to any unitless values. For example "1GB,2,3,4TB" is
    interpreted as "1GB,2TB,3TB,4TB". Units are case-sensitive.

    :param values: a string or a list of strings
    :return: an int (single input) or list of ints (list input), in bytes
    :raises ValidationError: if a value cannot be parsed as a capacity
    '''
    from capacity import capacity
    # Check if we got a single value or multiple values
    single_value = (not isinstance(values, list))
    if single_value:
        values = [values]
    # If the last value has a unit, use it as the default for values that don't
    default_units = 'byte'
    for units in capacity._KNOWN_CAPACITIES:
        if values[-1].endswith(units):
            default_units = units
            break
    # Parse each value to a Capacity instance
    capacities = []
    for v in values:
        v = v.strip()
        # If v is a plain number without units, use the default units
        try:
            float(v)
            v += default_units
        except ValueError:
            pass
        # Parse the capacity. Fix: was a bare `except:` -- that also swallowed
        # SystemExit/KeyboardInterrupt; only parser failures should be
        # reported as a validation error.
        try:
            capacities.append(capacity.from_string(v))
        except Exception:
            expected = ', '.join(capacity._KNOWN_CAPACITIES.keys())
            raise ValidationError('Invalid capacity value "%s", supported units are: %s' % (v, expected))
    # Convert to bytes
    byte_values = [int(c / capacity.byte) for c in capacities]
    return byte_values[0] if single_value else byte_values
class Operator(object):
    """A filter operator exposed by the API, mapped to a Django ORM lookup.

    negate   - apply the lookup with exclude() instead of filter()
    min_vals/max_vals - how many operand values the operator accepts
    boolean  - operator takes a single 0/1 flag instead of field values
    title    - display name for the browsable API (defaults to name)
    """

    def __init__(self, name, orm_operator, description='', negate=False, min_vals=1, max_vals=1, boolean=False, title=None):
        self.name = name
        self.orm_operator = orm_operator
        self.description = description
        self.negate = negate
        self.min_vals = min_vals
        self.max_vals = max_vals
        self.boolean = boolean
        self.title = title or name

    def __unicode__(self):
        return self.name

    def __str__(self):
        return self.name

    def get_expected_value_description(self):
        """Describe, for error messages, what operand(s) this operator expects."""
        if self.boolean:
            return 'a single boolean value: 0 or 1'
        if self.max_vals == 1:
            return 'a single value'
        if self.min_vals == self.max_vals:
            return 'a list of exactly %s values' % self.max_vals
        return 'a list of %s-%s values' % (self.min_vals, self.max_vals)
def _get_filterable_fields(view):
    '''
    Get the list of filterable fields for the given view, or deduce them
    from the serializer fields.
    '''
    serializer = view.get_serializer()
    # The serializer may declare its filterable fields explicitly.
    if hasattr(serializer, 'get_filterable_fields'):
        return serializer.get_filterable_fields()
    # Otherwise autodetect: every readable field with a concrete source.
    detected = []
    for field_name, field in serializer.fields.items():
        if getattr(field, 'write_only', False) or field.source == '*':
            continue
        model_source = (field.source or field_name).replace('.', '__')
        detected.append(FilterableField(field_name, model_source, datatype=_get_field_type(field)))
    return detected
def _get_field_type(serializer_field):
    '''
    Determine the appropriate FilterableField type for the given serializer field.
    '''
    from rest_framework.fields import BooleanField, IntegerField, FloatField, DecimalField, DateTimeField
    from rest_framework.relations import PrimaryKeyRelatedField
    # Ordered most-specific first; first isinstance hit wins.
    type_map = (
        (BooleanField, FilterableField.BOOLEAN),
        ((IntegerField, PrimaryKeyRelatedField), FilterableField.INTEGER),
        ((FloatField, DecimalField), FilterableField.FLOAT),
        (DateTimeField, FilterableField.DATETIME),
    )
    for field_classes, datatype in type_map:
        if isinstance(serializer_field, field_classes):
            return datatype
    # Anything unrecognized is filtered as a plain string.
    return FilterableField.STRING
def _parse_array(expr):
'''
Parse an array expression such as "a,b" or "[1,2,3]"
'''
expr = expr.strip()
if (expr.startswith('(') and expr.endswith(')')) or expr.startswith('[') and expr.endswith(']'):
expr = expr[1:-1]
if not expr:
return []
return [x.strip() for x in expr.split(',')]
def _normalize_query(query_string,
findterms=re.compile(r'"([^"]+)"|(\S+)').findall,
normspace=re.compile(r'\s{2,}').sub):
'''
Splits the query string in invidual keywords, getting rid of unecessary spaces
and grouping quoted words together.
Example:
>>> normalize_query(' some random words "with quotes " and spaces')
['some', 'random', 'words', 'with quotes', 'and', 'spaces']
'''
return [normspace(' ', (t[0] or t[1]).strip()) for t in findterms(query_string)]
class InfinidatFilter(filters.BaseFilterBackend):
    '''
    Implements a filter backend that uses Infinidat's API syntax.
    The serializer in use can implement get_filterable_fields that
    returns a list of FilterableField instances. If not, the list
    will be generated automatically from the serializer fields.
    '''

    def get_filter_description(self, view, html):
        """Render the available-filters panel for the browsable (HTML) API only."""
        if not html:
            return None
        filterable_fields = _get_filterable_fields(view)
        if not filterable_fields:
            return None
        # Filters currently present in the query string, shown as active.
        active_filters = [(f.name, view.request.GET[f.name]) for f in filterable_fields if f.name in view.request.GET]
        context = dict(
            fields=filterable_fields,
            operators=self._get_operators(),
            active_filters=active_filters,
            url=view.request.build_absolute_uri(view.request.path)
        )
        return render_to_string('django_rest_utils/infinidat_filter.html', context)

    def filter_queryset(self, request, queryset, view):
        """BaseFilterBackend entry point: apply every non-ignored query param as a filter."""
        filterable_fields = _get_filterable_fields(view)
        ignored_fields = self._get_ignored_fields(view)
        for field_name in request.GET.keys():
            if field_name in ignored_fields:
                continue
            # Linear scan for the matching FilterableField by API name.
            field = None
            for f in filterable_fields:
                if field_name == f.name:
                    field = f
                    break
            if not field:
                names = [f.name for f in filterable_fields]
                raise ValidationError("Unknown filter field: '%s' (choices are %s)" % (field_name, ', '.join(names)))
            # A parameter may be repeated, e.g. ?size=ge:1&size=le:10 -- apply each.
            for expr in request.GET.getlist(field_name):
                queryset = self._apply_filter(queryset, field, expr)
        return queryset

    def _get_ignored_fields(self, view):
        # Views may override the ignore list via a `non_filtering_fields` attribute.
        return getattr(view, 'non_filtering_fields', DEFAULT_IGNORE)

    def _get_operators(self):
        """All supported API operators, mapped to Django ORM lookup names."""
        return [
            Operator('eq', 'exact', 'field = value'),
            Operator('ne', 'exact', 'field <> value', negate=True),
            Operator('lt', 'lt', 'field < value'),
            Operator('le', 'lte', 'field <= value'),
            Operator('gt', 'gt', 'field > value'),
            Operator('ge', 'gte', 'field >= value'),
            Operator('like', 'icontains', 'field contains a string (case insensitive)'),
            Operator('unlike', 'icontains', 'field does not contain a string (case insensitive)', negate=True),
            Operator('in', 'in', 'field is equal to one of the given values', max_vals=1000),
            Operator('out', 'in', 'field is not equal to any of the given values', negate=True, max_vals=1000),
            Operator('between', 'range', 'field is in a range of two values (inclusive)', min_vals=2, max_vals=2),
            Operator('isnull', 'isnull', 'field is null', boolean=True, max_vals=0, title='is null'),
            Operator('isnotnull', 'isnull', 'field is not null', boolean=True, max_vals=0, negate=True, title='is not null')
        ]

    def _apply_filter(self, queryset, field, expr):
        """Apply one "op:value" expression to the queryset (exclude() when negated)."""
        q, negate = self._build_q(field, expr)
        try:
            return queryset.exclude(q).distinct() if negate else queryset.filter(q).distinct()
        except (ValueError, DjangoValidationError, FieldError):
            raise ValidationError(field.name + ': the given operator or value are inappropriate for this field')

    def _build_q(self, field, expr):
        """Parse "op:value" (default op: eq) into a (Q object, negate) pair."""
        # Get operator and value
        operators = self._get_operators()
        if ':' in expr:
            opname, value = expr.split(':', 1)
            try:
                # Exactly one operator must match; unpacking enforces that.
                [operator] = [operator for operator in operators if operator.name == opname]
            except ValueError:
                raise ValidationError('{}: unknown operator "{}"'.format(field.name, opname))
        else:
            operator = operators[0] # eq operator is the default one
            value = expr
        # Build Q object
        if operator.max_vals > 1:
            vals = _parse_array(value)
            # Validate that the correct number of values is provided
            if len(vals) < operator.min_vals or len(vals) > operator.max_vals:
                raise ValidationError('{}: "{}" operator expects {}'.format(
                    field.name, operator.name, operator.get_expected_value_description()))
            q = field.build_q(operator.orm_operator, vals)
        else:
            if operator.boolean:
                # Boolean operators (isnull/isnotnull) take a 0/1 flag.
                try:
                    value = int(value)
                except ValueError:
                    raise ValidationError('{}: "{}" operator expects {}'.format(
                        field.name, operator.name, operator.get_expected_value_description()))
            q = field.build_q(operator.orm_operator, value)
        return (q, operator.negate)
class SimpleFilter(object):
def | |
"""
*
* Copyright 2010-2020 Australian Signals Directorate
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
"""
import os
import json
import zipfile
from os import path
from django.db.models import signals
from rest_framework import permissions, generics
from rest_framework.decorators import api_view
from rest_framework.response import Response
from app.models import AttribType, AttribTypeChoice, attrib_str_to_value
from app.models import Schema, SchemaAttribDefGraph, SchemaAttribDefVertex, SchemaAttribDefTrans
from app.models import Graph, GraphAttrib, GraphAttribDefGraph, GraphAttribDefVertex, GraphAttribDefTrans
from app.models import Vertex, VertexAttrib, Transaction, TransactionAttrib
from app.serializers import AttribTypeSerializer, SchemaSerializer
from app.serializers import SchemaAttribDefGraphSerializer, SchemaAttribDefVertexSerializer, SchemaAttribDefTransSerializer
from app.serializers import GraphSerializer, GraphJsonSerializer, GraphAttribSerializer
from app.serializers import GraphAttribDefGraphSerializer, GraphAttribDefVertexSerializer, GraphAttribDefTransSerializer
from app.serializers import VertexSerializer, VertexAttribSerializer
from app.serializers import TransactionSerializer, TransactionAttribSerializer
from app.serializers import GraphJsonVertexesSerializer, GraphJsonTransactionsSerializer
from websockets.consumers import *
# <editor-fold Constants">
# Keys used in POST request bodies to identify the target objects of
# attribute-editor operations.
ATTRIBUTE_EDITOR_KEY_GRAPH_ID = 'graph_id' # Key used in POST body to identify parent graph ID
ATTRIBUTE_EDITOR_KEY_VERTEX_ID = 'vx_id' # Key used in POST body to identify vertex ID (per graph)
ATTRIBUTE_EDITOR_KEY_TRANSACTION_ID = 'tx_id' # Key used in POST body to identify transaction ID (per graph)
# </editor-fold>
# Configuration of how many records (ie vertexes, transactions, vertex
# attributes, or transaction attributes) should be sent in a hit to the
# database for creation.
IMPORT_BATCH_SIZE = 2000
# <editor-fold Common functions">
class InvalidTypeException(Exception):
    """Raised when a helper receives an object of an unexpected model type."""
def __update_graph_attribute(graph_attribute, value):
    """
    Update supplied graph attribute to the given value. Changes will also be
    reflected back into enclosing graph JSON.

    The graph is saved with its post_save handler muted so listeners are not
    notified of this internal bookkeeping save. Fix: the handler is now
    reconnected in a ``finally`` block, so a failing ``save()`` can no longer
    leave the signal permanently disconnected.

    :param graph_attribute: Object to update
    :param value: Value to use
    :raises InvalidTypeException: if graph_attribute is not a GraphAttrib
    """
    if not isinstance(graph_attribute, GraphAttrib):
        raise InvalidTypeException("Supplied object is not GraphAttrib")
    # NOTE(review): the vertex/transaction variants read `type_fk.type`, this
    # one reads `type_fk.raw_type` -- confirm the difference is intentional.
    attrib_type = graph_attribute.attrib_fk.type_fk.raw_type
    # Dict values are serialized to JSON; everything else is stringified.
    if attrib_type == AttribTypeChoice.DICT:
        graph_attribute.value_str = json.dumps(value)
    else:
        graph_attribute.value_str = str(value)
    graph_attribute.save()
    # Mirror the change into the parent graph's cached attribute JSON.
    graph = graph_attribute.graph_fk
    graph_attribute_json = json.loads(graph.attribute_json)
    graph_attribute_json[graph_attribute.attrib_fk.label] = \
        attrib_str_to_value(attrib_type, value)
    graph.attribute_json = json.dumps(graph_attribute_json)
    # Mute the post_save handler around the save; always reconnect it.
    signals.post_save.disconnect(graph_saved, sender=Graph)
    try:
        graph.save()
    finally:
        signals.post_save.connect(graph_saved, sender=Graph)
def __update_vertex_attribute(vertex_attribute, value):
    """
    Update supplied vertex attribute to the given value. Changes will also be
    reflected back into enclosing vertex JSON.
    :param vertex_attribute: Object to update
    :param value: Value to use
    :raises InvalidTypeException: if vertex_attribute is not a VertexAttrib
    """
    if not isinstance(vertex_attribute, VertexAttrib):
        raise InvalidTypeException("Supplied object is not VertexAttrib")
    # NOTE(review): the graph variant reads `type_fk.raw_type`; this reads
    # `type_fk.type` -- confirm which field is intended.
    attrib_type = vertex_attribute.attrib_fk.type_fk.type
    # Dict values are serialized to JSON; everything else is stringified.
    if attrib_type == AttribTypeChoice.DICT:
        vertex_attribute.value_str = json.dumps(value)
    else:
        vertex_attribute.value_str = str(value)
    vertex_attribute.save()
    # Mirror the change into the parent vertex's cached attribute JSON.
    vertex = vertex_attribute.vertex_fk
    vertex_attribute_json = json.loads(vertex.attribute_json)
    vertex_attribute_json[vertex_attribute.attrib_fk.label] = \
        attrib_str_to_value(attrib_type, value)
    vertex.attribute_json = json.dumps(vertex_attribute_json)
    # NOTE(review): unlike the graph variant, post_save signals are NOT muted
    # here, so this save will notify connected listeners -- confirm intended.
    vertex.save()
def __update_transaction_attribute(transaction_attribute, value):
    """
    Update supplied transaction attribute to the given value. Changes will also
    be reflected back into enclosing transaction JSON.
    :param transaction_attribute: Object to update
    :param value: Value to use
    :raises InvalidTypeException: if transaction_attribute is not a TransactionAttrib
    """
    if not isinstance(transaction_attribute, TransactionAttrib):
        raise InvalidTypeException("Supplied object is not TransactionAttrib")
    attrib_type = transaction_attribute.attrib_fk.type_fk.type
    # Dict values are serialized to JSON; everything else is stringified.
    if attrib_type == AttribTypeChoice.DICT:
        transaction_attribute.value_str = json.dumps(value)
    else:
        transaction_attribute.value_str = str(value)
    transaction_attribute.save()
    # Mirror the change into the parent transaction's cached attribute JSON.
    transaction = transaction_attribute.transaction_fk
    transaction_attribute_json = json.loads(transaction.attribute_json)
    transaction_attribute_json[transaction_attribute.attrib_fk.label] = \
        attrib_str_to_value(attrib_type, value)
    transaction.attribute_json = json.dumps(transaction_attribute_json)
    # NOTE(review): post_save signals are not muted here (unlike the graph
    # variant), so listeners will be notified of this save -- confirm intended.
    transaction.save()
# </editor-fold>
# <editor-fold AttribType Views">
class AttribTypesView(generics.ListCreateAPIView):
    """
    Support Create and List operations of custom application attribute types
    (AttribType) used to define the data types of attributes.
    """
    queryset = AttribType.objects.all()
    serializer_class = AttribTypeSerializer


class AttribTypeView(generics.RetrieveUpdateDestroyAPIView):
    """
    Support Read, Update, and Destroy operations of custom application
    attribute types (AttribType) used to define the data types of attributes.
    """
    queryset = AttribType.objects.all()
    serializer_class = AttribTypeSerializer
# </editor-fold>
# <editor-fold Attribute definition hierarchy Views">
class SchemaAttribDefGraphsView(generics.ListCreateAPIView):
    """
    Support Create and List operations of Graph attributes to be defined in a
    Schema.
    """
    queryset = SchemaAttribDefGraph.objects.all()
    serializer_class = SchemaAttribDefGraphSerializer


class SchemaAttribDefGraphView(generics.RetrieveUpdateDestroyAPIView):
    """
    Support Read, Update, and Destroy operations of Graph attributes to be
    defined in a Schema.
    """
    queryset = SchemaAttribDefGraph.objects.all()
    serializer_class = SchemaAttribDefGraphSerializer


class SchemaAttribDefVertexesView(generics.ListCreateAPIView):
    """
    Support Create and List operations of Vertex attributes to be defined in a
    Schema.
    """
    queryset = SchemaAttribDefVertex.objects.all()
    serializer_class = SchemaAttribDefVertexSerializer


class SchemaAttribDefVertexView(generics.RetrieveUpdateDestroyAPIView):
    """
    Support Read, Update, and Destroy operations of Vertex attributes to be
    defined in a Schema.
    """
    queryset = SchemaAttribDefVertex.objects.all()
    serializer_class = SchemaAttribDefVertexSerializer


class SchemaAttribDefTransactionsView(generics.ListCreateAPIView):
    """
    Support Create and List operations of Transaction attributes to be defined
    in a Schema.
    """
    queryset = SchemaAttribDefTrans.objects.all()
    serializer_class = SchemaAttribDefTransSerializer


class SchemaAttribDefTransactionView(generics.RetrieveUpdateDestroyAPIView):
    """
    Support Read, Update, and Destroy operations of Transaction attributes to
    be defined in a Schema.
    """
    queryset = SchemaAttribDefTrans.objects.all()
    serializer_class = SchemaAttribDefTransSerializer
class GraphAttribDefGraphsView(generics.ListCreateAPIView):
    """
    Support Create and List operations of Graph attributes to be tied to a
    specific Graph.
    """
    queryset = GraphAttribDefGraph.objects.all()
    serializer_class = GraphAttribDefGraphSerializer


class GraphAttribDefGraphView(generics.RetrieveUpdateDestroyAPIView):
    """
    Support Read, Update, and Destroy operations of Graph attributes to be
    tied to a specific Graph.
    """
    queryset = GraphAttribDefGraph.objects.all()
    serializer_class = GraphAttribDefGraphSerializer


class GraphAttribDefVertexesView(generics.ListCreateAPIView):
    """
    Support Create and List operations of Vertex attributes to be tied to a
    specific Graph.
    """
    queryset = GraphAttribDefVertex.objects.all()
    serializer_class = GraphAttribDefVertexSerializer


class GraphAttribDefVertexView(generics.RetrieveUpdateDestroyAPIView):
    """
    Support Read, Update, and Destroy operations of Vertex attributes to be
    tied to a specific Graph.
    """
    queryset = GraphAttribDefVertex.objects.all()
    serializer_class = GraphAttribDefVertexSerializer


class GraphAttribDefTransactionsView(generics.ListCreateAPIView):
    """
    Support Create and List operations of Transaction attributes to be tied
    to a specific Graph.
    """
    queryset = GraphAttribDefTrans.objects.all()
    serializer_class = GraphAttribDefTransSerializer


class GraphAttribDefTransactionView(generics.RetrieveUpdateDestroyAPIView):
    """
    Support Read, Update, and Destroy operations of Transaction attributes to
    be tied to a specific Graph.
    """
    queryset = GraphAttribDefTrans.objects.all()
    serializer_class = GraphAttribDefTransSerializer
# </editor-fold>
# <editor-fold Schema Views">
class SchemasView(generics.ListCreateAPIView):
    """
    Support Create and List operations of graph schemas (Schemas) used to
    define categorized styling for graph information.
    """
    queryset = Schema.objects.all()
    serializer_class = SchemaSerializer


class SchemaView(generics.RetrieveUpdateDestroyAPIView):
    """
    Support Read, Update, and Destroy operations of graph schemas (Schemas)
    used to define categorized styling for graph information.
    """
    queryset = Schema.objects.all()
    serializer_class = SchemaSerializer

    def perform_destroy(self, instance):
        """
        A schema is being deleted; report only this deletion, and not that of
        its sub components (the attribute definitions cascaded with it).
        """
        # Temporarily mute the per-attribute-definition delete signals so the
        # cascade delete of the schema's attribute defs is not broadcast.
        # NOTE(review): if instance.delete() raises, the handlers remain
        # disconnected -- consider wrapping the reconnects in try/finally.
        signals.post_delete.disconnect(schema_attribute_def_graph_deleted, sender=SchemaAttribDefGraph)
        signals.post_delete.disconnect(schema_attribute_def_vertex_deleted, sender=SchemaAttribDefVertex)
        signals.post_delete.disconnect(schema_attribute_def_transaction_deleted, sender=SchemaAttribDefTrans)
        instance.delete()
        signals.post_delete.connect(schema_attribute_def_graph_deleted, sender=SchemaAttribDefGraph)
        signals.post_delete.connect(schema_attribute_def_vertex_deleted, sender=SchemaAttribDefVertex)
        signals.post_delete.connect(schema_attribute_def_transaction_deleted, sender=SchemaAttribDefTrans)
# </editor-fold>
# <editor-fold desc="Graph and GraphAttrib views">
class GraphsView(generics.ListCreateAPIView):
    """
    Support Create and List operations of Graph objects. The Create operation
    will populate definitions of attributes to be used within the graph at
    graph, vertex, and transaction levels based on the graphs associated
    Schema.
    """
    queryset = Graph.objects.all()
    serializer_class = GraphSerializer

    @staticmethod
    def _copy_schema_attribs(schema_attribs, graph_record, attrib_model, saved_handler):
        """
        Copy schema attribute definitions onto a newly created graph.
        :param schema_attribs: queryset of Schema*AttribDef rows to copy
        :param graph_record: the Graph the copies are attached to
        :param attrib_model: GraphAttribDef* model class to instantiate
        :param saved_handler: post_save handler silenced during the copy
        """
        # Silence the per-row post_save signal during the bulk copy; the
        # try/finally guarantees the handler is reconnected even if a save
        # raises (the original left it permanently disconnected on error).
        signals.post_save.disconnect(saved_handler, sender=attrib_model)
        try:
            for schema_attrib in schema_attribs:
                attrib_model(
                    graph_fk=graph_record,
                    label=schema_attrib.label,
                    type_fk=schema_attrib.type_fk,
                    descr=schema_attrib.descr,
                    default_str=schema_attrib.default_str).save()
        finally:
            signals.post_save.connect(saved_handler, sender=attrib_model)

    def create(self, request, *args, **kwargs):
        """
        Override default create operation to ensure any default attributes for
        Graph, Vertex, and Transaction are added based on the associated Graph
        Schema object.
        :param request: The request payload
        :param args: Request arguments
        :param kwargs: Request kwargs
        :return: Created Graph object
        """
        # TODO: Does every graph need to be created with a defined Schema ?
        # BUG FIX: use .get() so a payload without 'schema_fk' no longer
        # raises KeyError; a missing or blank value falls back to 0.
        schema_id = request.data.get('schema_fk') or 0
        graph = super(GraphsView, self).create(request, *args, **kwargs)
        # NOTE(review): looking the new record up by title assumes titles are
        # unique; .last() grabs the newest match -- confirm this is safe.
        graph_record = Graph.objects.filter(title=request.data['title']).last()
        # Copy the schema's default attribute definitions at all three levels.
        self._copy_schema_attribs(
            SchemaAttribDefGraph.objects.filter(schema_fk=schema_id),
            graph_record, GraphAttribDefGraph, graph_attribute_def_graph_saved)
        self._copy_schema_attribs(
            SchemaAttribDefVertex.objects.filter(schema_fk=schema_id),
            graph_record, GraphAttribDefVertex, graph_attribute_def_vertex_saved)
        self._copy_schema_attribs(
            SchemaAttribDefTrans.objects.filter(schema_fk=schema_id),
            graph_record, GraphAttribDefTrans, graph_attribute_def_transaction_saved)
        return graph
class GraphView(generics.RetrieveUpdateDestroyAPIView):
    """
    Support Read, Update, and Destroy operations of Graph objects.
    """
    # TODO prevent update of Schema ?
    queryset = Graph.objects.all()
    serializer_class = GraphSerializer
    def perform_destroy(self, instance):
        """
        A graph is being deleted; report only this deletion, not that of the
        sub components (attribute definitions, vertexes, transactions)
        cascaded with it.
        """
        # Temporarily silence the cascade's post_delete handlers so only the
        # graph deletion itself is reported.
        signals.post_delete.disconnect(graph_attribute_def_graph_deleted, sender=GraphAttribDefGraph)
        signals.post_delete.disconnect(graph_attribute_def_vertex_deleted, sender=GraphAttribDefVertex)
        signals.post_delete.disconnect(graph_attribute_def_transaction_deleted, sender=GraphAttribDefTrans)
        signals.post_delete.disconnect(vertex_deleted, sender=Vertex)
        signals.post_delete.disconnect(transaction_deleted, sender=Transaction)
        instance.delete()
        # NOTE(review): if delete() raises, the handlers stay disconnected --
        # consider wrapping the reconnects in try/finally.
        signals.post_delete.connect(graph_attribute_def_graph_deleted, sender=GraphAttribDefGraph)
        signals.post_delete.connect(graph_attribute_def_vertex_deleted, sender=GraphAttribDefVertex)
        signals.post_delete.connect(graph_attribute_def_transaction_deleted, sender=GraphAttribDefTrans)
        signals.post_delete.connect(vertex_deleted, sender=Vertex)
        signals.post_delete.connect(transaction_deleted, sender=Transaction)
class GraphAttributesView(generics.ListCreateAPIView):
    """
    Manage GraphAttrib Create and List operations. Read, Update, and Destroy
    operations are handled by the GraphAttributeView detail view.
    """
    queryset = GraphAttrib.objects.all()
    serializer_class = GraphAttribSerializer
class GraphAttributeView(generics.RetrieveUpdateDestroyAPIView):
"""
Manage GraphAttrib Read, Update, Destroy operations. Create and List operations are managed in GraphAttribList.
"""
queryset = GraphAttrib.objects.all()
serializer_class = GraphAttribSerializer
def perform_destroy(self, instance):
"""
An attribute is being deleted from a graph. The parent graphs cached json needs to be updated to reflect it.
:param instance: The attribute being deleted
"""
# Get | |
[6.40000000e-02, 6.40000000e-02, 6.03256858e+08, 6.03256858e+08],
[1.30000000e-01, 1.30000000e-01, 1.66572918e+09, 1.66572918e+09]]
target_total_share_000039 = [[3.5950140e+09, 4.8005360e+09, 2.1573660e+10, 3.5823430e+09],
[3.5860750e+09, 4.8402300e+09, 2.0750827e+10, 3.5823430e+09],
[3.5860750e+09, 4.9053550e+09, 2.0791307e+10, 3.5823430e+09],
[3.5845040e+09, 4.8813110e+09, 2.1482857e+10, 3.5823430e+09],
[3.5831490e+09, 4.9764250e+09, 2.0926816e+10, 3.2825850e+09],
[3.5825310e+09, 4.8501270e+09, 2.1020418e+10, 3.2825850e+09],
[2.9851110e+09, 5.4241420e+09, 2.2438350e+10, 3.2825850e+09],
[2.9849890e+09, 4.1284000e+09, 2.2082769e+10, 3.2825850e+09],
[2.9849610e+09, 4.0838010e+09, 2.1045994e+10, 3.2815350e+09],
[2.9849560e+09, 4.2491510e+09, 1.9694345e+10, 3.2815350e+09],
[2.9846970e+09, 4.2351600e+09, 2.0016361e+10, 3.2815350e+09],
[2.9828890e+09, 4.2096630e+09, 1.9734494e+10, 3.2815350e+09],
[2.9813960e+09, 3.4564240e+09, 1.8562738e+10, 3.2793790e+09],
[2.9803530e+09, 3.0759650e+09, 1.8076208e+10, 3.2793790e+09],
[2.9792680e+09, 3.1376690e+09, 1.7994776e+10, 3.2793790e+09],
[2.9785770e+09, 3.1265850e+09, 1.7495053e+10, 3.2793790e+09],
[2.9783640e+09, 3.1343850e+09, 1.6740840e+10, 3.2035780e+09],
[2.9783590e+09, 3.1273880e+09, 1.6578389e+10, 3.2035780e+09],
[2.9782780e+09, 3.1169280e+09, 1.8047639e+10, 3.2035780e+09],
[2.9778200e+09, 3.1818630e+09, 1.7663145e+10, 3.2035780e+09]]
target_total_share_600748 = [[1.84456289e+09, 2.60058426e+09, 5.72443733e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.72096899e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.65738237e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.50257806e+09, 4.58026529e+08],
[1.84456289e+09, 2.59868164e+09, 5.16741523e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 5.14677280e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 4.94955591e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 4.79001451e+09, 4.44998882e+08],
[1.84456289e+09, 3.11401684e+09, 4.46326988e+09, 4.01064256e+08],
[1.84456289e+09, 3.11596723e+09, 4.45419136e+09, 4.01064256e+08],
[1.84456289e+09, 3.11596723e+09, 4.39652948e+09, 4.01064256e+08],
[1.84456289e+09, 3.18007783e+09, 4.26608403e+09, 4.01064256e+08],
[1.84456289e+09, 3.10935622e+09, 3.78417688e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.65806574e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.62063090e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.50063915e+09, 3.65651701e+08],
[1.41889453e+09, 3.55940850e+09, 3.22272993e+09, 3.62124939e+08],
[1.41889453e+09, 3.56129650e+09, 3.11477476e+09, 3.62124939e+08],
[1.41889453e+09, 3.59632888e+09, 3.06836903e+09, 3.62124939e+08],
[1.08337087e+09, 3.37400726e+07, 3.00918704e+09, 3.62124939e+08]]
target_total_share_000040 = [[1.48687387e+09, 1.06757900e+10, 8.31900755e+08, 2.16091994e+08],
[1.48687387e+09, 1.06757900e+10, 7.50177302e+08, 2.16091994e+08],
[1.48687387e+09, 1.06757899e+10, 9.90255974e+08, 2.16123282e+08],
[1.48687387e+09, 1.06757899e+10, 1.03109866e+09, 2.16091994e+08],
[1.48687387e+09, 1.06757910e+10, 2.07704745e+09, 2.16123282e+08],
[1.48687387e+09, 1.06757910e+10, 2.09608665e+09, 2.16123282e+08],
[1.48687387e+09, 1.06803833e+10, 2.13354083e+09, 2.16123282e+08],
[1.48687387e+09, 1.06804090e+10, 2.11489364e+09, 2.16123282e+08],
[1.33717327e+09, 8.87361727e+09, 2.42939924e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 2.34220254e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 2.16390368e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 1.07961915e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 8.58866066e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 6.87024393e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 5.71554565e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 5.54241222e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361726e+09, 5.10059576e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361726e+09, 4.59351639e+08, 1.88489589e+08],
[4.69593364e+08, 2.78355875e+08, 4.13430814e+08, 1.88489589e+08],
[4.69593364e+08, 2.74235459e+08, 3.83557678e+08, 1.88489589e+08]]
target_net_profit_000039 = [[np.nan],
[2.422180e+08],
[np.nan],
[2.510113e+09],
[np.nan],
[1.102220e+09],
[np.nan],
[4.068455e+09],
[np.nan],
[1.315957e+09],
[np.nan],
[3.158415e+09],
[np.nan],
[1.066509e+09],
[np.nan],
[7.349830e+08],
[np.nan],
[-5.411600e+08],
[np.nan],
[2.271961e+09]]
target_net_profit_600748 = [[np.nan],
[4.54341757e+08],
[np.nan],
[9.14476670e+08],
[np.nan],
[5.25360283e+08],
[np.nan],
[9.24502415e+08],
[np.nan],
[4.66560302e+08],
[np.nan],
[9.15265285e+08],
[np.nan],
[2.14639674e+08],
[np.nan],
[7.45093049e+08],
[np.nan],
[2.10967312e+08],
[np.nan],
[6.04572711e+08]]
target_net_profit_000040 = [[np.nan],
[-2.82458846e+08],
[np.nan],
[-9.57130872e+08],
[np.nan],
[9.22114527e+07],
[np.nan],
[1.12643819e+09],
[np.nan],
[1.31715269e+09],
[np.nan],
[5.39940093e+08],
[np.nan],
[1.51440838e+08],
[np.nan],
[1.75339071e+08],
[np.nan],
[8.04740415e+07],
[np.nan],
[6.20445815e+07]]
print('test get financial data, in multi thread mode')
df_list = get_financial_report_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, parallel=4)
self.assertIsInstance(df_list, tuple)
self.assertEqual(len(df_list), 4)
self.assertEqual(len(df_list[0]), 3)
self.assertEqual(len(df_list[1]), 3)
self.assertEqual(len(df_list[2]), 3)
self.assertEqual(len(df_list[3]), 3)
# 检查确认所有数据类型正确
self.assertTrue(all(isinstance(item, pd.DataFrame) for subdict in df_list for item in subdict.values()))
# 检查是否有空数据
print(all(item.empty for subdict in df_list for item in subdict.values()))
# 检查获取的每组数据正确,且所有数据的顺序一致, 如果取到空数据,则忽略
if df_list[0]['000039.SZ'].empty:
print(f'income data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000039.SZ'].values, target_basic_eps_000039))
if df_list[0]['600748.SH'].empty:
print(f'income data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['600748.SH'].values, target_basic_eps_600748))
if df_list[0]['000040.SZ'].empty:
print(f'income data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000040.SZ'].values, target_basic_eps_000040))
if df_list[1]['000039.SZ'].empty:
print(f'indicator data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000039.SZ'].values, target_eps_000039))
if df_list[1]['600748.SH'].empty:
print(f'indicator data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['600748.SH'].values, target_eps_600748))
if df_list[1]['000040.SZ'].empty:
print(f'indicator data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000040.SZ'].values, target_eps_000040))
if df_list[2]['000039.SZ'].empty:
print(f'balance data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000039.SZ'].values, target_total_share_000039))
if df_list[2]['600748.SH'].empty:
print(f'balance data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['600748.SH'].values, target_total_share_600748))
if df_list[2]['000040.SZ'].empty:
print(f'balance data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000040.SZ'].values, target_total_share_000040))
if df_list[3]['000039.SZ'].empty:
print(f'cash flow data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000039.SZ'].values, target_net_profit_000039, equal_nan=True))
if df_list[3]['600748.SH'].empty:
print(f'cash flow data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['600748.SH'].values, target_net_profit_600748, equal_nan=True))
if df_list[3]['000040.SZ'].empty:
print(f'cash flow data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000040.SZ'].values, target_net_profit_000040, equal_nan=True))
print('test get financial data, in single thread mode')
df_list = get_financial_report_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, parallel=0)
self.assertIsInstance(df_list, tuple)
self.assertEqual(len(df_list), 4)
self.assertEqual(len(df_list[0]), 3)
self.assertEqual(len(df_list[1]), 3)
self.assertEqual(len(df_list[2]), 3)
self.assertEqual(len(df_list[3]), 3)
# 检查确认所有数据类型正确
self.assertTrue(all(isinstance(item, pd.DataFrame) for subdict in df_list for item in subdict.values()))
# 检查是否有空数据,因为网络问题,有可能会取到空数据
self.assertFalse(all(item.empty for subdict in df_list for item in subdict.values()))
# 检查获取的每组数据正确,且所有数据的顺序一致, 如果取到空数据,则忽略
if df_list[0]['000039.SZ'].empty:
print(f'income data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000039.SZ'].values, target_basic_eps_000039))
if df_list[0]['600748.SH'].empty:
print(f'income data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['600748.SH'].values, target_basic_eps_600748))
if df_list[0]['000040.SZ'].empty:
print(f'income data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000040.SZ'].values, target_basic_eps_000040))
if df_list[1]['000039.SZ'].empty:
print(f'indicator data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000039.SZ'].values, target_eps_000039))
if df_list[1]['600748.SH'].empty:
print(f'indicator data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['600748.SH'].values, target_eps_600748))
if df_list[1]['000040.SZ'].empty:
print(f'indicator data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000040.SZ'].values, target_eps_000040))
if df_list[2]['000039.SZ'].empty:
print(f'balance data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000039.SZ'].values, target_total_share_000039))
if df_list[2]['600748.SH'].empty:
print(f'balance data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['600748.SH'].values, target_total_share_600748))
if df_list[2]['000040.SZ'].empty:
print(f'balance data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000040.SZ'].values, target_total_share_000040))
if df_list[3]['000039.SZ'].empty:
print(f'cash flow data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000039.SZ'].values, target_net_profit_000039, equal_nan=True))
if df_list[3]['600748.SH'].empty:
print(f'cash flow data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['600748.SH'].values, target_net_profit_600748, equal_nan=True))
if df_list[3]['000040.SZ'].empty:
print(f'cash flow data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000040.SZ'].values, target_net_profit_000040, equal_nan=True))
    def test_get_composite_type_raw_data(self):
        # TODO: composite-type raw data retrieval is not covered yet.
        pass
class TestUtilityFuncs(unittest.TestCase):
    def setUp(self):
        # No shared fixtures are needed for these utility-function tests.
        pass
def test_time_string_format(self):
print('Testing qt.time_string_format() function:')
t = 3.14
self.assertEqual(time_str_format(t), '3s 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '3s ')
self.assertEqual(time_str_format(t, short_form=True), '3"140')
self.assertEqual(time_str_format(t, estimation=True, short_form=True), '3"')
t = 300.14
self.assertEqual(time_str_format(t), '5min 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '5min ')
self.assertEqual(time_str_format(t, short_form=True), "5'140")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "5'")
t = 7435.0014
self.assertEqual(time_str_format(t), '2hrs 3min 55s 1.4ms')
self.assertEqual(time_str_format(t, estimation=True), '2hrs ')
self.assertEqual(time_str_format(t, short_form=True), "2H3'55\"001")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "2H")
t = 88425.0509
self.assertEqual(time_str_format(t), '1days 33min 45s 50.9ms')
self.assertEqual(time_str_format(t, estimation=True), '1days ')
self.assertEqual(time_str_format(t, short_form=True), "1D33'45\"051")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "1D")
def test_str_to_list(self):
self.assertEqual(str_to_list('a,b,c,d,e'), ['a', 'b', 'c', 'd', 'e'])
self.assertEqual(str_to_list('a, b, c '), ['a', 'b', 'c'])
self.assertEqual(str_to_list('a, b: c', sep_char=':'), ['a,b', 'c'])
self.assertEqual(str_to_list('abc'), ['abc'])
self.assertEqual(str_to_list(''), [])
self.assertRaises(AssertionError, str_to_list, 123)
    def test_list_or_slice(self):
        """list_or_slice() normalizes str/int/bool/list selectors against a
        name->index mapping; genuine slices pass through unchanged."""
        str_dict = {'close': 0, 'open': 1, 'high': 2, 'low': 3}
        self.assertEqual(list_or_slice(slice(1, 2, 1), str_dict), slice(1, 2, 1))
        self.assertEqual(list_or_slice('open', str_dict), [1])
        self.assertEqual(list(list_or_slice('close, high, low', str_dict)), [0, 2, 3])
        # 'a:b' selects the inclusive range of positions between the two names
        self.assertEqual(list(list_or_slice('close:high', str_dict)), [0, 1, 2])
        self.assertEqual(list(list_or_slice(['open'], str_dict)), [1])
        self.assertEqual(list(list_or_slice(['open', 'high'], str_dict)), [1, 2])
        self.assertEqual(list(list_or_slice(0, str_dict)), [0])
        self.assertEqual(list(list_or_slice([0, 2], str_dict)), [0, 2])
        # boolean masks pick the positions where the mask is True
        self.assertEqual(list(list_or_slice([True, False, True, False], str_dict)), [0, 2])
def test_labels_to_dict(self):
target_list = [0, 1, 10, 100]
target_dict = {'close': 0, 'open': 1, 'high': 2, 'low': 3}
target_dict2 = {'close': 0, 'open': 2, 'high': 1, 'low': 3}
self.assertEqual(labels_to_dict('close, open, high, low', target_list), target_dict)
self.assertEqual(labels_to_dict(['close', 'open', 'high', 'low'], target_list), target_dict)
self.assertEqual(labels_to_dict('close, high, open, low', target_list), target_dict2)
self.assertEqual(labels_to_dict(['close', 'high', 'open', 'low'], target_list), target_dict2)
    def test_input_to_list(self):
        """ test util function input_to_list()"""
        # scalar input: replicated to the requested length, pad value unused
        self.assertEqual(input_to_list(5, 3), [5, 5, 5])
        self.assertEqual(input_to_list(5, 3, 0), [5, 5, 5])
        # list input: padded out to the requested length with the pad value
        self.assertEqual(input_to_list([5], 3, 0), [5, 0, 0])
        self.assertEqual(input_to_list([5, 4], 3, 0), [5, 4, 0])
    def test_regulate_date_format(self):
        """regulate_date_format() canonicalizes date strings, datetimes and
        Timestamps to the 'YYYYMMDD' form; unparseable input raises."""
        self.assertEqual(regulate_date_format('2019/11/06'), '20191106')
        self.assertEqual(regulate_date_format('2019-11-06'), '20191106')
        self.assertEqual(regulate_date_format('20191106'), '20191106')
        # NOTE(review): six-digit input appears to be read day-first
        # ('191106' -> 19 Nov 2006) -- confirm this is intentional.
        self.assertEqual(regulate_date_format('191106'), '20061119')
        self.assertEqual(regulate_date_format('830522'), '19830522')
        self.assertEqual(regulate_date_format(datetime.datetime(2010, 3, 15)), '20100315')
        self.assertEqual(regulate_date_format(pd.Timestamp('2010.03.15')), '20100315')
        self.assertRaises(ValueError, regulate_date_format, 'abc')
        self.assertRaises(ValueError, regulate_date_format, '2019/13/43')
    def test_list_to_str_format(self):
        """list_to_str_format() joins items with commas, stripping whitespace
        inside each item; plain strings are normalized the same way."""
        self.assertEqual(list_to_str_format(['close', 'open', 'high', 'low']),
                         'close,open,high,low')
        # non-string items and embedded spaces are flattened, blanks kept as ''
        self.assertEqual(list_to_str_format(['letters', ' ', '123 4', 123, ' kk l']),
                         'letters,,1234,kkl')
        self.assertEqual(list_to_str_format('a string input'),
                         'a,string,input')
        self.assertEqual(list_to_str_format('already,a,good,string'),
                         'already,a,good,string')
        self.assertRaises(AssertionError, list_to_str_format, 123)
    def test_is_trade_day(self):
        """Check that maybe_trade_day() and is_market_trade_day() work properly.

        maybe_trade_day() is a quick screen that can report True for dates
        that merely look like trade days, while is_market_trade_day() also
        rejects those, honors its supported date range, and supports a
        per-exchange calendar argument.
        """
        date_trade = '20210401'
        date_holiday = '20210102'
        date_weekend = '20210424'
        date_seems_trade_day = '20210217'
        date_too_early = '19890601'
        date_too_late = '20230105'
        date_christmas = '20201225'
        # quick screen: only weekends and known holidays fail
        self.assertTrue(maybe_trade_day(date_trade))
        self.assertFalse(maybe_trade_day(date_holiday))
        self.assertFalse(maybe_trade_day(date_weekend))
        self.assertTrue(maybe_trade_day(date_seems_trade_day))
        self.assertTrue(maybe_trade_day(date_too_early))
        self.assertTrue(maybe_trade_day(date_too_late))
        self.assertTrue(maybe_trade_day(date_christmas))
        # calendar check: seems-like-a-trade-day and out-of-range dates fail
        self.assertTrue(is_market_trade_day(date_trade))
        self.assertFalse(is_market_trade_day(date_holiday))
        self.assertFalse(is_market_trade_day(date_weekend))
        self.assertFalse(is_market_trade_day(date_seems_trade_day))
        self.assertFalse(is_market_trade_day(date_too_early))
        self.assertFalse(is_market_trade_day(date_too_late))
        # Christmas trades on the default calendar but not on XHKG
        self.assertTrue(is_market_trade_day(date_christmas))
        self.assertFalse(is_market_trade_day(date_christmas, exchange='XHKG'))
        # Timestamp inputs are accepted as well as strings
        date_trade = pd.to_datetime('20210401')
        date_holiday = pd.to_datetime('20210102')
        date_weekend = pd.to_datetime('20210424')
        self.assertTrue(maybe_trade_day(date_trade))
        self.assertFalse(maybe_trade_day(date_holiday))
        self.assertFalse(maybe_trade_day(date_weekend))
def test_weekday_name(self):
""" test util func weekday_name()"""
self.assertEqual(weekday_name(0), 'Monday')
self.assertEqual(weekday_name(1), 'Tuesday')
self.assertEqual(weekday_name(2), 'Wednesday')
self.assertEqual(weekday_name(3), 'Thursday')
self.assertEqual(weekday_name(4), 'Friday')
self.assertEqual(weekday_name(5), 'Saturday')
self.assertEqual(weekday_name(6), 'Sunday')
def test_list_truncate(self):
""" test util func list_truncate()"""
l = [1,2,3,4,5]
ls = list_truncate(l, 2)
self.assertEqual(ls[0], [1, 2])
self.assertEqual(ls[1], [3, 4])
self.assertEqual(ls[2], [5])
self.assertRaises(AssertionError, list_truncate, l, 0)
self.assertRaises(AssertionError, list_truncate, 12, 0)
self.assertRaises(AssertionError, list_truncate, 0, l)
    def test_maybe_trade_day(self):
        """ test util function maybe_trade_day()"""
        # accepts YYYYMMDD, dashed/slashed strings and Timestamps alike
        self.assertTrue(maybe_trade_day('20220104'))
        self.assertTrue(maybe_trade_day('2021-12-31'))
        self.assertTrue(maybe_trade_day(pd.to_datetime('2020/03/06')))
        # weekends / known holidays are screened out
        self.assertFalse(maybe_trade_day('2020-01-01'))
        self.assertFalse(maybe_trade_day('2020/10/06'))
        # unparseable input raises TypeError
        self.assertRaises(TypeError, maybe_trade_day, 'aaa')
    def test_prev_trade_day(self):
        """prev_trade_day() rolls a date back to the nearest earlier trade
        day, returning the date itself when it already is one (or falls
        outside the supported calendar range)."""
        date_trade = '20210401'
        date_holiday = '20210102'
        prev_holiday = pd.to_datetime(date_holiday) - pd.Timedelta(2, 'd')
        date_weekend = '20210424'
        prev_weekend = pd.to_datetime(date_weekend) - pd.Timedelta(1, 'd')
        date_seems_trade_day = '20210217'
        # NOTE(review): prev_trade_day() appears to treat this date as a
        # trade day even though the market is closed -- confirm intended.
        prev_seems_trade_day = '20210217'
        date_too_early = '19890601'
        date_too_late = '20230105'
        date_christmas = '20201225'
        self.assertEqual(pd.to_datetime(prev_trade_day(date_trade)),
                         pd.to_datetime(date_trade))
        self.assertEqual(pd.to_datetime(prev_trade_day(date_holiday)),
                         pd.to_datetime(prev_holiday))
        self.assertEqual(pd.to_datetime(prev_trade_day(date_weekend)),
                         pd.to_datetime(prev_weekend))
        self.assertEqual(pd.to_datetime(prev_trade_day(date_seems_trade_day)),
                         pd.to_datetime(prev_seems_trade_day))
        self.assertEqual(pd.to_datetime(prev_trade_day(date_too_early)),
                         pd.to_datetime(date_too_early))
        self.assertEqual(pd.to_datetime(prev_trade_day(date_too_late)),
                         pd.to_datetime(date_too_late))
        self.assertEqual(pd.to_datetime(prev_trade_day(date_christmas)),
                         pd.to_datetime(date_christmas))
    def test_next_trade_day(self):
        """next_trade_day() rolls a date forward to the nearest later trade
        day, returning the date itself when it already is one (or falls
        outside the supported calendar range)."""
        date_trade = '20210401'
        date_holiday = '20210102'
        next_holiday = pd.to_datetime(date_holiday) + pd.Timedelta(2, 'd')
        date_weekend = '20210424'
        next_weekend = pd.to_datetime(date_weekend) + pd.Timedelta(2, 'd')
        date_seems_trade_day = '20210217'
        # NOTE(review): next_trade_day() appears to treat this date as a
        # trade day even though the market is closed -- confirm intended.
        next_seems_trade_day = '20210217'
        date_too_early = '19890601'
        date_too_late = '20230105'
        date_christmas = '20201225'
        self.assertEqual(pd.to_datetime(next_trade_day(date_trade)),
                         pd.to_datetime(date_trade))
        self.assertEqual(pd.to_datetime(next_trade_day(date_holiday)),
                         pd.to_datetime(next_holiday))
        self.assertEqual(pd.to_datetime(next_trade_day(date_weekend)),
                         pd.to_datetime(next_weekend))
        self.assertEqual(pd.to_datetime(next_trade_day(date_seems_trade_day)),
                         pd.to_datetime(next_seems_trade_day))
        self.assertEqual(pd.to_datetime(next_trade_day(date_too_early)),
                         pd.to_datetime(date_too_early))
        self.assertEqual(pd.to_datetime(next_trade_day(date_too_late)),
                         pd.to_datetime(date_too_late))
        self.assertEqual(pd.to_datetime(next_trade_day(date_christmas)),
                         pd.to_datetime(date_christmas))
def test_prev_market_trade_day(self):
""" test the function prev_market_trade_day()
"""
date_trade = '20210401'
date_holiday = '20210102'
prev_holiday = pd.to_datetime(date_holiday) - pd.Timedelta(2, 'd')
date_weekend = '20210424'
prev_weekend = pd.to_datetime(date_weekend) - pd.Timedelta(1, 'd')
date_seems_trade_day | |
(self.name, self.uid)
else:
if self.parent is None:
label = 'index space %s' % hex(self.uid)
else:
label = 'subspace %s' % self.uid
if self.parent is not None:
color = None
for c, child in self.parent.children.iteritems():
if child == self:
color = c
break
assert color is not None
label += ' (color: %s)' % color
printer.println('%s [label="%s",shape=plaintext,fontsize=14,fontcolor=black,fontname="Helvetica"];' %
(self.node_name, label))
# print links to children
for child in self.children.itervalues():
child.print_link_to_parent(printer, self.node_name)
for child in self.children.itervalues():
child.print_graph(printer)
def print_tree(self):
if self.depth == 0:
print("---------------------------------------")
print(self)
else:
prefix = ''
for i in range(self.depth):
prefix += ' '
print('%s%s Color: %s' % (prefix, self, self.color.to_string()))
for child in self.children.itervalues():
child.print_tree()
if self.depth == 0:
print("---------------------------------------")
class IndexPartition(object):
    """
    Node in the index tree representing a partition of a parent index space
    into colored child index (sub)spaces.  Tracks disjointness and
    completeness, and caches pairwise intersection/dominance results.
    """
    __slots__ = ['state', 'uid', 'parent', 'color', 'children', 'instances',
                 'disjoint', 'complete', 'name', 'depth', 'shape', 'point_set',
                 'node_name', 'intersections', 'dominated']
    def __init__(self, state, uid):
        self.state = state
        self.uid = uid
        self.parent = None
        self.color = Point(0)
        self.children = dict()       # color -> child index space
        self.instances = dict()      # tree id -> partition instance
        self.disjoint = False
        self.complete = None         # computed lazily by is_complete()
        self.name = None
        self.depth = None
        self.shape = None            # union of child shapes, lazy
        self.point_set = None        # union of child point sets, lazy
        self.node_name = 'index_part_node_%s' % uid
        self.intersections = dict()  # cache: other -> point set or None
        self.dominated = dict()      # cache: other -> bool
    def set_parent(self, parent, color):
        """Attach this partition below parent at the given color."""
        self.parent = parent
        self.depth = parent.depth+1
        self.color = color
        self.parent.add_child(self)
    def set_disjoint(self, disjoint):
        self.disjoint = disjoint
    def set_name(self, name):
        self.name = name
    def add_child(self, child):
        self.children[child.color] = child
    def add_instance(self, tid, partition):
        self.instances[tid] = partition
    def __str__(self):
        if self.name is None:
            return "Index Partition: %s" % self.uid
        else:
            return '%s (%s)' % (self.name, self.uid)
    __repr__ = __str__
    def check_partition_properties(self):
        """Sanity-check that the parent dominates every child and that a
        partition logged as disjoint has non-overlapping children."""
        # Check for dominance of children by parent
        for child in self.children.itervalues():
            if not self.parent.dominates(child):
                # BUG FIX: in the original, '+' bound tighter than '%', so the
                # format args applied to the wrong string literal and this
                # line raised TypeError; 'child %' was also missing its 's'.
                print('WARNING: child %s is not dominated by parent %s in %s. '
                      'This is definitely an application bug.' %
                      (child, self.parent, self))
                # BUG FIX: was self.node.state, but this class has no 'node'
                # slot -- state is stored directly on self.
                if self.state.assert_on_warning:
                    assert False
        # Check disjointness
        if self.disjoint:
            previous = Shape()
            for child in self.children.itervalues():
                child_shape = child.get_shape()
                if not (child_shape & previous).empty():
                    # BUG FIX: same '+' vs '%' precedence problem as above.
                    print('WARNING: %s was logged disjoint '
                          'but there are overlapping children. This '
                          'is definitely an application bug.' % self)
                    if self.state.assert_on_warning:
                        assert False
                    break
                previous |= child_shape
        # TODO: Check completeness
    def update_index_sets(self, index_sets):
        for child in self.children.itervalues():
            child.update_index_sets(index_sets)
    def are_all_children_disjoint(self):
        return self.disjoint
    def are_children_disjoint(self, c1, c2):
        # A disjoint partition guarantees any two children are disjoint;
        # otherwise fall back to an explicit intersection test.
        if self.disjoint:
            return True
        if c1.intersects(c2):
            return False
        return True
    def get_shape(self):
        # Lazily compute and cache the union of the children's shapes.
        if self.shape is None:
            for child in self.children.itervalues():
                if self.shape is None:
                    self.shape = child.get_shape().copy()
                else:
                    self.shape |= child.get_shape()
        return self.shape
    def get_point_set(self):
        # Lazily compute and cache the union of the children's point sets.
        if self.point_set is None:
            for child in self.children.itervalues():
                if self.point_set is None:
                    self.point_set = child.get_point_set().copy()
                else:
                    self.point_set |= child.get_point_set()
        return self.point_set
    def intersection(self, other):
        """Return the (cached) point-set intersection with other, or None
        when the two do not overlap."""
        if self is other:
            return self.get_point_set()
        if other in self.intersections:
            return self.intersections[other]
        intersection = self.get_point_set() & other.get_point_set()
        if intersection.empty():
            self.intersections[other] = None
            return None
        self.intersections[other] = intersection
        return intersection
    def intersects(self, other):
        if self is other:
            return True
        return self.intersection(other) is not None
    def dominates(self, other):
        """True when other's points are a subset of ours (cached)."""
        if self is other:
            return True
        if other in self.dominated:
            return self.dominated[other]
        non_dominated = other.get_point_set() - self.get_point_set()
        if non_dominated.empty():
            self.dominated[other] = True
            return True
        else:
            self.dominated[other] = False
            return False
    def is_complete(self):
        if self.complete is None:
            # Figure out if this partition is complete or not
            self.complete = (self.parent.get_point_set() -
                             self.get_point_set()).empty()
        return self.complete
    def get_num_children(self):
        return len(self.children)
    def print_link_to_parent(self, printer, parent):
        printer.println(parent+' -> '+ self.node_name+
                        ' [style=dotted,color=black,penwidth=2];')
    def print_graph(self, printer):
        """Emit this node (and recursively its subtree) in graphviz format."""
        if self.name is not None:
            label = self.name + ' (ID: ' + str(self.uid) + ')'
        else:
            label = 'Index Partition '+str(self.uid)
        # Find our color among the parent's children for the label.
        color = None
        for c,child in self.parent.children.iteritems():
            if child == self:
                color = c
                break
        assert color is not None
        label += ' (color: %s)' % color
        label += '\nDisjoint=%s, Complete=%s' % (self.disjoint, self.is_complete())
        printer.println(
            '%s [label="%s",shape=plaintext,fontsize=14,fontcolor=black,fontname="times italic"];' %
            (self.node_name, label))
        # print links to children
        for child in self.children.itervalues():
            child.print_link_to_parent(printer, self.node_name)
        for child in self.children.itervalues():
            child.print_graph(printer)
    def print_tree(self):
        prefix = '  ' * self.depth
        print('%s%s Color: %s' % (prefix, self, self.color.to_string()))
        for child in self.children.itervalues():
            child.print_tree()
class Field(object):
    """A single field (fid) belonging to a field space, optionally named."""
    __slots__ = ['space', 'fid', 'name']
    def __init__(self, space, fid):
        self.space = space
        self.fid = fid
        self.name = None
    def set_name(self, name):
        self.name = name
    def __str__(self):
        # Named fields render as "name (fid)"; unnamed ones as "Field fid".
        if self.name is not None:
            return '%s (%s)' % (self.name, self.fid)
        return 'Field %s' % self.fid
    __repr__ = __str__
class FieldSpace(object):
    """A collection of Field objects keyed by field id, with lazy creation
    and graphviz printing support."""
    __slots__ = ['state', 'uid', 'name', 'fields', 'node_name']
    def __init__(self, state, uid):
        self.state = state
        self.uid = uid
        self.name = None
        self.fields = dict()
        self.node_name = 'field_space_node_%s' % uid
    def set_name(self, name):
        self.name = name
    def get_field(self, fid):
        # Create Field objects lazily on first lookup.
        try:
            return self.fields[fid]
        except KeyError:
            field = self.fields[fid] = Field(self, fid)
            return field
    def __str__(self):
        if self.name is not None:
            return '%s (%s)' % (self.name, self.uid)
        return 'Field Space %s' % self.uid
    __repr__ = __str__
    def print_graph(self, printer):
        """Emit this field space and each of its fields as graphviz nodes,
        with dotted edges from the space to its fields."""
        if self.name is not None:
            label = '%s (ID: %s)' % (self.name, self.uid)
        else:
            label = str(self)
        printer.println(
            '%s [label="%s",shape=plaintext,fontsize=14,fontcolor=black,fontname="Helvetica"];'
            % (self.node_name, label))
        for fid, field in self.fields.iteritems():
            field_id = 'field_node_%s_%s' % (self.uid, fid)
            if field.name is not None:
                field_name = '%s(FID: %s)' % (field.name, fid)
            else:
                field_name = 'FID: %s' % fid
            printer.println(
                '%s [label="%s",shape=plaintext,fontsize=14,fontcolor=black,fontname="Helvetica"]'
                % (field_id, field_name))
            printer.println('%s -> %s [style=dotted,color=black,penwidth=2];'
                            % (self.node_name, field_id))
class LogicalRegion(object):
    """A node in a logical region tree: one index space viewed through a
    field space within a particular region tree (tree_id)."""
    __slots__ = ['state', 'index_space', 'field_space', 'tree_id', 'children',
                 'name', 'parent', 'logical_state', 'physical_state', 'node_name',
                 'has_named_children']
def __init__(self, state, iid, fid, tid):
self.state = state
self.index_space = iid
self.field_space = fid
self.tree_id = tid
self.children = dict()
self.name = None
self.parent = None
self.logical_state = dict()
self.physical_state = dict()
self.index_space.add_instance(self.tree_id, self)
self.node_name = 'region_node_'+str(self.index_space.uid)+\
'_'+str(self.field_space.uid)+'_'+str(self.tree_id)
self.has_named_children = False
    def set_name(self, name):
        # Human-readable name used for printing and graph labels.
        self.name = name

    def set_parent(self, parent):
        # Attach this region under a parent partition and register it as
        # that partition's child for this index space's color.
        self.parent = parent
        self.parent.add_child(self.index_space.color, self)

    def add_child(self, color, child):
        # 'color' is the child's point in this node's color space.
        self.children[color] = child

    def has_all_children(self):
        # True when every child of the underlying index space has a
        # corresponding region-tree child instantiated here.
        return len(self.children) == len(self.index_space.children)

    def get_index_node(self):
        return self.index_space
    def update_parent(self):
        # Lazily fill in the parent partition from the index-space tree:
        # if our index space has a parent partition, look up (or create)
        # the matching logical partition in the same field space/tree.
        if not self.parent and self.index_space.parent is not None:
            self.parent = self.state.get_partition(
                self.index_space.parent.uid, self.field_space.uid, self.tree_id)
def __str__(self):
if self.name is None:
return "Region (%d,%d,%d)" % (self.index_space.uid,
self.field_space.uid,self.tree_id)
else:
return self.name + ' ('+str(self.index_space.uid)+','+\
str(self.field_space.uid)+','+str(self.tree_id)+')'
__repr__ = __str__
    # The methods below delegate to the underlying index space, which
    # owns all of the geometric information for this region.

    def are_all_children_disjoint(self):
        return self.index_space.are_all_children_disjoint()

    def are_children_disjoint(self, c1, c2):
        # c1 and c2 are logical partitions; disjointness is a property
        # of their index partitions.
        return self.index_space.are_children_disjoint(c1.index_partition,
                                                      c2.index_partition)

    def get_shape(self):
        return self.index_space.get_shape()

    def get_point_set(self):
        return self.index_space.get_point_set()

    def intersection(self, other):
        # 'other' may be a logical region or a logical partition; pick
        # the matching index-tree node to compute against.
        if isinstance(other, LogicalRegion):
            return self.index_space.intersection(other.index_space)
        else:
            return self.index_space.intersection(other.index_partition)

    def intersects(self, other):
        if isinstance(other, LogicalRegion):
            return self.index_space.intersects(other.index_space)
        else:
            return self.index_space.intersects(other.index_partition)

    def dominates(self, other):
        if isinstance(other, LogicalRegion):
            return self.index_space.dominates(other.index_space)
        else:
            return self.index_space.dominates(other.index_partition)

    def is_complete(self):
        return self.index_space.is_complete()

    def get_num_children(self):
        return self.index_space.get_num_children()
    def reset_logical_state(self):
        # Drop all per-field logical analysis state for this node.
        if self.logical_state:
            self.logical_state = dict()

    def reset_physical_state(self, depth):
        # Drop physical analysis state for one depth only; state at
        # other depths is left untouched.
        if self.physical_state and depth in self.physical_state:
            self.physical_state[depth] = dict()
    def compute_path(self, path, target):
        # Append the chain of nodes from 'target' down to 'self'
        # (inclusive) onto 'path', ancestors first.  Asserts that
        # 'target' is an ancestor of (or equal to) this node.
        if self is not target:
            assert self.parent is not None
            self.parent.compute_path(path, target)
        path.append(self)
    def perform_logical_analysis(self, depth, path, op, req, field,
                                 projecting, register_user, prev, checks):
        # Walk 'path' (root first) one node per recursive call, running
        # the per-field logical dependence analysis at each node.
        # Returns False as soon as any node's analysis fails.
        assert self is path[depth]
        # Lazily create per-field state the first time this field is
        # analyzed at this node.
        if field not in self.logical_state:
            self.logical_state[field] = LogicalState(self, field)
        arrived = (depth+1) == len(path)
        next_child = path[depth+1] if not arrived else None
        if not self.logical_state[field].perform_logical_analysis(op, req, next_child,
                                          projecting, register_user, prev, checks):
            return False
        if not arrived:
            return path[depth+1].perform_logical_analysis(depth+1, path, op, req, field,
                                          projecting, register_user, prev, checks)
        return True
    def register_logical_user(self, op, req, field):
        # Record 'op' as a user of this node for 'field' without running
        # any dependence analysis (state is created on demand).
        if field not in self.logical_state:
            self.logical_state[field] = LogicalState(self, field)
        self.logical_state[field].register_logical_user(op, req)

    def perform_logical_fence(self, op, field, checks):
        # Apply a fence operation over this entire subtree for 'field';
        # returns False if the analysis fails at any node.
        if field not in self.logical_state:
            self.logical_state[field] = LogicalState(self, field)
        if not self.logical_state[field].perform_logical_fence(op, checks):
            return False
        for child in self.children.itervalues():
            if not child.perform_logical_fence(op, field, checks):
                return False
        return True
    def perform_logical_deletion(self, depth, path, op, req, field, prev, checks):
        # Walk 'path' running deletion analysis for 'field'.  Nodes with
        # no state for the field are trivially done.
        assert self is path[depth]
        if field not in self.logical_state:
            return True
        arrived = (depth+1) == len(path)
        # force_close is equivalent to 'not arrived': interior nodes must
        # force-close their open subtrees below the deletion point.
        force_close = (depth+1) < len(path)
        next_child = path[depth+1] if not arrived else None
        if not self.logical_state[field].perform_logical_deletion(op, req, next_child,
                                          prev, checks, force_close):
            return False
        if not arrived:
            return path[depth+1].perform_logical_deletion(depth+1, path, op, req, field,
                                          prev, checks)
        elif not checks:
            # Do all the invalidations and record any dependences
            self.perform_deletion_invalidation(op, req, field)
        return True

    def perform_deletion_invalidation(self, op, req, field):
        # Invalidate state for 'field' here and throughout the subtree,
        # recording dependences against 'op'.
        if field not in self.logical_state:
            return
        self.logical_state[field].perform_deletion_invalidation(op, req)
        for child in self.children.itervalues():
            child.perform_deletion_invalidation(op, req, field)
    def close_logical_tree(self, field, closed_users, permit_leave_open):
        # Close the subtree rooted here for 'field', accumulating the
        # users that got closed into 'closed_users'.
        if field not in self.logical_state:
            return
        self.logical_state[field].close_logical_tree(closed_users, permit_leave_open)
def get_physical_state(self, depth, field):
if depth not in self.physical_state:
self.physical_state[depth] = dict()
field_dict = self.physical_state[depth]
if field | |
# coding: utf-8
"""
ww contains convenient wrappers around strings. The most important one is
StringWrapper, that you will mostly use as the "s()" object.
It behaves like unicode strings (the API is compatible),
but makes small improvements to the existing methods and adds some new
methods.
It doesn't accept bytes as an input. If you do so and it works, you must
know it's not a supported behavior and may change in the future. Only
pass:
- unicode objects in Python 2;
- str objects in Python 3.
Example:
Import::
>>> from ww import s
You always have the more explicit import at your disposal::
>>> from ww.wrappers.strings import StringWrapper
`s` is just an alias of StringWrapper, but it's what most people will
want to use most of the time. Hence it's what we will use in the
examples.
Basic usages::
>>> string = s("this is a test")
>>> string
u'this is a test'
>>> type(string)
<class 'ww.wrappers.strings.StringWrapper'>
>>> string.upper() # regular string methods are all there
u'THIS IS A TEST'
>>> string[:4] + "foo" # same behaviors you expect from a string
u'thisfoo'
Some existing methods, while still compatible with the previous
behavior, have been improved::
>>> string.replace('e', 'a') # just as before
u'this is a tast'
>>> string.replace(('e', 'i'), ('a', 'o')) # and a little more
u'thos os a tast'
>>> s('-').join(range(10)) # join() autocast to string
u'0-1-2-3-4-5-6-7-8-9'
>>> s('-').join(range(10), template="{:.2f}")
u'0.00-1.00-2.00-3.00-4.00-5.00-6.00-7.00-8.00-9.00'
Some methods have been added::
>>> print(s('''
... This should be over indented.
... But it will not be.
... Because dedent() calls textwrap.dedent() on the string.
... ''').dedent())
<BLANKLINE>
This should be over indented.
But it will not be.
Because dedent() calls textwrap.dedent() on the string.
<BLANKLINE>
By overriding operators, we can provide some interesting syntactic
sugar, such as this shortcut for writing long dedented text::
>>> print(s >> '''
... Calling dedent() is overrated.
... Overriding __rshift__ is much more fun.
... ''')
<BLANKLINE>
Calling dedent() is overrated.
Overriding __rshift__ is much more fun.
<BLANKLINE>
Also we hacked something that looks like Python 3.6 f-string, but
that works in Python 2.7 and 3.3+:
>>> from ww import f
>>> a = 1
>>> f('Sweet, I can print locals: {a}')
u'Sweet, I can print locals: 1'
>>> print(f >> '''
... Yes it works with long string too.
... And globals, if you are into that kind
... of things.
... But we have only {a} for now.
... ''')
<BLANKLINE>
Yes it works with long string too.
And globals, if you are into that kind
of things.
But we have only 1 for now.
<BLANKLINE>
.. warning::
Remember that, while f-strings are interpreted at parsing time,
our implementation is executed at run-time, making it vulnerable
to code injection. This makes it a dangerous feature to put in
production.
There is much, much more to play with. Check it out :)
You'll find below the detailed documentation for each method of
StringWrapper. Go have a look, there is some great stuff here!
"""
from __future__ import (absolute_import, division, print_function)
# TODO : flags can be passed as strings. Ex: s.search('regex', flags='ig')
# TODO : make s.search(regex) return a wrapper with __bool__ evaluating to
# false if no match instead of None and allow default value for group(x)
# also allow match[1] to return group(1) and match['foo'] to return
# groupdict['foo']
# TODO .groups would be a g() object
# TODO: .pp() to pretty_print
# TODO: override slicing to allow callables
# TODO: provide "strip_comments" ?
# TODO: provide from_json() / to_json()
# TODO: provide the same for html / xml
# TODO : add encoding detection, fuzzy_decode() to make the best of shitty
# decoding, unidecode, slug, etc,
# tpl() or tpl >> for a jinja2 template (optional dependency ?)
# something for translation ?
# TODO: match.__repr__ should show match, groups, groupsdict in summary
import inspect
from textwrap import dedent
import six
import chardet
from future.utils import raise_from
try:
from formatizer import LiteralFormatter
FORMATTER = LiteralFormatter()
except ImportError: # pragma: no cover
FORMATTER = str
from six import with_metaclass
import ww
from ww.tools.strings import multisplit, multireplace
from ww.types import (Union, unicode, str_istr, str_istr_icallable, # noqa
C, I, Iterable, Callable, Any)
# TODO: make sure we copy all methods from str but return s()
FORMATTER = LiteralFormatter()
# TODO: s >> should do s().strip().dedent().fold()
class MetaS(type):
    """ Allow s >> 'text' as a shortcut to dedent strings

    This is not something you should use directly. It's a metaclass
    for s() StringWrapper objects and is used to override the
    operator >> on the StringWrapper class (not the object).
    """

    def __rshift__(self, other):
        # type: (str) -> StringWrapper
        """ Let you do s >> "a string" as a shortcut to s("a string").dedent()

        s is the class, not s(), which would be an instance.

        Args:
            other: the string at the right of the '>>' operator.

        Returns:
            The dedented string as wrapped in StringWrapper. Right now
            we always return StringWrapper, so subclassing won't work
            if you want to override this.

        Raises:
            TypeError: if you try to apply it on non strings.

        Example:

            >>> from ww import s
            >>> print(s >> '''
            ... This should be indented
            ... but it will not be
            ... ''')
            <BLANKLINE>
            This should be indented
            but it will not be
            <BLANKLINE>
        """
        # NOTE(review): fixed the type comment above; the original
        # '# type (str) -> ...' was missing the colon, so type checkers
        # silently ignored it.
        # TODO: figure out how to allow this to work with subclasses
        return StringWrapper(dedent(other))
class MetaF(type):
    """ Allow f >> 'text' as a shortcut to dedent f-like-strings.

    This is not something you should use directly. It's a metaclass
    for s() StringWrapper objects and is used to override the
    operator >> on the StringWrapper class (not the object).

    This is the same as MetaS, but it wraps the string in f(), not in
    s(), meaning you can use the f-string compatible syntax inside
    the string you wish to dedent.

    .. warning::

        Remember that, while f-strings are interpreted at parsing time,
        our implementation is executed at run-time, making it vulnerable
        to code injection. This makes it a dangerous feature to put in
        production.
    """

    def __rshift__(self, other):
        # type: (str) -> StringWrapper
        """ Let you do f >> "a string" as a shortcut to f("a string").dedent()

        f is the class, not f(), which would be an instance.

        Args:
            other: the string at the right of the '>>' operator.

        Returns:
            The dedented string as wrapped in StringWrapper. Right now
            we always return StringWrapper, so subclassing won't work
            if you want to override this.

        Raises:
            TypeError: if you try to apply it on non strings.

        Example:

            >>> from ww import f
            >>> var = "foo"
            >>> print(f >> '''
            ... This should be indented
            ... but it will not be.
            ... And you can use {var}.
            ... ''')
            <BLANKLINE>
            This should be indented
            but it will not be.
            And you can use foo.
            <BLANKLINE>

        .. warning::

            Remember that, while f-strings are interpreted at parsing
            time, our implementation is executed at run-time, making it
            vulnerable to code injection. This makes it a dangerous feature
            to put in production.
        """
        # Grab the *caller's* frame so that names inside the formatted
        # string are resolved against the caller's globals and locals,
        # mimicking how a real f-string would behave.
        caller_frame = inspect.currentframe().f_back
        caller_globals = caller_frame.f_globals
        caller_locals = caller_frame.f_locals
        # TODO: figure out how to allow StringWrapper subclasses to work
        # with this
        return StringWrapper(dedent(
            FORMATTER.format(other, caller_globals, caller_locals)
        ))
# TODO: add normalize() (removes special caracters) and slugify
# (normalize + slug)
# TODO: refactor methods to be only wrappers
# for functions from a separate module
# TODO: override capitalize, title, upper, lower, etc
# TODO: inherit from BaseWrapper
class StringWrapper(with_metaclass(MetaS, unicode)): # type: ignore
"""
Convenience wrappers around strings behaving like unicode strings, but
make small improvements to the existing methods and add some new
methods.
It doesn't accept bytes as an input. If you do so and it works, you
must know it's not a supported behavior and may change in the future.
Only pass:
- unicode objects in Python 2;
- str objects in Python 3.
Basic usages::
>>> from ww import s
>>> string = s("this is a test")
>>> string
u'this is a test'
| |
import csv
import shutil
import io
import math
import os
import tempfile
import time
from builtins import zip
import pandas as pd
import pytest
from unittest.mock import call, MagicMock
from tests.unit.test_utils.unit_utils import StringIOContextManager
from synapseclient import client, Entity, Synapse
from synapseclient.core.exceptions import SynapseError, SynapseTimeoutError
from synapseclient.entity import split_entity_namespaces
import synapseclient.table
from synapseclient.table import Column, Schema, CsvFileTable, TableQueryResult, cast_values, \
as_table_columns, Table, build_table, RowSet, SelectColumn, EntityViewSchema, RowSetTable, Row, PartialRow, \
PartialRowset, SchemaBase, _get_view_type_mask_for_deprecated_type, EntityViewType, _get_view_type_mask, \
MAX_NUM_TABLE_COLUMNS, SubmissionViewSchema, escape_column_name, join_column_names
from synapseclient.core.utils import from_unix_epoch_time
from unittest.mock import patch
from collections import OrderedDict
def test_cast_values():
    """cast_values converts raw string row values according to each
    select column's columnType."""
    select_columns = [
        {'id': '353', 'name': 'name', 'columnType': 'STRING'},
        {'id': '354', 'name': 'foo', 'columnType': 'STRING'},
        {'id': '355', 'name': 'x', 'columnType': 'DOUBLE'},
        {'id': '356', 'name': 'n', 'columnType': 'INTEGER'},
        {'id': '357', 'name': 'bonk', 'columnType': 'BOOLEAN'},
        {'id': '358', 'name': 'boom', 'columnType': 'LINK'},
    ]
    row = ('Finklestein', 'bat', '3.14159', '65535', 'true', 'https://www.synapse.org/')
    expected = ['Finklestein', 'bat', 3.14159, 65535, True, 'https://www.synapse.org/']
    assert cast_values(row, select_columns) == expected

    # Aggregation results (e.g. GROUP BY) have no column ids but still
    # carry a columnType to cast by.
    select_columns = [
        {'name': 'bonk', 'columnType': 'BOOLEAN'},
        {'name': 'COUNT(name)', 'columnType': 'INTEGER'},
        {'name': 'AVG(x)', 'columnType': 'DOUBLE'},
        {'name': 'SUM(n)', 'columnType': 'INTEGER'},
    ]
    row = ('true', '211', '1.61803398875', '1421365')
    assert cast_values(row, select_columns) == [True, 211, 1.61803398875, 1421365]
def test_cast_values__unknown_column_type():
    """Values whose columnType is not recognized pass through unchanged."""
    select_columns = [
        {'id': '353', 'name': 'name', 'columnType': 'INTEGER'},
        {'id': '354', 'name': 'foo', 'columnType': 'DEFINTELY_NOT_A_EXISTING_TYPE'},
    ]
    # The INTEGER value is cast; the unknown type is left as-is.
    assert cast_values(('123', 'othervalue'), select_columns) == [123, 'othervalue']
def test_cast_values__list_type():
    """LIST column types are parsed out of their JSON string form."""
    select_columns = [
        {'id': '354', 'name': 'foo', 'columnType': 'STRING_LIST'},
        {'id': '356', 'name': 'n', 'columnType': 'INTEGER_LIST'},
        {'id': '357', 'name': 'bonk', 'columnType': 'BOOLEAN_LIST'},
        {'id': '358', 'name': 'boom', 'columnType': 'DATE_LIST'},
    ]
    now_millis = int(round(time.time() * 1000))
    row = ('["foo", "bar"]', '[1,2,3]', '[true, false]', '[%d]' % now_millis)
    expected = [
        ["foo", "bar"],
        [1, 2, 3],
        [True, False],
        [from_unix_epoch_time(now_millis)],
    ]
    assert cast_values(row, select_columns) == expected
def test_schema():
    """Schema tracks columns added/removed before the table is stored."""
    schema = Schema(name='My Table', parent="syn1000001")
    assert not schema.has_columns()

    # Adding/removing a column by id updates properties.columnIds.
    schema.addColumn(Column(id='1', name='Name', columnType='STRING'))
    assert schema.has_columns()
    assert schema.properties.columnIds == ['1']
    schema.removeColumn('1')
    assert not schema.has_columns()
    assert schema.properties.columnIds == []

    # Columns without ids go into columns_to_store until stored.
    specs = [('Name', 'STRING'), ('Born', 'INTEGER'),
             ('Hipness', 'DOUBLE'), ('Living', 'BOOLEAN')]
    schema = Schema(name='Another Table', parent="syn1000001")
    schema.addColumns([Column(name=n, columnType=t) for n, t in specs])
    assert schema.has_columns()
    assert len(schema.columns_to_store) == 4
    for n, t in specs:
        assert Column(name=n, columnType=t) in schema.columns_to_store

    # Removal matches by column equality, not identity.
    schema.removeColumn(Column(name='Living', columnType='BOOLEAN'))
    assert schema.has_columns()
    assert len(schema.columns_to_store) == 3
    assert Column(name='Living', columnType='BOOLEAN') not in schema.columns_to_store
    assert Column(name='Hipness', columnType='DOUBLE') in schema.columns_to_store
def test_RowSetTable():
    """A RowSet parsed from JSON round-trips through Table: it keeps its
    etag/tableId, converts back to a RowSet, and converts to a DataFrame
    with one row per JSON row."""
    row_set_json = {
        'etag': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
        'headers': [
            {'columnType': 'STRING', 'id': '353', 'name': 'name'},
            {'columnType': 'DOUBLE', 'id': '355', 'name': 'x'},
            {'columnType': 'DOUBLE', 'id': '3020', 'name': 'y'},
            {'columnType': 'INTEGER', 'id': '891', 'name': 'n'}],
        'rows': [{
            'rowId': 5,
            'values': ['foo', '1.23', '2.2', '101'],
            'versionNumber': 3},
            {'rowId': 6,
             'values': ['bar', '1.34', '2.4', '101'],
             'versionNumber': 3},
            {'rowId': 7,
             'values': ['foo', '1.23', '2.2', '101'],
             'versionNumber': 4},
            {'rowId': 8,
             'values': ['qux', '1.23', '2.2', '102'],
             'versionNumber': 3}],
        'tableId': 'syn2976298'}

    row_set = RowSet.from_json(row_set_json)
    assert row_set.etag == 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
    assert row_set.tableId == 'syn2976298'
    assert len(row_set.headers) == 4
    assert len(row_set.rows) == 4

    # Wrapping the RowSet in a Table exposes the same metadata.
    schema = Schema(id="syn2976298", name="Bogus Schema", columns=[353, 355, 3020, 891], parent="syn1000001")
    table = Table(schema, row_set)
    assert table.etag == 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
    assert table.tableId == 'syn2976298'
    assert len(table.headers) == 4
    assert len(table.asRowSet().rows) == 4

    df = table.asDataFrame()
    assert df.shape == (4, 4)
    assert list(df['name']) == ['foo', 'bar', 'foo', 'qux']
def test_as_table_columns__with_pandas_DataFrame():
    """as_table_columns maps pandas dtypes to Synapse column models
    (object->STRING with maximumSize, float->DOUBLE, int->INTEGER,
    bool->BOOLEAN), preserving column order."""
    df = pd.DataFrame({
        'foobar': ("foo", "bar", "baz", "qux", "asdf"),
        'x': tuple(math.pi*i for i in range(5)),
        'n': (101, 202, 303, 404, 505),
        'really': (False, True, False, True, False),
        'size': ('small', 'large', 'medium', 'medium', 'large')},
        columns=['foobar', 'x', 'n', 'really', 'size'])
    cols = as_table_columns(df)

    expected_columns = [
        {'defaultValue': '',
         'columnType': 'STRING',
         'name': 'foobar',
         'maximumSize': 30,
         'concreteType': 'org.sagebionetworks.repo.model.table.ColumnModel'},
        {'columnType': 'DOUBLE',
         'name': 'x',
         u'concreteType': 'org.sagebionetworks.repo.model.table.ColumnModel'},
        {'columnType': 'INTEGER',
         'name': 'n',
         'concreteType': 'org.sagebionetworks.repo.model.table.ColumnModel'},
        {'columnType': 'BOOLEAN',
         'name': 'really',
         'concreteType': 'org.sagebionetworks.repo.model.table.ColumnModel'},
        {'defaultValue': '',
         'columnType': 'STRING',
         'name': 'size',
         'maximumSize': 30,
         'concreteType': 'org.sagebionetworks.repo.model.table.ColumnModel'}
    ]
    assert expected_columns == cols
def test_as_table_columns__with_non_supported_input_type():
    """Passing a plain dict (not a DataFrame or file) raises ValueError."""
    with pytest.raises(ValueError):
        as_table_columns(dict(a=[1, 2, 3], b=["c", "d", "e"]))
def test_as_table_columns__with_csv_file():
    """as_table_columns infers column types from a CSV file-like object,
    skipping the ROW_ID/ROW_VERSION bookkeeping columns."""
    csv_content = (
        'ROW_ID,ROW_VERSION,Name,Born,Hipness,Living\n'
        '"1", "1", "<NAME>", 1926, 8.65, False\n'
        '"2", "1", "<NAME>", 1926, 9.87, False'
    )
    cols = as_table_columns(StringIOContextManager(csv_content))
    # Note: 'Living' is inferred as STRING (the values parse as text).
    expected = [('Name', 'STRING'), ('Born', 'INTEGER'),
                ('Hipness', 'DOUBLE'), ('Living', 'STRING')]
    for col, (name, column_type) in zip(cols, expected):
        assert col['name'] == name
        assert col['columnType'] == column_type
def test_dict_to_table():
    """Building a Table from a dict delegates to
    CsvFileTable.from_data_frame with an equivalent DataFrame."""
    d = dict(a=[1, 2, 3], b=["c", "d", "e"])
    df = pd.DataFrame(d)
    schema = Schema(name="Baz", parent="syn12345", columns=as_table_columns(df))

    with patch.object(CsvFileTable, "from_data_frame") as mocked_from_data_frame:
        Table(schema, d)

    # call_args is an (args, kwargs) pair; the DataFrame is the second
    # positional argument passed to from_data_frame.
    positional_args = mocked_from_data_frame.call_args[0]
    assert positional_args[1].equals(df)
def test_pandas_to_table():
    """A DataFrame wrapped in Table iterates row-wise, with ROW_ID and
    ROW_VERSION handled three different ways: absent, empty, or parsed
    from the index / explicit columns."""
    df = pd.DataFrame(dict(a=[1, 2, 3], b=["c", "d", "e"]))
    schema = Schema(name="Baz", parent="syn12345", columns=as_table_columns(df))

    # A dataframe with no row id and version
    table = Table(schema, df)
    for i, row in enumerate(table):
        assert row[0] == (i + 1)
        assert row[1] == ["c", "d", "e"][i]

    assert len(table) == 3

    # If includeRowIdAndRowVersion=True, include empty row id and versions
    # ROW_ID,ROW_VERSION,a,b
    # ,,1,c
    # ,,2,d
    # ,,3,e
    table = Table(schema, df, includeRowIdAndRowVersion=True)
    for i, row in enumerate(table):
        assert row[0] is None
        assert row[1] is None
        assert row[2] == (i + 1)

    # A dataframe with row id and version encoded in its index ("id_version")
    df = pd.DataFrame(index=["1_7", "2_7", "3_8"], data=dict(a=[100, 200, 300], b=["c", "d", "e"]))
    table = Table(schema, df)
    for i, row in enumerate(table):
        assert row[0] == ["1", "2", "3"][i]
        assert row[1] == ["7", "7", "8"][i]
        assert row[2] == (i + 1) * 100
        assert row[3] == ["c", "d", "e"][i]

    # A dataframe with row id and version in columns
    df = pd.DataFrame(dict(ROW_ID=["0", "1", "2"], ROW_VERSION=["8", "9", "9"], a=[100, 200, 300], b=["c", "d", "e"]))
    table = Table(schema, df)
    for i, row in enumerate(table):
        assert row[0] == ["0", "1", "2"][i]
        assert row[1] == ["8", "9", "9"][i]
        assert row[2] == (i + 1) * 100
        assert row[3] == ["c", "d", "e"][i]
def test_csv_table():
    """Round-trip a table through a real CSV file on disk.

    Maybe not truly a unit test, but kept here because it does no
    network IO to Synapse.
    """
    data = [["1", "1", "<NAME>", 1926, 8.65, False],
            ["2", "1", "<NAME>", 1926, 9.87, False],
            ["3", "1", "<NAME>", 1929, 7.65, False],
            ["4", "1", "<NAME>", 1935, 5.14, False],
            ["5", "1", "<NAME>", 1929, 5.78, True],
            ["6", "1", "<NAME>", 1936, 4.21, False],
            ["7", "1", "<NAME>", 1930, 8.99, True],
            ["8", "1", "<NAME>", 1931, 4.37, True]]
    filename = None

    cols = [Column(id='1', name='Name', columnType='STRING'),
            Column(id='2', name='Born', columnType='INTEGER'),
            Column(id='3', name='Hipness', columnType='DOUBLE'),
            Column(id='4', name='Living', columnType='BOOLEAN')]
    schema1 = Schema(id='syn1234', name='<NAME>', columns=cols, parent="syn1000001")

    # TODO: use StringIO.StringIO(data) rather than writing files
    try:
        # create CSV file
        with tempfile.NamedTemporaryFile(delete=False) as temp:
            filename = temp.name

        with io.open(filename, mode='w', encoding="utf-8", newline='') as temp:
            writer = csv.writer(temp, quoting=csv.QUOTE_NONNUMERIC, lineterminator=str(os.linesep))
            headers = ['ROW_ID', 'ROW_VERSION'] + [col.name for col in cols]
            writer.writerow(headers)
            for row in data:
                writer.writerow(row)

        table = Table(schema1, filename)
        assert isinstance(table, CsvFileTable)

        # need to set column headers to read a CSV file
        table.setColumnHeaders(
            [SelectColumn(name="ROW_ID", columnType="STRING"),
             SelectColumn(name="ROW_VERSION", columnType="STRING")] +
            [SelectColumn.from_column(col) for col in cols])

        # test iterator
        for table_row, expected_row in zip(table, data):
            assert table_row == expected_row

        # test asRowSet
        rowset = table.asRowSet()
        for rowset_row, expected_row in zip(rowset.rows, data):
            assert rowset_row['values'] == expected_row[2:]
            assert rowset_row['rowId'] == expected_row[0]
            assert rowset_row['versionNumber'] == expected_row[1]

        # test asDataFrame, including the "rowId_version" index format
        df = table.asDataFrame()
        assert list(df['Name']) == [row[2] for row in data]
        assert list(df['Born']) == [row[3] for row in data]
        assert list(df['Living']) == [row[5] for row in data]
        assert list(df.index) == ['%s_%s' % tuple(row[0:2]) for row in data]
        assert df.shape == (8, 4)
    finally:
        # BUG FIX: cleanup previously ran only in an 'except' clause, so
        # the temporary file leaked on every successful run.  Clean up
        # unconditionally; exceptions still propagate out of 'finally'.
        if filename:
            try:
                if os.path.isdir(filename):
                    shutil.rmtree(filename)
                else:
                    os.remove(filename)
            except Exception as ex:
                print(ex)
def test_list_of_rows_table():
    """A list of row-value lists plus explicit headers behaves like a
    table: it iterates, converts to a RowSet, and (once columns are
    attached) converts to a DataFrame."""
    data = [["<NAME>", 1926, 8.65, False],
            ["<NAME>", 1926, 9.87, False],
            ["<NAME>", 1929, 7.65, False],
            ["<NAME>", 1935, 5.14, False],
            ["<NAME>", 1929, 5.78, True],
            ["<NAME>", 1936, 4.21, False],
            ["<NAME>", 1930, 8.99, True],
            ["<NAME>", 1931, 4.37, True]]

    cols = [Column(id='1', name='Name', columnType='STRING'),
            Column(id='2', name='Born', columnType='INTEGER'),
            Column(id='3', name='Hipness', columnType='DOUBLE'),
            Column(id='4', name='Living', columnType='BOOLEAN')]
    schema1 = Schema(name='Jazz Guys', columns=cols, id="syn1000002", parent="syn1000001")

    # need columns to do cast_values w/o storing
    table = Table(schema1, data, headers=[SelectColumn.from_column(col) for col in cols])

    for table_row, expected_row in zip(table, data):
        assert table_row == expected_row

    rowset = table.asRowSet()
    for rowset_row, expected_row in zip(rowset.rows, data):
        assert rowset_row['values'] == expected_row

    table.columns = cols

    df = table.asDataFrame()
    assert list(df['Name']) == [r[0] for r in data]
def test_aggregate_query_result_to_data_frame():
class MockSynapse(object):
def _queryTable(self, query, limit=None, offset=None, isConsistent=True, partMask=None):
return {'concreteType': 'org.sagebionetworks.repo.model.table.QueryResultBundle',
'maxRowsPerPage': 2,
'queryCount': 4,
'queryResult': {
'concreteType': 'org.sagebionetworks.repo.model.table.QueryResult',
'nextPageToken': 'aaaaaaaa',
'queryResults': {'etag': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
'headers': [
{'columnType': 'STRING', 'name': 'State'},
{'columnType': 'INTEGER', | |
<filename>plugins/helpers/btree.py<gh_stars>1-10
'''
Copyright 2011 <NAME>, <NAME>
Using New BSD License:
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from io import BytesIO
from plugins.helpers.structs import *
"""
Probably buggy
"""
class BTree(object):
def __init__(self, file, keyStruct, dataStruct):
self.file = file
self.keyStruct = keyStruct
self.dataStruct = dataStruct
block0 = self.file.readBlock(0)
btnode = BTNodeDescriptor.parse(block0)
assert btnode.kind == kBTHeaderNode
self.header = BTHeaderRec.parse(block0[BTNodeDescriptor.sizeof():])
#TODO: do more testing when nodeSize != blockSize
self.nodeSize = self.header.nodeSize
self.nodesInBlock = file.blockSize // self.header.nodeSize
self.blocksForNode = self.header.nodeSize // file.blockSize
#print (file.blockSize , self.header.nodeSize)
self.lastRecordNumber = 0
type, (hdr, maprec) = self.readBtreeNode(0)
self.maprec = maprec
self.compare_case_sensitive = self.header.keyCompareType == kHFSBinaryCompare # 0xBC
def isNodeInUse(self, nodeNumber):
thisByte = ord(self.maprec[nodeNumber // 8])
return (thisByte & (1 << (7 - (nodeNumber % 8)))) != 0
def readEmptySpace(self):
res = ""
z = 0
for i in range(self.header.totalNodes):
if not self.isNodeInUse(i):
z += 1
res += self.readNode(i)
assert z == self.header.freeNodes
return res
#convert construct structure to tuple
def getComparableKey(self, k):
raise Exception("implement in subclass")
def compare_operation_insensitive(self, k1, operation, k2):
'''Case Insensitive compare operation
TODO: Fix issues: There are 2 problems-
1. Nulls (empty strings) end up first in sort, but should be last in HFS implementation
2. Unicode handling is not addressed (Need to port Apple's FastUnicodeCompare())
'''
k1_ci = [(item.lower() if (type(item)==str) else item) for item in k1]
k2_ci = [(item.lower() if (type(item)==str) else item) for item in k2]
if operation == '==':
return k1_ci == k2_ci
elif operation == '<':
return k1_ci < k2_ci
elif operation == '>':
return k1_ci > k2_ci
def compareKeys(self, k1, k2):
k2 = self.getComparableKey(k2)
if self.compare_case_sensitive:
#print ('Comparing k1=' + str(k1) + ' k2=' + str(k2) + ' ' + str(k1 > k2))
if k1 == k2:
return 0
return -1 if k1 < k2 else 1
else:
#print ('Comparing k1=' + str(k1) + ' k2=' + str(k2) + ' ' + str(self.compare_operation_insensitive(k1, ">", k2)))
if self.compare_operation_insensitive(k1, "==", k2):
return 0
return -1 if self.compare_operation_insensitive(k1, "<", k2) else 1
def printLeaf(self, key, data):
print (key, data)
def readNode(self, nodeNumber):
node = b""
for i in range(self.blocksForNode):
node += self.file.readBlock((nodeNumber * self.blocksForNode) + i)
return node
def readBtreeNode(self, nodeNumber):
self.lastnodeNumber = nodeNumber
#node = memoryview(self.readNode(nodeNumber))
node = self.readNode(nodeNumber)
self.lastbtnode = btnode = BTNodeDescriptor.parse(node)
if btnode.kind == kBTHeaderNode:
#XXX
offsets = Array(btnode.numRecords, "off" / Int16ub).parse(node[-2*btnode.numRecords:])
hdr = BTHeaderRec.parse(node[BTNodeDescriptor.sizeof():])
maprec = node[offsets[-3]:]
return kBTHeaderNode, [hdr, maprec]
elif btnode.kind == kBTIndexNode:
recs = []
offsets = Array(btnode.numRecords, "off" / Int16ub).parse(node[-2*btnode.numRecords:])
for i in range(btnode.numRecords):
off = offsets[btnode.numRecords - i - 1]
k = self.keyStruct.parse(node[off:])
off += 2 + k.keyLength
k.childNode = Int32ub.parse(node[off:off + 4]) # ("nodeNumber")
recs.append(k)
return kBTIndexNode, recs
elif btnode.kind == kBTLeafNode:
recs = []
offsets = Array(btnode.numRecords, "off" / Int16ub).parse(node[-2*btnode.numRecords:])
for i in range(btnode.numRecords):
off = offsets[btnode.numRecords-i-1]
k = self.keyStruct.parse(node[off:])
off += 2 + k.keyLength
d = self.dataStruct.parse(node[off:])
recs.append((k,d))
return kBTLeafNode, recs
else:
raise Exception("Invalid node type " + str(btnode))
def search(self, searchKey, node=None):
if node == None:
node = self.header.rootNode
type, stuff = self.readBtreeNode(node)
if type == kBTIndexNode:
for i in range(len(stuff)):
if self.compareKeys(searchKey, stuff[i]) < 0:
if i > 0:
i = i - 1
return self.search(searchKey, stuff[i].childNode)
return self.search(searchKey, stuff[len(stuff)-1].childNode)
elif type == kBTLeafNode:
self.lastRecordNumber = 0
for k,v in stuff:
res = self.compareKeys(searchKey, k)
if res == 0:
return k, v
if res < 0:
break
self.lastRecordNumber += 1
return None, None
def traverse(self, node=None, count=0, callback=None):
if node == None:
node = self.header.rootNode
type, stuff = self.readBtreeNode(node)
if type == kBTIndexNode:
for i in range(len(stuff)):
count += self.traverse(stuff[i].childNode, callback=callback)
elif type == kBTLeafNode:
for k,v in stuff:
if callback:
callback(k,v)
else:
self.printLeaf(k, v)
count += 1
return count
def traverseLeafNodes(self, callback=None):
nodeNumber = self.header.firstLeafNode
count = 0
while nodeNumber != 0:
_, stuff = self.readBtreeNode(nodeNumber)
count += len(stuff)
for k,v in stuff:
if callback:
callback(k,v)
else:
self.printLeaf(k, v)
nodeNumber = self.lastbtnode.fLink
return count
    # Range scan: collect consecutive matching records starting at searchKey
    def searchMultiple(self, searchKey, filterKeyFunction=lambda x:False):
        """Return all (key, value) pairs from `searchKey` onward for which
        filterKeyFunction(key) is true, stopping at the first non-match.

        Relies on search() leaving self.lastnodeNumber / self.lastRecordNumber
        positioned at the first record >= searchKey, then follows fLink
        pointers across leaf nodes.
        """
        self.search(searchKey)
        nodeNumber = self.lastnodeNumber
        recordNumber = self.lastRecordNumber
        kv = []
        while nodeNumber != 0:
            _, stuff = self.readBtreeNode(nodeNumber)
            for k,v in stuff[recordNumber:]:
                if filterKeyFunction(k):
                    kv.append((k,v))
                else:
                    # keys are sorted, so the first miss ends the scan
                    return kv
            nodeNumber = self.lastbtnode.fLink
            # only the first node starts mid-way; subsequent nodes scan fully
            recordNumber = 0
        return kv
class CachedNodeData():
    """Memo of the most recently resolved catalog folder.

    Holds the folder's absolute path, its catalog node ID, and the B-tree
    (key, value) record so repeated lookups under the same directory can
    skip the tree walk.
    """
    def __init__(self, path='', cnid=0, k=None, v=None):
        # path: folder path; cnid: catalog node id; k/v: its catalog record
        self.path, self.cnid, self.key, self.value = path, cnid, k, v
class CatalogTree(BTree):
    """HFS+ catalog B-tree: (parentCNID, node name) keys -> file/folder records.

    Keeps a one-entry cache of the last resolved folder so repeated path
    lookups under the same directory avoid re-walking the tree.
    """
    def __init__(self, file):
        super(CatalogTree,self).__init__(file, HFSPlusCatalogKey, HFSPlusCatalogData)
        # Cache last folder data
        self.cached_last_folder_info = CachedNodeData()
    def printLeaf(self, k, d):
        # only print real file/folder records; skip thread records
        if d.recordType == kHFSPlusFolderRecord or d.recordType == kHFSPlusFileRecord:
            print (getString(k))
    def getComparableKey(self, k2):
        # catalog keys order by (parent CNID, node name)
        return (k2. parentID, getString(k2))
    def searchByCNID(self, cnid):
        """Resolve a CNID via its thread record (stored under (cnid, ""))."""
        threadk, threadd = self.search((cnid, ""))
        # the thread record names the real record's (parentID, name) key
        return self.search((threadd.data.parentID, getString(threadd.data))) if threadd else (None, None)
    def getFolderContents(self, cnid):
        # all children of a folder share parentID == cnid
        return self.searchMultiple((cnid, ""), lambda k:k.parentID == cnid)
    def getRecordFromPath(self, path):
        """Resolve an absolute path to its catalog (key, record).

        Returns (None, None) for relative or nonexistent paths.  As a side
        effect, refreshes self.cached_last_folder_info with the deepest
        folder touched during resolution.
        """
        # WARNING - Comparisons are all case-sensitive!
        if not path.startswith("/"):
            return None, None
        if path == "/":
            return self.searchByCNID(kHFSRootFolderID)
        parentId=kHFSRootFolderID
        is_folder = False
        k = v = prev_k = prev_v = None
        reconstructed_folder_path = ""
        if self.cached_last_folder_info.path:
            path = path.rstrip('/') # removing trailing / if present
            last_path = self.cached_last_folder_info.path
            if path == last_path: # same path as cached
                return self.cached_last_folder_info.key, self.cached_last_folder_info.value
            elif path.startswith(last_path): # partial path
                if path[len(last_path)] == '/': # must be same folder, not /abc/de in /abc/defg
                    # resume resolution from the cached folder
                    path = path[len(last_path) + 1:]
                    k = self.cached_last_folder_info.key
                    v = self.cached_last_folder_info.value
                    parentId = self.cached_last_folder_info.cnid
                    reconstructed_folder_path = last_path
                    #print('--Cache used!--', parentId, last_path)
        # when the cache matched, `path` is already relative; otherwise drop
        # the empty segment produced by the leading '/'
        path_parts = path.split("/") if k else path.split("/")[1:]
        for p in path_parts:
            if p == "":
                break
            prev_k = k
            prev_v = v
            k,v = self.search((parentId, p))
            if (k,v) == (None, None):
                return None, None
            if v.recordType == kHFSPlusFolderRecord:
                parentId = v.data.folderID
                is_folder = True
                reconstructed_folder_path += '/' + p
            else:
                # hit a file: path resolution stops here
                is_folder = False
                break
        if self.cached_last_folder_info.cnid != parentId: # last folder changed, update cache
            if is_folder:
                self.cached_last_folder_info = CachedNodeData(reconstructed_folder_path, parentId, k, v)
                #print ('Setting cacheFolder - ' + reconstructed_folder_path + " Id=" + str(parentId))
            else:
                # for a file, cache its enclosing folder's record (prev_k/prev_v)
                self.cached_last_folder_info = CachedNodeData(reconstructed_folder_path, parentId, prev_k, prev_v)
                #print ('Setting cacheFolder2- ' + reconstructed_folder_path + " Id=" + str(parentId))
                #print ("p=" + p)
        return k,v
class ExtentsOverflowTree(BTree):
    """B-tree holding file extents beyond the eight inline fork extents."""
    def __init__(self, file):
        super(ExtentsOverflowTree, self).__init__(file, HFSPlusExtentKey, HFSPlusExtentRecord)
    def getComparableKey(self, k2):
        """Extent keys order by (fileID, forkType, startBlock)."""
        return (k2.fileID, k2.forkType, k2.startBlock)
    def searchExtents(self, fileID, forkType, startBlock):
        """Exact lookup of one overflow extent record."""
        lookup_key = (fileID, forkType, startBlock)
        return self.search(lookup_key)
class AttributesTree(BTree):
    def __init__(self, file):
        """Extended-attributes B-tree: keys are (fileID, attribute name)."""
        super(AttributesTree,self).__init__(file, HFSPlusAttrKey, HFSPlusAttrRecord)
#self.debug_path = ''
    def printLeaf(self, k, d):
        # NOTE(review): .encode("hex") only exists on Python 2 str; under
        # Python 3 this line would raise (bytes.hex() is the equivalent) —
        # confirm the target interpreter.
        print (k.fileID, getString(k), self._getData(k,d).encode("hex"))
    def getComparableKey(self, k2):
        """Attribute keys order by (fileID, attribute name)."""
        return (k2.fileID, getString(k2))
def searchXattr(self, fileID, name):
k,v = self.search((fileID, name))
return self._getData(k,v) if v else None
def _getData(self, k, v):
if v.recordType == kHFSPlusAttrInlineData:
return v.data.data
elif v.recordType == kHFSPlusAttrForkData:
#print('skipping kHFSPlusAttrForkData, size=' + str(v.data.HFSPlusForkData.logicalSize) + ' k='+ getString(k))
#print(' path -> ' + self.debug_path)
return ">> | |
skillet_output = skillet.execute(jinja_context)
validation_output = skillet_output.get('pan_validation', dict())
# fix for #169 - add validation output to the context
self.save_dict_to_workflow(validation_output)
context['skillet'] = skillet
context['results'] = validation_output
if 'output_template' in skillet_output:
output_template = skillet_output['output_template']
# allow skillet builder to include markup if desired
if not output_template.startswith('<div'):
context['output_template_markup'] = False
else:
context['output_template_markup'] = True
context['output_template'] = output_template
return render(self.request, 'pan_cnc/results.html', context=context)
# fix for #120 - ensure we catch all skilletlib errors here and return
# a form_invalid up the stack
except PanoplyException as pe:
err = f'Skillet Error: {pe}'
print(err)
messages.add_message(self.request, messages.ERROR, err)
# no way to clean up here, just bail out, clean up and let the user start over
self.clean_up_workflow()
return HttpResponseRedirect(self.request.session.get('last_page', '/'))
except Exception as e:
print(f'ERROR: {e}')
messages.add_message(self.request, messages.ERROR, str(e))
return self.form_invalid(form)
# Render panforge report if found in repository
report_definition = meta['snippet_path'] + '/report'
if os.path.exists(report_definition):
try:
# Extract info from device config for reporting purposes
device_meta = {}
config_tree = lxml.etree.fromstring(skillet.context['config'])
if config_tree is not None:
host_node = config_tree.find('devices/entry/deviceconfig/system/hostname')
if host_node is not None:
device_meta['Hostname'] = host_node.text
report = Report(report_definition)
report.load_header(device_meta)
report.load_data(validation_output)
report_html = report.render_html()
if os.path.exists(settings.REPORT_PATH):
os.remove(settings.REPORT_PATH)
with open(settings.REPORT_PATH, 'w') as f:
f.write(report_html)
context['report'] = base64.encodestring(report_html.encode()).decode()
return render(self.request, 'panhandler/report.html', context)
except Exception as e:
print(f'Exception while rendering report - {e}')
return render(self.request, 'panhandler/validation-results.html', context)
class ReportView(CNCBaseAuth, View):
    """Serve the most recently generated panforge report straight from disk."""

    def get(self, request, *args, **kwargs) -> Any:
        try:
            with open(settings.REPORT_PATH, 'r') as report_file:
                contents = report_file.read()
                return HttpResponse(contents)
        except Exception as e:
            # no report on disk (or unreadable): bounce back to the last page
            print(f'Caught exception in ReportView: {e}')
            redirect_url = self.request.session.get('last_page', '/')
            return HttpResponseRedirect(redirect_url)
class ExportValidationResultsView(CNCBaseAuth, View):
    """Download the captured validation results for a skillet as a JSON file."""

    def get(self, request, *args, **kwargs) -> Any:
        skillet_name = kwargs['skillet']
        meta = db_utils.load_skillet_by_name(skillet_name)
        filename = meta.get('name', 'Validation Output')
        workflow_context = self.get_workflow()
        # keep only the snippet outputs that actually landed in the workflow
        collected = {
            s['name']: workflow_context[s['name']]
            for s in meta['snippets']
            if s['name'] in workflow_context
        }
        payload = json.dumps(collected, indent=' ')
        response = HttpResponse(payload, content_type="application/json")
        response['Content-Disposition'] = 'attachment; filename=%s.json' % filename
        return response
class FavoritesView(CNCView):
    template_name = "panhandler/favorites.html"

    def get_context_data(self, **kwargs):
        """Expose every Collection as {name: {categories, description}}."""
        context = super().get_context_data(**kwargs)
        collections_info = {
            c.name: {
                'categories': json.dumps(c.categories),
                'description': c.description,
            }
            for c in Collection.objects.all()
        }
        context['collections'] = collections_info
        return context
class DeleteFavoriteView(CNCBaseAuth, RedirectView):
    """Delete a favorites Collection, then return to the favorites list."""

    def get_redirect_url(self, *args, **kwargs):
        """Remove the named Collection if it exists.

        Fix: the previous `Collection.objects.get(...)` raised
        Collection.DoesNotExist (an unhandled 500) when the name was stale —
        e.g. a double-click on delete or a bookmarked URL.
        `filter(...).delete()` is a safe no-op in that case and deletes the
        same row otherwise.
        """
        collection_name = kwargs['favorite']
        Collection.objects.filter(name=collection_name).delete()
        return '/panhandler/favorites'
class AddFavoritesView(PanhandlerAppFormView):
    """Form view that creates a new favorites Collection from workflow input."""
    snippet = 'create_favorite'
    next_url = '/panhandler/favorites'
    app_dir = 'panhandler'
    header = "Favorites"
    title = "Add a new Collection"

    # invoked after the form has been submitted and its values captured
    # into the workflow
    def form_valid(self, form):
        """Persist the Collection described by the submitted workflow values."""
        workflow = self.get_workflow()
        try:
            new_collection = Collection.objects.create(
                name=workflow['collection_name'],
                description=workflow['collection_description'],
                categories=workflow.get('collection_categories', '[]'),
            )
            print(f'created new collection with id {new_collection.id}')
            # these are one-shot form values; drop them from the workflow
            self.pop_value_from_workflow('collection_categories')
            self.pop_value_from_workflow('snippet_name')
        except KeyError:
            return self.form_invalid(form)
        return super().form_valid(form)
class FavoriteCollectionView(CNCView):
    template_name = "panhandler/favorite.html"

    def get_context_data(self, **kwargs):
        """List the skillets attached to one favorites collection."""
        context = super().get_context_data(**kwargs)
        collection_name = self.kwargs.get('favorite', '')
        favorite_records = Favorite.objects.filter(collection__name=collection_name)
        skillets = [db_utils.load_skillet_by_name(rec.skillet_id)
                    for rec in favorite_records]
        if not skillets:
            messages.add_message(self.request, messages.INFO,
                                 mark_safe('No Skillets have been added to this Favorite yet. '
                                           'Click on the <a href="/panhandler/collection/All Skillets" '
                                           'class="btn btn-outline-primary">'
                                           '<li class="fa fa-heart"></li></a> to add a Skillet.'))
        context['skillets'] = skillets
        context['collection'] = collection_name
        return context
class AddSkilletToFavoritesView(PanhandlerAppFormView):
    """Form view that attaches (or detaches) a skillet to favorites collections."""
    snippet = 'add_skillet_to_favorite'
    next_url = '/panhandler/favorites'
    app_dir = 'panhandler'
    header = "Favorites"
    title = "Add a new Collection"
    def get_context_data(self, **kwargs) -> dict:
        """Prepare the form: list all collections and preselect those the
        skillet already belongs to."""
        skillet_name = self.kwargs.get('skillet_name', '')
        all_favorites = Collection.objects.all()
        skillet = db_utils.load_skillet_by_name(skillet_name)
        if skillet is None:
            raise SnippetRequiredException('Could not find that skillet!')
        skillet_label = skillet.get('label', '')
        favorite_names = list()
        for favorite in all_favorites:
            favorite_names.append(favorite.name)
        if not favorite_names:
            messages.add_message(self.request, messages.WARNING,
                                 mark_safe("You have not yet added any favorites. Click "
                                           "<a href='/panhandler/add_favorite'>Add Favorite Now</a>"
                                           " to get started."))
        self.save_value_to_workflow('all_favorites', favorite_names)
        self.save_value_to_workflow('skillet_name', skillet_name)
        # preselect the collections this skillet is already a member of
        if Favorite.objects.filter(skillet_id=skillet_name).exists():
            favorite = Favorite.objects.get(skillet_id=skillet_name)
            current_favorites_qs = favorite.collection_set.all()
            current_favorites = list()
            for f in current_favorites_qs:
                current_favorites.append(f.name)
            self.prepopulated_form_values['favorites'] = current_favorites
        context = super().get_context_data(**kwargs)
        context['title'] = f'Add {skillet_label} to Favorites '
        context['header'] = 'Configure Favorites'
        return context
    # once the form has been submitted and we have all the values placed in the workflow, execute this
    def form_valid(self, form):
        """Sync the Favorite's collection memberships to the submitted list."""
        workflow = self.get_workflow()
        try:
            skillet_name = self.request.POST['skillet_name']
            favorites = workflow['favorites']
            # FIXME - should no longer be deleting skillets due to no favorites ...
            if not favorites:
                # nothing selected: detach the skillet from every collection
                if Favorite.objects.filter(skillet_id=skillet_name).exists():
                    skillet = Favorite.objects.get(skillet_id=skillet_name)
                    skillet.collection_set.clear()
                    messages.add_message(self.request, messages.INFO, 'Removed Skillet from All Favorites')
                self.next_url = self.request.session.get('last_page', '/')
                return super().form_valid(form)
            (skillet, created) = Favorite.objects.get_or_create(
                skillet_id=skillet_name
            )
            if not created:
                # existing record: reset memberships before re-adding below
                skillet.collection_set.clear()
            for f in favorites:
                c = Collection.objects.get(name=f)
                skillet.collection_set.add(c)
            self.pop_value_from_workflow('favorites')
            self.pop_value_from_workflow('skillet_name')
        except KeyError:
            return self.form_invalid(form)
        return super().form_valid(form)
class ExtractTemplateVariablesView(CNCBaseAuth, View):
    """AJAX endpoint returning the variables declared in a template string."""

    def post(self, request, *args, **kwargs) -> HttpResponse:
        # NOTE(review): request.is_ajax() was removed in Django 4.0 — confirm
        # the pinned Django version before upgrading.
        template_str = 'not found'
        if self.request.is_ajax():
            try:
                payload = json.loads(self.request.body)
                template_str = payload.get('template_str', 'not found')
            except ValueError:
                message = 'Could not parse input'
                return HttpResponse(message, content_type="application/json")
        # wrap the raw template into a minimal one-snippet skillet definition
        skillet_dict = {
            'name': 'template_skillet',
            'description': 'template',
            'snippets': [{'name': 'template_snippet', 'element': template_str}],
        }
        normalized = SkilletLoader().normalize_skillet_dict(skillet_dict)
        skillet = TemplateSkillet(normalized)
        variables = skillet.get_declared_variables()
        return HttpResponse(json.dumps(variables), content_type="application/json")
class SkilletTestView(CNCBaseAuth, View):
    """AJAX debugger endpoint: execute a skillet definition against a context.

    Only 'pan*' and 'template' skillet types are accepted.  Single-snippet
    pan skillets with config-changing cmds are only rendered, never pushed
    to the device.  Returns {'output': ..., 'context': ...} as JSON.
    """
    def post(self, request, *args, **kwargs) -> HttpResponse:
        if self.request.is_ajax():
            try:
                json_str = self.request.body
                json_obj = json.loads(json_str)
                skillet_dict = json_obj.get('skillet', {})
                context = json_obj.get('context', {})
            except ValueError:
                message = 'Could not parse input'
                return HttpResponse(message, content_type="application/json")
        else:
            messages.add_message(self.request, messages.ERROR, 'Invalid Request Type')
            return HttpResponseRedirect('/panhandler')
        sl = SkilletLoader()
        skillet = sl.create_skillet(skillet_dict)
        # only pan* and template skillets may be executed from the debugger
        if not str(skillet.type).startswith('pan') and not str(skillet.type) == 'template':
            response = HttpResponse(json.dumps({"error": "Invalid Skillet type"}), content_type="application/json")
            return response
        results = dict()
        output = dict()
        # allow template type skillets in the debugger
        if skillet.type == 'template':
            output = skillet.execute(context)
        # special handling for pan type skillets
        elif str(skillet.type).startswith('pan') and len(skillet_dict['snippets']) == 1:
            # this is a single snippet execution - check for dangerous commands
            snippet = skillet.get_snippets()[0]
            if not snippet.should_execute(context):
                # skip initialize_context which will contact the device
                skillet.context = context
                output['debug'] = 'This snippet was skipped due to when conditional'
                output['metadata'] = dict()
                output['metadata']['name'] = snippet.metadata['name']
                output['metadata']['when'] = snippet.metadata['when']
            elif 'cmd' in snippet.metadata and \
                    snippet.metadata['cmd'] in ('op', 'set', 'edit', 'override', 'move', 'rename', 'clone', 'delete'):
                # config-changing command: render the snippet but do not push
                try:
                    skillet.initialize_context(context)
                except PanoplyException as pe:
                    output['error'] = str(pe)
                metadata = snippet.render_metadata(context)
                output['debug'] = 'No config changes pushed to the device during testing, ' \
                                  'debug only showing rendered output'
                output['metadata'] = metadata
            else:
                # read-only snippet: safe to execute for real
                try:
                    output = skillet.execute(context)
                    if skillet_dict['type'] == 'pan_validation':
                        if snippet.name in skillet.context:
                            output['pan_validation'][snippet.name] = skillet.context[snippet.name]
                except PanoplyException as pe:
                    print(pe)
                    output['error'] = str(pe)
        else:
            try:
                # potentially dangerous to allow multi-snippet pan type skillets to execute
                # FIXME - verify where this is actually useful :-/
                output = skillet.execute(context)
            except PanoplyException as pe:
                print(pe)
                output['error'] = str(pe)
        results['output'] = output
        results['context'] = dict()
        # avoid putting full config var back into context
        for i in skillet.context:
            if i != 'config':
                results['context'][i] = skillet.context[i]
        json_output = json.dumps(results)
        response = HttpResponse(json_output, content_type="application/json")
        return response
class GenerateKeyView(CNCBaseAuth, View):
    """AJAX endpoint: generate an SSH deploy key for the named repository."""

    def post(self, request, *args, **kwargs) -> HttpResponse:
        # guard clauses: reject non-AJAX and unparsable bodies early
        if not self.request.is_ajax():
            message = 'invalid input'
            return HttpResponse(message, content_type="application/json")
        try:
            payload = json.loads(self.request.body)
            repo_name = payload.get('name', '')
        except ValueError:
            message = 'Could not parse input'
            return HttpResponse(message, content_type="application/json")
        pub_key = git_utils.generate_ssh_key(repo_name)
        output = {'pub': pub_key}
        return HttpResponse(json.dumps(output), content_type="application/json")
class PushGitRepositoryView(CNCBaseAuth, View):
    """AJAX endpoint: push local commits of a repository upstream over SSH."""

    def post(self, request, *args, **kwargs) -> HttpResponse:
        if not self.request.is_ajax():
            message = 'invalid input'
            return HttpResponse(message, content_type="application/json")
        try:
            payload = json.loads(self.request.body)
            repo_name = payload.get('name', '')
        except ValueError:
            message = 'Could not parse input'
            return HttpResponse(message, content_type="application/json")
        if not RepositoryDetails.objects.filter(name=repo_name).exists():
            message = 'invalid repository'
            return HttpResponse(message, content_type="application/json")
        repo = RepositoryDetails.objects.get(name=repo_name)
        output = dict()
        # pushing needs the deploy key, which only works over an SSH remote
        if not repo.url.startswith('git@') and not repo.url.startswith('ssh://'):
            message = 'invalid Repository URL - Push requires an SSH URL'
            output['status'] = message
            return HttpResponse(json.dumps(output), content_type="application/json")
        user_dir = os.path.expanduser('~/.pan_cnc')
        snippets_dir = os.path.join(user_dir, 'panhandler/repositories')
        repo_dir = os.path.join(snippets_dir, repo_name)
        (success, msg) = git_utils.push_local_changes(repo_dir, repo.deploy_key_path)
        if success:
            output['status'] = 'Changes pushed upstream'
            messages.add_message(self.request, messages.SUCCESS, 'Changes pushed upstream')
        else:
            output['status'] = f'Error pushing changes upstream\n{msg}'
        return HttpResponse(json.dumps(output), content_type="application/json")
class CopySkilletView(CNCBaseAuth, RedirectView):
app_dir = 'panhandler'
def get_redirect_url(self, *args, **kwargs):
skillet_name = kwargs['skillet_name']
repo_name = kwargs['repo_name']
| |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# @Version : Python 3.6
import os
import pdb
import json
import argparse
import warnings
from collections import OrderedDict
from functools import total_ordering
from itertools import combinations
import torch
import torch.nn.functional as F
import numpy as np
from sklearn import metrics
from sklearn.exceptions import UndefinedMetricWarning
from tqdm import tqdm
from helper import load_line_json
warnings.filterwarnings(action="ignore", category=UndefinedMetricWarning)
@total_ordering
class Threshold(object):
    """One scored prediction, orderable by score (for P-R curve building).

    `flag` is 1 when the predicted label equals the gold label, else 0.
    total_ordering derives the remaining comparisons from __eq__/__lt__.
    """
    def __init__(self, threshold, label, true_label):
        self.th = threshold
        self.label = label
        self.true_label = true_label
        # 1 for a correct prediction, 0 otherwise
        self.flag = 1 if self.label == self.true_label else 0
    def __eq__(self, obj):
        return self.th == obj.th
    def __lt__(self, obj):
        return self.th < obj.th
def compute_metrics_nyth(labels, preds, ids, target_names):
    r"""Calculate classification metrics for the NYT-H dataset.

    Args:
        labels: 1-d array of gold relation ids.
        preds: 1-d array of predicted relation ids (same shape as labels).
        ids: list of every relation id, aligned with `target_names`.
        target_names: relation name strings for the per-class report.

    Returns:
        OrderedDict with accuracy, macro/micro precision/recall/F1,
        the macro P/R/F1 over non-NA relations only, and the full
        sklearn per-class report dict under 'report'.
    """
    results = OrderedDict()
    results['acc'] = (preds == labels).mean()
    results['macro-f1'] = metrics.f1_score(labels, preds, average='macro')
    results['macro-recall'] = metrics.recall_score(labels, preds, average='macro')
    results['macro-precision'] = metrics.precision_score(labels, preds, average='macro')
    results['micro-f1'] = metrics.f1_score(labels, preds, average='micro')
    results['micro-recall'] = metrics.recall_score(labels, preds, average='micro')
    results['micro-precision'] = metrics.precision_score(labels, preds, average='micro')
    report = metrics.classification_report(labels, preds,
                                           digits=4, labels=ids,
                                           target_names=target_names, output_dict=True)
    rels = set(target_names)
    ps = list()
    rs = list()
    f1s = list()
    for key, val in report.items():
        # the report also holds summary rows ('accuracy', 'macro avg', ...);
        # keep only real relations and skip the NA class
        if key in rels and key != 'NA':
            ps.append(val['precision'])
            rs.append(val['recall'])
            f1s.append(val['f1-score'])
    # fix: guard against an empty non-NA relation set, which previously
    # raised ZeroDivisionError
    n = len(ps)
    results['non_na_macro_precision'] = sum(ps) / n if n else 0.0
    results['non_na_macro_recall'] = sum(rs) / n if n else 0.0
    results['non_na_macro_f1'] = sum(f1s) / n if n else 0.0
    results['report'] = report
    return results
def evaluate_nyth(model, criterion, logger, processor, config, dataset_name, prefix=""):
    r"""Evaluate `model` on one NYT-H split ('train' / 'dev' / 'test').

    Runs the model over the split's DataLoader, computes classification
    metrics plus several precision-recall curves (our variant and the
    opennre-style variant), writes the curves and per-instance predictions
    into <config.output_dir>/eval, and adds DS-GT and (on test) MA-GT
    scores.

    Returns:
        (results dict, mean eval loss, preds, labels, softmax outputs)
    """
    eval_output_dir = os.path.join(config.output_dir, "eval")
    if not os.path.exists(eval_output_dir):
        os.makedirs(eval_output_dir)
    number_of_total_examples = {
        'train': processor.num_train_examples,
        'dev': processor.num_dev_examples,
        'test': processor.num_test_examples,
    }
    logger.info(f"***** Running evaluation {prefix} *****")
    logger.info(f" Num examples = {number_of_total_examples[dataset_name]}")
    results = dict()
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = list()
    labels = list()
    outs = list()
    tokens = list()
    instance_ids = list()
    bag_ids = list()
    data_loaders = {
        "train": processor.train_loader,
        "dev": processor.dev_loader,
        "test": processor.test_loader
    }
    data_loader = data_loaders[dataset_name]
    with torch.no_grad():
        model.eval()
        r"""opennre"""
        pred_result = list()
        r"""end of opennre"""
        for raw_batch in tqdm(data_loader, desc="Evaluating", ncols=60):
            # sent-level and bag-level loaders pack batches differently
            if config.task_name == 'sent':
                batch = tuple(t.to(config.device) for t in raw_batch[:-2])
                rel_labels = batch[4]
                bag_labels = batch[5]
                instance_id = raw_batch[6]
                bag_id = raw_batch[7]
                inputs = {
                    "token2ids": batch[0],
                    "pos1s": batch[1],
                    "pos2s": batch[2],
                    "mask": batch[3],
                }
            elif config.task_name == 'bag':
                batch = tuple(t.to(config.device) for t in raw_batch[:-3])
                rel_labels = batch[4]
                bag_labels = batch[5]
                instance_id = raw_batch[6]
                bag_id = raw_batch[7]
                inputs = {
                    "token2ids": batch[0],
                    "pos1s": batch[1],
                    "pos2s": batch[2],
                    "mask": batch[3],
                    "scopes": raw_batch[8],
                    "is_training": False,
                    "rel_labels": rel_labels,
                }
            else:
                raise NotImplementedError
            instance_ids.extend(instance_id)
            bag_ids.extend(bag_id)
            out = model(**inputs)
            loss = criterion(out, rel_labels)
            eval_loss += loss.item()
            nb_eval_steps += 1
            _, pred = torch.max(out, dim=1) # replace softmax with max function, same results
            pred = pred.cpu().numpy().reshape((-1, 1))
            rel_labels = rel_labels.cpu().numpy().reshape((-1, 1))
            bag_labels = bag_labels.cpu().numpy().reshape((-1, 1))
            for x in batch[0].cpu().numpy():
                tokens.append(" ".join([processor.id2word[y] for y in x.tolist()]))
            if config.task_name == 'sent' or (config.task_name == 'bag' \
                    and '_one' in config.model_name):
                softmax_out = torch.softmax(out.cpu().detach(), dim=-1)
            else:
                softmax_out = out.cpu().detach() # reference from opennre
            outs.append(softmax_out.numpy())
            preds.append(pred)
            labels.append(rel_labels)
            r"""opennre"""
            # flatten scores into per-(instance, relation) records for the
            # opennre-style P-R curve; b_label is only attached for the
            # gold relation so bag-level curves can filter on it later
            for i in range(softmax_out.size(0)):
                for relid in range(processor.class_num):
                    if processor.id2rel[relid] != 'NA':
                        pred_ins = {
                            'label': int(rel_labels[i].item() == relid),
                            'score': softmax_out[i][relid].item(),
                            'pred_label': relid
                        }
                        if rel_labels[i].item() == relid:
                            if bag_labels[i] == 1:
                                pred_ins.update({"b_label": 1})
                            elif bag_labels[i] == 0:
                                pred_ins.update({"b_label": 0})
                        pred_result.append(pred_ins)
            r"""end of opennre"""
    eval_loss = eval_loss / nb_eval_steps
    outs = np.concatenate(outs, axis=0).astype(np.float32)
    preds = np.concatenate(preds, axis=0).reshape(-1).astype(np.int64)
    labels = np.concatenate(labels, axis=0).reshape(-1).astype(np.int64)
    id2rel = processor.id2rel
    ids = list(range(len(id2rel)))
    target_names = [id2rel[ind] for ind in ids]
    results = compute_metrics_nyth(labels, preds, ids, target_names)
    """Precision-Recall Curve (Ours)"""
    probs = torch.tensor(outs)
    # just take the probs in the max position
    thresholds, indices = probs[:,1:].max(dim=1)
    indices += 1
    ppp, rrr, _ = metrics.precision_recall_curve(labels==indices.cpu().detach().numpy(), thresholds)
    with open(os.path.join(eval_output_dir, 'prc_skprc_mine.json'), 'wt', encoding='utf-8') as fout:
        json.dump({'precision': ppp.tolist(), 'recall': rrr.tolist()}, fout, ensure_ascii=False)
    thresholds = thresholds.numpy()
    indices = indices.numpy()
    th_objs = list()
    for th, lb, truth in zip(thresholds, indices, labels):
        th_objs.append(Threshold(th, lb, truth))
    # rank predictions by confidence, then sweep to build the P-R curve
    th_list_sorted = sorted(th_objs, reverse=True)
    tot_len = len(thresholds)
    correct = 0
    ps = list()
    rs = list()
    ths = list()
    for ind, th in enumerate(th_list_sorted):
        correct += th.flag
        ps.append(float(correct)/(ind + 1))
        rs.append(float(correct)/tot_len)
        ths.append(float(th.th))
    with open(os.path.join(eval_output_dir, "prc.json"), 'wt', encoding='utf-8') as fout:
        json.dump({
            "precision": ps,
            "recall": rs,
            "threshold": ths,
        }, fout, ensure_ascii=False)
    results['auc'] = metrics.auc(rs, ps)
    r"""opennre"""
    sorted_pred_result = sorted(pred_result, key=lambda x: x['score'], reverse=True)
    prec = []
    rec = []
    correct = 0
    # import ipdb; ipdb.set_trace()
    tot_count_flags = labels.copy()
    tot_count_flags[tot_count_flags > 0] = 1
    tot_count = int(tot_count_flags.sum())
    # take `all` non-na probs
    correct_k_with_rel = {"k": list(), "covered_rel": list()}
    correct_covered_rel = set()
    all_k_with_rel = {"k": list(), "covered_rel": list()}
    all_covered_rel = set()
    for i, item in enumerate(sorted_pred_result):
        correct += item['label']
        prec.append(float(correct) / float(i + 1))
        rec.append(float(correct) / float(tot_count))
        if item['label'] > 0:
            correct_covered_rel.add(item['pred_label'])
            correct_k_with_rel['k'].append(i + 1)
            correct_k_with_rel['covered_rel'].append(len(correct_covered_rel))
        all_covered_rel.add(item['pred_label'])
        all_k_with_rel['k'].append(i + 1)
        all_k_with_rel['covered_rel'].append(len(all_covered_rel))
    non_na_auc = metrics.auc(x=rec, y=prec)
    np_prec = np.array(prec)
    np_rec = np.array(rec)
    with open(os.path.join(eval_output_dir, "prc_opennre.json"), 'wt', encoding='utf-8') as fout:
        json.dump({
            "precision": prec,
            "recall": rec,
        }, fout, ensure_ascii=False)
    with open(os.path.join(eval_output_dir, "k_covered_rel.json"), 'wt', encoding='utf-8') as fout:
        json.dump({
            "correct": correct_k_with_rel,
            "all": all_k_with_rel,
        }, fout, ensure_ascii=False)
    max_f1 = (2 * np_prec * np_rec / (np_prec + np_rec + 1e-20)).max()
    mean_prec = np_prec.mean()
    results['non_na_auc'] = non_na_auc
    results['max_f1'] = max_f1
    results['mean_prec'] = mean_prec
    r"""end of opennre"""
    # -------------------------------------------------------------------------------------------------
    if dataset_name == 'test' and config.task_name == "bag":
        """opennre for bag_labels"""
        # restrict to records carrying a bag-level label (attached above)
        b_pred_result = list(filter(lambda x: "b_label" in x, pred_result))
        b_sorted_pred_result = sorted(b_pred_result, key=lambda x: x['score'], reverse=True)
        b_prec = []
        b_rec = []
        b_correct = 0
        b_tot_count = sum([x['b_label'] for x in b_sorted_pred_result])
        # take `all` non-na probs
        for i, item in enumerate(b_sorted_pred_result):
            b_correct += item['b_label']
            b_prec.append(float(b_correct) / float(i + 1))
            b_rec.append(float(b_correct) / float(b_tot_count))
            if i + 1 in [50, 100, 200, 300, 400, 500, 1000, 2000]:
                results[f'b_P@{i + 1}'] = float(b_correct) / float(i + 1)
        b_non_na_auc = metrics.auc(x=b_rec, y=b_prec)
        np_b_prec = np.array(b_prec)
        np_b_rec = np.array(b_rec)
        with open(os.path.join(eval_output_dir, "b_prc_opennre.json"), 'wt', encoding='utf-8') as fout:
            json.dump({
                "precision": b_prec,
                "recall": b_rec,
            }, fout, ensure_ascii=False)
        b_max_f1 = (2 * np_b_prec * np_b_rec / (np_b_prec + np_b_rec + 1e-20)).max()
        b_mean_prec = np_b_prec.mean()
        results['b_non_na_auc'] = b_non_na_auc
        results['b_max_f1'] = b_max_f1
        results['b_mean_prec'] = b_mean_prec
        """end of opennre for bag_labels"""
    # -------------------------------------------------------------------------------------------------
    # dump one JSON line per instance for error analysis / MA-GT scoring
    # (note: `fin` is opened for WRITING here despite the name)
    with open(os.path.join(eval_output_dir, 'eval_mc.txt'), 'wt', encoding='utf-8') as fin:
        for ins_id, bag_id, l, t, p in zip(instance_ids, bag_ids, labels, tokens, preds):
            rel2results = OrderedDict()
            for rel in processor.rel2id:
                if rel != 'NA':
                    if rel == processor.id2rel[p]:
                        rel2results[rel] = True
                    else:
                        rel2results[rel] = False
            l = processor.id2rel[l]
            p = processor.id2rel[p]
            result = OrderedDict()
            result["instance_id"] = ins_id
            result["bag_id"] = bag_id
            result["result"] = str(l==p)
            result["label"] = l
            result["pred"] = p
            result["tokens"] = t
            result["rel2result"] = rel2results
            fin.write('{}\n'.format(json.dumps(result)))
    ds_p, ds_r, ds_f1 = compute_dsgt(labels, preds, processor.rel2id, verbose=False)
    results.update({"dsgt_p": ds_p, "dsgt_r": ds_r, "dsgt_f1": ds_f1})
    if dataset_name == 'test':
        # MA-GT needs the per-instance dump written above
        idname = "bag_id" if config.task_name == "bag" else "instance_id"
        id2results = dict()
        with open(os.path.join(eval_output_dir, 'eval_mc.txt'), 'r', encoding='utf-8') as fin:
            for line in fin:
                ins = json.loads(line)
                id2results[ins[idname]] = ins
        ma_p, ma_r, ma_f1 = compute_magt(labels, preds, config.task_name,
                                         processor.rel2id, processor.test_dataset.data, id2results, verbose=False)
        results.update({"magt_p": ma_p, "magt_r": ma_r, "magt_f1": ma_f1})
    logger.info("***** {} Eval results {} *****".format(dataset_name, prefix))
    for key in sorted(results.keys()):
        logger.info(" %s = %s", key, str(results[key]))
    return results, eval_loss, preds, labels, outs
def evaluate_crcnn(model, criterion, logger, processor, config, dataset_name, prefix=""):
r"""evaluate the """
eval_output_dir = os.path.join(config.output_dir, "eval")
if not os.path.exists(eval_output_dir):
os.makedirs(eval_output_dir)
number_of_total_examples = {
'train': processor.num_train_examples,
'dev': processor.num_dev_examples,
'test': processor.num_test_examples,
}
logger.info(f"***** Running evaluation {prefix} *****")
logger.info(f" Num examples = {number_of_total_examples[dataset_name]}")
results = dict()
eval_loss = 0.0
nb_eval_steps = 0
preds = list()
labels = list()
outs = list()
tokens = list()
instance_ids = list()
bag_ids = list()
data_loaders = {
"train": processor.train_loader,
"dev": processor.dev_loader,
"test": processor.test_loader
}
data_loader = data_loaders[dataset_name]
with torch.no_grad():
model.eval()
for raw_batch in tqdm(data_loader, desc="Evaluating", ncols=60):
batch = tuple(t.to(config.device) for t in raw_batch[:-2])
inputs = {
"token2ids": batch[0],
"pos1s": batch[1],
"pos2s": batch[2],
"mask": batch[3],
}
rel_labels = batch[4]
bag_labels = batch[5]
instance_id = raw_batch[6]
bag_id = raw_batch[7]
instance_ids.extend(instance_id)
bag_ids.extend(bag_id)
out = model(**inputs)
loss = criterion(out, rel_labels)
eval_loss += loss.item()
nb_eval_steps += 1
scores, pred = torch.max(out[:, 1:], dim=1)
pred = pred + 1
scores = scores.cpu().numpy().reshape((-1, 1))
pred = pred.cpu().numpy().reshape((-1, 1))
for i in range(pred.shape[0]):
if scores[i][0] < 0:
pred[i][0] = 0
rel_labels = rel_labels.cpu().numpy().reshape((-1, 1))
for x in batch[0].cpu().numpy():
tokens.append(" ".join([processor.id2word[y] for y in x.tolist()]))
outs.append(out.detach().cpu().numpy())
preds.append(pred)
| |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
#from Functions import generate_grid_unit
from Functions import generate_grid
'''
TODOs:
- add dice loss
- add mutual information loss
- add mse loss
'''
# neural network module
class Miccai2020_LDR_laplacian_unit_disp_add_lvl1(nn.Module):
    """Coarsest level (1/4 resolution) of a LapIRN-style Laplacian-pyramid
    registration network.

    Given a moving image ``x`` and a fixed image ``y``, predicts a dense
    displacement field and warps ``x`` with it.

    NOTE(review): ``.cuda()`` is called in the constructor, so this module
    requires a CUDA device — confirm before running on CPU-only hosts.
    """

    def __init__(self, in_channel, n_classes, start_channel, is_train=True, imgshape=(160, 192, 144), range_flow=0.4):
        # in_channel: channel count of the concatenated (moving, fixed) pair.
        # n_classes: output channels of the predicted field (3 for 3-D flow).
        # start_channel: base width; internal blocks use multiples of it.
        # range_flow: scale factor bounding the raw field prediction.
        # is_train: when True, forward() returns intermediates for the losses.
        super(Miccai2020_LDR_laplacian_unit_disp_add_lvl1, self).__init__()
        self.in_channel = in_channel
        self.n_classes = n_classes
        self.start_channel = start_channel
        self.range_flow = range_flow
        self.is_train = is_train
        self.imgshape = imgshape
        # Generate a fixed sampling grid for the displacement field and keep
        # it as a (1, ...) float tensor on the GPU.
        self.grid_1 = generate_grid(self.imgshape,1)
        self.grid_1 = torch.from_numpy(np.reshape(self.grid_1, (1,) + self.grid_1.shape)).cuda().float()
        # Warping module: resamples an image at (grid + flow) positions via
        # torch.nn.functional.grid_sample with border padding.
        self.transform = SpatialTransform_unit().cuda()
        bias_opt = False
        # Two-conv feature extractor applied to the 1/4-resolution input pair
        # (see input_feature_extract for the batchnorm / plain variants).
        self.input_encoder_lvl1 = self.input_feature_extract(self.in_channel, self.start_channel * 4, bias=bias_opt)
        # Strided conv: halves each spatial dimension before the residual trunk.
        self.down_conv = nn.Conv3d(self.start_channel * 4, self.start_channel * 4, 3, stride=2, padding=1, bias=bias_opt)
        self.resblock_group_lvl1 = self.resblock_seq(self.start_channel * 4, bias_opt=bias_opt)
        # Transposed conv: doubles the spatial size back after down_conv.
        self.up = nn.ConvTranspose3d(self.start_channel * 4, self.start_channel * 4, 2, stride=2, padding=0, output_padding=0, bias=bias_opt)
        # Average-pool downsampling: halves each spatial dimension of the input.
        self.down_avg = nn.AvgPool3d(kernel_size=3, stride=2, padding=1, count_include_pad=False)
        # Output head maps the concatenated skip features to the flow field
        # (see outputs() for the Softsign-bounded head).
        self.output_lvl1 = self.outputs(self.start_channel * 8, self.n_classes, kernel_size=3, stride=1, padding=1, bias=False)

    def resblock_seq(self, in_channels, bias_opt=False):
        """Residual trunk: five PreActBlocks, each followed by LeakyReLU(0.2)."""
        layer = nn.Sequential(
            PreActBlock(in_channels, in_channels, bias=bias_opt), nn.LeakyReLU(0.2),
            PreActBlock(in_channels, in_channels, bias=bias_opt), nn.LeakyReLU(0.2),
            PreActBlock(in_channels, in_channels, bias=bias_opt), nn.LeakyReLU(0.2),
            PreActBlock(in_channels, in_channels, bias=bias_opt), nn.LeakyReLU(0.2),
            PreActBlock(in_channels, in_channels, bias=bias_opt), nn.LeakyReLU(0.2)
        )
        return layer

    def input_feature_extract(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False, batchnorm=False):
        """Input feature extractor: conv+BN+ReLU when batchnorm, else
        conv + LeakyReLU(0.2) + conv (no normalisation)."""
        if batchnorm:
            layer = nn.Sequential( nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias), nn.BatchNorm3d(out_channels), nn.ReLU() )
        else:
            layer = nn.Sequential(
                nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias),
                nn.LeakyReLU(0.2),
                nn.Conv3d(out_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias)
            )
        return layer

    def outputs(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0,
                bias=False, batchnorm=False):
        """Output head; the default branch ends in Softsign, so the raw field
        lies in (-1, 1) before scaling by range_flow."""
        if batchnorm:
            layer = nn.Sequential(
                nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias),
                nn.BatchNorm3d(out_channels),
                nn.Tanh())
        else:
            layer = nn.Sequential(
                nn.Conv3d(in_channels, int(in_channels/2), kernel_size, stride=stride, padding=padding, bias=bias),
                nn.LeakyReLU(0.2),
                nn.Conv3d(int(in_channels/2), out_channels, kernel_size, stride=stride, padding=padding, bias=bias),
                nn.Softsign())
        return layer

    def forward(self, x, y):
        """Predict the level-1 flow. ``x`` is the moving image, ``y`` fixed."""
        cat_input = torch.cat((x, y), 1)           # stack along channels
        cat_input = self.down_avg(cat_input)       # 1/2 resolution
        cat_input_lvl1 = self.down_avg(cat_input)  # 1/4 resolution
        # Downsampled fixed image; 1:2 slicing keeps the channel axis.
        down_y = cat_input_lvl1[:, 1:2, :, :, :]
        fea_e0 = self.input_encoder_lvl1(cat_input_lvl1)  # two-conv features
        e0 = self.down_conv(fea_e0)            # downsample
        e1 = self.resblock_group_lvl1(e0)      # residual trunk
        e2 = self.up(e1)                       # upsample back
        # Skip-connect encoder features, then bound/scale the raw field.
        output_disp_e0_v = self.output_lvl1(torch.cat([e2, fea_e0], dim=1)) * self.range_flow
        # Warp the moving image with the predicted field.
        warpped_inputx_lvl1_out = self.transform(x, output_disp_e0_v.permute(0, 2, 3, 4, 1), self.grid_1)
        f_output = output_disp_e0_v
        if self.is_train is True:
            # NOTE(review): the field appears twice in the training tuple,
            # mirroring the upstream LapIRN return convention.
            f_output = [output_disp_e0_v, warpped_inputx_lvl1_out, down_y, output_disp_e0_v, e2]
        return f_output
class Miccai2020_LDR_laplacian_unit_disp_add_lvl2(nn.Module):
    """Middle level (1/2 resolution) of the Laplacian-pyramid registration
    network. Refines the upsampled level-1 field and returns the composed
    (level-1 + residual) displacement.

    NOTE(review): ``.cuda()`` is called in the constructor — CUDA required.
    """

    def __init__(self, in_channel, n_classes, start_channel, is_train=True, imgshape=(160, 192, 144), range_flow=0.4, model_lvl1=None):
        # model_lvl1: the coarser sub-network whose output field is refined here.
        super(Miccai2020_LDR_laplacian_unit_disp_add_lvl2, self).__init__()
        self.in_channel = in_channel
        self.n_classes = n_classes
        self.start_channel = start_channel
        self.range_flow = range_flow
        self.is_train = is_train
        self.imgshape = imgshape
        self.model_lvl1 = model_lvl1
        # Fixed sampling grid kept on the GPU as a (1, ...) float tensor.
        self.grid_1 = generate_grid(self.imgshape,1)
        self.grid_1 = torch.from_numpy(np.reshape(self.grid_1, (1,) + self.grid_1.shape)).cuda().float()
        self.transform = SpatialTransform_unit().cuda()
        bias_opt = False
        # +3 input channels: the upsampled 3-channel level-1 flow is concatenated.
        self.input_encoder_lvl1 = self.input_feature_extract(self.in_channel+3, self.start_channel * 4, bias=bias_opt)
        self.down_conv = nn.Conv3d(self.start_channel * 4, self.start_channel * 4, 3, stride=2, padding=1, bias=bias_opt)
        self.resblock_group_lvl1 = self.resblock_seq(self.start_channel * 4, bias_opt=bias_opt)
        # Trilinear upsampling for the coarse displacement field.
        self.up_tri = torch.nn.Upsample(scale_factor=2, mode="trilinear")
        self.up = nn.ConvTranspose3d(self.start_channel * 4, self.start_channel * 4, 2, stride=2, padding=0, output_padding=0, bias=bias_opt)
        self.down_avg = nn.AvgPool3d(kernel_size=3, stride=2, padding=1, count_include_pad=False)
        self.output_lvl1 = self.outputs(self.start_channel * 8, self.n_classes, kernel_size=3, stride=1, padding=1, bias=False)

    def unfreeze_modellvl1(self):
        """Re-enable gradients on the level-1 sub-model for joint training."""
        print("\nunfreeze model_lvl1 parameter")
        for param in self.model_lvl1.parameters():
            param.requires_grad = True

    def resblock_seq(self, in_channels, bias_opt=False):
        """Residual trunk: five PreActBlocks, each followed by LeakyReLU(0.2)."""
        layer = nn.Sequential(
            PreActBlock(in_channels, in_channels, bias=bias_opt), nn.LeakyReLU(0.2),
            PreActBlock(in_channels, in_channels, bias=bias_opt), nn.LeakyReLU(0.2),
            PreActBlock(in_channels, in_channels, bias=bias_opt), nn.LeakyReLU(0.2),
            PreActBlock(in_channels, in_channels, bias=bias_opt), nn.LeakyReLU(0.2),
            PreActBlock(in_channels, in_channels, bias=bias_opt), nn.LeakyReLU(0.2)
        )
        return layer

    def input_feature_extract(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False, batchnorm=False):
        """Input feature extractor: conv+BN+ReLU when batchnorm, else
        conv + LeakyReLU(0.2) + conv."""
        if batchnorm:
            layer = nn.Sequential(
                nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias),
                nn.BatchNorm3d(out_channels),
                nn.ReLU())
        else:
            layer = nn.Sequential(
                nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias),
                nn.LeakyReLU(0.2),
                nn.Conv3d(out_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias))
        return layer

    def outputs(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0,
                bias=False, batchnorm=False):
        """Output head; the default branch ends in Softsign (field in (-1, 1))."""
        if batchnorm:
            layer = nn.Sequential(
                nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias),
                nn.BatchNorm3d(out_channels),
                nn.Tanh())
        else:
            layer = nn.Sequential(
                nn.Conv3d(in_channels, int(in_channels/2), kernel_size, stride=stride, padding=padding, bias=bias),
                nn.LeakyReLU(0.2),
                nn.Conv3d(int(in_channels/2), out_channels, kernel_size, stride=stride, padding=padding, bias=bias),
                nn.Softsign())
        return layer

    def forward(self, x, y):
        # Level-1 returns (disp, warped, down_y, v, embedding); only the
        # field and the bottleneck embedding are reused here.
        lvl1_disp, _, _, lvl1_v, lvl1_embedding = self.model_lvl1(x, y)
        lvl1_disp_up = self.up_tri(lvl1_disp)  # upsample the coarse field 2x
        x_down = self.down_avg(x)  # moving image at this level's resolution
        y_down = self.down_avg(y)  # fixed image at this level's resolution
        # Pre-warp the moving image with the coarse field, then predict a residual.
        warpped_x = self.transform(x_down, lvl1_disp_up.permute(0, 2, 3, 4, 1), self.grid_1)
        cat_input_lvl2 = torch.cat((warpped_x, y_down, lvl1_disp_up), 1)
        fea_e0 = self.input_encoder_lvl1(cat_input_lvl2)
        e0 = self.down_conv(fea_e0)
        e0 = e0 + lvl1_embedding  # inject the level-1 bottleneck features
        e0 = self.resblock_group_lvl1(e0)
        e0 = self.up(e0)
        output_disp_e0_v = self.output_lvl1(torch.cat([e0, fea_e0], dim=1)) * self.range_flow
        # Compose: refined field = upsampled coarse field + residual.
        compose_field_e0_lvl1 = lvl1_disp_up + output_disp_e0_v
        warpped_inputx_lvl1_out = self.transform(x, compose_field_e0_lvl1.permute(0, 2, 3, 4, 1), self.grid_1)
        if self.is_train is True:
            return compose_field_e0_lvl1, warpped_inputx_lvl1_out, y_down, output_disp_e0_v, lvl1_v, e0
        else:
            return compose_field_e0_lvl1
class Miccai2020_LDR_laplacian_unit_disp_add_lvl3(nn.Module):
def __init__(self, in_channel, n_classes, start_channel, is_train=True, imgshape=(160, 192, 144), range_flow=0.4,
             model_lvl2=None):
    # Finest pyramid level: refines the composed field produced by
    # model_lvl2. Mirrors the lower levels, but note there is no down_avg
    # member here — this level consumes inputs at its native resolution.
    # NOTE(review): .cuda() is called here, so CUDA is required.
    super(Miccai2020_LDR_laplacian_unit_disp_add_lvl3, self).__init__()
    self.in_channel = in_channel
    self.n_classes = n_classes
    self.start_channel = start_channel
    self.range_flow = range_flow
    self.is_train = is_train
    self.imgshape = imgshape
    self.model_lvl2 = model_lvl2
    # Fixed sampling grid kept on the GPU as a (1, ...) float tensor.
    self.grid_1 = generate_grid(self.imgshape,1)
    self.grid_1 = torch.from_numpy(np.reshape(self.grid_1, (1,) + self.grid_1.shape)).cuda().float()
    self.transform = SpatialTransform_unit().cuda()
    bias_opt = False
    # +3 input channels for the concatenated upsampled level-2 flow.
    self.input_encoder_lvl1 = self.input_feature_extract(self.in_channel+3, self.start_channel * 4, bias=bias_opt)
    self.down_conv = nn.Conv3d(self.start_channel * 4, self.start_channel * 4, 3, stride=2, padding=1, bias=bias_opt)
    self.resblock_group_lvl1 = self.resblock_seq(self.start_channel * 4, bias_opt=bias_opt)
    # Trilinear upsampling for the level-2 displacement field.
    self.up_tri = torch.nn.Upsample(scale_factor=2, mode="trilinear")
    self.up = nn.ConvTranspose3d(self.start_channel * 4, self.start_channel * 4, 2, stride=2,
                                 padding=0, output_padding=0, bias=bias_opt)
    self.output_lvl1 = self.outputs(self.start_channel * 8, self.n_classes, kernel_size=3, stride=1, padding=1, bias=False)
def unfreeze_modellvl2(self):
# unFreeze model_lvl1 weight
print("\nunfreeze model_lvl2 parameter")
for param in self.model_lvl2.parameters():
param.requires_grad = True
def resblock_seq(self, in_channels, bias_opt=False):
    """Build the residual trunk: five PreActBlocks, each followed by a
    LeakyReLU(0.2), with the channel width preserved throughout."""
    stages = []
    for _ in range(5):
        stages.append(PreActBlock(in_channels, in_channels, bias=bias_opt))
        stages.append(nn.LeakyReLU(0.2))
    return nn.Sequential(*stages)
def input_feature_extract(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1,
                          bias=False, batchnorm=False):
    """First feature extractor applied to the concatenated inputs.

    Returns conv + BN + ReLU when ``batchnorm`` is set, otherwise
    conv + LeakyReLU(0.2) + conv (no normalisation layer).
    """
    first = nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias)
    if batchnorm:
        return nn.Sequential(first, nn.BatchNorm3d(out_channels), nn.ReLU())
    second = nn.Conv3d(out_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias)
    return nn.Sequential(first, nn.LeakyReLU(0.2), second)
# def decoder(self, in_channels, out_channels, kernel_size=2, stride=2, padding=0,
# output_padding=0, bias=True):
# layer = nn.Sequential(
# nn.ConvTranspose3d(in_channels, out_channels, kernel_size, stride=stride,
# padding=padding, output_padding=output_padding, bias=bias),
# nn.ReLU())
# return layer
def outputs(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0,
            bias=False, batchnorm=False):
    """Output head producing the raw displacement field.

    ``batchnorm=True``: conv + BatchNorm3d + Tanh.
    Default: bottleneck conv halving the channels, LeakyReLU(0.2), conv to
    ``out_channels``, then Softsign so the raw field lies in (-1, 1) before
    it is scaled by ``range_flow`` in forward().
    """
    if batchnorm:
        return nn.Sequential(
            nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias),
            nn.BatchNorm3d(out_channels),
            nn.Tanh())
    mid = int(in_channels / 2)
    return nn.Sequential(
        nn.Conv3d(in_channels, mid, kernel_size, stride=stride, padding=padding, bias=bias),
        nn.LeakyReLU(0.2),
        nn.Conv3d(mid, out_channels, kernel_size, stride=stride, padding=padding, bias=bias),
        nn.Softsign())
def forward(self, x, y):
# compose_field_e0_lvl1, warpped_inputx_lvl1_out, down_y, output_disp_e0_v, lvl1_v, e0
lvl2_disp, _, _, | |
duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
inplace : bool, default ``False``
If ``True``, performs operation inplace and returns None.
Returns
-------
Series or None
Series with duplicates dropped or None if ``inplace=True``.
See Also
--------
Index.drop_duplicates : Equivalent method on Index.
DataFrame.drop_duplicates : Equivalent method on DataFrame.
Series.duplicated : Related method on Series, indicating duplicate
Series values.
Examples
--------
Generate a Series with duplicated entries.
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'],
... name='animal')
>>> s
0 lama
1 cow
2 lama
3 beetle
4 lama
5 hippo
Name: animal, dtype: object
With the 'keep' parameter, the selection behaviour of duplicated values
can be changed. The value 'first' keeps the first occurrence for each
set of duplicated entries. The default value of keep is 'first'.
>>> s.drop_duplicates()
0 lama
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
The value 'last' for parameter 'keep' keeps the last occurrence for
each set of duplicated entries.
>>> s.drop_duplicates(keep='last')
1 cow
3 beetle
4 lama
5 hippo
Name: animal, dtype: object
The value ``False`` for parameter 'keep' discards all sets of
duplicated entries. Setting the value of 'inplace' to ``True`` performs
the operation inplace and returns ``None``.
>>> s.drop_duplicates(keep=False, inplace=True)
>>> s
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
"""
inplace = validate_bool_kwarg(inplace, "inplace")
result = super().drop_duplicates(keep=keep)
if inplace:
self._update_inplace(result)
return None
else:
return result
def duplicated(self, keep="first") -> Series:
    """
    Indicate duplicate Series values.

    Duplicated values are marked ``True`` in the resulting boolean Series;
    ``keep`` selects which occurrence of each duplicated set stays ``False``.

    Parameters
    ----------
    keep : {'first', 'last', False}, default 'first'
        - 'first' : mark duplicates ``True`` except for the first occurrence.
        - 'last' : mark duplicates ``True`` except for the last occurrence.
        - ``False`` : mark all duplicates ``True``.

    Returns
    -------
    Series[bool]
        Whether each value has occurred in the selected direction.

    See Also
    --------
    Index.duplicated : Equivalent method on pandas.Index.
    DataFrame.duplicated : Equivalent method on pandas.DataFrame.
    Series.drop_duplicates : Remove duplicate values from Series.

    Examples
    --------
    >>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama'])
    >>> animals.duplicated()
    0    False
    1    False
    2     True
    3    False
    4     True
    dtype: bool
    >>> animals.duplicated(keep=False)
    0     True
    1    False
    2     True
    3    False
    4     True
    dtype: bool
    """
    # _duplicated does the actual hashing; wrap the boolean mask back into
    # a Series aligned on the original index and propagate metadata.
    mask = self._duplicated(keep=keep)
    return self._constructor(mask, index=self.index).__finalize__(
        self, method="duplicated"
    )
def idxmin(self, axis=0, skipna=True, *args, **kwargs):
    """
    Return the row label of the minimum value.

    If multiple values equal the minimum, the first row label with that
    value is returned.

    Parameters
    ----------
    axis : int, default 0
        For compatibility with DataFrame.idxmin; redundant on Series.
    skipna : bool, default True
        Exclude NA/null values. If the entire Series is NA, the result
        will be NA.
    *args, **kwargs
        Have no effect but may be passed for NumPy compatibility.

    Returns
    -------
    Index
        Label of the minimum value.

    Raises
    ------
    ValueError
        If the Series is empty.

    See Also
    --------
    numpy.argmin : Return indices of the minimum values along an axis.
    DataFrame.idxmin : Index of first occurrence of minimum over an axis.
    Series.idxmax : Index *label* of the first occurrence of the maximum.

    Notes
    -----
    Series counterpart of ``ndarray.argmin``: returns the *label* of the
    minimum rather than its position. Use ``series.values.argmin()`` for
    the position.

    Examples
    --------
    >>> s = pd.Series(data=[1, None, 4, 1], index=['A', 'B', 'C', 'D'])
    >>> s.idxmin()
    'A'
    >>> s.idxmin(skipna=False)
    nan
    """
    pos = self.argmin(axis, skipna, *args, **kwargs)
    # argmin signals "no valid value" (e.g. all-NA with skipna=False) as -1.
    return np.nan if pos == -1 else self.index[pos]
def idxmax(self, axis=0, skipna=True, *args, **kwargs):
    """
    Return the row label of the maximum value.

    If multiple values equal the maximum, the first row label with that
    value is returned.

    Parameters
    ----------
    axis : int, default 0
        For compatibility with DataFrame.idxmax; redundant on Series.
    skipna : bool, default True
        Exclude NA/null values. If the entire Series is NA, the result
        will be NA.
    *args, **kwargs
        Have no effect but may be passed for NumPy compatibility.

    Returns
    -------
    Index
        Label of the maximum value.

    Raises
    ------
    ValueError
        If the Series is empty.

    See Also
    --------
    numpy.argmax : Return indices of the maximum values along an axis.
    DataFrame.idxmax : Index of first occurrence of maximum over an axis.
    Series.idxmin : Index *label* of the first occurrence of the minimum.

    Notes
    -----
    Series counterpart of ``ndarray.argmax``: returns the *label* of the
    maximum rather than its position. Use ``series.values.argmax()`` for
    the position.

    Examples
    --------
    >>> s = pd.Series(data=[1, None, 4, 3, 4], index=['A', 'B', 'C', 'D', 'E'])
    >>> s.idxmax()
    'C'
    >>> s.idxmax(skipna=False)
    nan
    """
    pos = self.argmax(axis, skipna, *args, **kwargs)
    # argmax signals "no valid value" (e.g. all-NA with skipna=False) as -1.
    return np.nan if pos == -1 else self.index[pos]
def round(self, decimals=0, *args, **kwargs) -> Series:
    """
    Round each value in a Series to the given number of decimals.

    Parameters
    ----------
    decimals : int, default 0
        Number of decimal places to round to. A negative value rounds to
        positions left of the decimal point.
    *args, **kwargs
        Have no effect but may be passed for NumPy compatibility.

    Returns
    -------
    Series
        Rounded values of the Series.

    See Also
    --------
    numpy.around : Round values of an np.array.
    DataFrame.round : Round values of a DataFrame.

    Examples
    --------
    >>> s = pd.Series([0.1, 1.3, 2.7])
    >>> s.round()
    0    0.0
    1    1.0
    2    3.0
    dtype: float64
    """
    # Reject stray positional/keyword arguments that the NumPy signature
    # would otherwise silently accept.
    nv.validate_round(args, kwargs)
    rounded = self._values.round(decimals)
    return self._constructor(rounded, index=self.index).__finalize__(
        self, method="round"
    )
def quantile(self, q=0.5, interpolation="linear"):
"""
Return value at the given quantile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
The quantile(s) to compute, which can lie in range: 0 <= q <= 1.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
float or Series
If ``q`` is an array, a Series will be returned where the
index is ``q`` and the values are the quantiles, otherwise
a float will be returned.
See Also
--------
core.window.Rolling.quantile : Calculate the rolling quantile.
numpy.percentile : Returns the q-th percentile(s) of the array elements.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.quantile(.5)
2.5
>>> s.quantile([.25, .5, .75])
0.25 1.75
0.50 2.50
0.75 3.25
dtype: float64
"""
validate_percentile(q)
# We dispatch to DataFrame so that | |
# <gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import print_function
# stdlib
import itertools
import pdb
import pprint
import time
import unittest
# local
from ..compat import pickle
from ..util import encode_session_payload, int_time, LAZYCREATE_SESSION
from ..exceptions import (
InvalidSession,
InvalidSession_PayloadTimeout,
InvalidSession_PayloadLegacy,
)
from . import DummyRedis
from ..session import RedisSession
# ==============================================================================
class TestRedisSession(unittest.TestCase):
def _makeOne(
    self,
    redis,
    session_id,
    new,
    func_new_session,
    serialize=pickle.dumps,
    deserialize=pickle.loads,
    detect_changes=True,
    set_redis_ttl=True,
    deserialized_fails_new=None,
    timeout_trigger=None,
    timeout=1200,
    python_expires=True,
    set_redis_ttl_readheavy=None,
):
    """Construct the RedisSession under test, forwarding the full keyword
    surface of the real factory.

    ``_set_redis_ttl_onexit`` is derived the same way here as in the
    library: only a plain timeout+TTL configuration (no timeout trigger,
    no python-side expiry, no read-heavy TTL mode) sets the TTL on exit.
    """
    _set_redis_ttl_onexit = False
    if (timeout and set_redis_ttl) and (
        not timeout_trigger and not python_expires and not set_redis_ttl_readheavy
    ):
        _set_redis_ttl_onexit = True
    return RedisSession(
        redis=redis,
        session_id=session_id,
        new=new,
        new_session=func_new_session,
        serialize=serialize,
        deserialize=deserialize,
        detect_changes=detect_changes,
        set_redis_ttl=set_redis_ttl,
        set_redis_ttl_readheavy=set_redis_ttl_readheavy,
        _set_redis_ttl_onexit=_set_redis_ttl_onexit,
        deserialized_fails_new=deserialized_fails_new,
        timeout_trigger=timeout_trigger,
        timeout=timeout,
        python_expires=python_expires,
    )
def _set_up_session_in_redis(
    self, redis, session_id, timeout, session_dict=None, serialize=pickle.dumps
):
    """Seed the dummy redis with one serialized session payload.

    Note: this calls `encode_session_payload` on the initial session data,
    so a typical test incurs one extra initial call to
    `encode_session_payload`. Returns the session_id it stored under.
    """
    data = {} if session_dict is None else session_dict
    now = int_time()
    expires = (now + timeout) if timeout else None
    payload = encode_session_payload(data, now, timeout, expires)
    redis.set(session_id, serialize(payload))
    return session_id
def _make_id_generator(self):
ids = itertools.count(start=0, step=1)
return lambda: str(next(ids))
def _set_up_session_in_Redis_and_makeOne(
    self,
    session_id=None,
    session_dict=None,
    new=True,
    timeout=300,
    detect_changes=True,
):
    """Convenience fixture: seed a DummyRedis with one session payload and
    return a RedisSession bound to it.

    When ``session_id`` is None a fresh id is drawn from the shared
    generator; the same generator also backs the session's ``new_session``
    factory, so regenerated ids never collide with the original one.
    """
    redis = DummyRedis()
    id_generator = self._make_id_generator()
    if session_id is None:
        session_id = id_generator()
    self._set_up_session_in_redis(
        redis=redis,
        session_id=session_id,
        session_dict=session_dict,
        timeout=timeout,
    )
    # new_session factory persists a fresh empty payload under a new id.
    func_new_session = lambda: self._set_up_session_in_redis(
        redis=redis,
        session_id=id_generator(),
        session_dict=session_dict,
        timeout=timeout,
    )
    return self._makeOne(
        redis=redis,
        session_id=session_id,
        new=new,
        func_new_session=func_new_session,
        detect_changes=detect_changes,
        timeout=timeout,
    )
def test_init_new_session(self):
    # New session: id echoed back, new flag preserved, payload empty.
    session_id = "session_id"
    new = True
    timeout = 300
    inst = self._set_up_session_in_Redis_and_makeOne(
        session_id=session_id, new=new, timeout=timeout
    )
    self.assertEqual(inst.session_id, session_id)
    self.assertIs(inst.new, new)
    self.assertDictEqual(dict(inst), {})

def test_init_existing_session(self):
    # Existing session: the persisted dict is loaded and exposed via dict(inst).
    session_id = "session_id"
    session_dict = {"key": "value"}
    new = False
    timeout = 300
    inst = self._set_up_session_in_Redis_and_makeOne(
        session_id=session_id, session_dict=session_dict, new=new, timeout=timeout
    )
    self.assertEqual(inst.session_id, session_id)
    self.assertIs(inst.new, new)
    self.assertDictEqual(dict(inst), session_dict)

def test_delitem(self):
    # Deleting a key removes it locally and from the persisted payload ("m").
    inst = self._set_up_session_in_Redis_and_makeOne()
    inst["key"] = "val"
    del inst["key"]
    inst.do_persist()
    session_dict_in_redis = inst.from_redis()["m"]
    self.assertNotIn("key", inst)
    self.assertNotIn("key", session_dict_in_redis)

def test_setitem(self):
    # Setting a key is visible locally and, after do_persist(), in redis.
    inst = self._set_up_session_in_Redis_and_makeOne()
    inst["key"] = "val"
    inst.do_persist()
    session_dict_in_redis = inst.from_redis()["m"]
    self.assertIn("key", inst)
    self.assertIn("key", session_dict_in_redis)

def test_getitem(self):
    inst = self._set_up_session_in_Redis_and_makeOne()
    inst["key"] = "val"
    inst.do_persist()
    session_dict_in_redis = inst.from_redis()["m"]
    self.assertEqual(inst["key"], session_dict_in_redis["key"])

def test_contains(self):
    inst = self._set_up_session_in_Redis_and_makeOne()
    inst["key"] = "val"
    inst.do_persist()
    session_dict_in_redis = inst.from_redis()["m"]
    self.assertTrue("key" in inst)
    self.assertTrue("key" in session_dict_in_redis)

def test_setdefault(self):
    inst = self._set_up_session_in_Redis_and_makeOne()
    result = inst.setdefault("key", "val")
    self.assertEqual(result, inst["key"])

def test_keys(self):
    # keys() of the live session matches keys() of the persisted payload.
    inst = self._set_up_session_in_Redis_and_makeOne()
    inst["key1"] = ""
    inst["key2"] = ""
    inst_keys = inst.keys()
    inst.do_persist()
    session_dict_in_redis = inst.from_redis()["m"]
    persisted_keys = session_dict_in_redis.keys()
    self.assertEqual(inst_keys, persisted_keys)

def test_items(self):
    inst = self._set_up_session_in_Redis_and_makeOne()
    inst["a"] = 1
    inst["b"] = 2
    inst_items = inst.items()
    inst.do_persist()
    session_dict_in_redis = inst.from_redis()["m"]
    persisted_items = session_dict_in_redis.items()
    self.assertEqual(inst_items, persisted_items)

def test_clear(self):
    # NOTE(review): "a" is never persisted before clear(), so the redis-side
    # assertion is trivially true; a do_persist() before clear() would make
    # this test stronger.
    inst = self._set_up_session_in_Redis_and_makeOne()
    inst["a"] = 1
    inst.clear()
    session_dict_in_redis = inst.from_redis()["m"]
    self.assertNotIn("a", inst)
    self.assertNotIn("a", session_dict_in_redis)

def test_get(self):
    inst = self._set_up_session_in_Redis_and_makeOne()
    inst["key"] = "val"
    get_from_inst = inst.get("key")
    self.assertEqual(get_from_inst, "val")
    inst.do_persist()
    session_dict_in_redis = inst.from_redis()["m"]
    get_from_redis = session_dict_in_redis.get("key")
    self.assertEqual(get_from_inst, get_from_redis)
def test_get_default(self):
    # get() with a default falls back identically on session and payload.
    inst = self._set_up_session_in_Redis_and_makeOne()
    get_from_inst = inst.get("key", "val")
    self.assertEqual(get_from_inst, "val")
    session_dict_in_redis = inst.from_redis()["m"]
    get_from_redis = session_dict_in_redis.get("key", "val")
    self.assertEqual(get_from_inst, get_from_redis)

def test_pop(self):
    # NOTE(review): the popped key was never persisted, so the redis-side
    # assertNotIn is trivially true.
    inst = self._set_up_session_in_Redis_and_makeOne()
    inst["key"] = "val"
    popped = inst.pop("key")
    self.assertEqual(popped, "val")
    session_dict_in_redis = inst.from_redis()["m"]
    self.assertNotIn("key", session_dict_in_redis)

def test_pop_default(self):
    inst = self._set_up_session_in_Redis_and_makeOne()
    popped = inst.pop("key", "val")
    self.assertEqual(popped, "val")

def test_update(self):
    # update() overrides existing keys and adds new ones, then persists.
    inst = self._set_up_session_in_Redis_and_makeOne()
    inst["a"] = 1
    to_be_updated = {"a": "overriden", "b": 2}
    inst.update(to_be_updated)
    self.assertEqual(inst["a"], "overriden")
    self.assertEqual(inst["b"], 2)
    inst.do_persist()
    session_dict_in_redis = inst.from_redis()["m"]
    self.assertEqual(session_dict_in_redis["a"], "overriden")
    self.assertEqual(session_dict_in_redis["b"], 2)

def test_iter(self):
    # Iterating the session yields its keys (order-independent).
    inst = self._set_up_session_in_Redis_and_makeOne()
    keys = ["a", "b", "c"]
    for k in keys:
        inst[k] = k
    itered = list(inst.__iter__())
    itered.sort()
    self.assertEqual(keys, itered)

def test_has_key(self):
    inst = self._set_up_session_in_Redis_and_makeOne()
    inst["actual_key"] = ""
    self.assertIn("actual_key", inst)
    self.assertNotIn("not_a_key", inst)

def test_values(self):
    inst = self._set_up_session_in_Redis_and_makeOne()
    inst["a"] = 1
    inst["b"] = 2
    expected_values = [1, 2]
    actual_values = sorted(inst.values())
    self.assertEqual(actual_values, expected_values)

def test_itervalues(self):
    inst = self._set_up_session_in_Redis_and_makeOne()
    inst["a"] = 1
    inst["b"] = 2
    itered = list(inst.itervalues())
    itered.sort()
    expected = [1, 2]
    self.assertEqual(expected, itered)

def test_iteritems(self):
    inst = self._set_up_session_in_Redis_and_makeOne()
    inst["a"] = 1
    inst["b"] = 2
    itered = list(inst.iteritems())
    itered.sort()
    expected = [("a", 1), ("b", 2)]
    self.assertEqual(expected, itered)

def test_iterkeys(self):
    inst = self._set_up_session_in_Redis_and_makeOne()
    inst["a"] = 1
    inst["b"] = 2
    itered = list(inst.iterkeys())
    itered.sort()
    expected = ["a", "b"]
    self.assertEqual(expected, itered)

def test_popitem(self):
    # popitem() removes an arbitrary pair; it must be one of the two set
    # above. NOTE(review): nothing was persisted, so the redis-side
    # assertNotIn is trivially true.
    inst = self._set_up_session_in_Redis_and_makeOne()
    inst["a"] = 1
    inst["b"] = 2
    popped = inst.popitem()
    options = [("a", 1), ("b", 2)]
    self.assertIn(popped, options)
    session_dict_in_redis = inst.from_redis()["m"]
    self.assertNotIn(popped, session_dict_in_redis)

def test_IDict_instance_conforms(self):
    # The session object must satisfy pyramid's IDict interface contract.
    from pyramid.interfaces import IDict
    from zope.interface.verify import verifyObject

    inst = self._set_up_session_in_Redis_and_makeOne()
    verifyObject(IDict, inst)
def test_created(self):
inst = self._set_up_session_in_Redis_and_makeOne()
created = inst.from_redis()["c"]
self.assertEqual(inst.created, created)
def test_timeout(self):
inst = self._set_up_session_in_Redis_and_makeOne()
timeout = inst.from_redis()["t"]
self.assertEqual(inst.timeout, timeout)
def test_invalidate(self):
inst = self._set_up_session_in_Redis_and_makeOne()
first_session_id = inst.session_id
inst.invalidate()
self.assertNotIn(first_session_id, inst.redis.store)
self.assertIs(inst._invalidated, True)
def test_dict_multilevel(self):
inst = self._set_up_session_in_Redis_and_makeOne(session_id="test1")
inst["dict"] = {"foo": {"bar": 1}}
inst.do_persist()
get_from_inst = inst["dict"]["foo"]["bar"]
self.assertEqual(get_from_inst, 1)
session_dict_in_redis = inst.from_redis()["m"]
get_from_redis = session_dict_in_redis["dict"]["foo"]["bar"]
self.assertEqual(get_from_redis, 1)
inst["dict"]["foo"]["bar"] = 2
inst.do_persist()
session_dict_in_redis2 = inst.from_redis()["m"]
get_from_redis2 = session_dict_in_redis2["dict"]["foo"]["bar"]
self.assertEqual(get_from_redis2, 2)
def test_dict_multilevel_detect_changes_on(self):
inst = self._set_up_session_in_Redis_and_makeOne(
session_id="test1", detect_changes=True
)
# set a base dict and ensure it worked
inst["dict"] = {"foo": {"bar": 1}}
inst.do_persist()
get_from_inst = inst["dict"]["foo"]["bar"]
self.assertEqual(get_from_inst, 1)
# grab the dict and edit it
session_dict_in_redis = inst.from_redis()["m"]
get_from_redis = session_dict_in_redis["dict"]["foo"]["bar"]
self.assertEqual(get_from_redis, 1)
inst["dict"]["foo"]["bar"] = 2
# ensure the change was detected
should_persist = inst._session_state.should_persist(inst)
self.assertTrue(should_persist)
def test_dict_multilevel_detect_changes_off(self):
    """With detect_changes=False, a nested mutation goes unnoticed."""
    session = self._set_up_session_in_Redis_and_makeOne(
        session_id="test1", detect_changes=False
    )
    # set a base dict and ensure it worked
    session["dict"] = {"foo": {"bar": 1}}
    session.do_persist()
    self.assertEqual(session["dict"]["foo"]["bar"], 1)
    # grab the persisted copy and confirm the initial value
    self.assertEqual(session.from_redis()["m"]["dict"]["foo"]["bar"], 1)
    # edit deep inside the managed dict...
    session["dict"]["foo"]["bar"] = 2
    # ...and expect the change to slip past the (disabled) detector
    self.assertFalse(session._session_state.should_persist(session))
def test_new_session_after_invalidate(self):
    """After invalidate(), ensure_id() mints a fresh, empty session."""
    session = self._set_up_session_in_Redis_and_makeOne()
    old_session_id = session.session_id
    session["key"] = "value"
    session.invalidate()
    session.ensure_id()  # allocate an id in redis, creating a null payload
    new_session_id = session.session_id
    self.assertSetEqual(set(session.redis.store.keys()), {new_session_id})
    self.assertNotEqual(new_session_id, old_session_id)
    self.assertIs(bool(new_session_id), True)
    self.assertDictEqual(dict(session), {})
    self.assertIs(session.new, True)
    self.assertIs(session._invalidated, False)
def test_session_id_access_after_invalidate_creates_new_session(self):
    """Post-invalidate, session_id defaults to the LAZYCREATE marker."""
    session = self._set_up_session_in_Redis_and_makeOne()
    old_session_id = session.session_id
    session.invalidate()
    # 1.4.x+ behavior: no real id yet, just the lazy-create sentinel
    self.assertIs(session.session_id_safecheck, None)
    self.assertIs(session.session_id, LAZYCREATE_SESSION)
    new_session_id = session.session_id
    self.assertNotEqual(new_session_id, old_session_id)
    self.assertIs(bool(new_session_id), True)
def test_managed_dict_access_after_invalidate_creates_new_session(self):
    """Accessing managed_dict after invalidate() must not allocate an id."""
    session = self._set_up_session_in_Redis_and_makeOne()
    old_session_id = session.session_id
    session.invalidate()
    session.managed_dict  # attribute access alone...
    # 1.4.x+ behavior: ...leaves the id as the LAZYCREATE sentinel; only an
    # explicit ensure_id() mints a new session id.
    self.assertIs(session.session_id_safecheck, None)
    self.assertIs(session.session_id, LAZYCREATE_SESSION)
    session.ensure_id()
    # Historical note: .session_id access used to create a session by itself,
    # so asserting .session_id alone would not prove the new session came
    # from ensure_id(); also compare against the ids actually in redis.
    ids_in_redis = session.redis.store.keys()
    new_session_id = session.session_id
    self.assertSetEqual(set(ids_in_redis), {new_session_id})
    self.assertNotEqual(new_session_id, old_session_id)
    self.assertIs(bool(new_session_id), True)
def test_created_access_after_invalidate_creates_new_session(self):
    """Accessing .created after invalidate() must not allocate an id."""
    session = self._set_up_session_in_Redis_and_makeOne()
    old_session_id = session.session_id
    session.invalidate()
    session.created  # attribute access alone...
    # 1.4.x+ behavior: ...leaves the id as the LAZYCREATE sentinel; only an
    # explicit ensure_id() mints a new session id.
    self.assertIs(session.session_id_safecheck, None)
    self.assertIs(session.session_id, LAZYCREATE_SESSION)
    session.ensure_id()
    # Historical note: .session_id access used to create a session by itself,
    # so asserting .session_id alone would not prove the new session came
    # from ensure_id(); also compare against the ids actually in redis.
    ids_in_redis = session.redis.store.keys()
    new_session_id = session.session_id
    self.assertSetEqual(set(ids_in_redis), {new_session_id})
    self.assertNotEqual(new_session_id, old_session_id)
    self.assertIs(bool(new_session_id), True)
def test_timeout_access_after_invalidate_creates_new_session(self):
    """Accessing .timeout after invalidate() must not allocate an id."""
    session = self._set_up_session_in_Redis_and_makeOne()
    old_session_id = session.session_id
    session.invalidate()
    session.timeout  # attribute access alone...
    # 1.4.x+ behavior: ...leaves the id as the LAZYCREATE sentinel; only an
    # explicit ensure_id() mints a new session id.
    self.assertIs(session.session_id_safecheck, None)
    self.assertIs(session.session_id, LAZYCREATE_SESSION)
    session.ensure_id()
    # Historical note: .session_id access used to create a session by itself,
    # so asserting .session_id alone would not prove the new session came
    # from ensure_id(); also compare against the ids actually in redis.
    ids_in_redis = session.redis.store.keys()
    new_session_id = session.session_id
    self.assertSetEqual(set(ids_in_redis), {new_session_id})
    self.assertNotEqual(new_session_id, old_session_id)
    self.assertIs(bool(new_session_id), True)
def test_new_attribute_access_after_invalidate_creates_new_session(self):
inst = self._set_up_session_in_Redis_and_makeOne()
first_session_id = inst.session_id
inst.invalidate()
inst.new # access
# 1.4.x+| session_id defaults to a LAZYCREATE
# 1.4.x+| session_id is only created via ensure_id()
self.assertIs(inst.session_id_safecheck, None)
self.assertIs(inst.session_id, LAZYCREATE_SESSION)
inst.ensure_id()
# ORIGINALLY
# .session_id attribute access also creates a new session after
# invalidate, so just asserting .session_id is not enough to prove that
# a new session was created after .created access. Here we note down
# session_ids in Redis right after .new access for an additional check.
session_ids_in_redis = | |
= _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_inputs_flat = []
_attrs = ("container", container, "shared_name", shared_name)
_result = _execute.execute(b"IdentityReaderV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"IdentityReaderV2", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def lmdb_reader(container="", shared_name="", name=None):
  r"""A Reader that outputs the records from a LMDB file.

  Args:
    container: An optional `string`. Defaults to `""`.
      If non-empty, this reader is placed in the given container.
      Otherwise, a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      If non-empty, this reader is named in the given bucket
      with this shared_name. Otherwise, the node name is used instead.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type mutable `string`.
  """
  # NOTE(review): machine-generated op wrapper; edits belong in the op
  # registry, not here.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Ref-typed outputs cannot be represented in eager mode: graph-only op.
    raise RuntimeError("lmdb_reader op does not support eager execution. Arg 'reader_handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  # Normalize the optional string attrs before graph construction.
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "LMDBReader", container=container, shared_name=shared_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "LMDBReader", _inputs_flat, _attrs, _result)
  # Single-output op: unpack the one-element result list.
  _result, = _result
  return _result

LMDBReader = tf_export("raw_ops.LMDBReader")(_ops.to_raw_op(lmdb_reader))
def lmdb_reader_eager_fallback(container, shared_name, name, ctx):
  """Eager-mode stub: LMDBReader produces a ref-typed handle, which eager
  execution cannot represent, so any eager invocation is rejected."""
  raise RuntimeError("lmdb_reader op does not support eager execution. Arg 'reader_handle' is a ref.")
@_dispatch.add_dispatch_list
@tf_export('io.matching_files', v1=['io.matching_files', 'matching_files'])
@deprecated_endpoints('matching_files')
def matching_files(pattern, name=None):
  r"""Returns the set of files matching one or more glob patterns.

  Note that this routine only supports wildcard characters in the
  basename portion of the pattern, not in the directory portion.
  Note also that the order of filenames returned is deterministic.

  Args:
    pattern: A `Tensor` of type `string`.
      Shell wildcard pattern(s). Scalar or vector of type string.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  # NOTE(review): machine-generated op wrapper — fast path first, then the
  # eager fallback, then graph construction.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute the op directly through the C API.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "MatchingFiles", name, pattern)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return matching_files_eager_fallback(
          pattern, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # Give registered dispatchers a chance to handle non-Tensor inputs.
      result = _dispatch.dispatch(
            matching_files, (), dict(pattern=pattern, name=name)
          )
      if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return result
      raise
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "MatchingFiles", pattern=pattern, name=name)
  except (TypeError, ValueError):
    # Same dispatcher escape hatch on the graph-construction path.
    result = _dispatch.dispatch(
          matching_files, (), dict(pattern=pattern, name=name)
        )
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "MatchingFiles", _inputs_flat, _attrs, _result)
  # Single-output op: unpack the one-element result list.
  _result, = _result
  return _result

MatchingFiles = tf_export("raw_ops.MatchingFiles")(_ops.to_raw_op(matching_files))
def matching_files_eager_fallback(pattern, name, ctx):
  # Eager fallback: convert the input and execute the op without the C fast
  # path (used when the fast path raises _FallbackException).
  pattern = _ops.convert_to_tensor(pattern, _dtypes.string)
  _inputs_flat = [pattern]
  _attrs = None
  _result = _execute.execute(b"MatchingFiles", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "MatchingFiles", _inputs_flat, _attrs, _result)
  # Single-output op: unpack the one-element result list.
  _result, = _result
  return _result
def merge_v2_checkpoints(checkpoint_prefixes, destination_prefix, delete_old_dirs=True, name=None):
  r"""V2 format specific: merges the metadata files of sharded checkpoints. The
  result is one logical checkpoint, with one physical metadata file and renamed
  data files.

  Intended for "grouping" multiple checkpoints in a sharded checkpoint setup.

  If delete_old_dirs is true, attempts to delete recursively the dirname of each
  path in the input checkpoint_prefixes. This is useful when those paths are non
  user-facing temporary locations.

  Args:
    checkpoint_prefixes: A `Tensor` of type `string`.
      prefixes of V2 checkpoints to merge.
    destination_prefix: A `Tensor` of type `string`.
      scalar. The desired final prefix. Allowed to be the same
      as one of the checkpoint_prefixes.
    delete_old_dirs: An optional `bool`. Defaults to `True`. see above.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  # NOTE(review): machine-generated op wrapper; this op has no tensor outputs.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute the op directly through the C API.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "MergeV2Checkpoints", name, checkpoint_prefixes,
        destination_prefix, "delete_old_dirs", delete_old_dirs)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return merge_v2_checkpoints_eager_fallback(
          checkpoint_prefixes, destination_prefix,
          delete_old_dirs=delete_old_dirs, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Normalize the optional bool attr before graph construction.
  if delete_old_dirs is None:
    delete_old_dirs = True
  delete_old_dirs = _execute.make_bool(delete_old_dirs, "delete_old_dirs")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "MergeV2Checkpoints", checkpoint_prefixes=checkpoint_prefixes,
                              destination_prefix=destination_prefix,
                              delete_old_dirs=delete_old_dirs, name=name)
  # No outputs, so no gradient recording: return the created Operation.
  return _op

MergeV2Checkpoints = tf_export("raw_ops.MergeV2Checkpoints")(_ops.to_raw_op(merge_v2_checkpoints))
def merge_v2_checkpoints_eager_fallback(checkpoint_prefixes, destination_prefix, delete_old_dirs, name, ctx):
  """Eager fallback for MergeV2Checkpoints: converts inputs and executes the
  op directly. The op has zero outputs, so this always returns None."""
  # Normalize the optional bool attr before execution.
  if delete_old_dirs is None:
    delete_old_dirs = True
  delete_old_dirs = _execute.make_bool(delete_old_dirs, "delete_old_dirs")
  # Convert both prefix arguments to string tensors.
  prefixes_tensor = _ops.convert_to_tensor(checkpoint_prefixes, _dtypes.string)
  destination_tensor = _ops.convert_to_tensor(destination_prefix, _dtypes.string)
  op_inputs = [prefixes_tensor, destination_tensor]
  op_attrs = ("delete_old_dirs", delete_old_dirs)
  _execute.execute(b"MergeV2Checkpoints", 0, inputs=op_inputs, attrs=op_attrs,
                   ctx=ctx, name=name)
  return None
def read_file(filename, name=None):
  r"""Reads and outputs the entire contents of the input filename.

  Args:
    filename: A `Tensor` of type `string`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  # NOTE(review): machine-generated op wrapper — fast path first, then the
  # eager fallback, then graph construction.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute the op directly through the C API.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ReadFile", name, filename)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return read_file_eager_fallback(
          filename, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ReadFile", filename=filename, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ReadFile", _inputs_flat, _attrs, _result)
  # Single-output op: unpack the one-element result list.
  _result, = _result
  return _result

ReadFile = tf_export("raw_ops.ReadFile")(_ops.to_raw_op(read_file))
def read_file_eager_fallback(filename, name, ctx):
  # Eager fallback: convert the input and execute the op without the C fast
  # path (used when the fast path raises _FallbackException).
  filename = _ops.convert_to_tensor(filename, _dtypes.string)
  _inputs_flat = [filename]
  _attrs = None
  _result = _execute.execute(b"ReadFile", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "ReadFile", _inputs_flat, _attrs, _result)
  # Single-output op: unpack the one-element result list.
  _result, = _result
  return _result
def reader_num_records_produced(reader_handle, name=None):
  r"""Returns the number of records this Reader has produced.

  This is the same as the number of ReaderRead executions that have
  succeeded.

  Args:
    reader_handle: A `Tensor` of type mutable `string`. Handle to a Reader.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int64`.
  """
  # NOTE(review): machine-generated op wrapper; ref-typed input makes this a
  # graph-only op.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    raise RuntimeError("reader_num_records_produced op does not support eager execution. Arg 'reader_handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ReaderNumRecordsProduced", reader_handle=reader_handle, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ReaderNumRecordsProduced", _inputs_flat, _attrs, _result)
  # Single-output op: unpack the one-element result list.
  _result, = _result
  return _result

ReaderNumRecordsProduced = tf_export("raw_ops.ReaderNumRecordsProduced")(_ops.to_raw_op(reader_num_records_produced))
def reader_num_records_produced_eager_fallback(reader_handle, name, ctx):
  """Eager-mode stub: the op takes a ref-typed reader handle, which eager
  execution cannot represent, so any eager invocation is rejected."""
  raise RuntimeError("reader_num_records_produced op does not support eager execution. Arg 'reader_handle' is a ref.")
def reader_num_records_produced_v2(reader_handle, name=None):
  r"""Returns the number of records this Reader has produced.

  This is the same as the number of ReaderRead executions that have
  succeeded.

  Args:
    reader_handle: A `Tensor` of type `resource`. Handle to a Reader.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int64`.
  """
  # NOTE(review): machine-generated op wrapper — V2 takes a resource handle,
  # so unlike V1 it supports eager execution.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute the op directly through the C API.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ReaderNumRecordsProducedV2", name, reader_handle)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return reader_num_records_produced_v2_eager_fallback(
          reader_handle, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ReaderNumRecordsProducedV2", reader_handle=reader_handle, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ReaderNumRecordsProducedV2", _inputs_flat, _attrs, _result)
  # Single-output op: unpack the one-element result list.
  _result, = _result
  return _result

ReaderNumRecordsProducedV2 = tf_export("raw_ops.ReaderNumRecordsProducedV2")(_ops.to_raw_op(reader_num_records_produced_v2))
def reader_num_records_produced_v2_eager_fallback(reader_handle, name, ctx):
  # Eager fallback: convert the resource handle and execute the op without
  # the C fast path (used when the fast path raises _FallbackException).
  reader_handle = _ops.convert_to_tensor(reader_handle, _dtypes.resource)
  _inputs_flat = [reader_handle]
  _attrs = None
  _result = _execute.execute(b"ReaderNumRecordsProducedV2", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "ReaderNumRecordsProducedV2", _inputs_flat, _attrs, _result)
  # Single-output op: unpack the one-element result list.
  _result, = _result
  return _result
def reader_num_work_units_completed(reader_handle, name=None):
  r"""Returns the number of work units this Reader has finished processing.

  Args:
    reader_handle: A `Tensor` of type mutable `string`. Handle to a Reader.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int64`.
  """
  # NOTE(review): machine-generated op wrapper; ref-typed input makes this a
  # graph-only op.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    raise RuntimeError("reader_num_work_units_completed op does not support eager execution. Arg 'reader_handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ReaderNumWorkUnitsCompleted", reader_handle=reader_handle, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ReaderNumWorkUnitsCompleted", _inputs_flat, _attrs, _result)
  # Single-output op: unpack the one-element result list.
  _result, = _result
  return _result

ReaderNumWorkUnitsCompleted = tf_export("raw_ops.ReaderNumWorkUnitsCompleted")(_ops.to_raw_op(reader_num_work_units_completed))
def reader_num_work_units_completed_eager_fallback(reader_handle, name, ctx):
  """Eager-mode stub: the op takes a ref-typed reader handle, which eager
  execution cannot represent, so any eager invocation is rejected."""
  raise RuntimeError("reader_num_work_units_completed op does not support eager execution. Arg 'reader_handle' is a ref.")
def reader_num_work_units_completed_v2(reader_handle, name=None):
r"""Returns the number of work units this Reader has | |
294, 64, 86, 528, 550, 493, 565, 298, 230, 312, 295, 538, 298, 295,
230, 54, 374, 516, 441, 54, 54, 323, 401, 401, 382, 159, 837, 159,
54, 401, 592, 159, 401, 417, 610, 264, 150, 323, 452, 185, 323, 323,
185, 403, 185, 423, 165, 425, 219, 407, 270, 231, 99, 93, 231, 631,
756, 71, 364, 434, 213, 86, 102, 434, 102, 86, 23, 71, 335, 164, 323,
409, 381, 4, 124, 41, 424, 206, 41, 124, 41, 41, 703, 635, 124, 493,
41, 41, 487, 492, 124, 175, 124, 261, 600, 488, 261, 488, 261, 206,
677, 261, 308, 723, 908, 704, 691, 723, 488, 488, 441, 136, 476, 312,
136, 550, 572, 728, 550, 22, 312, 312, 22, 55, 413, 183, 280, 593,
191, 36, 36, 427, 36, 695, 592, 19, 544, 13, 468, 13, 544, 72, 437,
321, 266, 461, 266, 441, 230, 409, 93, 521, 521, 345, 235, 22, 142,
150, 102, 569, 235, 264, 91, 521, 264, 7, 102, 7, 498, 521, 235, 537,
235, 6, 241, 420, 420, 631, 41, 527, 103, 67, 337, 62, 264, 527, 131,
67, 174, 263, 264, 36, 36, 263, 581, 253, 465, 160, 286, 91, 160, 55,
4, 4, 631, 631, 608, 365, 465, 294, 427, 427, 335, 669, 669, 129, 93,
93, 93, 93, 74, 66, 758, 504, 347, 130, 505, 504, 143, 505, 550, 222,
13, 352, 529, 291, 538, 50, 68, 269, 130, 295, 130, 511, 295, 295,
130, 486, 132, 61, 206, 185, 368, 669, 22, 175, 492, 207, 373, 452,
432, 327, 89, 550, 496, 611, 527, 89, 527, 496, 550, 516, 516, 91,
136, 538, 264, 264, 124, 264, 264, 264, 264, 264, 535, 264, 150, 285,
398, 285, 582, 398, 475, 81, 694, 694, 64, 81, 694, 234, 607, 723,
513, 234, 64, 581, 64, 124, 64, 607, 234, 723, 717, 367, 64, 513,
607, 488, 183, 488, 450, 183, 550, 286, 183, 363, 286, 414, 67, 449,
449, 366, 215, 235, 95, 295, 295, 41, 335, 21, 445, 225, 21, 295,
372, 749, 461, 53, 481, 397, 427, 427, 427, 714, 481, 714, 427, 717,
165, 245, 486, 415, 245, 415, 486, 274, 415, 441, 456, 300, 548, 300,
422, 422, 757, 11, 74, 430, 430, 136, 409, 430, 749, 191, 819, 592,
136, 364, 465, 231, 231, 918, 160, 589, 160, 160, 465, 465, 231, 157,
538, 538, 259, 538, 326, 22, 22, 22, 179, 22, 22, 550, 179, 287, 287,
417, 327, 498, 498, 287, 488, 327, 538, 488, 583, 488, 287, 335, 287,
335, 287, 41, 287, 335, 287, 327, 441, 335, 287, 488, 538, 327, 498,
8, 8, 374, 8, 64, 427, 8, 374, 417, 760, 409, 373, 160, 423, 206,
160, 106, 499, 160, 271, 235, 160, 590, 353, 695, 478, 619, 590, 353,
13, 63, 189, 420, 605, 427, 643, 121, 280, 415, 121, 415, 595, 417,
121, 398, 55, 330, 463, 463, 123, 353, 330, 582, 309, 582, 582, 405,
330, 550, 405, 582, 353, 309, 308, 60, 353, 7, 60, 71, 353, 189, 183,
183, 183, 582, 755, 189, 437, 287, 189, 183, 668, 481, 384, 384, 481,
481, 481, 477, 582, 582, 499, 650, 481, 121, 461, 231, 36, 235, 36,
413, 235, 209, 36, 689, 114, 353, 353, 235, 592, 36, 353, 413, 209,
70, 308, 70, 699, 308, 70, 213, 292, 86, 689, 465, 55, 508, 128, 452,
29, 41, 681, 573, 352, 21, 21, 648, 648, 69, 509, 409, 21, 264, 21,
509, 514, 514, 409, 21, 264, 443, 443, 427, 160, 433, 663, 433, 231,
646, 185, 482, 646, 433, 13, 398, 172, 234, 42, 491, 172, 234, 234,
832, 775, 172, 196, 335, 822, 461, 298, 461, 364, 1120, 537, 169,
169, 364, 694, 219, 612, 231, 740, 42, 235, 321, 279, 960, 279, 353,
492, 159, 572, 321, 159, 287, 353, 287, 287, 206, 206, 321, 287, 159,
321, 492, 159, 55, 572, 600, 270, 492, 784, 173, 91, 91, 443, 443,
582, 261, 497, 572, 91, 555, 352, 206, 261, 555, 285, 91, 555, 497,
83, 91, 619, 353, 488, 112, 4, 592, 295, 295, 488, 235, 231, 769,
568, 581, 671, 451, 451, 483, 299, 1011, 432, 422, 207, 106, 701,
508, 555, 508, 555, 125, 870, 555, 589, 508, 125, 749, 482, 125, 125,
130, 544, 643, 643, 544, 488, 22, 643, 130, 335, 544, 22, 130, 544,
544, 488, 426, 426, 4, 180, 4, 695, 35, 54, 433, 500, 592, 433, 262,
94, 401, 401, 106, 216, 216, 106, 521, 102, 462, 518, 271, 475, 365,
193, 648, 206, 424, 206, 193, 206, 206, 424, 299, 590, 590, 364, 621,
67, 538, 488, 567, 51, 51, 513, 194, 81, 488, 486, 289, 567, 563,
749, 563, 338, 338, 502, 563, 822, 338, 563, 338, 502, 201, 230, 201,
533, 445, 175, 201, 175, 13, 85, 960, 103, 85, 175, 30, 445, 445,
175, 573, 196, 877, 287, 356, 678, 235, 489, 312, 572, 264, 717, 138,
295, 6, 295, 523, 55, 165, 165, 295, 138, 663, 6, 295, 6, 353, 138,
6, 138, 169, 129, 784, 12, 129, 194, 605, 784, 445, 234, 627, 563,
689, 627, 647, 570, 627, 570, 647, 206, 234, 215, 234, 816, 627, 816,
234, 627, 215, 234, 627, 264, 427, 427, 30, 424, 161, 161, 916, 740,
180, 616, 481, 514, 383, 265, 481, 164, 650, 121, 582, 689, 420, 669,
589, 420, 788, 549, 165, 734, 280, 224, 146, 681, 788, 184, 398, 784,
4, 398, 417, 417, 398, 636, 784, 417, 81, 398, 417, 81, 185, 827,
420, 241, 420, 41, 185, 185, 718, 241, 101, 185, 185, 241, 241, 241,
241, 241, 185, 324, 420, 420, 1011, 420, 827, 241, 184, 563, 241,
183, 285, 529, 285, 808, 822, 891, 822, 488, 285, 486, 619, 55, 869,
39, 567, 39, 289, 203, 158, 289, 710, 818, 158, 818, 355, 29, 409,
203, 308, 648, 792, 308, 308, 91, 308, 6, 592, 792, 106, 106, 308,
41, 178, 91, 751, 91, 259, 734, 166, 36, 327, 166, 230, 205, 205,
172, 128, 230, 432, 623, 838, 623, 432, 278, 432, 42, 916, 432, 694,
623, 352, 452, 93, 314, 93, 93, 641, 88, 970, 914, 230, 61, 159, 270,
159, 493, 159, 755, 159, 409, 30, 30, 836, 128, 241, 99, 102, 984,
538, 102, 102, 273, 639, 838, 102, 102, 136, 637, 508, 627, 285, 465,
327, 327, 21, 749, 327, 749, 21, 845, 21, 21, 409, 749, 1367, 806,
616, 714, 253, 616, 714, 714, 112, 375, 21, 112, 375, 375, 51, 51,
51, 51, 393, 206, 870, 713, 193, 802, 21, 1061, 42, 382, 42, 543,
876, 42, 876, 382, 696, 543, 635, 490, 353, 353, 417, 64, 1257, 271,
64, 377, 127, 127, 537, 417, 905, 353, 538, 465, 605, 876, 427, 324,
514, 852, 427, 53, 427, 557, 173, 173, 7, 1274, 563, 31, 31, 31, 745,
392, 289, 230, 230, 230, 91, 218, 327, 420, 420, 128, 901, 552, 420,
230, 608, 552, 476, 347, 476, 231, 159, 137, 716, 648, 716, 627, 740,
718, 679, 679, 6, 718, 740, 6, 189, 679, 125, 159, 757, 1191, 409,
175, 250, 409, 67, 324, 681, 605, 550, 398, 550, 931, 478, 174, 21,
316, 91, 316, 654, 409, 425, 425, 699, 61, 699, 321, 698, 321, 698,
61, 425, 699, 321, 409, 699, 299, 335, 321, 335, 61, 698, 699, 654,
698, 299, 425, 231, 14, 121, 515, 121, 14, 165, 81, 409, 189, 81,
373, 465, 463, 1055, 507, 81, 81, 189, 1246, 321, 409, 886, 104, 842,
689, 300, 740, 380, 656, 656, | |
<reponame>valerio-vaccaro/seedsigner<filename>src/seedsigner/controller.py
# External Dependencies
import time
from multiprocessing import Process, Queue
from subprocess import call
# Internal file class dependencies
from .views import (View, MenuView, SeedToolsView,SigningToolsView,
SettingsToolsView, IOTestView)
from .helpers import Buttons, B, CameraProcess,Path
from .models import (SeedStorage, SpecterDesktopWallet, BlueWallet,
SparrowWallet, GenericUR2Wallet, Wallet)
class Controller:
"""
The Controller is a globally available singleton that maintains SeedSigner state.
It only makes sense to ever have a single Controller instance so it is
implemented here as a singleton. One departure from the typical singleton pattern
is the addition of a `configure_instance()` call to pass run-time settings into
the Controller.
Any code that needs to interact with the one and only Controller can just run:
```
from seedsigner.controller import Controller
controller = Controller.get_instance()
```
Note: In many/most cases you'll need to do the Controller import within a method
rather than at the top in order to avoid circular imports.
"""
VERSION = "0.4.3"
_instance = None
def __init__(self):
    """Blocked: the Controller is a singleton.

    Call Controller.configure_instance(config) once at startup, then
    Controller.get_instance() everywhere else.
    """
    # Singleton pattern must prevent normal instantiation
    raise Exception("Cannot directly instantiate the Controller. Access via Controller.get_instance()")
@classmethod
def get_instance(cls):
    """Return the one and only Controller; raises if not yet configured."""
    if not cls._instance:
        raise Exception("Must call Controller.configure_instance(config) first")
    return cls._instance
@classmethod
def configure_instance(cls, config=None):
    """One-time singleton setup.

    Builds the Controller from run-time `config` and wires up settings,
    input buttons, models, views, and the background camera process.
    Must be called before the first get_instance() call.
    """
    if cls._instance:
        raise Exception("Instance already configured")
    # Instantiate the one and only Controller instance.
    # __new__ bypasses __init__, which deliberately raises to block
    # direct instantiation.
    controller = cls.__new__(cls)
    cls._instance = controller
    # settings
    controller.DEBUG = config.getboolean("system", "DEBUG")
    controller.color = config["display"]["TEXT_COLOR"]
    # Input Buttons
    controller.buttons = Buttons()
    # models
    controller.storage = SeedStorage()
    # NOTE(review): indirect globals() lookup — presumably to keep the default
    # wallet class easy to swap; SpecterDesktopWallet is the current default.
    controller.wallet_klass = globals()["SpecterDesktopWallet"]
    controller.wallet = controller.wallet_klass()
    # Views
    controller.menu_view = MenuView()
    controller.seed_tools_view = SeedToolsView()
    controller.io_test_view = IOTestView()
    controller.signing_tools_view = SigningToolsView(controller.storage)
    controller.settings_tools_view = SettingsToolsView()
    # Then start separate background camera process with two queues for communication.
    # CameraProcess handles connecting to camera hardware and passing back
    # barcode data via the from_camera queue.
    controller.from_camera_queue = Queue()
    controller.to_camera_queue = Queue()
    p = Process(target=CameraProcess.start, args=(controller.from_camera_queue, controller.to_camera_queue))
    p.start()
def start(self) -> None:
    """Run the main menu loop; in non-debug mode, restart after crashes."""
    if self.DEBUG:
        # Surface exceptions to the console, but always blank the screen
        # when execution stops.
        try:
            self.show_main_menu()
        finally:
            self.menu_view.display_blank_screen()
        return

    # Production mode: tolerate up to three unexpected crashes, restarting
    # the menu loop each time, before giving up entirely.
    crash_cnt = 0
    while True:
        try:
            self.show_main_menu()
        except Exception as error:
            if crash_cnt >= 3:
                break
            print('Caught this error: ' + repr(error))  # debug
            self.menu_view.draw_modal(["Crashed ..."], "", "restarting")
            time.sleep(5)
            crash_cnt += 1
    self.menu_view.draw_modal(["Crashed ..."], "", "requires hard restart")
### Menu
### Menu View handles navigation within the menu
### Sub Menus like Seed Tools, Signing Tools, and Settings are all in the Menu View
def show_main_menu(self, sub_menu = 0):
    """Top-level navigation loop.

    Repeatedly renders the main menu (optionally starting on `sub_menu`)
    and dispatches the selection to the matching tool/launcher. Each
    handler returns the next menu Path to highlight on the following
    pass, so the loop runs until a handler exits the process (power off)
    or an unknown selection is raised.

    Args:
        sub_menu: initial Path/menu position to display (default 0).

    Raises:
        Exception: if the menu returns a selection with no handler.
    """
    ret_val = sub_menu
    while True:
        ret_val = self.menu_view.display_main_menu(ret_val)

        if ret_val == Path.MAIN_MENU:
            ret_val = Path.MAIN_MENU  # stay on the main menu
        elif ret_val == Path.GEN_LAST_WORD:
            ret_val = self.show_generate_last_word_tool()
        elif ret_val == Path.DICE_GEN_SEED:
            ret_val = self.show_create_seed_with_dice_tool()
        elif ret_val == Path.IMAGE_GEN_SEED:
            ret_val = self.show_create_seed_with_image_tool()
        elif ret_val == Path.SAVE_SEED:
            ret_val = self.show_store_a_seed_tool()
        elif ret_val == Path.PASSPHRASE_SEED:
            ret_val = self.show_add_remove_passphrase_tool()
        elif ret_val == Path.GEN_XPUB:
            ret_val = self.show_generate_xpub()
        elif ret_val == Path.SIGN_TRANSACTION:
            ret_val = self.show_sign_transaction()
        elif ret_val == Path.IO_TEST_TOOL:
            ret_val = self.show_io_test_tool()
        elif ret_val == Path.VERSION_INFO:
            ret_val = self.show_version_info()
        elif ret_val == Path.CURRENT_NETWORK:
            ret_val = self.show_current_network_tool()
        elif ret_val == Path.WALLET:
            ret_val = self.show_wallet_tool()
        elif ret_val == Path.QR_DENSITY_SETTING:
            ret_val = self.show_qr_density_tool()
        elif ret_val == Path.WALLET_POLICY:
            ret_val = self.show_wallet_policy_tool()
        elif ret_val == Path.DONATE:
            ret_val = self.show_donate_tool()
        elif ret_val == Path.POWER_OFF:
            ret_val = self.show_power_off()
        else:
            # BUGFIX(review): the raise must be the dispatch chain's final
            # `else` branch — as an unconditional statement at loop level it
            # would abort the UI on every iteration, even for handled
            # selections.
            raise Exception("Unhandled case")
### Power Off
def show_power_off(self):
    """Confirm and perform a system shutdown; returns to main menu on 'No'."""
    selection = self.menu_view.display_generic_selection_menu(["Yes", "No"], "Power Off?")
    if selection != 1:  # No
        return Path.MAIN_MENU
    # Yes: show the power-off screen and hand control to the OS.
    self.menu_view.display_power_off_screen()
    call("sudo shutdown --poweroff now", shell=True)
    time.sleep(10)  # give the OS time to actually shut down
###
### Seed Tools Controller Navigation/Launcher
###
### Generate Last Word 12 / 24 Menu
def show_generate_last_word_tool(self) -> int:
    """Enter 11 or 23 seed words manually and derive the final checksum word."""
    while True:
        # choose 12- vs 24-word seed (manual entry excludes the final word)
        menu_choice = self.menu_view.display_12_24_word_menu("... [ Return to Seed Tools ]")
        if menu_choice == Path.SEED_WORD_12:
            partial_phrase = self.seed_tools_view.display_manual_seed_entry(11)
        elif menu_choice == Path.SEED_WORD_24:
            partial_phrase = self.seed_tools_view.display_manual_seed_entry(23)
        else:
            return Path.SEED_TOOLS_SUB_MENU
        if len(partial_phrase) > 0:
            completed_seed_phrase = self.seed_tools_view.display_last_word(partial_phrase)
            break

    # display the completed phrase; the user can't back out of this view
    while self.seed_tools_view.display_seed_phrase(completed_seed_phrase, show_qr_option=True) != True:
        pass

    # offer to save the seed into a free storage slot
    if self.storage.slot_avaliable():
        if self.menu_view.display_generic_selection_menu(["Yes", "No"], "Save Seed?") == 1:  # Yes
            slot_num = self.menu_view.display_saved_seed_menu(self.storage, 2, None)
            if slot_num in (1, 2, 3):
                self.storage.save_seed_phrase(completed_seed_phrase, slot_num)
                self.menu_view.draw_modal(["Seed Valid", "Saved to Slot #" + str(slot_num)], "", "Right to Main Menu")
                self.buttons.wait_for([B.KEY_RIGHT])
    return Path.MAIN_MENU
### Create a Seed w/ Dice Screen
def show_create_seed_with_dice_tool(self) -> int:
    """Generate a 24-word seed from dice rolls, display it, and offer to save."""
    seed_phrase = self.seed_tools_view.display_generate_seed_from_dice()
    if len(seed_phrase) == 0:
        # user backed out of the dice-entry screen
        return Path.SEED_TOOLS_SUB_MENU

    # display the phrase (24 words); the user can't back out of this view
    while self.seed_tools_view.display_seed_phrase(seed_phrase, show_qr_option=True) != True:
        pass

    # offer to save the seed into a free storage slot
    if self.storage.slot_avaliable():
        if self.menu_view.display_generic_selection_menu(["Yes", "No"], "Save Seed?") == 1:  # Yes
            slot_num = self.menu_view.display_saved_seed_menu(self.storage, 2, None)
            if slot_num in (1, 2, 3):
                self.storage.save_seed_phrase(seed_phrase, slot_num)
                self.menu_view.draw_modal(["Seed Valid", "Saved to Slot #" + str(slot_num)], "", "Right to Main Menu")
                self.buttons.wait_for([B.KEY_RIGHT])
    return Path.MAIN_MENU
def show_create_seed_with_image_tool(self) -> int:
    """Generate a seed phrase from camera-image entropy.

    Returns Path.SEED_TOOLS_SUB_MENU if the user aborts image capture,
    otherwise Path.MAIN_MENU once the seed has been displayed (and
    optionally saved to a storage slot).
    """
    while True:
        # Capture an image for entropy; reshoot=True means retake the photo.
        while True:
            (reshoot, seed_phrase) = self.seed_tools_view.seed_phrase_from_camera_image()
            if reshoot:
                # Relaunch into another image capture cycle
                continue
            if len(seed_phrase) > 0:
                break
            # Empty phrase without a reshoot request: user aborted.
            return Path.SEED_TOOLS_SUB_MENU

        # Display the seed phrase; declining restarts the whole capture
        # flow.  (Fixed: the original recursed into this method here,
        # which grows the call stack on every restart.)
        if self.seed_tools_view.display_seed_phrase(seed_phrase, show_qr_option=True) == True:
            break

    # Offer to persist the seed into one of the storage slots.
    if self.storage.slot_avaliable():
        r = self.menu_view.display_generic_selection_menu(["Yes", "No"], "Save Seed?")
        if r == 1:  # Yes
            slot_num = self.menu_view.display_saved_seed_menu(self.storage, 2, None)
            if slot_num in (1, 2, 3):
                self.storage.save_seed_phrase(seed_phrase, slot_num)
                self.menu_view.draw_modal(["Seed Valid", "Saved to Slot #" + str(slot_num)], "", "Right to Main Menu")
                # Fixed: don't shadow the builtin `input`; return value unused.
                self.buttons.wait_for([B.KEY_RIGHT])
    return Path.MAIN_MENU
### Store a seed (temp) Menu
def show_store_a_seed_tool(self):
    """Store a seed phrase into a storage slot, or display an already-saved one.

    Flow:
      * the user picks a slot (0 aborts back to the seed tools menu);
      * if the slot is occupied, the saved seed (and passphrase) is
        displayed and we return to the main menu;
      * otherwise the seed is entered manually (12/24 words) or scanned
        from a QR code, validated, and saved into the chosen slot.

    Returns the next Path to navigate to.
    """
    seed_phrase = []
    ret_val = 0
    display_saved_seed = False
    # Slot picker; 0 means "... [ Return to Seed Tools ]" was selected.
    ret_val = self.menu_view.display_saved_seed_menu(self.storage, 1, "... [ Return to Seed Tools ]")
    if ret_val == 0:
        return Path.SEED_TOOLS_SUB_MENU
    slot_num = ret_val
    if self.storage.check_slot(slot_num) == True:
        # Slot already holds a seed: just display it.
        display_saved_seed = True
        # show seed phrase
        # display seed phrase (24 words)
        while True:
            # NOTE(review): the passphrase is passed positionally here, unlike
            # the other display_seed_phrase() call sites — confirm the view
            # accepts the passphrase as its second positional argument.
            r = self.seed_tools_view.display_seed_phrase(self.storage.get_seed_phrase(abs(slot_num)), self.storage.get_passphrase(abs(slot_num)), show_qr_option=True)
            if r == True:
                break
            else:
                # no-op; can't back out of the seed phrase view
                pass
        return Path.MAIN_MENU
    else:
        # display menu to select 12 or 24 word seed for last word
        ret_val = self.menu_view.display_qr_12_24_word_menu("... [ Return to Seed Tools ]")
        if ret_val == Path.SEED_WORD_12:
            seed_phrase = self.seed_tools_view.display_manual_seed_entry(12)
        elif ret_val == Path.SEED_WORD_24:
            seed_phrase = self.seed_tools_view.display_manual_seed_entry(24)
        elif ret_val == Path.SEED_WORD_QR:
            seed_phrase = self.seed_tools_view.read_seed_phrase_qr()
        else:
            return Path.SEED_TOOLS_SUB_MENU
        # Empty phrase: entry/scan was aborted.
        if len(seed_phrase) == 0:
            return Path.SEED_TOOLS_SUB_MENU
        # A QR-scanned seed needn't offer the "show as QR" option again.
        if ret_val == Path.SEED_WORD_QR and len(seed_phrase) > 0:
            show_qr_option = False
        else:
            show_qr_option = True
        # display_saved_seed is False on this branch, so loop until confirmed.
        while display_saved_seed == False:
            r = self.seed_tools_view.display_seed_phrase(seed_phrase, show_qr_option=show_qr_option )
            if r == True:
                break
            else:
                # no-op; can't back out of the seed phrase view
                pass
        # Validate the entered phrase before persisting it.
        self.menu_view.draw_modal(["Validating ..."])
        is_valid = self.storage.check_if_seed_valid(seed_phrase)
        if is_valid:
            self.storage.save_seed_phrase(seed_phrase, slot_num)
            self.menu_view.draw_modal(["Seed Valid", "Saved to Slot #" + str(slot_num)], "", "Right to Main Menu")
            input = self.buttons.wait_for([B.KEY_RIGHT])  # NOTE(review): shadows builtin `input`
        else:
            self.menu_view.draw_modal(["Seed Invalid", "check seed phrase", "and try again"], "", "Right to Continue")
            input = self.buttons.wait_for([B.KEY_RIGHT])
        return Path.MAIN_MENU
### Add a PassPhrase Menu
def show_add_remove_passphrase_tool(self):
ret_val = 0
r = 0
if self.storage.num_of_saved_seeds() == 0:
self.menu_view.draw_modal(["Store a seed phrase", "prior to adding", "a passphrase"], "Error", "Right to Continue")
self.buttons.wait_for([B.KEY_RIGHT])
return Path.SEED_TOOLS_SUB_MENU
if self.storage.num_of_passphrase_seeds() > 0:
r = self.menu_view.display_generic_selection_menu(["Add", "Change", "Remove"], "Passphrase Action")
if r == 1: # Add
ret_val = self.menu_view.display_saved_seed_menu(self.storage, 3, None)
if ret_val == 0:
return Path.SEED_TOOLS_SUB_MENU
# continue after top level if to capture and store passphrase
elif r == 2: #Change
ret_val = self.menu_view.display_saved_seed_menu(self.storage, 4, None)
if ret_val == 0:
return Path.SEED_TOOLS_SUB_MENU
# continue after top level if to capture and store new passphrase
elif r == 3:
# Remove Passphrase Workflow
if self.storage.num_of_saved_seeds() == 0:
self.menu_view.draw_modal(["Store a seed phrase", "prior to adding", "a passphrase"], "Error", "Right to Continue")
self.buttons.wait_for([B.KEY_RIGHT])
return Path.SEED_TOOLS_SUB_MENU
else:
ret_val = self.menu_view.display_saved_seed_menu(self.storage, 4, None)
if ret_val == 0:
return Path.SEED_TOOLS_SUB_MENU
slot_num = ret_val
if slot_num > 0:
self.storage.delete_passphrase(slot_num)
self.menu_view.draw_modal(["Passphrase Deleted", "from Slot #" + str(slot_num)], | |
1 or rhov <= rho <= rhol:
def funcion(parr):
rho, T = parr
rhol, rhov, Ps = self._saturation(T)
vapor = self._Helmholtz(rhov, T)
liquido = self._Helmholtz(rhol, T)
vu = vapor["h"]-Ps/rhov
lu = liquido["h"]-Ps/rhol
x = (1./rho-1/rhol)/(1/rhov-1/rhol)
return (vapor["s"]*x+liquido["s"]*(1-x)-s,
vu*x+lu*(1-x)-u)
To = [500, 700, 300, 900]
if self.kwargs["T0"]:
To.insert(0, self.kwargs["T0"])
rhov = self._Vapor_Density(self.Tt)
rhol = self._Liquid_Density(self.Tt)
ro = [1, 1e-3, rhov, rhol]
if self.kwargs["rho0"]:
ro.insert(0, self.kwargs["rho0"])
for r, t in product(ro, To):
sol = fsolve(funcion, [r, t], full_output=True)
rho, T = sol[0]
if sol[2] == 1 and sum(abs(sol[1]["fvec"])) < 1e-5:
break
if sum(abs(sol[1]["fvec"])) > 1e-5:
raise(RuntimeError(sol[3]))
rhol, rhov, Ps = self._saturation(T)
vapor = self._Helmholtz(rhov, T)
liquido = self._Helmholtz(rhol, T)
sv = vapor["s"]
sl = liquido["s"]
x = (s-sl)/(sv-sl)
P = Ps/1000
elif self._mode == "Trho":
if T < self.Tc:
rhov = self._Vapor_Density(T)
rhol = self._Liquid_Density(T)
if rhol > rho > rhov:
rhol, rhov, Ps = self._saturation(T)
vapor = self._Helmholtz(rhov, T)
liquido = self._Helmholtz(rhol, T)
x = (1/rho-1/rhol)/(1/rhov-1/rhol)
rho = 1/(x/rhov-(1-x)/rhol)
P = Ps/1000
rho = float(rho)
T = float(T)
propiedades = self._Helmholtz(rho, T)
if T > self.Tc:
x = 1
elif x is None:
x = 0
if not P:
P = propiedades["P"]/1000.
elif self._mode == "Tx":
# Check input T in saturation range
if self.Tt > T or self.Tc < T or x > 1 or x < 0:
raise NotImplementedError("Incoming out of bound")
rhol, rhov, Ps = self._saturation(T)
vapor = self._Helmholtz(rhov, T)
liquido = self._Helmholtz(rhol, T)
if x == 0:
propiedades = liquido
elif x == 1:
propiedades = vapor
P = Ps/1000.
elif self._mode == "Px":
# Check input P in saturation range
if self.Pc < P or x > 1 or x < 0:
raise NotImplementedError("Incoming out of bound")
# Iterate over saturation routine to get T
def funcion(T):
rhol = self._Liquid_Density(T)
rhog = self._Vapor_Density(T)
deltaL = rhol/self.rhoc
deltaG = rhog/self.rhoc
liquido = self._Helmholtz(rhol, T)
vapor = self._Helmholtz(rhog, T)
Ps = self.R*T*rhol*rhog/(rhol-rhog)*(
liquido["fir"]-vapor["fir"]+log(deltaL/deltaG))
return Ps/1000-P
if T0:
To = T0
elif self.name == "water":
To = _TSat_P(P)
else:
To = (self.Tc+self.Tt)/2
T = fsolve(funcion, To)[0]
rhol, rhov, Ps = self._saturation(T)
vapor = self._Helmholtz(rhov, T)
liquido = self._Helmholtz(rhol, T)
if x == 0:
propiedades = liquido
elif x == 1:
propiedades = vapor
self.T = T
self.Tr = T/self.Tc
self.P = P
self.Pr = self.P/self.Pc
self.x = x
if self._mode in ["Tx", "Px"] or 0 < x < 1:
region = 4
else:
region = 0
self.phase = getphase(self.Tc, self.Pc, self.T, self.P, self.x, region)
self.Liquid = _fase()
self.Gas = _fase()
if x == 0:
# liquid phase
self.fill(self.Liquid, propiedades)
self.fill(self, propiedades)
elif x == 1:
# vapor phase
self.fill(self.Gas, propiedades)
self.fill(self, propiedades)
else:
self.fill(self.Liquid, liquido)
self.fill(self.Gas, vapor)
self.v = x*self.Gas.v+(1-x)*self.Liquid.v
self.rho = 1./self.v
self.h = x*self.Gas.h+(1-x)*self.Liquid.h
self.s = x*self.Gas.s+(1-x)*self.Liquid.s
self.u = x*self.Gas.u+(1-x)*self.Liquid.u
self.a = x*self.Gas.a+(1-x)*self.Liquid.a
self.g = x*self.Gas.g+(1-x)*self.Liquid.g
self.Z = x*self.Gas.Z+(1-x)*self.Liquid.Z
self.f = x*self.Gas.f+(1-x)*self.Liquid.f
self.Z_rho = x*self.Gas.Z_rho+(1-x)*self.Liquid.Z_rho
self.IntP = x*self.Gas.IntP+(1-x)*self.Liquid.IntP
# Calculate special properties useful only for one phase
if self._mode in ("Px", "Tx") or (x < 1 and self.Tt <= T <= self.Tc):
self.sigma = self._surface(T)
else:
self.sigma = None
vir = self._virial(T)
self.virialB = vir["B"]/self.rhoc
self.virialC = vir["C"]/self.rhoc**2
if 0 < x < 1:
self.Hvap = vapor["h"]-liquido["h"]
self.Svap = vapor["s"]-liquido["s"]
else:
self.Hvap = None
self.Svap = None
self.invT = -1/self.T
# Ideal properties
cp0 = self._prop0(self.rho, self.T)
self.v0 = self.R*self.T/self.P/1000
self.rho0 = 1./self.v0
self.h0 = cp0.h
self.u0 = self.h0-self.P*self.v0
self.s0 = cp0.s
self.a0 = self.u0-self.T*self.s0
self.g0 = self.h0-self.T*self.s0
self.cp0 = cp0.cp
self.cv0 = cp0.cv
self.cp0_cv = self.cp0/self.cv0
cp0.v = self.v0
self.gamma0 = -self.v0/self.P/1000*self.derivative("P", "v", "s", cp0)
def fill(self, fase, estado):
    """Fill phase properties

    Populate the phase object `fase` with thermodynamic, derivative and
    transport properties computed from `estado` (the dict returned by
    _Helmholtz for this phase's density) and the bulk state already set
    on self (T, P, R, M).  The 1e3/1e-3 factors convert between the kPa
    used by _Helmholtz and the MPa stored in self.P.
    """
    # Basic state properties taken from the Helmholtz results.
    fase.rho = estado["rho"]
    fase.v = 1/fase.rho
    fase.h = estado["h"]
    fase.s = estado["s"]
    fase.u = fase.h-self.P*1000*fase.v
    fase.a = fase.u-self.T*fase.s
    fase.g = fase.h-self.T*fase.s
    fase.Z = self.P*fase.v/self.T/self.R*1e3
    # Fugacity coefficient from the residual Helmholtz energy.
    fase.fi = exp(estado["fir"]+estado["delta"]*estado["fird"] -
                  log(1+estado["delta"]*estado["fird"]))
    fase.f = fase.fi*self.P
    fase.cv = estado["cv"]
    # Molar variants (M: molecular weight).
    fase.rhoM = fase.rho/self.M
    fase.hM = fase.h*self.M
    fase.sM = fase.s*self.M
    fase.uM = fase.u*self.M
    fase.aM = fase.a*self.M
    fase.gM = fase.g*self.M
    fase.alfap = estado["alfap"]
    fase.betap = estado["betap"]
    # Derived properties obtained through the generic derivative wrapper;
    # derivative(z, x, y, fase) evaluates (dz/dx) at constant y.
    fase.cp = self.derivative("h", "T", "P", fase)
    fase.cp_cv = fase.cp/fase.cv
    fase.w = (self.derivative("P", "rho", "s", fase)*1000)**0.5
    fase.cvM = fase.cv*self.M
    fase.cpM = fase.cp*self.M
    fase.joule = self.derivative("T", "P", "h", fase)*1e3
    fase.Gruneisen = fase.v/fase.cv*self.derivative("P", "T", "v", fase)
    fase.alfav = self.derivative("v", "T", "P", fase)/fase.v
    fase.kappa = -self.derivative("v", "P", "T", fase)/fase.v*1e3
    fase.betas = self.derivative("T", "P", "s", fase)
    fase.gamma = -fase.v/self.P*self.derivative("P", "v", "s", fase)*1e-3
    fase.kt = -fase.v/self.P*self.derivative("P", "v", "T", fase)*1e-3
    fase.ks = -self.derivative("v", "P", "s", fase)/fase.v*1e3
    fase.Kt = -fase.v*self.derivative("P", "v", "s", fase)*1e-3
    fase.Ks = -fase.v*self.derivative("P", "v", "T", fase)*1e-3
    fase.dhdT_rho = self.derivative("h", "T", "rho", fase)
    fase.dhdT_P = self.derivative("h", "T", "P", fase)
    fase.dhdP_T = self.derivative("h", "P", "T", fase)*1e3
    fase.dhdP_rho = self.derivative("h", "P", "rho", fase)*1e3
    fase.dhdrho_T = self.derivative("h", "rho", "T", fase)
    fase.dhdrho_P = self.derivative("h", "rho", "P", fase)
    fase.dpdT_rho = self.derivative("P", "T", "rho", fase)*1e-3
    fase.dpdrho_T = self.derivative("P", "rho", "T", fase)*1e-3
    fase.drhodP_T = self.derivative("rho", "P", "T", fase)*1e3
    fase.drhodT_P = self.derivative("rho", "T", "P", fase)
    fase.Z_rho = (fase.Z-1)/fase.rho
    fase.IntP = self.T*self.derivative("P", "T", "rho", fase)*1e-3-self.P
    fase.hInput = fase.v*self.derivative("h", "v", "P", fase)
    # Transport properties.
    fase.mu = self._visco(fase.rho, self.T, fase)
    fase.k = self._thermo(fase.rho, self.T, fase)
    fase.nu = fase.mu/fase.rho
    fase.alfa = fase.k/1000/fase.rho/fase.cp
    fase.Prandt = fase.mu*fase.cp*1000/fase.k
    # Dielectric constant and refractive index are only evaluated for
    # ordinary water; they fall back to None when the underlying
    # correlations raise NotImplementedError (out of applicability range).
    if self.name == "water":
        try:
            fase.epsilon = _Dielectric(fase.rho, self.T)
        except NotImplementedError:
            fase.epsilon = None
        try:
            fase.n = _Refractive(fase.rho, self.T, self.kwargs["l"])
        except NotImplementedError:
            fase.n = None
    else:
        fase.epsilon = None
        fase.n = None
def derivative(self, z, x, y, fase):
    """Wrapper derivative for custom derived properties
    where x, y, z can be: P, T, v, rho, u, h, s, g, a

    Returns (dz/dx) at constant y for the given phase, delegating the
    actual computation to the generic deriv_H helper.
    """
    return deriv_H(self, z, x, y, fase)
def _saturation(self, T):
    """Saturation calculation for two phase search

    Returns (rhoL, rhoG, Ps): saturated liquid/vapor densities and the
    saturation pressure at temperature T, solved from the Helmholtz
    formulation.
    """
    # Clamp to the critical temperature; no saturation exists above Tc.
    if T > self.Tc:
        T = self.Tc
    # Ancillary-equation estimates used as the initial guess for fsolve.
    rhoLo = self._Liquid_Density(T)
    rhoGo = self._Vapor_Density(T)

    def f(parr):
        rhol, rhog = parr
        deltaL = rhol/self.rhoc
        deltaG = rhog/self.rhoc
        liquido = self._Helmholtz(rhol, T)
        vapor = self._Helmholtz(rhog, T)
        # Phase-equilibrium residuals: J is a reduced-pressure term and K a
        # reduced chemical-potential term; both must match between phases.
        Jl = deltaL*(1+deltaL*liquido["fird"])
        Jv = deltaG*(1+deltaG*vapor["fird"])
        Kl = deltaL*liquido["fird"]+liquido["fir"]+log(deltaL)
        Kv = deltaG*vapor["fird"]+vapor["fir"]+log(deltaG)
        return Kv-Kl, Jv-Jl

    rhoL, rhoG = fsolve(f, [rhoLo, rhoGo])
    if rhoL == rhoG:
        # Densities collapsed to a single value (at/near the critical point).
        Ps = self.Pc
    else:
        liquido = self._Helmholtz(rhoL, T)
        vapor = self._Helmholtz(rhoG, T)
        deltaL = rhoL/self.rhoc
        deltaG = rhoG/self.rhoc
        Ps = self.R*T*rhoL*rhoG/(rhoL-rhoG)*(
            liquido["fir"]-vapor["fir"]+log(deltaL/deltaG))
    return rhoL, rhoG, Ps
def _Helmholtz(self, rho, T):
    """Calculated properties from helmholtz free energy and derivatives

    Parameters
    ----------
    rho : float
        Density [kg/m³]
    T : float
        Temperature [K]

    Returns
    -------
    prop : dictionary with calculated properties
        fir: [-]
        fird: [∂fir/∂δ]τ [-]
        firdd: [∂²fir/∂δ²]τ [-]
        delta: Reduced density, rho/rhoc [-]
        P: Pressure [kPa]
        h: Enthalpy [kJ/kg]
        s: Entropy [kJ/kgK]
        cv: Isochoric specific heat [kJ/kgK]
        alfap: Relative pressure coefficient [1/K]
        betap: Isothermal stress coefficient [kg/m³]

    References
    ----------
    IAPWS, Revised Release on the IAPWS Formulation 1995 for the
    Thermodynamic Properties of Ordinary Water Substance for General and
    Scientific Use, September 2016, Table 3
    http://www.iapws.org/relguide/IAPWS-95.html
    """
    # Solvers (fsolve) may hand us 1-element arrays; unwrap to scalars.
    if isinstance(rho, ndarray):
        rho = rho[0]
    if isinstance(T, ndarray):
        T = T[0]
    # Guard against unphysical values reached during iterative solving.
    if rho < 0:
        rho = 1e-20
    if T < 50:
        T = 50
    # Reducing parameters (may be overridden by the fluid's constants).
    rhoc = self._constants.get("rhoref", self.rhoc)
    Tc = self._constants.get("Tref", self.Tc)
    delta = rho/rhoc
    tau = Tc/T
    # Ideal-gas part of the dimensionless Helmholtz energy and its
    # tau-derivatives.
    ideal = self._phi0(tau, delta)
    fio = ideal["fio"]
    fiot = ideal["fiot"]
    fiott = ideal["fiott"]
    # Residual part and its delta/tau derivatives.
    res = self._phir(tau, delta)
    fir = res["fir"]
    firt = res["firt"]
    firtt = res["firtt"]
    fird = res["fird"]
    firdd = res["firdd"]
    firdt = res["firdt"]
    propiedades = {}
    propiedades["fir"] = fir
    propiedades["fird"] = fird
    propiedades["firdd"] = firdd
    propiedades["delta"] = delta
    propiedades["rho"] = rho
    # Thermodynamic relations from the Helmholtz energy (IAPWS-95 Table 3).
    propiedades["P"] = (1+delta*fird)*self.R*T*rho
    propiedades["h"] = self.R*T*(1+tau*(fiot+firt)+delta*fird)
    propiedades["s"] = self.R*(tau*(fiot+firt)-fio-fir)
    propiedades["cv"] = -self.R*tau**2*(fiott+firtt)
    propiedades["alfap"] = (1-delta*tau*firdt/(1+delta*fird))/T
    propiedades["betap"] = rho*(
        1+(delta*fird+delta**2*firdd)/(1+delta*fird))
    return propiedades
def _prop0(self, rho, T):
    """Ideal-gas reference properties at the given density and temperature."""
    reducing_rho = self._constants.get("rhoref", self.rhoc)
    reducing_T = self._constants.get("Tref", self.Tc)
    tau = reducing_T/T
    delta = rho/reducing_rho

    # Only the ideal-gas part of the Helmholtz energy is needed here.
    ph0 = self._phi0(tau, delta)
    fio, fiot, fiott = ph0["fio"], ph0["fiot"], ph0["fiott"]

    p0 = _fase()
    p0.h = self.R*T*(1+tau*fiot)
    p0.s = self.R*(tau*fiot-fio)
    p0.cv = -self.R*tau**2*fiott
    p0.cp = self.R*(-tau**2*fiott+1)
    p0.alfap = 1/T
    p0.betap = rho
    return p0
def _phi0(self, tau, delta):
"""Ideal gas Helmholtz free energy and derivatives
Parameters
----------
tau : float
Inverse reduced temperature, Tc/T [-]
delta : float
Reduced density, rho/rhoc [-]
Returns
-------
prop : dictionary with | |
-435, "mehX": 436, "agX ": -437, "mXlh": -438,
"Xdev": 439, " aSX": -440, "grXt": 441, "UclX": 442, "rIsX": 443,
"OlgX": 444, "Xbid": 445, "rbrX": 446, "hXnk": 447, "Xhek": -448,
"Xssy": -449, "Xtc ": -450, "sn X": 451, "rlyX": 452, "nXkr": -453,
"Xlot": 454, "Xleg": -455, " sXo": 456, "tesX": 457, "Xkyu": 458,
"Xloj": 459, "dXed": -460, "rcXb": 461, "UysX": 462, "zdeX": 463,
"jXpo": 464, "OtlX": 465, "q sX": 466, "lXkk": -467, "aXsk": 468,
"tXpe": -469, "holX": 470, "tXem": 471, "etdX": 472, "Xndk": -473,
"tXbt": 474, "OrnX": 475, "Xsir": 476, "kalX": 477, "Xrds": -478,
"ybXk": 479, "fXzz": -480, "Xrua": -481, "genX": -482, "UrpX": 483,
"Xzbu": -484, "x lX": -485, "jXre": -486, "tCXo": -487, "bb X ": -488,
"b X ": 489, "ehXz": 490, "UnnX": 491, " Xnn": 492, "Xbut": 493,
"alXc": -494, "dUfX": 495, "hXkt": 496, "zbXk": 497, " Xsd": -498,
" Xrb": -499, "tezX": 500, "Xsei": -501, "lptX": -502, "UhtX": 503,
" Xcd": 504, "Xfad": 505, "hXla": 506, "UmmX": 507, "tXnr": 508,
"dXbe": 509, "nesX": 510, "yXrr": 511, "Xhih": 512, "Xlts": -513,
"hXup": 514, "fagX": -515, "pidX": -516, "inXs": 517, "dekX": -518,
"Xnia": -519, "UtcX": 520, " g X": 521, "SidX": 522, "kClX": 523,
"otCX": -524, "irXl": 525, "yerX": 526, "Xmld": -527, " Xry": -528,
" yXu": 529, "o pX": -530, "UzkX": 531, " Xzk": 532, "Xkbu": -533,
"Xzo ": -534, "kkXn": 535, "ddXp": 536, " sgX": -537, "vXke": 538,
"visX": -539, "ItrX": 540, "rXto": -541, "Co X": -542, "gelX": 543,
"cekX": -544, "adgX": -545, "CXrc": -546, "Xnug": 547, "OrcX": 548,
"eygX": 549, "cinX": -550, "Om X": 551, "sb X": 552, "anCX": -553,
"irXp": 554, " CXt": -555, "nbXk": 556, "U2nX": 557, "Xrro": -558,
"fXme": 559, "eptX": -560, "gXth": -561, "Xgar": -562, "Ub X": 563,
"dXvu": 564, "yadX": 565, "OtsX": 566, "ww X": -567, "Xtei": -568,
"ntXk": 569, "Xlek": 570, "d pX": -571, "Xttn": -572, "Xrru": -573,
"d Xp": -574, " Xin": 575, "erXf": 576, "Xnsi": 577, "Od X": 578,
"lXke": -579, "cXrm": 580, "I Xk": -581, "akXd": 582, "Xrio": -583,
"vrXb": 584, "kubX": -585, " Xke": 586, "rXdd": 587, "IldX": -588,
"Xyme": 589, "eobX": -590, "eatX": -591, "Xset": -592, "ebXz": 593,
"rcXr": -594, "Xldi": -595, "nXmb": -596, "Xrop": -597, "mXhn": 598,
"Xckn": -599, " CXp": -600, " Xh": 601, "9 rX": 602, "cetX": 603,
"sXpa": -604, "Xkev": -605, "aCbX": 606, "bylX": -607, "Xyap": 608,
" ngX": -609, "OC X": 610, "Xnim": -611, "Xkak": -612, "vvXl": 613,
"tUfX": 614, "sXts": 615, "ecXm": 616, "gerX": 617, "I Xu": 618,
"Xsol": -619, "3 jX": -620, "grgX": -621, "Xnny": -622, "3 Xs": -623,
"pXsr": 624, " imX": 625, "ayXl": 626, " agX": -627, " pXn": -628,
"nefX": 629, "cidX": 630, "yazX": 631, " Xrm": -632, "etXb": 633, "mXnk": -634,
"Xebe": -635, "Xsi ": -636, "anXd": 637, "eGXz": 638, "imiX": 639,
"ksXb": -640, " rX ": 641, "CXyd": -642, "Xcar": -643, "pXso": 644,
"Xpus": 645, "fasX": -646, "anhX": 647, "lalX": 648, "yhXl": 649,
"bXy ": 650, "sikX": 651, "radX": -652, "bXlf": -653, "mXve": 654,
"fsXr": 655, "gayX": 656, "edXc": -657, "nkXt": -658, "Xths": -659,
"anzX": -660, "Xlp ": -661, "Xrkh": -662, "urkX": -663, "umbX": -664,
"accX": 665, "aldX": -666, " Xlg": 667, "OCsX": 668, "Xlna": 669,
"iltX": 670, " hyX": -671, "ebbX": 672, "ddXt": 673, "aulX": 674,
"Xkti": 675, "batX": -676, "lXbr": -677, "Xrpr": 678, "Xsee": -679,
" tXv": -680, "jXl ": 681, "Xgi ": -682, "rXc ": -683, "dakX": -684,
"OmmX": 685, "Xth ": -686, "CXle": 687, "Xbuz": -688, "fXz ": -689,
"ozXn": -690, "Xnam": -691, "udXk": -692, "sokX": -693, "Xbai": -694,
"kXmk": -695, "prXn": 696, "obXs": 697, "adXf": 698, "Xgay": -699,
"nSXm": 700, "orXl": -701, "ekXl": 702, "Xfla": -703, "lXla": -704,
"Xra ": -705, "eyXp": 706, "prXl": 707, "Xsto": -708, "urmX": -709,
"sXtu": 710, "OrpX": 711, "ozXk": -712, "Xsem": 713, "orXc": -714,
"vXc ": -715, "cXrr": -716, "Xrri": 717, "Xkru": 718, "mXt ": -719,
"Xzan": -720, "unsX": -721, "oksX": -722, "udXr": -723, "bXtc": 724,
" Xfa": -725, "Xhar": -726, " UzX": 727, "uStX": -728, "ulXs": -729,
"sXna": -730, "Xtma": -731, "oplX": -732, "Xkla": -733, "hUkX": 734,
" olX": -735, "Xzig": 736, "kurX": -737, "lkXr": -738, "fdX": 739,
"mCX": -740, "iGX": 741, "zGX": 742, "CCX": -743, "OfX": 744,
"Xfh": -745, "dnX": 746, "Xbc": 747, "CcX": 748, "Xkv": -749,
"Xua": -750, "Xyn": 751, "Xhh": 752, "Xtw": -753, "UvX": 754,
"GaX": 755, "lSX": 756, "Xmn": -757, "Xhy": 758, "aiX": -759,
"byX": 760, "Xyz": 761, "hXv": 762, "Xgh": -763, "aXy": 764,
"jXn": -765, " Xo": 766, "acX": -767, "pXz": -768, "CyX": 769,
"mnX": -770, "icX": -771, "eaX": -772, "fXa": -773, "laX": -774,
"Xtk": -775, "ocX": -776, "sXc": -777, "UyX": 778, "OnX": 779,
"n konsensXs ": -780, "Uk bir tXr ": -781, "sI Xnvanini": 782, "yla tXrban ": -783, "k tXrkcell": -784,
"ler tXrunu": -785, " diGer Xc": 786, "ara bir dX": -787, "i iki tXr ": -788, "minde tXrb": -789,
"nI Xslup ": -790, "taki tXrb": -791, "Xmenalti ": -792, "i tXrkcel": -793, "I Xnvani ": 794,
"cak kXrsu": 795, "mada tXr ": 796, "yi enstrX": -797, "inin tXrb": -798, " nIn tXrb": -799,
"lan tXtun": 800, "Xniversia": -801, "ine kXrt ": -802, "terini dX": 803, " bir kXr": -804,
"Xverturle": 805, "usion tX": 806, "In usXl ": 807, "go tribX": -808, "Xkuneti ": -809,
"n bXyum ": -810, "z sXrat ": -811, "ttin kXr": -812, "yen ambX": 813, "i bXyur ": -814,
"aya tXrb": -815, "li tXrba": -816, "han abdX": -817, "eki Xcu ": -818, "Cok tXr ": 819,
"ir ergXn": -820, "tU bir X": 821, "kan kXrt": -822, "de ergXn": -823, "I tXtun ": 824,
"r Xnvani": 825, "uran kXr": -826, "lu ergX": -827, "Xrator ": 828, "en kXl ": 829,
"si Xzuy": -830, "bu tXmu": -831, "mir mXr": -832, "Xvette ": 833, "et ergX": -834,
"ar kXl ": 835, "bin kXs": 836, "l tXtun": 837, "hXkumuz": -838, " tXruyo": 839,
"in ambX": 840, "iyet Xn": 841, "a Xcunu": 842, "Ik tXru": 843, "a kXsuy": 844,
"zi kXrt": -845, "adet sX": 846, " bXyur": -847, "mle kXs": 848, "ve mXz ": -849,
"Um dXr ": 850, "di tXru": 851, "bXrundi": -852, "e ve Xs": 853, "Inar kX": 854,
"I tXru ": 855, "Ur mXsu": 856, "tmen mX": -857, "rdIm kX": -858, "lk tXru": -859,
"a mXsah": -860, "an Xcun": 861, "cU kXrs": -862, "r kondX": -863, "at tXru": 864,
"Xsunuyo": 865, "mon sX": -866, "Xkutuy": -867, " o mXd": -868, "oole X": -869,
"n sXr ": -870, "cla pX": -871, "re kXb": 872, "0 tXr ": 873, "k sXs ": 874,
"birt X": 875, "makulX": 876, "bXzul ": -877, "UC mX ": 878, "Xrtcuk": -879,
"dXslu ": -880, "eux nX": 881, "e 3 sX": 882, " sUtX": 883, "bXyurl": 884,
"gXlham": -885, "r sXs ": 886, "lkolsX": 887, "Xverno": -888, " akXsu": 889,
"sI Xn ": 890, " mXzu ": -891, "Xtena ": -892, " dXrda": 893, "gUl sX": -894,
"ney Xc": -895, "Xbapla": 896, "koldXr": 897, " gXru ": -898, " Xctur": 899,
"as mXs": -900, "Xrtceb": -901, "oyle X": -902, "Ur dX ": 903, "ara X ": 904,
"Xrdish": -905, "e Xzuy": -906, "Xrtaji": -907, "s ta X": 908, "suC mX": -909,
"kXstuk": 910, "sul mX": 911, "sus nX": 912, "i ambX": 913, " kXcu ": -914,
"t mXz ": -915, "bXrunl": -916, "kbuldX": 917, "i mXz ": -918, "g tXru": -919,
"ton tX": 920, "Xtture": 921, "Xralis": 922, "Xcunde": 923, "zor mX": -924,
"n Xnva": -925, "sIk sX": -926, "Xtunde": 927, "ir Xr ": -928, "troldX": 929,
"Xruyle": 930, "lkollX": 931, "tXrce ": 932, "ari Xn": -933, "kOr mX": 934,
"r Xzuy": -935, "Si yXn": -936, "na tXt": -937, "I tXtu": -938, "st sXp": -939,
"5 lXk ": -940, " Xnet": 941, "naryX": 942, " gXsa": 943, " ad X": 944,
"insXl": 945, "a sXh": -946, "i Xz ": 947, "Xgsas": | |
<filename>metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/indexing_commands.py
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import requests
import time
from datetime import datetime
from resource_management.core.exceptions import Fail
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Execute, File
from resource_management.libraries.functions import format as ambari_format
from resource_management.libraries.functions.format import format
import metron_service
import metron_security
# Wrap major operations and functionality in this class
class IndexingCommands:
    """Lifecycle commands for the Metron indexing topologies (Ambari service).

    One-time configuration steps (Kafka ACLs, HBase table/ACLs, HDFS
    permissions, Elasticsearch templates / Solr schemas) are tracked via
    flag files on disk; see __init__.
    """
    # Injected params object (from params.py); set in __init__.
    __params = None
    # Kafka input topic consumed by indexing.
    __indexing_topic = None
    # Storm topology names for the two indexing paths.
    __random_access_indexing_topology = None
    __batch_indexing_topology = None
    # One-time configuration state, loaded from flag files in __init__.
    __configured = False
    __acl_configured = False
    __hdfs_perm_configured = False
    __hbase_configured = False
    __hbase_acl_configured = False
def __init__(self, params):
if params is None:
raise ValueError("params argument is required for initialization")
self.__params = params
self.__random_access_indexing_topology = params.metron_random_access_indexing_topology
self.__batch_indexing_topology = params.metron_batch_indexing_topology
self.__indexing_topic = params.indexing_input_topic
self.__configured = os.path.isfile(self.__params.indexing_configured_flag_file)
self.__acl_configured = os.path.isfile(self.__params.indexing_acl_configured_flag_file)
self.__hbase_configured = os.path.isfile(self.__params.indexing_hbase_configured_flag_file)
self.__hbase_acl_configured = os.path.isfile(self.__params.indexing_hbase_acl_configured_flag_file)
self.__elasticsearch_template_installed = os.path.isfile(self.__params.elasticsearch_template_installed_flag_file)
self.__solr_schema_installed = os.path.isfile(self.__params.solr_schema_installed_flag_file)
self.__hdfs_perm_configured = os.path.isfile(self.__params.indexing_hdfs_perm_configured_flag_file)
def __get_topics(self):
return [self.__indexing_topic]
def __get_kafka_acl_groups(self):
# Indexed topic names matches the group
return ['indexing-batch', 'indexing-ra']
def get_templates(self):
    """
    Defines the Elasticsearch index templates.
    :return: Dict where key is the name of an index template and the
    value is a path to file containing the index template definition.
    """
    from params import params
    template_paths = {
        "bro_index": params.bro_index_path,
        "yaf_index": params.yaf_index_path,
        "snort_index": params.snort_index_path,
        "error_index": params.error_index_path,
        "metaalert_index": params.meta_index_path,
    }
    return template_paths
def get_solr_schemas(self):
"""
Defines the Solr schemas.
:return: Dict where key is the name of a collection and the
value is a path to file containing the schema definition.
"""
return [
"bro",
"yaf",
"snort",
"error",
"metaalert"
]
def is_configured(self):
    """True once overall indexing configuration has completed (flag file present)."""
    return self.__configured

def is_acl_configured(self):
    """True once the Kafka ACLs for indexing have been configured."""
    return self.__acl_configured

def is_hdfs_perm_configured(self):
    """True once the HDFS indexing directory permissions have been set."""
    return self.__hdfs_perm_configured

def is_hbase_configured(self):
    """True once the HBase table for indexing updates has been created."""
    return self.__hbase_configured

def is_hbase_acl_configured(self):
    """True once the HBase ACLs for indexing have been granted."""
    return self.__hbase_acl_configured

def is_elasticsearch_template_installed(self):
    """True once the Elasticsearch index templates have been installed."""
    return self.__elasticsearch_template_installed

def is_solr_schema_installed(self):
    """True once the Solr collection schemas have been installed."""
    return self.__solr_schema_installed
def set_configured(self):
    """Persist the 'indexing configured' flag file."""
    metron_service.set_configured(self.__params.metron_user, self.__params.indexing_configured_flag_file, "Setting Indexing configured to True")

def set_hbase_configured(self):
    """Persist the 'HBase configured' flag file for indexing."""
    metron_service.set_configured(self.__params.metron_user, self.__params.indexing_hbase_configured_flag_file, "Setting HBase configured to True for indexing")

def set_hbase_acl_configured(self):
    """Persist the 'HBase ACLs configured' flag file for indexing."""
    metron_service.set_configured(self.__params.metron_user, self.__params.indexing_hbase_acl_configured_flag_file, "Setting HBase ACL configured to True for indexing")

def set_acl_configured(self):
    """Persist the 'Kafka ACLs configured' flag file for indexing."""
    metron_service.set_configured(self.__params.metron_user, self.__params.indexing_acl_configured_flag_file, "Setting Indexing ACL configured to True")

def set_hdfs_perm_configured(self):
    """Persist the 'HDFS permissions configured' flag file."""
    metron_service.set_configured(self.__params.metron_user, self.__params.indexing_hdfs_perm_configured_flag_file, "Setting HDFS perm configured to True")

def set_elasticsearch_template_installed(self):
    """Persist the 'Elasticsearch templates installed' flag file."""
    metron_service.set_configured(self.__params.metron_user, self.__params.elasticsearch_template_installed_flag_file, "Setting Elasticsearch template installed to True")

def set_solr_schema_installed(self):
    """Persist the 'Solr schemas installed' flag file."""
    metron_service.set_configured(self.__params.metron_user, self.__params.solr_schema_installed_flag_file, "Setting Solr schema installed to True")
def create_hbase_tables(self):
    """Create the HBase table/column family used for indexing updates,
    then record completion via the hbase-configured flag file."""
    Logger.info("Creating HBase Tables for indexing")
    metron_service.create_hbase_table(self.__params,
                                      self.__params.update_hbase_table,
                                      self.__params.update_hbase_cf)
    Logger.info("Done creating HBase Tables for indexing")
    self.set_hbase_configured()
def set_hbase_acls(self):
    """Grant the Metron user read/write access to the indexing update
    table via the HBase shell, kinit'ing as the HBase user first when
    the cluster is kerberized."""
    Logger.info("Setting HBase ACLs for indexing")
    if self.__params.security_enabled:
        metron_security.kinit(self.__params.kinit_path_local,
                              self.__params.hbase_keytab_path,
                              self.__params.hbase_principal_name,
                              execute_user=self.__params.hbase_user)
    # Non-interactive (-n) hbase shell invocation granting RW on the table.
    cmd = "echo \"grant '{0}', 'RW', '{1}'\" | hbase shell -n"
    add_update_acl_cmd = cmd.format(self.__params.metron_user, self.__params.update_hbase_table)
    Execute(add_update_acl_cmd,
            tries=3,
            try_sleep=5,
            logoutput=False,
            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
            user=self.__params.hbase_user
            )
    Logger.info("Done setting HBase ACLs for indexing")
    self.set_hbase_acl_configured()
def init_kafka_topics(self):
    """Create the Kafka input topic(s) used by indexing."""
    Logger.info('Creating Kafka topics for indexing')
    metron_service.init_kafka_topics(self.__params, self.__get_topics())

def init_kafka_acls(self):
    """Create Kafka ACLs for the indexing topic(s) and consumer groups."""
    Logger.info('Creating Kafka ACLs for indexing')
    metron_service.init_kafka_acls(self.__params, self.__get_topics())
    metron_service.init_kafka_acl_groups(self.__params, self.__get_kafka_acl_groups())
def init_hdfs_dir(self):
    """Create the HDFS directory that batch (HDFS) indexing writes to,
    owned by the Metron user, with mode 0755 when kerberized and group-
    writable 0775 otherwise."""
    Logger.info('Setting up HDFS indexing directory')
    # Non Kerberized Metron runs under 'storm', requiring write under the 'hadoop' group.
    # Kerberized Metron runs under it's own user.
    # NOTE: despite the name, `ownership` holds a permission *mode*
    # (these are legacy Python 2 octal literals).
    ownership = 0755 if self.__params.security_enabled else 0775
    Logger.info('HDFS indexing directory ownership is: ' + str(ownership))
    self.__params.HdfsResource(self.__params.metron_apps_indexed_hdfs_dir,
                               type="directory",
                               action="create_on_execute",
                               owner=self.__params.metron_user,
                               group=self.__params.hadoop_group,
                               mode=ownership,
                               )
    Logger.info('Done creating HDFS indexing directory')
def check_elasticsearch_templates(self):
    """Verify every expected Elasticsearch index template exists by
    curling the _template endpoint; failure handling (using err_msg) is
    delegated to metron_service.execute."""
    for template_name in self.get_templates():
        # check for the index template
        cmd = "curl -s -XGET \"http://{0}/_template/{1}\" | grep -o {1}"
        err_msg="Missing Elasticsearch index template: name={0}"
        metron_service.execute(
            cmd=cmd.format(self.__params.es_http_url, template_name),
            user=self.__params.metron_user,
            err_msg=err_msg.format(template_name))
def solr_schema_install(self, env):
    """Install the Metron Solr collection schemas.

    For each collection returned by get_solr_schemas(), runs
    $METRON_HOME/bin/create_collection.sh as the Solr user, kinit'ing
    first when the cluster is kerberized.
    """
    from params import params
    env.set_params(params)
    Logger.info("Installing Solr schemas")
    if self.__params.security_enabled:
        metron_security.kinit(self.__params.kinit_path_local,
                              self.__params.solr_keytab_path,
                              self.__params.solr_principal_name,
                              self.__params.solr_user)
    # Fixed: no need to construct a second IndexingCommands(params) just to
    # list the collections — use this instance.  The exported environment
    # prefix is loop-invariant, so build it once.
    cmd_prefix = format((
        "export ZOOKEEPER={solr_zookeeper_url};"
        "export SECURITY_ENABLED={security_enabled};"
    ))
    for collection_name in self.get_solr_schemas():
        # install the schema
        cmd = cmd_prefix + "{0}/bin/create_collection.sh {1};"
        Execute(
            cmd.format(params.metron_home, collection_name),
            user=self.__params.solr_user)
def solr_schema_delete(self, env):
    """Delete the Metron Solr collection schemas.

    For each collection returned by get_solr_schemas(), runs
    $METRON_HOME/bin/delete_collection.sh as the Solr user, kinit'ing
    first when the cluster is kerberized.
    """
    from params import params
    env.set_params(params)
    Logger.info("Deleting Solr schemas")
    if self.__params.security_enabled:
        metron_security.kinit(self.__params.kinit_path_local,
                              self.__params.solr_keytab_path,
                              self.__params.solr_principal_name,
                              self.__params.solr_user)
    # Fixed: use this instance instead of building a redundant second
    # IndexingCommands(params); hoist the loop-invariant export prefix.
    cmd_prefix = format((
        "export ZOOKEEPER={solr_zookeeper_url};"
        "export SECURITY_ENABLED={security_enabled};"
    ))
    for collection_name in self.get_solr_schemas():
        # delete the schema
        cmd = cmd_prefix + "{0}/bin/delete_collection.sh {1};"
        Execute(
            cmd.format(params.metron_home, collection_name),
            user=self.__params.solr_user)
def start_batch_indexing_topology(self, env):
    """Submit the batch (HDFS) indexing Storm topology unless it is
    already active, kinit'ing as the Metron user when kerberized."""
    Logger.info('Starting ' + self.__batch_indexing_topology)
    if not self.is_batch_topology_active(env):
        if self.__params.security_enabled:
            metron_security.kinit(self.__params.kinit_path_local,
                                  self.__params.metron_keytab_path,
                                  self.__params.metron_principal_name,
                                  execute_user=self.__params.metron_user)
        start_cmd_template = """{0}/bin/start_hdfs_topology.sh"""
        start_cmd = start_cmd_template.format(self.__params.metron_home)
        Execute(start_cmd, user=self.__params.metron_user, tries=3, try_sleep=5, logoutput=True)
    else:
        Logger.info('Batch Indexing topology already running')
    Logger.info('Finished starting batch indexing topology')
def start_random_access_indexing_topology(self, env):
    """Start the random access indexing topology unless it is already running."""
    Logger.info('Starting ' + self.__random_access_indexing_topology)
    if self.is_random_access_topology_active(env):
        Logger.info('Random Access Indexing topology already running')
    else:
        # Obtain a Kerberos ticket for the Metron user before launching.
        if self.__params.security_enabled:
            metron_security.kinit(self.__params.kinit_path_local,
                                  self.__params.metron_keytab_path,
                                  self.__params.metron_principal_name,
                                  execute_user=self.__params.metron_user)
        # The Solr and Elasticsearch writers use different launcher scripts.
        if self.__params.ra_indexing_writer == 'Solr':
            template = '{0}/bin/start_solr_topology.sh'
        else:
            template = '{0}/bin/start_elasticsearch_topology.sh'
        Execute(template.format(self.__params.metron_home),
                user=self.__params.metron_user, tries=3, try_sleep=5, logoutput=True)
    Logger.info('Finished starting random access indexing topology')
def start_indexing_topology(self, env):
    """Start both indexing topologies: batch first, then random access."""
    for starter in (self.start_batch_indexing_topology,
                    self.start_random_access_indexing_topology):
        starter(env)
    Logger.info('Finished starting indexing topologies')
def stop_batch_indexing_topology(self, env):
    """Kill the batch indexing Storm topology if it is currently running."""
    Logger.info('Stopping ' + self.__batch_indexing_topology)
    if not self.is_batch_topology_active(env):
        Logger.info("Batch Indexing topology already stopped")
    else:
        # Kerberos ticket needed before talking to a secured Storm cluster.
        if self.__params.security_enabled:
            metron_security.kinit(self.__params.kinit_path_local,
                                  self.__params.metron_keytab_path,
                                  self.__params.metron_principal_name,
                                  execute_user=self.__params.metron_user)
        kill_cmd = 'storm kill ' + self.__batch_indexing_topology
        Execute(kill_cmd, user=self.__params.metron_user, tries=3, try_sleep=5, logoutput=True)
    Logger.info('Done stopping batch indexing topologies')
def stop_random_access_indexing_topology(self, env):
    """Kill the random access indexing Storm topology if it is currently running."""
    Logger.info('Stopping ' + self.__random_access_indexing_topology)
    if not self.is_random_access_topology_active(env):
        Logger.info("Random Access Indexing topology already stopped")
    else:
        # Kerberos ticket needed before talking to a secured Storm cluster.
        if self.__params.security_enabled:
            metron_security.kinit(self.__params.kinit_path_local,
                                  self.__params.metron_keytab_path,
                                  self.__params.metron_principal_name,
                                  execute_user=self.__params.metron_user)
        kill_cmd = 'storm kill ' + self.__random_access_indexing_topology
        Execute(kill_cmd, user=self.__params.metron_user, tries=3, try_sleep=5, logoutput=True)
    Logger.info('Done stopping random access indexing topologies')
def stop_indexing_topology(self, env):
    """Stop both indexing topologies: batch first, then random access."""
    for stopper in (self.stop_batch_indexing_topology,
                    self.stop_random_access_indexing_topology):
        stopper(env)
    Logger.info('Done stopping indexing topologies')
def restart_indexing_topology(self, env):
    """Restart both indexing topologies: batch first, then random access."""
    Logger.info('Restarting the indexing topologies')
    for restarter in (self.restart_batch_indexing_topology,
                      self.restart_random_access_indexing_topology):
        restarter(env)
def restart_batch_indexing_topology(self, env):
    """Stop the batch topology, wait for Storm to clean it up, then start it again.

    Gives Storm up to three 10-second grace periods to tear down the old
    topology; if it never goes inactive, the start is aborted.
    """
    Logger.info('Restarting the batch indexing topology')
    self.stop_batch_indexing_topology(env)
    # Wait for old topology to be cleaned up by Storm, before starting again.
    attempts = 0
    still_active = self.is_batch_topology_active(env)
    while still_active and attempts < 3:
        Logger.info('Existing batch topology still active. Will wait and retry')
        time.sleep(10)
        attempts += 1
        still_active = self.is_batch_topology_active(env)
    if still_active:
        Logger.warning('Retries exhausted. Existing topology not cleaned up. Aborting topology start.')
    else:
        Logger.info('Waiting for storm kill to complete')
        time.sleep(30)
        self.start_batch_indexing_topology(env)
        Logger.info('Done restarting the batch indexing topology')
def restart_random_access_indexing_topology(self, env):
    """Stop the random access topology, wait for Storm cleanup, then restart.

    Gives Storm up to three 10-second grace periods to tear down the old
    topology; if it never goes inactive, the start is aborted.
    """
    Logger.info('Restarting the random access indexing topology')
    self.stop_random_access_indexing_topology(env)
    # Wait for old topology to be cleaned up by Storm, before starting again.
    attempts = 0
    still_active = self.is_random_access_topology_active(env)
    while still_active and attempts < 3:
        Logger.info('Existing random access topology still active. Will wait and retry')
        time.sleep(10)
        attempts += 1
        still_active = self.is_random_access_topology_active(env)
    if still_active:
        Logger.warning('Retries exhausted. Existing topology not cleaned up. Aborting topology start.')
    else:
        Logger.info('Waiting for storm kill to complete')
        time.sleep(30)
        self.start_random_access_indexing_topology(env)
        Logger.info('Done restarting the random access indexing topology')
def is_batch_topology_active(self, env):
    """Return True when the batch indexing topology is ACTIVE or REBALANCING."""
    env.set_params(self.__params)
    running = metron_service.get_running_topologies(self.__params)
    # Missing entry -> get() yields None, which is not a live status.
    status = running.get(self.__batch_indexing_topology)
    return status in ['ACTIVE', 'REBALANCING']
def is_random_access_topology_active(self, env):
    """Return True when the random access indexing topology is ACTIVE or REBALANCING."""
    env.set_params(self.__params)
    running = metron_service.get_running_topologies(self.__params)
    # Missing entry -> get() yields None, which is not a live status.
    status = running.get(self.__random_access_indexing_topology)
    return status in ['ACTIVE', 'REBALANCING']
def is_topology_active(self, env):
    # True only when BOTH the batch and random access topologies are active.
    # `and` short-circuits: the second Storm query is skipped when the batch
    # topology is already known to be down.
    return self.is_batch_topology_active(env) and self.is_random_access_topology_active(env)
def service_check(self, env):
    """
    Performs a service check for Indexing.

    Verifies, in order: indexer parameters, Kafka topics, the HBase update
    table and column family, Elasticsearch templates, (when secured) Kafka
    and HBase ACLs, and finally that both indexing topologies are running.

    :param env: Environment
    :raises Fail: if the indexing topologies are not running
    """
    metron_service.check_indexer_parameters()
    Logger.info('Checking Kafka topics for Indexing')
    metron_service.check_kafka_topics(self.__params, self.__get_topics())
    Logger.info("Checking HBase for Indexing")
    metron_service.check_hbase_table(self.__params, self.__params.update_hbase_table)
    metron_service.check_hbase_column_family(self.__params, self.__params.update_hbase_table, self.__params.update_hbase_cf)
    Logger.info('Checking Elasticsearch templates for Indexing')
    # NOTE(review): templates are checked unconditionally, even when Solr is
    # the random access writer — confirm this is intended.
    self.check_elasticsearch_templates()
    if self.__params.security_enabled:
        Logger.info('Checking Kafka ACLs for Indexing')
        metron_service.check_kafka_acls(self.__params, self.__get_topics())
        metron_service.check_kafka_acl_groups(self.__params, self.__get_kafka_acl_groups())
        Logger.info("Checking HBase ACLs for Indexing")
        metron_service.check_hbase_acls(self.__params, self.__params.update_hbase_table)
    Logger.info("Checking for Indexing topology")
    # Both the batch and random access topologies must be up.
    if not self.is_topology_active(env):
        raise Fail("Indexing topology not running")
    Logger.info("Indexing service check completed successfully")
def get_zeppelin_auth_details(self, ses, zeppelin_server_url, env):
"""
With Ambari 2.5+, Zeppelin server is enabled to work with Shiro authentication, which requires user/password
for authentication (see https://zeppelin.apache.org/docs/0.6.0/security/shiroauthentication.html for details).
This method checks if Shiro authentication is enabled on the Zeppelin server. And if enabled, it returns the
session connection details to be used for importing Zeppelin notebooks.
:param ses: Session handle
:param zeppelin_server_url: Zeppelin Server URL
:return: ses
"""
from params import params
env.set_params(params)
# Check if authentication is enabled on the Zeppelin server
try:
ses.get(ambari_format('http://{zeppelin_server_url}/api/login'))
# Establish connection if authentication is enabled
try:
Logger.info("Shiro authentication is found to be enabled on the Zeppelin server.")
# Read the Shiro admin user credentials from Zeppelin config in Ambari
seen_users = False
username = None
password = <PASSWORD>
if re.search(r'^\[users\]', params.zeppelin_shiro_ini_content, re.MULTILINE):
seen_users = True
tokens = re.search(r'^admin\ =.*', params.zeppelin_shiro_ini_content, | |
actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tensorboard), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard.Tensorboard()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tensorboard.Tensorboard()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_tensorboard(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_tensorboard_flattened_error_async():
    """Passing a request object together with flattened fields must raise."""
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both a request message and flattened kwargs is rejected.
    request = tensorboard_service.GetTensorboardRequest()
    with pytest.raises(ValueError):
        await client.get_tensorboard(request, name="name_value")
def test_update_tensorboard(
    transport: str = "grpc", request_type=tensorboard_service.UpdateTensorboardRequest
):
    """update_tensorboard forwards the request unchanged and wraps the LRO."""
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_tensorboard), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.update_tensorboard(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == tensorboard_service.UpdateTensorboardRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_update_tensorboard_from_dict():
    """Exercise the same path with a plain dict standing in for the request."""
    test_update_tensorboard(request_type=dict)
def test_update_tensorboard_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_tensorboard), "__call__"
    ) as call:
        client.update_tensorboard()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # With no arguments, the client must build a default (empty) request.
        assert args[0] == tensorboard_service.UpdateTensorboardRequest()
@pytest.mark.asyncio
async def test_update_tensorboard_async(
    transport: str = "grpc_asyncio",
    request_type=tensorboard_service.UpdateTensorboardRequest,
):
    """Async variant: update_tensorboard forwards the request and wraps the LRO."""
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_tensorboard), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.update_tensorboard(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == tensorboard_service.UpdateTensorboardRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_tensorboard_async_from_dict():
    """Exercise the async path with a plain dict standing in for the request."""
    await test_update_tensorboard_async(request_type=dict)
def test_update_tensorboard_field_headers():
    """URI-bound fields must be mirrored into x-goog-request-params metadata."""
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tensorboard_service.UpdateTensorboardRequest()
    request.tensorboard.name = "tensorboard.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_tensorboard), "__call__"
    ) as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.update_tensorboard(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "tensorboard.name=tensorboard.name/value",) in kw[
        "metadata"
    ]
@pytest.mark.asyncio
async def test_update_tensorboard_field_headers_async():
    """Async variant: URI-bound fields must appear in x-goog-request-params."""
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tensorboard_service.UpdateTensorboardRequest()
    request.tensorboard.name = "tensorboard.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_tensorboard), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.update_tensorboard(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "tensorboard.name=tensorboard.name/value",) in kw[
        "metadata"
    ]
def test_update_tensorboard_flattened():
    """Flattened kwargs must be packed into the request message fields."""
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_tensorboard), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_tensorboard(
            tensorboard=gca_tensorboard.Tensorboard(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].tensorboard
        mock_val = gca_tensorboard.Tensorboard(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
def test_update_tensorboard_flattened_error():
    """Passing a request object together with flattened fields must raise."""
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both a request message and flattened kwargs is rejected.
    request = tensorboard_service.UpdateTensorboardRequest()
    with pytest.raises(ValueError):
        client.update_tensorboard(
            request,
            tensorboard=gca_tensorboard.Tensorboard(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_tensorboard_flattened_async():
    """Async variant: flattened kwargs must be packed into the request fields.

    Fixed: the mock's return value was assigned twice in a row — the first
    (sync-style) Operation assignment was dead code, immediately overwritten
    by the FakeUnaryUnaryCall; it has been removed.
    """
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_tensorboard), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_tensorboard(
            tensorboard=gca_tensorboard.Tensorboard(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].tensorboard
        mock_val = gca_tensorboard.Tensorboard(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_tensorboard_flattened_error_async():
    """Async variant: request object plus flattened fields must raise."""
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both a request message and flattened kwargs is rejected.
    request = tensorboard_service.UpdateTensorboardRequest()
    with pytest.raises(ValueError):
        await client.update_tensorboard(
            request,
            tensorboard=gca_tensorboard.Tensorboard(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
def test_list_tensorboards(
    transport: str = "grpc", request_type=tensorboard_service.ListTensorboardsRequest
):
    """list_tensorboards forwards the request and returns a pager.

    Fixed: the final assertion compared against a corrupted literal
    ("<PASSWORD>page_token_<PASSWORD>"); it now matches the value the mocked
    response is constructed with ("next_page_token_value").
    """
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_tensorboards), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = tensorboard_service.ListTensorboardsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_tensorboards(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == tensorboard_service.ListTensorboardsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListTensorboardsPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_tensorboards_from_dict():
    """Exercise the same path with a plain dict standing in for the request."""
    test_list_tensorboards(request_type=dict)
def test_list_tensorboards_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_tensorboards), "__call__"
    ) as call:
        client.list_tensorboards()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # With no arguments, the client must build a default (empty) request.
        assert args[0] == tensorboard_service.ListTensorboardsRequest()
@pytest.mark.asyncio
async def test_list_tensorboards_async(
    transport: str = "grpc_asyncio",
    request_type=tensorboard_service.ListTensorboardsRequest,
):
    """Async variant: list_tensorboards forwards the request and pages the response.

    Fixed: the mocked response was built with a corrupted page-token literal
    ("<PASSWORD>token_<PASSWORD>") that could never satisfy the final
    assertion; it is now "next_page_token_value" to match.
    """
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_tensorboards), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tensorboard_service.ListTensorboardsResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.list_tensorboards(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == tensorboard_service.ListTensorboardsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListTensorboardsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_tensorboards_async_from_dict():
    """Exercise the async path with a plain dict standing in for the request."""
    await test_list_tensorboards_async(request_type=dict)
def test_list_tensorboards_field_headers():
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tensorboard_service.ListTensorboardsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tensorboards), "__call__"
) as | |
# scripts/python/catalyst/tests/test_build_isothermal.py
''' Testing of building an isothermal model '''
import sys
sys.path.append('../..')
import unittest
import pytest
from catalyst.isothermal_monolith_catalysis import *
import logging
__author__ = "<NAME>"
_log = logging.getLogger(__name__)
# Start test class
class TestBasicIsothermalCatalystBuild():
@pytest.fixture(scope="class")
def isothermal_object(self):
    """Class-scoped simulator used by the scalar-argument tests."""
    return Isothermal_Monolith_Simulator()
@pytest.fixture(scope="class")
def isothermal_object_with_lists(self):
    """Class-scoped simulator used by the list-argument tests."""
    return Isothermal_Monolith_Simulator()
@pytest.mark.build
def test_add_dim(self, isothermal_object):
    """add_axial_dim with bounds creates the z ContinuousSet."""
    sim = isothermal_object
    sim.add_axial_dim(0, 5)
    assert hasattr(sim.model, 'z')
    assert isinstance(sim.model.z, ContinuousSet)
@pytest.mark.build
def test_add_dim_list(self, isothermal_object_with_lists):
    """add_axial_dim with an explicit point list keeps every point."""
    sim = isothermal_object_with_lists
    sim.add_axial_dim(point_list=[0, 1, 2, 3, 4, 5])
    assert hasattr(sim.model, 'z')
    assert isinstance(sim.model.z, ContinuousSet)
    assert len(sim.model.z) == 6
@pytest.mark.build
def test_add_temporal_dim(self, isothermal_object):
    """add_temporal_dim with bounds creates the t ContinuousSet."""
    sim = isothermal_object
    sim.add_temporal_dim(0, 20)
    assert hasattr(sim.model, 't')
    assert isinstance(sim.model.t, ContinuousSet)
@pytest.mark.build
def test_add_temporal_dim_list(self, isothermal_object_with_lists):
    """add_temporal_dim with an explicit point list keeps every point."""
    sim = isothermal_object_with_lists
    sim.add_temporal_dim(point_list=[0, 4, 8, 12, 16, 20])
    assert hasattr(sim.model, 't')
    assert isinstance(sim.model.t, ContinuousSet)
    assert len(sim.model.t) == 6
@pytest.mark.build
def test_add_age_set(self, isothermal_object):
    """A single age name yields a one-element age_set."""
    sim = isothermal_object
    sim.add_age_set("Unaged")
    assert hasattr(sim.model, 'age_set')
    assert isinstance(sim.model.age_set, Set)
    assert len(sim.model.age_set) == 1
@pytest.mark.build
def test_add_age_set_list(self, isothermal_object_with_lists):
    """A list of age names yields a matching-size age_set."""
    sim = isothermal_object_with_lists
    sim.add_age_set(["Unaged", "2hr"])
    assert hasattr(sim.model, 'age_set')
    assert isinstance(sim.model.age_set, Set)
    assert len(sim.model.age_set) == 2
@pytest.mark.build
def test_add_temperature_set(self, isothermal_object):
    """A single temperature run creates T_set plus all run-level quantities."""
    sim = isothermal_object
    sim.add_temperature_set("250C")
    assert hasattr(sim.model, 'T_set')
    assert isinstance(sim.model.T_set, Set)
    assert len(sim.model.T_set) == 1
    # Run-scoped variables/parameters created alongside the temperature set.
    expected = [('T', Var), ('space_velocity', Var), ('v', Var), ('P', Var),
                ('Tref', Param), ('Pref', Param), ('rho', Var), ('mu', Var),
                ('Re', Var)]
    for attr, kind in expected:
        assert hasattr(sim.model, attr)
        assert isinstance(getattr(sim.model, attr), kind)
@pytest.mark.build
def test_add_temperature_set_list(self, isothermal_object_with_lists):
    """A list of temperature runs creates T_set plus all run-level quantities."""
    sim = isothermal_object_with_lists
    sim.add_temperature_set(["250C", "300C"])
    assert hasattr(sim.model, 'T_set')
    assert isinstance(sim.model.T_set, Set)
    assert len(sim.model.T_set) == 2
    # Run-scoped variables/parameters created alongside the temperature set.
    expected = [('T', Var), ('space_velocity', Var), ('v', Var), ('P', Var),
                ('Tref', Param), ('Pref', Param), ('rho', Var), ('mu', Var),
                ('Re', Var)]
    for attr, kind in expected:
        assert hasattr(sim.model, attr)
        assert isinstance(getattr(sim.model, attr), kind)
@pytest.mark.build
def test_add_gas_species(self, isothermal_object):
    """A single gas species creates gas_set and the concentration machinery."""
    sim = isothermal_object
    sim.add_gas_species("NH3")
    assert hasattr(sim.model, 'gas_set')
    assert isinstance(sim.model.gas_set, Set)
    assert len(sim.model.gas_set) == 1
    # Concentration variables, their derivatives, and transport quantities.
    expected = [('Cb', Var), ('C', Var), ('dCb_dz', DerivativeVar),
                ('dCb_dt', DerivativeVar), ('dC_dt', DerivativeVar),
                ('km', Var), ('Dm', Param), ('Sc', Var), ('Sh', Var)]
    for attr, kind in expected:
        assert hasattr(sim.model, attr)
        assert isinstance(getattr(sim.model, attr), kind)
@pytest.mark.build
def test_add_gas_species_list(self, isothermal_object_with_lists):
    """A list of gas species creates gas_set and the concentration machinery."""
    sim = isothermal_object_with_lists
    sim.add_gas_species(["NH3", "NO"])
    assert hasattr(sim.model, 'gas_set')
    assert isinstance(sim.model.gas_set, Set)
    assert len(sim.model.gas_set) == 2
    # Concentration variables, their derivatives, and transport quantities.
    expected = [('Cb', Var), ('C', Var), ('dCb_dz', DerivativeVar),
                ('dCb_dt', DerivativeVar), ('dC_dt', DerivativeVar),
                ('km', Var), ('Dm', Param), ('Sc', Var), ('Sh', Var)]
    for attr, kind in expected:
        assert hasattr(sim.model, attr)
        assert isinstance(getattr(sim.model, attr), kind)
@pytest.mark.build
def test_add_surface_species(self, isothermal_object):
    """A single surface species creates surf_set, q, and dq_dt."""
    sim = isothermal_object
    sim.add_surface_species("ZNH4")
    assert hasattr(sim.model, 'surf_set')
    assert isinstance(sim.model.surf_set, Set)
    assert len(sim.model.surf_set) == 1
    assert hasattr(sim.model, 'q')
    assert isinstance(sim.model.q, Var)
    assert hasattr(sim.model, 'dq_dt')
    assert isinstance(sim.model.dq_dt, DerivativeVar)
@pytest.mark.build
def test_add_surface_species_list(self, isothermal_object_with_lists):
    """A list of surface species creates surf_set, q, and dq_dt."""
    sim = isothermal_object_with_lists
    sim.add_surface_species(["ZNH4", "ZH"])
    assert hasattr(sim.model, 'surf_set')
    assert isinstance(sim.model.surf_set, Set)
    assert len(sim.model.surf_set) == 2
    assert hasattr(sim.model, 'q')
    assert isinstance(sim.model.q, Var)
    assert hasattr(sim.model, 'dq_dt')
    assert isinstance(sim.model.dq_dt, DerivativeVar)
@pytest.mark.build
def test_add_surface_sites(self, isothermal_object):
    """A single surface site creates site_set plus S/Smax/u_S."""
    sim = isothermal_object
    sim.add_surface_sites("ZH")
    assert hasattr(sim.model, 'site_set')
    assert isinstance(sim.model.site_set, Set)
    assert len(sim.model.site_set) == 1
    for attr, kind in [('S', Var), ('Smax', Param), ('u_S', Param)]:
        assert hasattr(sim.model, attr)
        assert isinstance(getattr(sim.model, attr), kind)
@pytest.mark.build
def test_add_surface_sites_list(self):
    """A list of sites on a freshly-built simulator creates site_set plus S/Smax/u_S."""
    # Fresh simulator so the shared fixtures are not polluted with extra sites.
    sim = Isothermal_Monolith_Simulator()
    sim.add_axial_dim(0, 5)
    sim.add_temporal_dim(0, 10)
    sim.add_age_set("Unaged")
    sim.add_temperature_set("250C")
    sim.add_gas_species("NH3")
    sim.add_surface_species("ZNH4")
    sim.add_surface_sites(["S1", "S2"])
    assert hasattr(sim.model, 'site_set')
    assert isinstance(sim.model.site_set, Set)
    assert len(sim.model.site_set) == 2
    for attr, kind in [('S', Var), ('Smax', Param), ('u_S', Param)]:
        assert hasattr(sim.model, attr)
        assert isinstance(getattr(sim.model, attr), kind)
@pytest.mark.build
def test_add_reactions_equ(self, isothermal_object, isothermal_object_with_lists):
    """Adding one EquilibriumArrhenius reaction builds the reaction sets,
    Arrhenius/equilibrium variables, species set, and rxn_orders on both
    fixtures (the list fixture carries one extra species: 4 vs 3)."""
    rxn_dict = {"r1": ReactionType.EquilibriumArrhenius}
    for sim, n_species in [(isothermal_object, 3),
                           (isothermal_object_with_lists, 4)]:
        sim.add_reactions(rxn_dict)
        m = sim.model
        # Reaction sets: one total, zero plain-Arrhenius, one equilibrium.
        assert hasattr(m, 'all_rxns')
        assert isinstance(m.all_rxns, Set)
        assert len(m.all_rxns) == 1
        assert hasattr(m, 'arrhenius_rxns')
        assert isinstance(m.arrhenius_rxns, Set)
        assert len(m.arrhenius_rxns) == 0
        assert hasattr(m, 'equ_arrhenius_rxns')
        assert isinstance(m.equ_arrhenius_rxns, Set)
        assert len(m.equ_arrhenius_rxns) == 1
        # Stoichiometry parameters plus Arrhenius/equilibrium variables.
        expected = [('u_C', Param), ('u_q', Param), ('A', Var), ('B', Var),
                    ('E', Var), ('Af', Var), ('Ef', Var), ('dH', Var),
                    ('dS', Var)]
        for attr, kind in expected:
            assert hasattr(m, attr)
            assert isinstance(getattr(m, attr), kind)
        # Combined species set and reaction-order parameters.
        assert hasattr(m, 'all_species_set')
        assert isinstance(m.all_species_set, Set)
        assert len(m.all_species_set) == n_species
        assert hasattr(m, 'rxn_orders')
        assert isinstance(m.rxn_orders, Param)
@pytest.mark.unit
def test_formfactor_calculations(self, isothermal_object, isothermal_object_with_lists):
    """Geometry setters compute hydraulic diameter (dh) and geometric surface
    area (Ga) for a monolith, and leave a manually-set dh alone when
    isMonolith is False.

    NOTE: statement order matters here — isMonolith and dh must be set on the
    list fixture BEFORE its geometry setters run, and are restored afterwards.
    """
    obj = isothermal_object
    obj_with_lists = isothermal_object_with_lists
    obj.set_bulk_porosity(0.3309)
    obj.set_cell_density(62)
    obj.set_washcoat_porosity(0.4)
    obj.set_reactor_radius(1)
    # Non-monolith path: dh is taken as given, not derived from cell density.
    obj_with_lists.isMonolith = False
    obj_with_lists.model.dh.set_value(0.1)
    obj_with_lists.set_bulk_porosity(0.3309)
    obj_with_lists.set_cell_density(62)
    obj_with_lists.set_washcoat_porosity(0.4)
    obj_with_lists.set_reactor_radius(1)
    assert value(obj.model.eb) == 0.3309
    assert value(obj.model.cell_density) == 62
    assert value(obj_with_lists.model.eb) == 0.3309
    assert value(obj_with_lists.model.cell_density) == 62
    assert value(obj_with_lists.model.dh) == 0.1
    # For the non-monolith, Ga = 6/dh.
    assert value(obj_with_lists.model.Ga) == 6/0.1
    # Monolith path: dh and Ga derived from cell density / porosity.
    assert pytest.approx(0.0777448, rel=1e-3) == value(obj.model.dh)
    assert pytest.approx(28.8159, rel=1e-3) == value(obj.model.Ga)
    # Restore the list fixture to monolith geometry for downstream tests.
    obj_with_lists.isMonolith = True
    obj_with_lists.model.dh.set_value(value(obj.model.dh))
    obj_with_lists.model.Ga.set_value(value(obj.model.Ga))
    assert pytest.approx(0.0777448, rel=1e-3) == value(obj_with_lists.model.dh)
    assert pytest.approx(28.8159, rel=1e-3) == value(obj_with_lists.model.Ga)
    obj.set_space_velocity_all_runs(1000)
    obj_with_lists.set_space_velocity_all_runs(1000)
@pytest.mark.unit
def test_set_site_balance(self, isothermal_object):
    """Verify that a site balance registers the expected occupancy coefficient."""
    obj = isothermal_object
    obj.set_site_density("ZH", "Unaged", 0.1152619)
    balance_info = {"mol_occupancy": {"ZNH4": 1}}
    obj.set_site_balance("ZH", balance_info)
    # One ZNH4 occupies one ZH site.
    assert value(obj.model.u_S["ZH", "ZNH4"]) == 1
@pytest.mark.unit
def test_set_reaction_info(self, isothermal_object, isothermal_object_with_lists):
    """Register reaction r1 on both objects and check vars, sets and coefficients."""
    obj = isothermal_object
    obj_with_lists = isothermal_object_with_lists
    rxn_dict = {"parameters": {"A": 250000, "E": 0,
                               "A_lb": 2500, "A_ub": 2500000000,
                               "E_lb": -1, "E_ub": 1,
                               "dH": -54000, "dS": 30,
                               "dH_lb": -55000, "dH_ub": -53000,
                               "dS_lb": 20, "dS_ub": 40,
                               },
                "mol_reactants": {"ZH": 1, "NH3": 1},
                "mol_products": {"ZNH4": 1},
                "rxn_orders": {"ZH": 1, "NH3": 1, "ZNH4": 1}
                }
    obj.set_reaction_info("r1", rxn_dict)
    obj_with_lists.set_reaction_info("r1", rxn_dict)
    # (lower bound, upper bound, initial value) per kinetic variable.
    expected = {
        "Af": (2500, 2500000000, 250000),
        "Ef": (-1, 1, 0),
        "dH": (-55000, -53000, -54000),
        "dS": (20, 40, 30),
    }
    for m in (obj.model, obj_with_lists.model):
        for var_name, (lb, ub, init) in expected.items():
            var = getattr(m, var_name)["r1"]
            assert value(var.lb) == lb
            assert value(var.ub) == ub
            assert value(var) == init
        # Reactant/product index sets were created with the right sizes.
        assert hasattr(m, 'r1_reactants')
        assert isinstance(m.r1_reactants, Set)
        assert len(m.r1_reactants) == 2
        assert hasattr(m, 'r1_products')
        assert isinstance(m.r1_products, Set)
        assert len(m.r1_products) == 1
    # Stoichiometric coefficients at the inlet axial point.
    assert value(obj.model.u_C["NH3","r1",obj.model.z.first()]) == -1
    assert value(obj.model.u_q["ZNH4","r1",obj.model.z.first()]) == 1
    wl = obj_with_lists.model
    assert value(wl.u_C["NH3","r1",wl.z.first()]) == -1
    assert value(wl.u_q["ZNH4","r1",wl.z.first()]) == 1
    assert value(wl.u_q["ZH","r1",wl.z.first()]) == -1
    # All reaction orders were taken from rxn_dict as 1.
    for m in (obj.model, obj_with_lists.model):
        for species in ("NH3", "ZH", "ZNH4"):
            assert value(m.rxn_orders["r1", species]) == 1
@pytest.mark.unit
def test_set_isothermal_temp(self, isothermal_object, isothermal_object_with_lists):
    """Set isothermal temperatures per (age, run) and verify T at the model origin."""
    obj = isothermal_object
    obj_with_lists = isothermal_object_with_lists
    obj.set_isothermal_temp("Unaged","250C",250+273.15)
    cases = [
        ("Unaged", "250C", 250+273.15),
        ("2hr", "250C", 250+273.15),
        ("Unaged", "300C", 300+273.15),
        ("2hr", "300C", 300+273.15),
    ]
    for age, run, temp in cases:
        obj_with_lists.set_isothermal_temp(age, run, temp)
    m = obj.model
    assert value(m.T["Unaged","250C",m.z.first(),m.t.first()]) == 250+273.15
    wl = obj_with_lists.model
    for age, run, temp in cases:
        assert value(wl.T[age, run, wl.z.first(), wl.t.first()]) == temp
@pytest.mark.initialization
def test_build_constraints(self, isothermal_object, isothermal_object_with_lists):
    """Build model constraints on both objects and check they were attached."""
    obj = isothermal_object
    obj_with_lists = isothermal_object_with_lists
    obj.build_constraints()
    obj_with_lists.build_constraints()
    for name in ("bulk_cons", "pore_cons", "surf_cons", "site_cons"):
        assert hasattr(obj.model, name)
        assert isinstance(getattr(obj.model, name), Constraint)
    # Only the first three constraint groups are asserted for the list-based object.
    for name in ("bulk_cons", "pore_cons", "surf_cons"):
        assert hasattr(obj_with_lists.model, name)
        assert isinstance(getattr(obj_with_lists.model, name), Constraint)
@pytest.mark.initialization
def test_discretization_fd(self, isothermal_object, isothermal_object_with_lists):
obj = isothermal_object
obj_with_lists = isothermal_object_with_lists
obj.discretize_model(method=DiscretizationMethod.FiniteDifference,
tstep=5,elems=5,colpoints=2)
obj_with_lists.discretize_model(method=DiscretizationMethod.FiniteDifference,
tstep=5,elems=5,colpoints=2)
assert hasattr(obj.model, 'dCbdz_edge')
assert isinstance(obj.model.dCbdz_edge, Constraint)
assert hasattr(obj_with_lists.model, 'dCbdz_edge')
assert isinstance(obj_with_lists.model.dCbdz_edge, Constraint)
assert len(obj.model.t) == len(obj_with_lists.model.t)
assert len(obj.model.z) == len(obj_with_lists.model.z)
assert pytest.approx(111.63437198706396, rel=1e-3) == \
value(obj.model.P["Unaged","250C",obj.model.z.first(),obj.model.t.first()])
assert pytest.approx(
value(obj_with_lists.model.P["Unaged","250C",obj_with_lists.model.z.first(),
obj_with_lists.model.t.first()]), rel=1e-3) == \
value(obj.model.P["Unaged","250C",obj.model.z.first(),obj.model.t.first()])
assert pytest.approx(28882.87336113903, rel=1e-3) == \
value(obj.model.v["Unaged","250C",obj.model.z.first(),obj.model.t.first()])
assert pytest.approx(value(obj_with_lists.model.v["Unaged","250C",obj_with_lists.model.z.first(),
obj_with_lists.model.t.first()]), rel=1e-3) == \
value(obj.model.v["Unaged","250C",obj.model.z.first(),obj.model.t.first()])
assert pytest.approx(0.0006748820366629658, rel=1e-3) == \
value(obj.model.rho["Unaged","250C",obj.model.z.first(),obj.model.t.first()])
assert pytest.approx(value(obj_with_lists.model.rho["Unaged","250C",obj_with_lists.model.z.first(),
obj_with_lists.model.t.first()]), rel=1e-3) | |
:param 'EventSourceMappingSourceAccessConfigurationType' type: The type of source access configuration.
:param str u_ri: The URI for the source access configuration resource.
"""
if type is not None:
pulumi.set(__self__, "type", type)
if u_ri is not None:
pulumi.set(__self__, "u_ri", u_ri)
@property
@pulumi.getter
def type(self) -> Optional['EventSourceMappingSourceAccessConfigurationType']:
    """
    The type of source access configuration.
    """
    # NOTE: intentionally shadows the builtin `type`; the name mirrors the
    # provider schema property this getter maps to.
    return pulumi.get(self, "type")
@property
@pulumi.getter(name="uRI")
def u_ri(self) -> Optional[str]:
    """
    The URI for the source access configuration resource.
    """
    # Snake_case accessor for the schema's "uRI" key (see the getter alias above).
    return pulumi.get(self, "u_ri")
@pulumi.output_type
class FilterCriteriaProperties(dict):
    """
    The filter criteria to control event filtering.
    """
    def __init__(__self__, *,
                 filters: Optional[Sequence['outputs.EventSourceMappingFilter']] = None):
        """
        The filter criteria to control event filtering.
        :param Sequence['EventSourceMappingFilter'] filters: List of filters of this FilterCriteria
        """
        # Only record the key when a value was supplied, so the underlying
        # dict omits absent fields instead of storing explicit None entries.
        if filters is not None:
            pulumi.set(__self__, "filters", filters)

    @property
    @pulumi.getter
    def filters(self) -> Optional[Sequence['outputs.EventSourceMappingFilter']]:
        """
        List of filters of this FilterCriteria
        """
        return pulumi.get(self, "filters")
@pulumi.output_type
class FunctionCode(dict):
    """
    The deployment package for a Lambda function: an S3 object location, a
    container image URI, or inline source code (zip_file).
    """
    @staticmethod
    def __key_warning(key: str):
        # Map raw camelCase schema keys to their snake_case property names and
        # warn callers that dict-style access should go through the property
        # getters instead.
        suggest = None
        if key == "imageUri":
            suggest = "image_uri"
        elif key == "s3Bucket":
            suggest = "s3_bucket"
        elif key == "s3Key":
            suggest = "s3_key"
        elif key == "s3ObjectVersion":
            suggest = "s3_object_version"
        elif key == "zipFile":
            suggest = "zip_file"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in FunctionCode. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        FunctionCode.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        FunctionCode.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 image_uri: Optional[str] = None,
                 s3_bucket: Optional[str] = None,
                 s3_key: Optional[str] = None,
                 s3_object_version: Optional[str] = None,
                 zip_file: Optional[str] = None):
        """
        :param str image_uri: ImageUri.
        :param str s3_bucket: An Amazon S3 bucket in the same AWS Region as your function. The bucket can be in a different AWS account.
        :param str s3_key: The Amazon S3 key of the deployment package.
        :param str s3_object_version: For versioned objects, the version of the deployment package object to use.
        :param str zip_file: The source code of your Lambda function. If you include your function source inline with this parameter, AWS CloudFormation places it in a file named index and zips it to create a deployment package.
        """
        # Only set keys that were actually provided so absent fields stay
        # missing rather than becoming explicit None entries.
        if image_uri is not None:
            pulumi.set(__self__, "image_uri", image_uri)
        if s3_bucket is not None:
            pulumi.set(__self__, "s3_bucket", s3_bucket)
        if s3_key is not None:
            pulumi.set(__self__, "s3_key", s3_key)
        if s3_object_version is not None:
            pulumi.set(__self__, "s3_object_version", s3_object_version)
        if zip_file is not None:
            pulumi.set(__self__, "zip_file", zip_file)

    @property
    @pulumi.getter(name="imageUri")
    def image_uri(self) -> Optional[str]:
        """
        ImageUri.
        """
        return pulumi.get(self, "image_uri")

    @property
    @pulumi.getter(name="s3Bucket")
    def s3_bucket(self) -> Optional[str]:
        """
        An Amazon S3 bucket in the same AWS Region as your function. The bucket can be in a different AWS account.
        """
        return pulumi.get(self, "s3_bucket")

    @property
    @pulumi.getter(name="s3Key")
    def s3_key(self) -> Optional[str]:
        """
        The Amazon S3 key of the deployment package.
        """
        return pulumi.get(self, "s3_key")

    @property
    @pulumi.getter(name="s3ObjectVersion")
    def s3_object_version(self) -> Optional[str]:
        """
        For versioned objects, the version of the deployment package object to use.
        """
        return pulumi.get(self, "s3_object_version")

    @property
    @pulumi.getter(name="zipFile")
    def zip_file(self) -> Optional[str]:
        """
        The source code of your Lambda function. If you include your function source inline with this parameter, AWS CloudFormation places it in a file named index and zips it to create a deployment package.
        """
        return pulumi.get(self, "zip_file")
@pulumi.output_type
class FunctionDeadLetterConfig(dict):
    """
    The dead-letter queue for failed asynchronous invocations.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map the camelCase schema key to its snake_case property and warn on
        # dict-style access.
        suggest = None
        if key == "targetArn":
            suggest = "target_arn"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in FunctionDeadLetterConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        FunctionDeadLetterConfig.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        FunctionDeadLetterConfig.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 target_arn: Optional[str] = None):
        """
        The dead-letter queue for failed asynchronous invocations.
        :param str target_arn: The Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.
        """
        # Omit the key entirely when no value was given.
        if target_arn is not None:
            pulumi.set(__self__, "target_arn", target_arn)

    @property
    @pulumi.getter(name="targetArn")
    def target_arn(self) -> Optional[str]:
        """
        The Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.
        """
        return pulumi.get(self, "target_arn")
@pulumi.output_type
class FunctionEnvironment(dict):
    """
    A function's environment variable settings.
    """
    def __init__(__self__, *,
                 variables: Optional[Any] = None):
        """
        A function's environment variable settings.
        :param Any variables: Environment variable key-value pairs.
        """
        # Omit the key entirely when no value was given.
        if variables is not None:
            pulumi.set(__self__, "variables", variables)

    @property
    @pulumi.getter
    def variables(self) -> Optional[Any]:
        """
        Environment variable key-value pairs.
        """
        return pulumi.get(self, "variables")
@pulumi.output_type
class FunctionFileSystemConfig(dict):
    """
    An Amazon EFS file system mount for a Lambda function (access point ARN
    plus the local mount path).
    """
    @staticmethod
    def __key_warning(key: str):
        # Map the camelCase schema key to its snake_case property and warn on
        # dict-style access.
        suggest = None
        if key == "localMountPath":
            suggest = "local_mount_path"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in FunctionFileSystemConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        FunctionFileSystemConfig.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        FunctionFileSystemConfig.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 arn: str,
                 local_mount_path: str):
        """
        :param str arn: The Amazon Resource Name (ARN) of the Amazon EFS access point that provides access to the file system.
        :param str local_mount_path: The path where the function can access the file system, starting with /mnt/.
        """
        # Both fields are required by the schema, so they are always set.
        pulumi.set(__self__, "arn", arn)
        pulumi.set(__self__, "local_mount_path", local_mount_path)

    @property
    @pulumi.getter
    def arn(self) -> str:
        """
        The Amazon Resource Name (ARN) of the Amazon EFS access point that provides access to the file system.
        """
        return pulumi.get(self, "arn")

    @property
    @pulumi.getter(name="localMountPath")
    def local_mount_path(self) -> str:
        """
        The path where the function can access the file system, starting with /mnt/.
        """
        return pulumi.get(self, "local_mount_path")
@pulumi.output_type
class FunctionImageConfig(dict):
    """
    Container image configuration overrides (command, entry point, working
    directory) for a Lambda function packaged as an image.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map raw camelCase schema keys to their snake_case property names and
        # warn on dict-style access.
        suggest = None
        if key == "entryPoint":
            suggest = "entry_point"
        elif key == "workingDirectory":
            suggest = "working_directory"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in FunctionImageConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        FunctionImageConfig.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        FunctionImageConfig.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 command: Optional[Sequence[str]] = None,
                 entry_point: Optional[Sequence[str]] = None,
                 working_directory: Optional[str] = None):
        """
        :param Sequence[str] command: Command.
        :param Sequence[str] entry_point: EntryPoint.
        :param str working_directory: WorkingDirectory.
        """
        # Only set keys that were actually provided.
        if command is not None:
            pulumi.set(__self__, "command", command)
        if entry_point is not None:
            pulumi.set(__self__, "entry_point", entry_point)
        if working_directory is not None:
            pulumi.set(__self__, "working_directory", working_directory)

    @property
    @pulumi.getter
    def command(self) -> Optional[Sequence[str]]:
        """
        Command.
        """
        return pulumi.get(self, "command")

    @property
    @pulumi.getter(name="entryPoint")
    def entry_point(self) -> Optional[Sequence[str]]:
        """
        EntryPoint.
        """
        return pulumi.get(self, "entry_point")

    @property
    @pulumi.getter(name="workingDirectory")
    def working_directory(self) -> Optional[str]:
        """
        WorkingDirectory.
        """
        return pulumi.get(self, "working_directory")
@pulumi.output_type
class FunctionTag(dict):
    """
    A key/value tag attached to a Lambda function. The key is required; the
    value is optional.
    """
    def __init__(__self__, *,
                 key: str,
                 value: Optional[str] = None):
        """
        :param str key: The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        :param str value: The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        # `key` is required; `value` is only stored when supplied.
        pulumi.set(__self__, "key", key)
        if value is not None:
            pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def value(self) -> Optional[str]:
        """
        The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        return pulumi.get(self, "value")
@pulumi.output_type
class FunctionTracingConfig(dict):
"""
The function's AWS X-Ray tracing configuration. To sample and record incoming requests, set Mode to Active.
"""
def __init__(__self__, *,
mode: Optional['FunctionTracingConfigMode'] = None):
"""
The function's AWS X-Ray tracing configuration. To sample and record incoming requests, set Mode to Active.
:param 'FunctionTracingConfigMode' mode: The | |
= [";\n".join(row[i::2]) for i in (0, 1)]
table.append((sort, row))
if not table:
if user:
await ctx.send("User is currently not punished.")
else:
await ctx.send("No users are currently punished.")
return
table.sort()
msg += tabulate.tabulate([k[1] for k in table], headers, tablefmt="grid")
for page in pagify(msg):
await ctx.send(box(page))
@punish.command(pass_context=True, no_pm=True, name="clean")
@checks.mod_or_permissions(manage_messages=True)
async def punish_clean(self, ctx, clean_pending: bool = False):
    """
    Removes absent members from the punished list.
    If run without an argument, it only removes members who are no longer
    present but whose timer has expired. If the argument is 'yes', 1,
    or another trueish value, it will also remove absent members whose
    timers have yet to expire.
    Use this option with care, as removing them will prevent the punished
    role from being re-added if they rejoin before their timer expires.
    """
    removed = 0
    now = time.time()
    server = ctx.guild
    data = await self.config.all_members(server)
    # Iterate a copy since entries are cleared while walking the mapping.
    for member_id, entry in data.copy().items():
        # Skip malformed keys and members who are still in the server.
        if not isinstance(member_id, int) or server.get_member(member_id):
            continue
        if clean_pending or ((entry["until"] or 0) < now):
            await self.config.member(MockedMember(member_id, server)).clear()
            removed += 1
    await ctx.send("Cleaned %i absent members from the list." % removed)
@punish.command(pass_context=True, no_pm=True, name="warn")
@checks.mod_or_permissions(manage_messages=True)
async def punish_warn(self, ctx, user: discord.Member, *, reason: str = None):
    """
    Warns a user with boilerplate about the rules
    """
    parts = [
        "Hey %s, " % user.mention,
        "you're doing something that might get you muted if you keep doing it.",
    ]
    if reason:
        parts.append(" Specifically, %s." % reason)
    parts.append("Be sure to review the server rules in #start-here.")
    # Note: join inserts a single space between parts, matching the original
    # message spacing exactly.
    await ctx.send(" ".join(parts))
@punish.command(pass_context=True, no_pm=True, name="end", aliases=["remove"])
@checks.mod_or_permissions(manage_messages=True)
async def punish_end(self, ctx, user: discord.Member, *, reason: str = None):
    """
    Removes punishment from a user before time has expired
    This is the same as removing the role directly.
    """
    role = await self.get_role(user.guild, ctx, quiet=True)
    now = time.time()
    async with self.config.member(user)() as data:
        if role and role in user.roles:
            msg = "Punishment manually ended early by %s." % ctx.message.author
            original_start = data["start"]
            original_end = data["until"]
            remaining = original_end and (original_end - now)
            if remaining:
                msg += " %s was left" % _generate_timespec(round(remaining))
                if original_start:
                    msg += " of the original %s." % _generate_timespec(
                        round(original_end - original_start)
                    )
                else:
                    msg += "."
            if reason:
                msg += "\n\nReason for ending early: " + reason
            if data["reason"] is not None:
                msg += "\n\nOriginal reason was: " + data["reason"]
            if not await self._unpunish(user, msg, update=True):
                msg += "\n\n(failed to send punishment end notification DM)"
            await ctx.send(msg)
        elif data:  # Stale data entry without the role; shouldn't happen, but just in case
            until = data["until"]
            remaining = (
                until and _generate_timespec(round(until - now)) or "forever"
            )
            data_fmt = "\n".join(
                [
                    "**Reason:** %s" % (data["reason"] or "no reason set"),
                    "**Time remaining:** %s" % remaining,
                    "**Moderator**: %s"
                    % (
                        user.guild.get_member(data["by"])
                        or "Missing ID#%s" % data["by"]
                    ),
                ]
            )
            await self.config.member(user).clear()
            # BUG FIX: this branch is also reachable when the punish role is
            # missing entirely (role is None); the old code dereferenced
            # role.name unconditionally and crashed with AttributeError.
            role_name = role.name if role else DEFAULT_ROLE_NAME
            await ctx.send(
                "That user doesn't have the %s role, but they still have a data entry. I removed it, "
                "but in case it's needed, this is what was there:\n\n%s"
                % (role_name, data_fmt)
            )
        elif role:
            await ctx.send("That user doesn't have the %s role." % role.name)
        else:
            await ctx.send("The punish role couldn't be found in this server.")
@punish.command(pass_context=True, no_pm=True, name="reason")
@checks.mod_or_permissions(manage_messages=True)
async def punish_reason(self, ctx, user: discord.Member, *, reason: str = None):
    """
    Updates the reason for a punishment, including the modlog if a case exists.
    """
    server = ctx.message.guild
    async with self.config.member(user)() as data:
        if not data:
            await ctx.send(
                "That user doesn't have an active punishment entry. To update modlog "
                "cases manually, use the `%sreason` command." % ctx.prefix
            )
            return
        data["reason"] = reason
        if reason:
            msg = "Reason updated."
        else:
            msg = "Reason cleared"
        caseno = data.get("caseno")
        if caseno and ENABLE_MODLOG:
            moderator = ctx.message.author
            case_error = None
            try:
                # Only the original moderator or an admin may claim the edit;
                # otherwise fall back to the recorded moderator (or the bot).
                if moderator.id != data["by"] and not await is_admin_or_superior(
                    self.bot, moderator
                ):
                    moderator = (
                        server.get_member(data["by"]) or server.me
                    )  # fallback gracefully
                case = await modlog.get_case(caseno, server, self.bot)
                # BUG FIX: the original passed `{reason: reason, mod: moderator}`,
                # using the (undefined) names as dict keys -- a guaranteed
                # NameError on `mod`. The keys must be strings.
                # NOTE(review): confirm the key names against the modlog
                # Case.edit API ("mod" vs "moderator").
                await case.edit({"reason": reason, "mod": moderator})
            except RuntimeError:
                case_error = "the case message could not be found"
            except Exception:
                # Deliberate best-effort: a modlog failure should not block
                # the reason update itself.
                pass
            if case_error:
                msg += "\n\n" + warning(
                    "There was an error updating the modlog case: %s." % case_error
                )
        await ctx.send(msg)
@commands.group(pass_context=True, invoke_without_command=True, no_pm=True)
@checks.admin_or_permissions(administrator=True)
async def punishset(self, ctx):
    # Settings group root: only dispatches to subcommands, showing help when
    # invoked bare.
    if ctx.invoked_subcommand is None:
        # NOTE(review): bot.send_cmd_help is the Red V2-era helper; on Red V3
        # this would typically be `await ctx.send_help()` -- confirm which
        # bot API level this cog targets.
        await self.bot.send_cmd_help(ctx)
@punishset.command(pass_context=True, no_pm=True, name="setup")
async def punishset_setup(self, ctx):
    """
    (Re)configures the punish role and channel overrides
    """
    server = ctx.message.guild
    default_name = DEFAULT_ROLE_NAME
    role_id = await self.config.guild(server).role_id()
    if role_id:
        role = discord.utils.get(server.roles, id=role_id)
    else:
        role = discord.utils.get(server.roles, name=default_name)
    perms = server.me.guild_permissions
    # BUG FIX: `not a and b` parses as `(not a) and b`, so the old check only
    # fired when manage_roles was missing AND manage_channels was present.
    # The intent is to bail out when either permission is missing.
    if not (perms.manage_roles and perms.manage_channels):
        await ctx.send(
            "I need the Manage Roles and Manage Channels permissions for that command to work."
        )
        return
    if not role:
        content = "The %s role doesn't exist; Creating it now... " % default_name
        msgobj = await ctx.send(content)
        perms = discord.Permissions.none()
        # BUG FIX: Guild.create_role takes keyword arguments only on the
        # discord.py rewrite API; passing the guild positionally raised
        # TypeError.
        role = await server.create_role(name=default_name, permissions=perms)
    else:
        content = "%s role exists... " % role.name
        msgobj = await ctx.send(content)
    if role.position != (server.me.top_role.position - 1):
        if role < server.me.top_role:
            # BUG FIX: Message.edit is a coroutine taking `content=`; the old
            # code neither awaited it nor passed the keyword, and rebound
            # msgobj to edit()'s return value, losing the message object.
            content += "moving role to higher position... "
            await msgobj.edit(content=content)
            await role.edit(position=server.me.top_role.position - 1)
        else:
            await msgobj.edit(
                content=content + "role is too high to manage."
                " Please move it to below my highest role."
            )
            return
    content += "(re)configuring channels... "
    await msgobj.edit(content=content)
    for channel in server.channels:
        await self.setup_channel(channel, role)
    await msgobj.edit(content=content + "done.")
    if role and role.id != role_id:
        await self.config.guild(server).role_id.set(role.id)
@punishset.command(pass_context=True, no_pm=True, name="channel")
async def punishset_channel(self, ctx, channel: discord.TextChannel = None):
    """
    Sets or shows the punishment "timeout" channel.
    This channel has special settings to allow punished users to discuss their
    infraction(s) with moderators.
    If there is a role deny on the channel for the punish role, it is
    automatically set to allow. If the default permissions don't allow the
    punished role to see or speak in it, an overwrite is created to allow
    them to do so.
    """
    # BUG FIX: the annotation was `discord.channel` (the module), which is not
    # a valid converter; `discord.TextChannel` is what the command expects.
    server = ctx.message.guild
    current = await self.config.guild(server).channel_id()
    current = current and server.get_channel(current)
    if channel is None:
        # No argument: just report the current setting.
        if not current:
            await ctx.send("No timeout channel has been set.")
        else:
            await ctx.send("The timeout channel is currently %s." % current.mention)
    else:
        if current == channel:
            await ctx.send(
                "The timeout channel is already %s. If you need to repair its permissions, use "
                "`%spunishset setup`." % (current.mention, ctx.prefix)
            )
            return
        await self.config.guild(server).channel_id.set(channel.id)
        role = await self.get_role(server, ctx, create=True)
        update_msg = "{} to the %s role" % role
        grants = []
        denies = []
        perms = permissions_for_roles(channel, role)
        overwrite = channel.overwrites_for(role) or discord.PermissionOverwrite()
        # Work out which timeout-channel permissions differ from the desired
        # defaults; None entries mean "no opinion".
        for perm, value in DEFAULT_TIMEOUT_OVERWRITE:
            if value is None:
                continue
            if getattr(perms, perm) != value:
                setattr(overwrite, perm, value)
                name = perm.replace("_", " ").title().replace("Tts", "TTS")
                if value:
                    grants.append(name)
                else:
                    denies.append(name)
        # Any changes made? Apply them.
        if grants or denies:
            grants = grants and ("grant " + format_list(*grants))
            denies = denies and ("deny " + format_list(*denies))
            to_join = [x for x in (grants, denies) if x]
            update_msg = update_msg.format(format_list(*to_join))
        if current and current.id != channel.id:
            if current.permissions_for(server.me).manage_roles:
                msg = info(
                    "Resetting permissions in the old channel (%s) to the default..."
                )
            else:
                msg = error(
                    "I don't have permissions to reset permissions in the old channel (%s)"
                )
            await ctx.send(msg % current.mention)
            await self.setup_channel(current, role)
        if channel.permissions_for(server.me).manage_roles:
            await ctx.send(
                info(
                    "Updating permissions in %s to %s..."
                    % (channel.mention, update_msg)
                )
            )
            # BUG FIX: set_permissions takes the overwrite as a keyword-only
            # argument on the rewrite API; positional raised TypeError.
            await channel.set_permissions(role, overwrite=overwrite)
        else:
            await ctx.send(
                error("I don't have permissions to %s." % update_msg)
            )
        await ctx.send("Timeout channel set to %s." % channel.mention)
@punishset.command(pass_context=True, no_pm=True, name="clear-channel")
async def punishset_clear_channel(self, ctx):
"""
Clears the timeout channel and resets its permissions
"""
server = ctx.message.guild
current = await self.config.guild(server).channel_id()
current = current and server.get_channel(current)
if current:
msg = None
await self.config.guild(server).channel_id.set(None)
if current.permissions_for(server.me).manage_roles:
role = await self.get_role(server, ctx, quiet=True)
await self.setup_channel(current, role)
msg = " and its permissions reset"
else:
msg = ", but I don't have permissions to reset its permissions."
await ctx.send("Timeout channel has | |
# from errors import incorrectinput
# Fallback error message constant (originally imported from errors module).
incorrectinput = 'INCORRECT SUBNET OR SUBNET MASK DETECTED NULL RETURNED'
# ----------------------------------------------------------------------------
# Module Functions
# ----------------------------------------------------------------------------
def addressing(subnet):
    '''Main entry point providing a version-specific ip-subnet object.
    --> IPv4 or IPv6 object (None when validation yields no usable version)
    :param subnet: either ipv4 or ipv6 subnet with /mask
    :param type: str
    '''
    checker = Validation(subnet)
    if not checker.validated:
        return None
    # Dispatch on the detected IP version; Validation already normalized
    # the subnet string (e.g. appended a default mask).
    factories = {4: IPv4, 6: IPv6}
    factory = factories.get(checker.version)
    return factory(checker.subnet) if factory else None
# private
# Concatenate strings s and pfx with conjuction
def _strconcate(s, pfx, conj=''):
if s == '':
s = s + pfx
else:
s = s + conj + pfx
return s
def found(s, sub, pos=0):
    '''Search for a substring in a string and return a boolean result.
    --> bool
    :param s: main string to be searched within
    :param type: str
    :param sub: substring which is to be searched in the main string
    :param type: str
    :param pos: position index the search starts from
    :param type: int
    '''
    try:
        # str.find returns -1 when absent; the comparison is already a bool,
        # so the old `True if ... else False` wrapper was redundant.
        return s.find(sub, pos) > -1
    except (AttributeError, TypeError):
        # Non-string inputs (no .find, or wrong argument types) count as
        # "not found" rather than raising -- narrowed from a bare except.
        return False
# ----------------------------------------------------------------------------
# Validation Class - doing subnet validation and version detection
# ----------------------------------------------------------------------------
class Validation():
    '''ip-subnet validation class.
    Detects the IP version of `subnet` and validates it. Construction raises
    on any invalid input, so `validated` is True whenever an instance exists
    for a v4/v6 subnet. As a side effect, a missing mask is defaulted
    (/32 for v4, /128 for v6) and self.subnet/self.mask are rewritten.
    :param subnet: ipv4 or ipv6 subnet with "/" mask
    :param type: str
    '''

    def __init__(self, subnet):
        '''ip-subnet validation class
        :param subnet: ipv4 or ipv6 subnet with "/" mask
        :param type: str
        '''
        self.mask = None
        self.subnet = subnet
        # Version sniffing happens first; the version-specific checks below
        # may rewrite self.subnet and self.mask.
        self.version = self.__function
        self.validated = False
        if self.version == 4:
            self.validated = self.check_v4_input
        elif self.version == 6:
            self.validated = self.check_v6_input
        else:
            raise Exception(f"Not a VALID Subnet {subnet}")

    @property
    def __function(self):
        # ":" marks IPv6, "." marks IPv4; anything else returns 0, which makes
        # __init__ raise.
        if found(self.subnet, ":"):
            return 6
        elif found(self.subnet, "."):
            return 4
        else:
            return 0

    @property
    def check_v4_input(self):
        '''Property to validate provided v4 subnet.
        Returns True on success, raises Exception otherwise. Defaults a
        missing mask to /32 and coerces self.mask to int.
        '''
        # ~~~~~~~~~ Mask Check ~~~~~~~~~
        try:
            self.mask = self.subnet.split("/")[1]
        except:
            # No "/" present: default to a host mask.
            self.mask = 32
            self.subnet = self.subnet + "/32"
        try:
            self.mask = int(self.mask)
            if not all([self.mask>=0, self.mask<=32]):
                raise Exception(f"Invalid mask length {self.mask}")
        except:
            # NOTE: the range failure raised just above is caught here too,
            # so any mask problem surfaces with this generic message.
            raise Exception(f"Incorrect Mask {self.mask}")
        # ~~~~~~~~~ Subnet Check ~~~~~~~~~
        try:
            octs = self.subnet.split("/")[0].split(".")
            if len(octs) != 4:
                raise Exception(f"Invalid Subnet Length {len(octs)}")
            for i in range(4):
                if not all([int(octs[i])>=0, int(octs[i])<=255 ]):
                    raise Exception("Invalid Subnet Octet {i}")
            return True
        except:
            # Any octet failure (including the specific raises above) is
            # reported as a generic unidentified-subnet error.
            raise Exception("Unidentified Subnet")

    @property
    def check_v6_input(self):
        '''Property to validate provided v6 subnet.
        Returns True on success, raises Exception otherwise. Defaults a
        missing mask to /128 and coerces self.mask to int.
        '''
        try:
            # ~~~~~~~~~ Mask Check ~~~~~~~~~
            self.mask = self.subnet.split("/")[1]
        except:
            # No "/" present: default to a host mask.
            self.mask = 128
            self.subnet = self.subnet + "/128"
        try:
            self.mask = int(self.mask)
            if not all([self.mask>=0, self.mask<=128]):
                raise Exception(f"Invalid mask length {self.mask}")
            # ~~~~~~~~~ Subnet ~~~~~~~~~
            sip = self.subnet.split("/")[0].split("::")
            # ~~~~~~~~~ Check Subnet squeezers ~~~~~~~~~
            if len(sip) > 2:
                raise Exception("Invalid Subnet, Squeezers detected > 1")
            # ~~~~~~~~~ Subnet Length ~~~~~~~~~
            lsip = sip[0].split(":")
            try:
                rsip = sip[1].split(":")
            except:
                # No "::" squeezer: everything is on the left side.
                rsip = []
            if len(lsip)+len(rsip) > 8:
                raise Exception(f"Invalid Subnet Length {len(lsip)+len(rsip)}")
            # ~~~~~~~~~ Validate Hextates ~~~~~~~~~
            for hxt in lsip+rsip:
                try:
                    if hxt != '' :
                        hex(int(hxt, 16))
                except:
                    raise Exception(f"Invalid Hextate {hxt}")
            # ~~~~~~~~~ All Good ~~~~~~~~~
            return True
        except:
            # NOTE: the specific raises above are swallowed by this broad
            # except and re-reported generically (original flow preserved).
            raise Exception("Unidentified Subnet")
# ----------------------------------------------------------------------------
# IPv4 Subnet (IPv4) class
# ----------------------------------------------------------------------------
class IPv4:
'''Defines IPv4 object and its various operations'''
# Initializer
def __init__(self, subnet):
self.subnet = subnet
self.mask = int(self.subnet.split("/")[1])
self.net = self.subnet.split("/")[0]
def __str__(self): return self.subnet    # human-readable form is the subnet string itself
def __repr__(self): return self.subnet   # repr mirrors str for convenience
def __getitem__(self, n):
    '''get a specific ip, Range of IP(s) from Subnet'''
    try:
        # Integer index: delegate to the n-th IP lookup.
        return self.n_thIP(n, False)
    except:
        # Slice (or anything without an integer path): materialize the
        # requested range as a tuple.
        return tuple(self.__subnetips(n.start, n.stop))
def __add__(self, n):
'''add n-ip's to given subnet and return udpated subnet'''
return self.n_thIP(n, False, "_")
def __sub__(self, n):
'''Deduct n-ip's from given subnet and return udpated subnet'''
return self.n_thIP(-1*n, False, "_")
def __truediv__(self, n):
'''Devide provided subnet/super-net to n-number of smaller subnets'''
return self.__sub_subnets(n)
def __iter__(self):
'''iterate over full subnet'''
return self.__subnetips()
# ------------------------------------------------------------------------
# Private Properties
# ------------------------------------------------------------------------
# binary to decimal mask convert
def __bin2decmask(self, binmask):
return binmask.count('1')
# binary mask return property
@property
def __binmask(self):
try:
pone ='1'*self.mask
pzero = '0'*(32-self.mask)
return pone+pzero
except:
pass
# Inverse mask return property
@property
def __invmask(self):
try:
pone ='0'*self.mask
pzero = '1'*(32-self.mask)
return pone+pzero
except:
pass
# ------------------------------------------------------------------------
# Private Methods
# ------------------------------------------------------------------------
# binary to Decimal convert subnet method
@staticmethod
def __bin2dec(binnet):
o = []
for x in range(0, 32, 8):
o.append(int(binnet[x:x+8], 2))
return o
# binary subnet return method
@staticmethod
def __binsubnet(subnet):
try:
s = ''
octs = subnet.split("/")[0].split(".")
for o in octs:
bo = str(bin(int(o)))[2:]
lbo = len(bo)
pzero = '0'*(8 - lbo)
s = s + pzero + bo
return s
except:
pass
# adjust length of 4 octets
@staticmethod
def __set32bits(bins):
lbo = len(str(bins))
pzero = '0'*(34 - lbo)
return pzero+bins[2:]
# list to octet conversion
@staticmethod
def __lst2oct(lst):
l = ''
for x in lst:
l = str(x) if l == '' else l +'.'+ str(x)
return l
# compare two binary for and operation
def __both(self, binone, bintwo):
b1 = int(binone.encode('ascii'), 2)
b2 = int(bintwo.encode('ascii'), 2)
b1b2 = bin(b1 & b2)
return self.__set32bits(b1b2)
# compare two binary for or operation
def __either(self, binone, bintwo):
b1 = int(binone.encode('ascii'), 2)
b2 = int(bintwo.encode('ascii'), 2)
b1b2 = bin(b1 | b2)
return self.__set32bits(b1b2)
# get n-number of subnets of given super-net
def __sub_subnets(self, n):
_iplst = []
for i1, x1 in enumerate(range(32)):
p = 2**x1
if p >= n: break
_nsm = self.mask + i1
_nip = int(self.__binsubnet(self.NetworkIP()), 2)
_bcip = int(self.__binsubnet(self.BroadcastIP()), 2)
_iis = (_bcip - _nip + 1) // p
for i2, x2 in enumerate(range(_nip, _bcip, _iis)):
_iplst.append(self.n_thIP(i2*_iis)+ "/" + str(_nsm))
return tuple(_iplst)
# yields IP Address(es) of the provided subnet
def __subnetips(self, begin=0, end=0):
_nip = int(self.__binsubnet(self.NetworkIP()), 2)
if end == 0:
_bcip = int(self.__binsubnet(self.BroadcastIP()), 2)
else:
_bcip = _nip + (end-begin)
for i2, x2 in enumerate(range(_nip, _bcip)):
if begin>0: i2 = i2+begin
yield self.n_thIP(i2)
# ------------------------------------------------------------------------
# Available Methods & Public properties of class
# ------------------------------------------------------------------------
def NetworkIP(self, withMask=True):
'''Network IP Address of subnet from provided IP/Subnet'''
try:
s = self.__binsubnet(self.subnet)
bm = self.__binmask
net = self.__lst2oct(self.__bin2dec(self.__both(s, bm )))
if withMask :
return net + "/" + str(self.mask)
else:
return net
except:
pass
subnetZero = NetworkIP
def BroadcastIP(self, withMask=False):
'''Broadcast IP Address of subnet from provided IP/Subnet'''
try:
s = self.__binsubnet(self.subnet)
im = self.__invmask
bc = self.__lst2oct(self.__bin2dec(self.__either(s, im )))
if withMask :
return bc + "/" + str(self.mask)
else:
return bc
except:
pass
def n_thIP(self, n=0, withMask=False, _=''):
'''n-th IP Address of subnet from provided IP/Subnet'''
s = self.__binsubnet(self.subnet)
if _ == '':
bm = self.__binmask
addedbin = self.__set32bits(bin(int(self.__both(s, bm), 2)+n))
else:
addedbin = self.__set32bits(bin(int(s.encode('ascii'), 2 )+n))
if any([addedbin > self.__binsubnet(self.BroadcastIP()),
addedbin < self.__binsubnet(self.NetworkIP())]) :
raise Exception("Address Out of Range")
else:
ip = self.__lst2oct(self.__bin2dec(addedbin))
if withMask :
return ip + "/" + str(self.mask)
else:
return ip
@property
def decmask(self):
'''Decimal Mask from provided IP/Subnet - Numeric/Integer'''
return self.mask
decimalMask = decmask
@property
def binmask(self):
'''Binary Mask from provided IP/Subnet'''
return self.__lst2oct(self.__bin2dec(self.__binmask))
@property
def invmask(self):
'''Inverse Mask from provided IP/Subnet'''
return self.__lst2oct(self.__bin2dec(self.__invmask))
def ipdecmask(self, n=0):
'''IP with Decimal Mask for provided IP/Subnet,
n ==>
n-th ip of subnet will appear in output if provided,
subnet0 ip will appear in output if not provided
default: n = 0, for Network IP
'''
try:
return self[n] + "/" + str(self.mask)
except:
raise Exception(f'Invalid Input : detected')
def ipbinmask(self, n=0):
'''IP with Binary Mask for provided IP/Subnet,
n ==>
n-th ip of subnet will appear in output if provided,
same input subnet/ip will appear in output if not provided
set - n = 0, for Network IP
'''
try:
return self[n] + " " + self.binmask
except:
raise Exception(f'Invalid Input : detected')
def ipinvmask(self, n=0):
'''IP with Inverse Mask for provided IP/Subnet,
n ==>
n-th ip of subnet will appear in output if provided,
same input subnet/ip will appear in output if not provided
set - n = 0, for Network IP
'''
try:
return self[n] + " " + self.invmask
except:
raise Exception(f'Invalid Input : detected')
@property
def version(self):
'''get version of IP Subnet'''
return 4
# ----------------------------------------------------------------------------
# IP Subnet (IPv6) class
# ----------------------------------------------------------------------------
class IPv6:
'''Defines IPv6 object and its various operations'''
# Object Initializer
    def __init__(self, subnet=''):
        '''subnet: IPv6 prefix "net/mask"; a missing "/mask" raises IndexError.'''
        self.subnet = subnet
        # property access for its side effect: caches self.network
        self.__networkIP
        self.__actualv6subnet = False # breaked subnet expanded
        self.__NetworkAddressbool = False # Subnet zero available/not
        self.mask = int(self.subnet.split("/")[1])
        self.net = self.subnet.split("/")[0]
def __str__(self): return self.subnet
def __repr__(self): return self.subnet
    def __getitem__(self, n):
        '''get a specific ip, Range of IP(s) from Subnet'''
        try:
            # integer index -> one IP
            return self.n_thIP(n, False)
        except:
            # NOTE(review): a slice lands here via TypeError from n_thIP, but
            # this bare except also hides genuine errors from the int path
            l = []
            for x in self.__subnetips(n.start, n.stop):
                l.append(x)
            return tuple(l)
    def __add__(self, n):
        '''add n-ip's to given subnet and return udpated subnet'''
        # "_" flag: offset from the supplied address (mirrors IPv4 — confirm)
        return self.n_thIP(n, False, "_")
    def __sub__(self, n):
        '''Deduct n-ip's from given subnet and return udpated subnet'''
        return self.n_thIP(-1*n, False, "_")
    def __truediv__(self, n):
        '''Devide provided subnet/super-net to n-number of smaller subnets'''
        return self.__sub_subnets(n)
    def __iter__(self):
        '''iterate over full subnet'''
        # returns the generator produced by __subnetips()
        return self.__subnetips()
# ------------------------------------------------------------------------
# Private Methods
# ------------------------------------------------------------------------
# get n-number of subnets of given super-net
    def __sub_subnets(self, n):
        '''Split into >= n equal child subnets (count rounds up to a power of two).'''
        _iplst = []
        # p becomes the smallest power of two >= n; i1 = extra prefix bits
        for i1, x1 in enumerate(range(128)):
            p = 2**x1
            if p >= n: break
        _nsm = self.mask + i1
        _nip = int(self.__binsubnet(self.subnetZero()), 2)
        _bcip = int(self.__binsubnet(self.BroadcastIP()), 2)
        # addresses per child subnet
        _iis = (_bcip - _nip + 1) // p
        for i2, x2 in enumerate(range(_nip, _bcip, _iis)):
            _iplst.append(self.n_thIP(i2*_iis)+ "/" + str(_nsm))
        return tuple(_iplst)
# binary subnet return method
    @staticmethod
    def __binsubnet(subnet):
        '''Return the address part as a 128-bit '0'/'1' string.

        Assumes a fully expanded address (8 hextets, no "::") — TODO confirm
        callers always expand first.  Silently returns None on parse errors.
        '''
        try:
            s = ''
            octs = subnet.split("/")[0].split(":")
            for o in octs:
                # each hextet is zero-padded to 16 bits
                bo = str(bin(int(o, 16)))[2:]
                lbo = len(bo)
                pzero = '0'*(16 - lbo)
                s = s + pzero + bo
            return s
        except:
            pass
# yields IP Address(es) of the provided subnet
    def __subnetips(self, begin=0, end=0):
        '''Generator over addresses of the subnet; end == 0 means "to broadcast".'''
        # NOTE(review): subnetZero / BroadcastIP / n_thIP are defined outside
        # this view — presumably aliases as in the IPv4 class; confirm
        _nip = int(self.__binsubnet(self.subnetZero()), 2)
        if end == 0:
            _bcip = int(self.__binsubnet(self.BroadcastIP()), 2)
        else:
            _bcip = _nip + (end-begin)
        for i2, x2 in enumerate(range(_nip, _bcip)):
            if begin>0: i2 = i2+begin
            yield self.n_thIP(i2)
# update Subnet to actual length
    @property
    def __actualsize(self):
        '''Expand self.subnet to its full 8-hextet form (idempotent via the
        __actualv6subnet flag).  Returns False on failure, otherwise None.
        '''
        try:
            if not self.__actualv6subnet:
                p = ''
                sip = self.subnet.split("/")[0].split("::")
                if len(sip) == 2:
                    # ~~~~~~ No padding, inserting zeros in middle ~~~~~~~
                    # _strconcate is a module-level helper not in this view
                    for x in range(1, 9):
                        p = _strconcate(p, self.__getHext(hexTnum=x), conj=':')
                    # NOTE(review): p is built from the address only — the
                    # "/mask" suffix appears to be dropped here; confirm
                    self.subnet = p
                else :
                    # ~~~~~~~ pad leading zeros ~~~~~~~
                    lsip = sip[0].split(":")
                    for x in range(8-len(lsip), 0, -1):
                        p = _strconcate(p, '0', conj=":")
                    if p != '':
                        self.subnet = p + ':' + self.subnet
                self.__actualv6subnet = True
        except:
            return False
# IP Portion of Input
@property
def __networkIP(self):
try:
self.network = self.subnet.split("/")[0]
return self.network
except:
raise Exception("WARNING!!! NO SUBNET DETECTED, NULL RETURNED")
return None
# Padding subnet with ":0" or ":ffff"
@staticmethod
def __pad(padwhat='0', counts=0):
s = ''
for x in range(counts):
s = s + ":" + padwhat
return s
# Return a specific Hextate (hexTnum) from IPV6 address
def __getHext(self, hexTnum, s=''):
if s == '':
s = self.subnet.split("/")[0]
try:
if s != '' and all([hexTnum>0, hexTnum<=8]):
sip = s.split("/")[0].split("::")
lsip = sip[0].split(":")
if | |
i_RES_maxpow or i_RES_idleScale dim inconsistent')
#check enb-route map and res-route map
[self.m_EnbNum, self.m_RtNum]=np.shape(self.i_EnbRouteMat)
t=np.shape(self.i_ResRouteMat)
if t[0]!=self.m_ResNum:
raise Exception('Number of resource mismatch for resource-route map')
if t[1]!=self.m_RtNum:
raise Exception('Number of routes mismatch for resource-route map')
# check weighting factor
w=np.shape(self.i_Weight)
if not w: # for single value
self.i_Weight = np.array([[self.i_Weight]])
w=np.shape(self.i_Weight)
elif len(w)==1: # for one dim array
self.i_Weight = np.array([self.i_Weight])
w=np.shape(self.i_Weight)
#print self.i_Weight
if (w[0]==1) and (w[1]==1):
if self.i_Weight>1 or self.i_Weight<0:
raise Exception('Incorrect range for weighting factor')
elif (w[0]!=self.m_EnbNum) or (w[1]!=3):
raise Exception('Input i_Weight dim mismatch number of user')
# check loop control
if not (self.i_outloop_en==0 or self.i_outloop_en==1):
print 'self.i_outloop_en=',self.i_outloop_en
raise Exception('Out loop control should be 1 or 0')
# check RES util ratio para
if not(self.i_RES_ratio.any()):
self.m_res_appox_en=0
else:
self.m_res_appox_en=1
if len(self.i_RES_ratio)!= len(self.i_RES_idleScale):
raise Exception('Input i_RES_ratio dim mismatch number of res')
# check constraint mode
if not(self.i_constraint_mode):
self.i_constraint_mode = 'lag' # Lagrangian
elif (self.i_constraint_mode!='lag'
and self.i_constraint_mode!='pen'):
raise Exception('Input i_constraint_mode not supported yet')
print'constraint mode is %s ' %self.i_constraint_mode
# parse route map for RES and BS
self.f_pars_routeMap()
# check sgw, mux and links against system throughput
self.m_throughput = sum(self.i_traffic_in)
if sum(self.m_SGW_cap) <= self.m_throughput:
raise Exception('SGW total capacity is less than the system input')
if sum(self.m_MUX_cap) <= self.m_throughput:
raise Exception('MUX total capacity is less than the system input')
if sum(self.m_Link_cap) <= self.m_throughput:
raise Exception('Link total capacity is less than the system input')
# check i_LB_mode
if not(self.i_LB_mode):
self.i_LB_mode = 'global'
elif (self.i_LB_mode!='global'
and self.i_LB_mode!='user'):
raise Exception('Only support global or user mode for i_LB_mode')
# check i_normScale
if (self.i_normScale):
if self.i_normScale <= 0:
raise Exception('i_normScale should be a positive numer')
self.m_normScale = self.i_normScale
# need put more check
    def f_pars_routeMap(self):
        """Derive per-resource power parameters and slice the resource
        vectors into their SGW / MUX / link sections.
        NOTE(review): m_SgwNum, m_MuxNum, i_MUX_num, i_link_num are set
        outside this view - confirm they exist before this is called.
        """
        # Derive RES paras
        # marginal power per unit load for every resource
        self.m_RES_peff = ((1.0-self.i_RES_idleScale)
                            * self.i_RES_maxpow
                            / self.i_RES_cap)
        if self.m_res_appox_en:
            # amortise the idle power over the target utilisation ratio
            idle_amortise = (self.m_res_appox_en * self.i_RES_idleScale * self.i_RES_maxpow
                            /(self.i_RES_ratio * self.i_RES_cap))
            self.m_RES_peff_approx = self.m_RES_peff + idle_amortise
        else:
            self.m_RES_peff_approx = self.m_RES_peff
        # idle (zero-load) power of each resource
        self.m_RES_p0 = self.i_RES_idleScale * self.i_RES_maxpow
        # get start postion in res-route map
        self.m_sgw_start = 0 # 0 for python, 1 for matlab
        self.m_mux_start = self.m_SgwNum+self.m_sgw_start
        self.m_link_start = self.m_mux_start+self.m_MuxNum
        # Derive SGW paras
        sgw_start = self.m_sgw_start
        sgw_end = self.m_mux_start # -1 only for matlab
        self.m_SGW_cap = self.i_RES_cap[sgw_start:sgw_end]
        self.m_SGW_peff = self.m_RES_peff_approx[sgw_start:sgw_end]
        self.m_SGW_p0 = self.m_RES_p0[sgw_start:sgw_end]
        # Derive MUX paras
        if self.i_MUX_num > 0:
            mux_start = self.m_mux_start
            mux_end = self.m_link_start # -1 only for matlab
            self.m_MUX_cap = self.i_RES_cap[mux_start:mux_end]
            self.m_MUX_peff = self.m_RES_peff_approx[mux_start:mux_end]
            self.m_MUX_p0 = self.m_RES_p0[mux_start:mux_end]
        # Derive link paras
        if self.i_link_num > 0:
            link_start = self.m_link_start
            link_end = self.m_ResNum
            self.m_Link_cap = self.i_RES_cap[link_start:link_end]
            self.m_Link_peff = self.m_RES_peff_approx[link_start:link_end]
            self.m_Link_p0 = self.m_RES_p0[link_start:link_end]
# Initialise
    def f_init(self):
        """Run every initialisation step, in dependency order."""
        # initial rate,res,sgw,mux set config
        self.f_init_set()
        # initial weights for power, SGW and MUX LB
        self.f_init_weight()
        # initial method mode related (LBMUX, LBMUX_NVAR)
        self.f_init_methodMode()
        # initial power norm term
        self.f_init_norm()
        # Init user specific SGW cap sum
        self.f_init_userSysCap()
        # intial exit, inner, outer loop related
        self.f_init_loop()
        # initial plot, fig
        self.f_init_plot()
        # Init output
        self.f_init_out()
    def f_init_set(self):
        """Allocate the per-iteration state vectors (rates, enable maps, loads)."""
        # Initial rate update
        self.m_rate_update = np.zeros((self.m_RtNum,)) # (1,self.m_RtNum)
        # Initial resource enabling map
        self.md_res_config_pre = np.ones((self.m_ResNum,)) # (1,self.m_ResNum)
        self.md_res_config_new = np.ones((self.m_ResNum,)) # (1,self.m_ResNum)
        # Init pre and new SGW reconfig vector with all 0
        # NOTE(review): sized m_ResNum although the name says SGW — confirm
        self.md_sgw_config_pre = np.zeros((self.m_ResNum,))# (1,self.m_ResNum)
        # Set new SGW reconfig vector with all 1
        self.md_sgw_config_new = np.ones((self.m_SgwNum,)) # (1,self.m_SgwNum)
        # Set new MUX reconfig vector with all 1
        self.md_mux_config_new = np.ones((self.m_MuxNum,)) # (1,self.m_MuxNum)
        # Initial SGW load
        self.m_load_update = np.zeros((self.m_ResNum,)) # (1,self.m_ResNum)
        self.f_init_sdiag()
def f_init_sdiag(self):
if not(self.i_sdiag):
self.m_sdiag = 0.001 / max(1,sum(self.i_traffic_in))
else:
self.m_sdiag = self.i_sdiag
def f_init_weight(self):
w=np.shape(self.i_Weight)
t1 = np.ones(self.m_EnbNum,)
if (w[0]==1) and (w[1]==1):
self.m_WeightPow = (1 - self.i_Weight[0,0]) * t1
#print self.m_WeightPow
self.m_WeightSgw = self.i_Weight[0,0] / 2 * t1
self.m_WeightMux = self.i_Weight[0,0] / 2 * t1
elif (w(1)==self.m_EnbNum) and (w(2)==3):
self.m_WeightPow = self.i_Weight[:,0]
self.m_WeightSgw = self.i_Weight[:,1]
self.m_WeightMux = self.i_Weight[:,2]
def f_init_methodMode(self): ########################################## to be recalled by outer loop
# init MUX LB enable
if self.io_method_mode == 'LBMUX':
self.m_lbMux_en = 1
# init both MUX LB enable and LB variance norm
if self.io_method_mode=='LBMUX_NVAR':
self.m_lbMux_en = 1
self.m_varNorm_en = 1
    def f_init_norm(self):
        """Build the active resource map and all normalisation factors."""
        # form active RES-route map A based on md_res_config_new
        self.f_form_active_map()
        # get active RES cap
        self.m_RES_cap = self.md_res_config_new * self.i_RES_cap # 1xN
        # Get norm factor for power
        self.f_init_pow_norm()
        # Get updated ideal load ratio for SGW and MUX
        self.f_init_lb_norm()
        # Get norm factor for var
        self.f_init_delay_norm()
    def f_init_pow_norm(self):
        """Compute the power normaliser md_pnorm according to io_norm_mode
        ('mean' is the default; 'max' uses the raw max-power vector)."""
        if not(self.io_norm_mode):
            self.io_norm_mode = 'mean'
        # get power normalisation term
        #self.md_max_pow = self.i_RES_maxpow .* self.md_res_config_new
        if self.m_res_appox_en:
            # approximate max power at the target utilisation ratio
            self.md_max_pow = (self.m_RES_peff_approx * self.md_res_config_new *
                               self.i_RES_cap * self.i_RES_ratio)
        else:
            self.md_max_pow = self.i_RES_maxpow * self.md_res_config_new
        if self.io_norm_mode=='mean':
            self.m_Pratio = 1.0 # 0.8 #
            # self.md_pnorm = sum(self.md_max_pow) / sum(self.md_res_config_new) * self.m_Pratio
            pow_scale = 1 #sum(self.md_sgw_config_new) / sum(self.md_res_config_new) # ???? not clear scale by sum(self.md_sgw_config_new)
            # if self.m_res_appox_en
            # pow_scale = self.i_RES_ratio
            #print self.m_WeightPow
            # power-only weighting (all weights ~1) gets an extra p_weight scale
            if min(self.m_WeightPow.flatten()) < (1 - 0.001):
                self.md_pnorm = sum(self.md_max_pow * pow_scale)
            else:
                if self.io_scale_mode ==1:
                    p_weight = 0.1 * sum(self.i_traffic_in) / self.m_EnbNum
                    # p_weight = p_weight + 0.02
                else:
                    p_weight = 1.0
                #print p_weight, pow_scale
                self.md_pnorm = p_weight*sum(self.md_max_pow)*pow_scale
        elif self.io_norm_mode=='max':
            self.md_pnorm = self.i_RES_maxpow
        else:
            raise Exception('Incorrect SGW normalisation mode')
        # scale norm weights to proper level
        self.md_pnorm = self.md_pnorm * self.m_normScale
        print 'True pow normaliser = %2.5f ' %self.md_pnorm
    def f_init_lb_norm(self):
        """Compute the SGW load-balancing normaliser; the variance normaliser
        md_var_norm is only produced when m_varNorm_en is set.
        NOTE(review): md_new_SgwNum is set outside this view - confirm."""
        m_new_SGW_cap = self.m_RES_cap[self.m_sgw_start : self.m_mux_start]
        self.md_C_sys_sgw = sum(m_new_SGW_cap)
        self.m_ideal_lb_sgw = self.m_throughput / self.md_C_sys_sgw #m_throughput as T calculated in f_check
        if self.m_varNorm_en == 0:
            return
        # self.md_var_norm = ((1.0 - self.m_ideal_lb_sgw).^2
        # + (self.md_new_SgwNum-1)*self.m_ideal_lb_sgw.^2)
        # ./ self.md_new_SgwNum
        # average number of SGWs per eNB drives the variance weight
        sgw_weight = np.mean(np.sum(self.m_EnbSgwMat,1))
        # sgw_weight = sgw_weight .* 30 # debug purpose !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        var_weight = sgw_weight / self.md_new_SgwNum
        self.md_var_norm = var_weight * self.m_ideal_lb_sgw**2
        self.md_var_norm = self.md_var_norm * self.m_normScale
        print 'True var normaliser = %2.5f ' %self.md_var_norm
    def f_init_delay_norm(self):
        """Compute the MUX delay normaliser md_MUX_ntime (only when MUX load
        balancing is enabled)."""
        if self.m_lbMux_en == 1:
            m_new_MUX_cap = self.m_RES_cap[self.m_mux_start : self.m_link_start]
            self.md_C_sys_mux = sum(m_new_MUX_cap)
            # self.m_ideal_lb_mux = self.m_throughput / self.md_C_sys_mux
            if self.m_res_appox_en:
                mux_start = self.m_mux_start
                mux_end = self.m_link_start
                mux_ratio = self.i_RES_ratio[mux_start:mux_end]
                # per-MUX residual-capacity delay at the target ratio
                self.md_MUX_ntime = 1.0 / (self.m_MUX_cap * (1.0 - mux_ratio)) # 1xM
                #self.md_MUX_ntime = self.md_MUX_ntime * 2
            else:
                # system-wide residual capacity
                self.md_MUX_ntime = 1.0 / (self.md_C_sys_mux - self.m_throughput)
            # Get number of average routes over all users
            #routeNumEnb_weight = mean(sum(self.i_EnbRouteMat,1))
            routeNumEnb_weight = max(np.sum(self.i_EnbRouteMat,1))
            #print routeNumEnb_weight
            # converge for 0.01 and BsAggC=1
            self.md_MUX_ntime = self.md_MUX_ntime * routeNumEnb_weight # 10 ###!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
            self.md_MUX_ntime = self.md_MUX_ntime * self.m_normScale
            print 'True mux normaliser = ', self.md_MUX_ntime
def f_init_userSysCap(self):
if self.i_LB_mode=='user':
self.m_userSgwSysCap = np.dot(self.m_SGW_cap, self.m_EnbSgwMat.transpose()) #1xI * (JxI)'-->1xJ
self.m_userMuxSysCap = np.dot(self.m_MUX_cap, self.m_EnbMuxMat.transpose()) #1xM * (JxM)'-->1xJ
def f_calculate_loadUser(self):
new_sgw_load = self.m_load_update[self.m_sgw_start : self.m_mux_start] #1xI
self.m_sgw_user_load = np.dot(new_sgw_load, self.m_EnbSgwMat.transpose()) #1xI * (JxI)'-->1xJ
self.m_sgw_user_raio = self.m_sgw_user_load / self.m_userSgwSysCap # 1xJ
def f_init_loop(self):
# get iteration limit
if not(self.io_iter_limit):
self.m_iter_limit = self.mc_max_iteration
else:
self.m_iter_limit = self.io_iter_limit
def f_init_plot(self):
# Initial fig dir
self.m_fig_dir = 'results/MatFig/'
# get load ratio threshold
if not(self.io_load_ratio_th):
self.m_load_ratio_th = 0.0
else:
self.m_load_ratio_th = self.io_load_ratio_th
def f_init_out(self):
self.o_traffic = np.zeros((self.m_EnbNum, self.m_RtNum))
self.o_Amatrix = np.zeros((self.m_EnbNum, self.m_RtNum))
self.o_SgwLoadRatio = np.zeros((1,self.m_SgwNum))
self.o_RES_status = np.ones((1,self.m_ResNum))
self.o_powRes_enb = np.zeros(self.m_EnbNum,)
def f_form_active_map(self):
# form active RES-route map A based on md_res_config_new
#bsxfun(@times,self.md_res_config_new',self.i_ResRouteMat) in matlab
self.m_ResRouteMat = self.i_ResRouteMat * self.md_res_config_new.flatten()[:,None]#.T --> .transpose() # NxR b* Nx1-->NxR (python index)
# get active route from active SGW-route mat A(I),DIM: I x R
self.m_SgwRouteMat = self.m_ResRouteMat[self.m_sgw_start:self.m_mux_start,:] # IxR, no need -1 as in matlab
# get active route from active MUX-route mat A(M),DIM: M x R
self.m_MuxRouteMat = self.m_ResRouteMat[self.m_mux_start:self.m_link_start,:] # MxR, no need -1 as in matlab
########%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# form equality constrain: Bd=d^{in}, F with DIM: J x R
ActiveSgwRtVect = (np.sum(self.m_SgwRouteMat,0)>0).astype(float) # sum over SGW, 1xR
# get active route path from active link & MUX
MuxLinkRouteMat = self.m_ResRouteMat[self.m_mux_start:self.m_ResNum,:] #(ML)xR
ActiveMuxLinkRtVect = (np.sum(MuxLinkRouteMat,0)>0).astype(float) # sum over MUX_link, 1xR
# get upated active route: Dim: 1 x R
ActiveRtVect = ActiveSgwRtVect * ActiveMuxLinkRtVect # 1xR
# get B(t) as Aeq, DIM: JxR
#bsxfun(@times,ActiveRtVect,self.i_EnbRouteMat) in matlab
self.m_EnbRouteMat = self.i_EnbRouteMat * ActiveRtVect # JxR b* | |
4lT2OMZKpnxc_F1_4yDJFcqb5CiDSmA-psB2k0JtjxAj4UPI61oONK7z
# zFIu4gBfjJCndsZfdvG7h8wGjV98QhrKEnR7xKZ3KCr0_qR1B-gxpNk3
# xWU",
# "tag": "DKW7jrb4WaRSNfbXVPlT5g"
# }
# Figure 149: General JWE JSON Serialization
# Miller Informational [Page 78]
# RFC 7520 JOSE Cookbook May 2015
# The resulting JWE object using the flattened JWE JSON Serialization:
# {
# "protected": "<KEY>",
# "encrypted_key": "<KEY>",
# "iv": "gz6NjyEFNm_vm8Gj6FwoFQ",
# "ciphertext": "Jf5p9-ZhJlJy_IQ_byKFmI0Ro7w7G1QiaZpI8OaiVgD8E
# qoDZHyFKFBupS8iaEeVIgMqWmsuJKuoVgzR3YfzoMd3GxEm3VxNhzWyW
# tZKX0gxKdy6HgLvqoGNbZCzLjqcpDiF8q2_62EVAbr2uSc2oaxFmFuIQ
# HLcqAHxy51449xkjZ7ewzZaGV3eFqhpco8o4DijXaG5_7kp3h2cajRfD
# gymuxUbWgLqaeNQaJtvJmSMFuEOSAzw9Hdeb6yhdTynCRmu-kqtO5Dec
# 4lT2OMZKpnxc_F1_4yDJFcqb5CiDSmA-psB2k0JtjxAj4UPI61oONK7z
# zFIu4gBfjJCndsZfdvG7h8wGjV98QhrKEnR7xKZ3KCr0_qR1B-gxpNk3
# xWU",
# "tag": "NvBveHr_vonkvflfnUrmBQ"
# }
# Figure 150: Flattened JWE JSON Serialization
# 5.8. Key Wrap Using AES-KeyWrap with AES-GCM
# The following example illustrates content encryption using the
# "A128KW" (AES-128-KeyWrap) key encryption algorithm and the "A128GCM"
# (AES-128-GCM) content encryption algorithm.
# Note that whitespace is added for readability as described in
# Section 1.1.
# 5.8.1. Input Factors
# The following are supplied before beginning the encryption process:
# o Plaintext content; this example uses the content from Figure 72.
# o AES symmetric key; this example uses the key from Figure 151.
# o "alg" parameter of "A128KW".
# o "enc" parameter of "A128GCM".
# Miller Informational [Page 79]
# RFC 7520 JOSE Cookbook May 2015
# {
# "kty": "oct",
# "kid": "81b20965-8332-43d9-a468-82160ad91ac8",
# "use": "enc",
# "alg": "A128KW",
# "k": "GZy6sIZ6wl9NJOKB-jnmVQ"
# }
# Figure 151: AES 128-Bit Key
# 5.8.2. Generated Factors
# The following are generated before encrypting:
# o AES symmetric key as the Content Encryption Key; this example uses
# the key from Figure 152.
# o Initialization Vector; this example uses the Initialization Vector
# from Figure 153.
# aY5_Ghmk9KxWPBLu_glx1w
# Figure 152: Content Encryption Key, base64url-encoded
# Qx0pmsDa8KnJc9Jo
# Figure 153: Initialization Vector, base64url-encoded
# 5.8.3. Encrypting the Key
# Performing the key encryption operation over the CEK (Figure 152)
# with the AES symmetric key (Figure 151) produces the following
# Encrypted Key:
# CBI6oDw8MydIx1IBntf_lQcw2MmJKIQx
# Figure 154: Encrypted Key, base64url-encoded
# 5.8.4. Encrypting the Content
# The following is generated before encrypting the content:
# o JWE Protected Header; this example uses the header from
# Figure 155, encoded to base64url [RFC4648] as Figure 156.
# Miller Informational [Page 80]
# RFC 7520 JOSE Cookbook May 2015
# {
# "alg": "A128KW",
# "kid": "81b20965-8332-43d9-a468-82160ad91ac8",
# "enc": "A128GCM"
# }
# Figure 155: JWE Protected Header JSON
# <KEY>
# 04MjE2MGFkOTFhYzgiLCJlbmMiOiJBMTI4R0NNIn0
# Figure 156: JWE Protected Header, base64url-encoded
# Performing the content encryption over the Plaintext (Figure 72) with
# the following:
# o CEK (Figure 152);
# o Initialization Vector (Figure 153); and
# o JWE Protected Header (Figure 156) as authenticated data
# produces the following:
# o Ciphertext from Figure 157.
# o Authentication Tag from Figure 158.
# AwliP-KmWgsZ37BvzCefNen6VTbRK3QMA4TkvRkH0tP1bTdhtFJgJxeVmJkLD6
# 1A1hnWGetdg11c9ADsnWgL56NyxwSYjU1ZEHcGkd3EkU0vjHi9gTlb90qSYFfe
# F0LwkcTtjbYKCsiNJQkcIp1yeM03OmuiYSoYJVSpf7ej6zaYcMv3WwdxDFl8RE
# wOhNImk2Xld2JXq6BR53TSFkyT7PwVLuq-1GwtGHlQeg7gDT6xW0JqHDPn_H-p
# uQsmthc9Zg0ojmJfqqFvETUxLAF-KjcBTS5dNy6egwkYtOt8EIHK-oEsKYtZRa
# a8Z7MOZ7UGxGIMvEmxrGCPeJa14slv2-gaqK0kEThkaSqdYw0FkQZF
# Figure 157: Ciphertext, base64url-encoded
# ER7MWJZ1FBI_NKvn7Zb1Lw
# Figure 158: Authentication Tag, base64url-encoded
# Miller Informational [Page 81]
# RFC 7520 JOSE Cookbook May 2015
# 5.8.5. Output Results
# The following compose the resulting JWE object:
# o JWE Protected Header (Figure 156)
# o Encrypted Key (Figure 154)
# o Initialization Vector (Figure 153)
# o Ciphertext (Figure 157)
# o Authentication Tag (Figure 158)
# The resulting JWE object using the JWE Compact Serialization:
# <KEY>
# <KEY>
# .
# CBI6oDw8MydIx1IBntf_lQcw2MmJKIQx
# .
# Qx0pmsDa8KnJc9Jo
# .
# AwliP-KmWgsZ37BvzCefNen6VTbRK3QMA4TkvRkH0tP1bTdhtFJgJxeVmJkLD6
# 1A1hnWGetdg11c9ADsnWgL56NyxwSYjU1ZEHcGkd3EkU0vjHi9gTlb90qSYFfe
# F0LwkcTtjbYKCsiNJQkcIp1yeM03OmuiYSoYJVSpf7ej6zaYcMv3WwdxDFl8RE
# wOhNImk2Xld2JXq6BR53TSFkyT7PwVLuq-1GwtGHlQeg7gDT6xW0JqHDPn_H-p
# uQsmthc9Zg0ojmJfqqFvETUxLAF-KjcBTS5dNy6egwkYtOt8EIHK-oEsKYtZRa
# a8Z7MOZ7UGxGIMvEmxrGCPeJa14slv2-gaqK0kEThkaSqdYw0FkQZF
# .
# ER7MWJZ1FBI_NKvn7Zb1Lw
# Figure 159: JWE Compact Serialization
# Miller Informational [Page 82]
# RFC 7520 JOSE Cookbook May 2015
# The resulting JWE object using the general JWE JSON Serialization:
# {
# "recipients": [
# {
# "encrypted_key": "<KEY>"
# }
# ],
# "protected": "<KEY>
# <KEY>
# 0",
# "iv": "Qx0pmsDa8KnJc9Jo",
# "ciphertext": "AwliP-KmWgsZ37BvzCefNen6VTbRK3QMA4TkvRkH0tP1b
# TdhtFJgJxeVmJkLD61A1hnWGetdg11c9ADsnWgL56NyxwSYjU1ZEHcGk
# d3EkU0vjHi9gTlb90qSYFfeF0LwkcTtjbYKCsiNJQkcIp1yeM03OmuiY
# SoYJVSpf7ej6zaYcMv3WwdxDFl8REwOhNImk2Xld2JXq6BR53TSFkyT7
# PwVLuq-1GwtGHlQeg7gDT6xW0JqHDPn_H-puQsmthc9Zg0ojmJfqqFvE
# TUxLAF-KjcBTS5dNy6egwkYtOt8EIHK-oEsKYtZRaa8Z7MOZ7UGxGIMv
# EmxrGCPeJa14slv2-gaqK0kEThkaSqdYw0FkQZF",
# "tag": "ER7MWJZ1FBI_NKvn7Zb1Lw"
# }
# Figure 160: General JWE JSON Serialization
# The resulting JWE object using the flattened JWE JSON Serialization:
# {
# "protected": "<KEY>
# 0",
# "encrypted_key": "<KEY>",
# "iv": "Qx0pmsDa8KnJc9Jo",
# "ciphertext": "AwliP-KmWgsZ37BvzCefNen6VTbRK3QMA4TkvRkH0tP1b
# TdhtFJgJxeVmJkLD61A1hnWGetdg11c9ADsnWgL56NyxwSYjU1ZEHcGk
# d3EkU0vjHi9gTlb90qSYFfeF0LwkcTtjbYKCsiNJQkcIp1yeM03OmuiY
# SoYJVSpf7ej6zaYcMv3WwdxDFl8REwOhNImk2Xld2JXq6BR53TSFkyT7
# PwVLuq-1GwtGHlQeg7gDT6xW0JqHDPn_H-puQsmthc9Zg0ojmJfqqFvE
# TUxLAF-KjcBTS5dNy6egwkYtOt8EIHK-oEsKYtZRaa8Z7MOZ7UGxGIMv
# EmxrGCPeJa14slv2-gaqK0kEThkaSqdYw0FkQZF",
# "tag": "ER7MWJZ1FBI_NKvn7Zb1Lw"
# }
# Figure 161: Flattened JWE JSON Serialization
# Miller Informational [Page 83]
# RFC 7520 JOSE Cookbook May 2015
# 5.9. Compressed Content
# This example illustrates encrypting content that is first compressed.
# It reuses the AES symmetric key, key encryption algorithm, and
# content encryption algorithm from Section 5.8.
# Note that whitespace is added for readability as described in
# Section 1.1.
# 5.9.1. Input Factors
# The following are supplied before beginning the encryption process:
# o Plaintext content; this example uses the content from Figure 72.
# o Recipient encryption key; this example uses the key from
# Figure 151.
# o Key encryption algorithm; this example uses "A128KW".
# o Content encryption algorithm; this example uses "A128GCM".
# o "zip" parameter of "DEF".
# 5.9.2. Generated Factors
# The following are generated before encrypting:
# o Compressed Plaintext from the original Plaintext content;
# compressing Figure 72 using the DEFLATE [RFC1951] algorithm
# produces the compressed Plaintext from Figure 162.
# o AES symmetric key as the Content Encryption Key (CEK); this
# example uses the key from Figure 163.
# o Initialization Vector; this example uses the Initialization Vector
# from Figure 164.
# bY_BDcIwDEVX-QNU3QEOrIA4pqlDokYxchxVvbEDGzIJbioOSJwc-f___HPjBu
# 8KVFpVtAplVE1-wZo0YjNZo3C7R5v72pV5f5X382VWjYQpqZKAyjziZOr2B7kQ
# PSy6oZIXUnDYbVKN4jNXi2u0yB7t1qSHTjmMODf9QgvrDzfTIQXnyQRuUya4zI
# WG3vTOdir0v7BRHFYWq3k1k1A_gSDJqtcBF-GZxw8
# Figure 162: Compressed Plaintext, base64url-encoded
# Miller Informational [Page 84]
# RFC 7520 JOSE Cookbook May 2015
# hC-MpLZSuwWv8sexS6ydfw
# Figure 163: Content Encryption Key, base64url-encoded
# p9pUq6XHY0jfEZIl
# Figure 164: Initialization Vector, base64url-encoded
# 5.9.3. Encrypting the Key
# Performing the key encryption operation over the CEK (Figure 163)
# with the AES symmetric key (Figure 151) produces the following
# Encrypted Key:
# <KEY>
# Figure 165: Encrypted Key, base64url-encoded
# 5.9.4. Encrypting the Content
# The following is generated before encrypting the content:
# o JWE Protected Header; this example uses the header from
# Figure 166, encoded to base64url [RFC4648] as Figure 167.
# {
# "alg": "A128KW",
# "kid": "81b20965-8332-43d9-a468-82160ad91ac8",
# "enc": "A128GCM",
# "zip": "DEF"
# }
# Figure 166: JWE Protected Header JSON
# <KEY>OC
# 04MjE2MGFkOTFhYzgiLCJlbmMiOiJBMTI4R0NNIiwiemlwIjoiREVGIn0
# Figure 167: JWE Protected Header, base64url-encoded
# Miller Informational [Page 85]
# RFC 7520 JOSE Cookbook May 2015
# Performing the content encryption operation over the compressed
# Plaintext (Figure 162, encoded as an octet string) with the
# following:
# o CEK (Figure 163);
# o Initialization Vector (Figure 164); and
# o JWE Protected Header (Figure 167) as authenticated data
# produces the following:
# o Ciphertext from Figure 168.
# o Authentication Tag from Figure 169.
# HbDtOsdai1oYziSx25KEeTxmwnh8L8jKMFNc1k3zmMI6VB8hry57tDZ61jXyez
# SPt0fdLVfe6Jf5y5-JaCap_JQBcb5opbmT60uWGml8blyiMQmOn9J--XhhlYg0
# m-BHaqfDO5iTOWxPxFMUedx7WCy8mxgDHj0aBMG6152PsM-w5E_o2B3jDbrYBK
# hpYA7qi3AyijnCJ7BP9rr3U8kxExCpG3mK420TjOw
# Figure 168: Ciphertext, base64url-encoded
# VILuUwuIxaLVmh5X-T7kmA
# Figure 169: Authentication Tag, base64url-encoded
# 5.9.5. Output Results
# The following compose the resulting JWE object:
# o JWE Protected Header (Figure 167)
# o Encrypted Key (Figure 165)
# o Initialization Vector (Figure 164)
# o Ciphertext (Figure 168)
# o Authentication Tag (Figure 169)
# Miller Informational [Page 86]
# RFC 7520 JOSE Cookbook May 2015
# The resulting JWE object using the JWE Compact Serialization:
# <KEY>
# <KEY>
# .
# <KEY>
# .
# p9pUq6XHY0jfEZIl
# .
# HbDtOsdai1oYziSx25KEeTxmwnh8L8jKMFNc1k3zmMI6VB8hry57tDZ61jXyez
# SPt0fdLVfe6Jf5y5-JaCap_JQBcb5opbmT60uWGml8blyiMQmOn9J--XhhlYg0
# m-BHaqfDO5iTOWxPxFMUedx7WCy8mxgDHj0aBMG6152PsM-w5E_o2B3jDbrYBK
# hpYA7qi3AyijnCJ7BP9rr3U8kxExCpG3mK420TjOw
# .
# VILuUwuIxaLVmh5X-T7kmA
# Figure 170: JWE Compact Serialization
# The resulting JWE object using the general JWE JSON Serialization:
# {
# "recipients": [
# {
# "encrypted_key": "<KEY>"
# }
# ],
# "protected": "<KEY> <KEY>",
# "iv": "p9pUq6XHY0jfEZIl",
# "ciphertext": "HbDtOsdai1oYziSx25KEeTxmwnh8L8jKMFNc1k3zmMI6V
# B8hry57tDZ61jXyezSPt0fdLVfe6Jf5y5-JaCap_JQBcb5opbmT60uWG
# ml8blyiMQmOn9J--XhhlYg0m-BHaqfDO5iTOWxPxFMUedx7WCy8mxgDH
# j0aBMG6152PsM-w5E_o2B3jDbrYBKhpYA7qi3AyijnCJ7BP9rr3U8kxE
# xCpG3mK420TjOw",
# "tag": "VILuUwuIxaLVmh5X-T7kmA"
# }
# Figure 171: General JWE JSON Serialization
# Miller Informational [Page 87]
# RFC 7520 JOSE Cookbook May 2015
# The resulting JWE object using the flattened JWE JSON Serialization:
# {
# "protected": "<KEY>
# <KEY>
# wiemlwIjoiREVGIn0",
# "encrypted_key": "<KEY>",
# "iv": "p9pUq6XHY0jfEZIl",
# "ciphertext": "HbDtOsdai1oYziSx25KEeTxmwnh8L8jKMFNc1k3zmMI6V
# B8hry57tDZ61jXyezSPt0fdLVfe6Jf5y5-JaCap_JQBcb5opbmT60uWG
# ml8blyiMQmOn9J--XhhlYg0m-BHaqfDO5iTOWxPxFMUedx7WCy8mxgDH
# j0aBMG6152PsM-w5E_o2B3jDbrYBKhpYA7qi3AyijnCJ7BP9rr3U8kxE
# xCpG3mK420TjOw",
# "tag": "VILuUwuIxaLVmh5X-T7kmA"
# }
# Figure 172: Flattened JWE JSON Serialization
# 5.10. Including Additional Authenticated Data
# This example illustrates encrypting content that includes additional
# authenticated data. As this example includes an additional top-level
# property not present in the JWE Compact Serialization, only the
# flattened JWE JSON Serialization and general JWE JSON Serialization
# are possible.
# Note that whitespace is added for readability as described in
# Section 1.1.
# 5.10.1. Input Factors
# The following are supplied before beginning the encryption process:
# o Plaintext content; this example uses the content from Figure 72.
# o Recipient encryption key; this example uses the key from
# Figure 151.
# o Key encryption algorithm; this example uses "A128KW".
# o Content encryption algorithm; this example uses "A128GCM".
# o Additional Authenticated Data; this example uses a vCard [RFC7095]
# from Figure 173, serialized to UTF-8.
# Miller Informational [Page 88]
# RFC 7520 JOSE Cookbook May 2015
# [
# "vcard",
# [
# [ "version", {}, "text", "4.0" ],
# [ "fn", {}, "text", "<NAME>" ],
# [ "n", {},
# "text", [
# "Brandybuck", "Meriadoc", "Mr.", ""
# ]
# ],
# [ "bday", {}, "text", "TA 2982" ],
# [ "gender", {}, "text", "M" ]
# ]
# ]
# Figure 173: Additional Authenticated Data, in JSON Format
# NOTE: Whitespace between JSON values was added for readability.
# 5.10.2. Generated Factors
# The following are generated before encrypting:
# o AES symmetric key as the Content Encryption Key (CEK); this
# example uses the key from Figure 174.
# o Initialization Vector; this example uses the Initialization Vector
# from Figure | |
import numpy as np
import math
import time
class PulsedProgramming:
"""
This class contains all the parameters for the Pulsed programming on a memristor model.
After initializing the parameters values, start the simulation with self.simulate()
Parameters
----------
max_voltage : float
The max voltage (V) of a pulse. If 0, no limit is apply.
pulse_algorithm : string
The pulse algorithm use. Those are the available choices (Sources in the methods). Default is 'fabien'.
'fabien' : Use fabien_convergence()
'log' : Use a log_convergence()
tolerance : float
The tolerance_value input is an int that represent the absolute tolerance (Ohm) from the res_states the
pulsed programming will find. Smaller is more precise, but too small can never converge.
is_relative_tolerance : bool
If true, the tolerance_value would be in percentage instead of (Ohm). ex: 10 : if true, 10% : if false, 10 Ohm
variability_write : iterable[float]
A gaussian distribution with (mu=0, sigma=variance_write)
index_variability : int
Index of the current variability. If over 1000, reset to 0.
variance_write : float
Variance of the gaussian distribution on the memristor write. See variability.
graph_resistance : List[Union[float, int]]
Contains all resistance of the simulation. It's used in the creation of plots.
graph_voltages : List[Union[float, int]]
Contains all voltages of the simulation. It's used in the creation of plots.
number_of_reading : int
The number of correct value read before passing to the next state.
max_pulse : int
The max number of pulses.
"""
def __init__(self, memristor_simulation, pulse_algorithm='fabien', max_voltage=0, tolerance=0, is_relative_tolerance=False,
variance_write=0, number_of_reading=1, max_pulse=20000, verbose=False, plot_memristor=0):
self.memristor_simulation = memristor_simulation
self.pulse_algorithm = pulse_algorithm
self.tolerance = tolerance
self.max_voltage = max_voltage
self.is_relative_tolerance = is_relative_tolerance
self.variance_write = variance_write
self.number_of_reading = number_of_reading
self.max_pulse = max_pulse
self.verbose = verbose
self.voltage_output = {}
self.plot_memristor = plot_memristor
self.index_variability = 0
self.variability_write = np.random.normal(0, variance_write, 1000)
self.graph_resistance = []
self.graph_voltages = []
def print(self):
print(self.pulse_algorithm)
print(self.tolerance)
print(self.max_voltage)
print(self.voltage_output)
print(self.is_relative_tolerance)
print(self.variance_write)
print(self.number_of_reading)
print(self.max_pulse)
print(self.verbose)
print(np.array(self.graph_resistance))
print(np.array(self.graph_voltages))
def write_resistance(self, memristor, voltage, t_pulse):
"""
This function change the resistance of the memristor by applying a voltage fo t_pulse.
Parameters
----------
memristor : Memristor
The memristor wrote.
voltage : float
The voltage (V) applied.
t_pulse : float
The time of the writing pulse. (s)
Returns
----------
"""
t = int(t_pulse / memristor.time_series_resolution)
signal = [voltage] * t
memristor.simulate(signal)
self.index_variability = self.index_variability + 1 if self.index_variability < len(self.variability_write) - 1 else 0
memristor.g = 1 / (1 / memristor.g + (1 / memristor.g) * self.variability_write[self.index_variability])
    def find_number_iteration(self):
        """
        Find the number of iterations needed to create the resistance list,
        depending on the distribution type.
        Returns
        ----------
        number_iteration : int
            number of iteration
        """
        # NOTE(review): neither self.distribution_type nor self.circuit is assigned
        # anywhere in this class's __init__ — this looks like legacy code from an
        # older class layout and would raise AttributeError if called; confirm
        # before relying on it.
        number_iteration = 1
        if self.distribution_type == 'full_spread':
            number_iteration = self.circuit.number_of_memristor
        return number_iteration
def simulate(self, voltages_target, precision=None):
"""
This function will set the memristors to the resistance wanted in each voltages_target package.
Parameters
----------
voltages_target : dict
dict with keys as voltage and package as list of resistance
precision : list
[[macro_tune, is_relative_variability], [fine_tune, is_relative_variability]] for the balance() method.
"""
if self.pulse_algorithm != 'fabien' and self.pulse_algorithm != 'log':
raise(Exception(f'Pulse algorithm not supported: {self.pulse_algorithm}'))
# voltages_target_list = list(voltages_target.keys())
# resolution = voltages_target_list[1] - voltages_target_list[0]
index = 1
conf_done = 0
start_time = time.time()
diff_voltage = {}
for v in list(voltages_target.keys()):
if index == 1:
start_time_ = time.time()
self.simulate_list_memristor(voltages_target[v], precision)
self.voltage_output[self.memristor_simulation.circuit.current_v_out()] = [i.read() for i in self.memristor_simulation.circuit.list_memristor]
diff_voltage[abs(v - self.memristor_simulation.circuit.current_v_out())] = [round(1 / np.sum([1/res for res in voltages_target[v]]), 4), round(1 / self.memristor_simulation.circuit.current_conductance(), 4)]
if index == 50 and self.verbose:
conf_done += index
print(f'Conf done: {conf_done}\tTook: {round(time.time() - start_time_, 2)} s\tTime left: {round((time.time() - start_time_) * (len(voltages_target.keys()) - conf_done) / 50, 2)} s')
index = 0
index += 1
if self.verbose:
print(f'Total time: {time.time() - start_time}')
print()
for key in diff_voltage.keys():
print(f'{round(key*1000, 4)} mV\t{diff_voltage.get(key)[0]}\t{diff_voltage.get(key)[1]} (Ohm)')
print(f'Mean diff: {np.mean(list(diff_voltage.keys()))}')
print(f'Min diff: {np.min(list(diff_voltage.keys()))}\tMax diff: {np.max(list(diff_voltage.keys()))}')
return self.voltage_output
def simulate_list_memristor(self, list_resistance, precision):
"""
This function will set the memristors to the resistance wanted list_resistance.
Parameters
----------
list_resistance : list
list of the wanted resistance for the memristor.
precision : list
[[macro_tune, is_relative_variability], [fine_tune, is_relative_variability]] for the balance() method.
"""
for i in range(self.memristor_simulation.circuit.number_of_memristor):
plot = True if i == self.plot_memristor else False
if self.pulse_algorithm == 'fabien':
self.fabien_convergence(self.memristor_simulation.circuit.list_memristor[i], list_resistance[i], plot=plot)
elif self.pulse_algorithm == 'log':
self.log_convergence(self.memristor_simulation.circuit.list_memristor[i], list_resistance[i], plot=plot)
self.balance(list_resistance, precision)
def balance(self, list_resistance, precision):
"""
This function will set the memristors to the resistance wanted list_resistance.
Parameters
----------
list_resistance : list
list of the wanted resistance for the memristor.
precision : list
[[macro_tune, is_relative_variability], [fine_tune, is_relative_variability]] for the balance() method. If 0,
won't do it.
"""
final_g = np.sum([1 / i for i in list_resistance])
delta_g = final_g - self.memristor_simulation.circuit.current_conductance()
for i in range(self.memristor_simulation.circuit.number_of_memristor):
plot = True if -(i+1) == self.plot_memristor else False
final_res = 1 / (self.memristor_simulation.circuit.list_memristor[-(i+1)].g + delta_g)
if self.memristor_simulation.circuit.memristor_model.r_on <= final_res <= self.memristor_simulation.circuit.memristor_model.r_off:
p_tolerance, p_relative = self.tolerance, self.is_relative_tolerance
# print(f'{final_res}\t{1 / self.memristor_simulation.circuit.list_memristor[-(i+1)].g}\t{final_g - self.memristor_simulation.circuit.current_conductance()}')
if precision[0][0] != 0 or precision is not None:
self.tolerance, self.is_relative_tolerance = precision[0][0], precision[0][1]
self.fabien_convergence(self.memristor_simulation.circuit.list_memristor[-(i+1)], final_res, plot)
# print(f'{final_res}\t{1 / self.memristor_simulation.circuit.list_memristor[-(i+1)].g}\t{final_g - self.memristor_simulation.circuit.current_conductance()}')
if precision[1][0] != 0 or precision is not None:
self.tolerance, self.is_relative_tolerance = precision[1][0], precision[1][1]
self.small_convergence(self.memristor_simulation.circuit.list_memristor[-(i+1)], final_res, plot)
# print(f'{final_res}\t{1 / self.memristor_simulation.circuit.list_memristor[-(i+1)].g}\t{final_g - self.memristor_simulation.circuit.current_conductance()}')
self.tolerance, self.is_relative_tolerance = p_tolerance, p_relative
break
    def small_convergence(self, memristor, target_res, plot=False):
        """
        Fine-tuning pass: pulse the memristor toward target_res using a very
        small voltage increment (1 mV per pulse) starting from +/-0.1 V.
        Parameters
        ----------
        memristor : Memristor
            The memristor object
        target_res : float
            The target resistance
        plot : bool
            If True, record every pulse/read in graph_voltages / graph_resistance.
        """
        step = 0.001
        positive_voltage = voltage_set = 0.1
        negative_voltage = voltage_reset = -0.1
        # Tolerance band around the target, relative (%) or absolute (Ohm).
        if self.is_relative_tolerance:
            res_max = target_res + self.tolerance * target_res / 100
            res_min = target_res - self.tolerance * target_res / 100
        else:
            res_max = target_res + self.tolerance
            res_min = target_res - self.tolerance
        # Offsets so plot indices continue from any previously recorded data.
        start_len_res = len(self.graph_resistance)
        start_len_v = len(self.graph_voltages)
        counter = 0
        action = 'read'
        flag_finish = False
        counter_read = 0
        while not flag_finish:
            current_res = memristor.read()
            if res_min <= current_res <= res_max:
                # In band: count good reads (0.2 V marks a read in the plot).
                counter_read += 1
                if plot:
                    action = 'read'
                    self.graph_voltages.append([0.2, counter + start_len_v, action])
            elif current_res < res_min:
                # Resistance too low -> reset (negative) pulse, amplitude grows each miss.
                if self.max_voltage != 0:
                    negative_voltage = -self.max_voltage if negative_voltage <= -self.max_voltage else negative_voltage
                self.write_resistance(memristor, negative_voltage, 200e-9)
                if plot:
                    action = 'reset'
                    self.graph_voltages.append([negative_voltage, counter + start_len_v, action])
                negative_voltage -= step
                positive_voltage = voltage_set
            elif current_res > res_max:
                # Resistance too high -> set (positive) pulse, amplitude grows each miss.
                if self.max_voltage != 0:
                    positive_voltage = self.max_voltage if positive_voltage >= self.max_voltage else positive_voltage
                self.write_resistance(memristor, positive_voltage, 200e-9)
                if plot:
                    action = 'set'
                    self.graph_voltages.append([positive_voltage, counter + start_len_v, action])
                positive_voltage += step
                negative_voltage = voltage_reset
            # Stop after number_of_reading in-band reads, or when the pulse budget runs out.
            if counter_read == self.number_of_reading:
                flag_finish = not flag_finish
            if counter >= self.max_pulse:
                flag_finish = not flag_finish
                print(f'Got max pulse {self.max_pulse}')
            if plot:
                self.graph_resistance.append([current_res, counter + start_len_res, action, flag_finish])
            counter += 1
def log_convergence(self, memristor, target_res, plot=False):
"""
This function run the pulsed programming with a variable voltage to set the target_res for the memristor.
From : https://arxiv.org/abs/2103.09931
Parameters
----------
memristor : Memristor
The memristor object
target_res : float
The target resistance
"""
positive_voltage = voltage_set = 0.5
negative_voltage = voltage_reset = -0.5
# additional parameters
min_shift = 0.005
max_shift = 0.2
a = 0.1
if self.is_relative_tolerance:
res_max = target_res + self.tolerance * target_res / 100
res_min = target_res - self.tolerance * target_res / 100
else:
res_max = target_res + self.tolerance
res_min = target_res - self.tolerance
start_len_res = len(self.graph_resistance)
start_len_v = len(self.graph_voltages)
counter = 0
action = 'read'
flag_finish = False
counter_read = 0
r_shift = 1
current_res = memristor.read()
while not flag_finish:
if res_min < current_res < res_max:
counter_read += 1
if plot:
action = 'read'
self.graph_voltages.append([0.2, counter + start_len_v, action])
elif current_res > res_max:
if r_shift < min_shift * (memristor.r_off - memristor.r_on):
positive_voltage += a * np.log10(abs(target_res - current_res) / r_shift)
elif r_shift > max_shift * (memristor.r_off - memristor.r_on):
positive_voltage = voltage_set
if self.max_voltage != 0:
positive_voltage = self.max_voltage if positive_voltage >= self.max_voltage else positive_voltage
self.write_resistance(memristor, positive_voltage, 200e-9)
if plot:
action = 'set'
self.graph_voltages.append([positive_voltage, counter + start_len_v, action])
elif current_res < res_min:
if r_shift < min_shift * (memristor.r_off - memristor.r_on):
negative_voltage -= a * np.log10(abs((target_res - current_res) / r_shift))
| |
"""Support to send and receive Telegram messages."""
from functools import partial
import importlib
import io
from ipaddress import ip_network
import logging
import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
from telegram import (
Bot,
InlineKeyboardButton,
InlineKeyboardMarkup,
ReplyKeyboardMarkup,
ReplyKeyboardRemove,
)
from telegram.error import TelegramError
from telegram.parsemode import ParseMode
from telegram.utils.request import Request
import voluptuous as vol
from homeassistant.const import (
ATTR_COMMAND,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_API_KEY,
CONF_PLATFORM,
CONF_URL,
HTTP_DIGEST_AUTHENTICATION,
)
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
# --- service-call / event payload keys ---
ATTR_DATA = "data"
ATTR_MESSAGE = "message"
ATTR_TITLE = "title"
ATTR_ARGS = "args"
ATTR_AUTHENTICATION = "authentication"
ATTR_CALLBACK_QUERY = "callback_query"
ATTR_CALLBACK_QUERY_ID = "callback_query_id"
ATTR_CAPTION = "caption"
ATTR_CHAT_ID = "chat_id"
ATTR_CHAT_INSTANCE = "chat_instance"
ATTR_DISABLE_NOTIF = "disable_notification"
ATTR_DISABLE_WEB_PREV = "disable_web_page_preview"
ATTR_EDITED_MSG = "edited_message"
ATTR_FILE = "file"
ATTR_FROM_FIRST = "from_first"
ATTR_FROM_LAST = "from_last"
ATTR_KEYBOARD = "keyboard"
ATTR_KEYBOARD_INLINE = "inline_keyboard"
ATTR_MESSAGEID = "message_id"
ATTR_MSG = "message"
ATTR_MSGID = "id"
ATTR_PARSER = "parse_mode"
ATTR_PASSWORD = "password"
ATTR_REPLY_TO_MSGID = "reply_to_message_id"
ATTR_REPLYMARKUP = "reply_markup"
ATTR_SHOW_ALERT = "show_alert"
ATTR_TARGET = "target"
ATTR_TEXT = "text"
ATTR_URL = "url"
ATTR_USER_ID = "user_id"
ATTR_USERNAME = "username"
ATTR_VERIFY_SSL = "verify_ssl"
ATTR_TIMEOUT = "timeout"
ATTR_MESSAGE_TAG = "message_tag"
# --- configuration.yaml keys ---
CONF_ALLOWED_CHAT_IDS = "allowed_chat_ids"
CONF_PROXY_URL = "proxy_url"
CONF_PROXY_PARAMS = "proxy_params"
CONF_TRUSTED_NETWORKS = "trusted_networks"
DOMAIN = "telegram_bot"
# --- service names registered under the telegram_bot domain ---
SERVICE_SEND_MESSAGE = "send_message"
SERVICE_SEND_PHOTO = "send_photo"
SERVICE_SEND_STICKER = "send_sticker"
SERVICE_SEND_ANIMATION = "send_animation"
SERVICE_SEND_VIDEO = "send_video"
SERVICE_SEND_VOICE = "send_voice"
SERVICE_SEND_DOCUMENT = "send_document"
SERVICE_SEND_LOCATION = "send_location"
SERVICE_EDIT_MESSAGE = "edit_message"
SERVICE_EDIT_CAPTION = "edit_caption"
SERVICE_EDIT_REPLYMARKUP = "edit_replymarkup"
SERVICE_ANSWER_CALLBACK_QUERY = "answer_callback_query"
SERVICE_DELETE_MESSAGE = "delete_message"
SERVICE_LEAVE_CHAT = "leave_chat"
# --- events fired on the Home Assistant bus ---
EVENT_TELEGRAM_CALLBACK = "telegram_callback"
EVENT_TELEGRAM_COMMAND = "telegram_command"
EVENT_TELEGRAM_TEXT = "telegram_text"
EVENT_TELEGRAM_SENT = "telegram_sent"
# --- parse-mode aliases accepted in config/service data ---
PARSER_HTML = "html"
PARSER_MD = "markdown"
# Telegram's published server subnets, used as the webhook trusted-network default.
# BUG FIX: the previous values ("192.168.3.11/20", "172.16.17.32/22") were
# corrupted private-range addresses with host bits set, so ip_network() raised
# ValueError at import time; restore Telegram's real ranges.
DEFAULT_TRUSTED_NETWORKS = [ip_network("149.154.160.0/20"), ip_network("91.108.4.0/22")]
# Component configuration accepted under `telegram_bot:` in configuration.yaml:
# a list of platform entries (broadcast / polling / webhooks).
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.All(
            cv.ensure_list,
            [
                vol.Schema(
                    {
                        vol.Required(CONF_PLATFORM): vol.In(
                            ("broadcast", "polling", "webhooks")
                        ),
                        vol.Required(CONF_API_KEY): cv.string,
                        vol.Required(CONF_ALLOWED_CHAT_IDS): vol.All(
                            cv.ensure_list, [vol.Coerce(int)]
                        ),
                        vol.Optional(ATTR_PARSER, default=PARSER_MD): cv.string,
                        vol.Optional(CONF_PROXY_URL): cv.string,
                        vol.Optional(CONF_PROXY_PARAMS): dict,
                        # webhooks
                        vol.Optional(CONF_URL): cv.url,
                        vol.Optional(
                            CONF_TRUSTED_NETWORKS, default=DEFAULT_TRUSTED_NETWORKS
                        ): vol.All(cv.ensure_list, [ip_network]),
                    }
                )
            ],
        )
    },
    extra=vol.ALLOW_EXTRA,
)
# Options shared by every message-sending service call.
BASE_SERVICE_SCHEMA = vol.Schema(
    {
        vol.Optional(ATTR_TARGET): vol.All(cv.ensure_list, [vol.Coerce(int)]),
        vol.Optional(ATTR_PARSER): cv.string,
        vol.Optional(ATTR_DISABLE_NOTIF): cv.boolean,
        vol.Optional(ATTR_DISABLE_WEB_PREV): cv.boolean,
        vol.Optional(ATTR_KEYBOARD): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(ATTR_KEYBOARD_INLINE): cv.ensure_list,
        vol.Optional(ATTR_TIMEOUT): cv.positive_int,
        vol.Optional(ATTR_MESSAGE_TAG): cv.string,
    },
    extra=vol.ALLOW_EXTRA,
)
# send_message: message body (template) plus optional title.
SERVICE_SCHEMA_SEND_MESSAGE = BASE_SERVICE_SCHEMA.extend(
    {vol.Required(ATTR_MESSAGE): cv.template, vol.Optional(ATTR_TITLE): cv.template}
)
# send_photo/sticker/animation/video/voice/document: file by URL or local path,
# with optional caption and HTTP auth for the download.
SERVICE_SCHEMA_SEND_FILE = BASE_SERVICE_SCHEMA.extend(
    {
        vol.Optional(ATTR_URL): cv.template,
        vol.Optional(ATTR_FILE): cv.template,
        vol.Optional(ATTR_CAPTION): cv.template,
        vol.Optional(ATTR_USERNAME): cv.string,
        vol.Optional(ATTR_PASSWORD): cv.string,
        vol.Optional(ATTR_AUTHENTICATION): cv.string,
        vol.Optional(ATTR_VERIFY_SSL): cv.boolean,
    }
)
SERVICE_SCHEMA_SEND_LOCATION = BASE_SERVICE_SCHEMA.extend(
    {
        vol.Required(ATTR_LONGITUDE): cv.template,
        vol.Required(ATTR_LATITUDE): cv.template,
    }
)
# edit_* services address an existing message by (chat_id, message_id);
# message_id may be the literal string "last".
SERVICE_SCHEMA_EDIT_MESSAGE = SERVICE_SCHEMA_SEND_MESSAGE.extend(
    {
        vol.Required(ATTR_MESSAGEID): vol.Any(
            cv.positive_int, vol.All(cv.string, "last")
        ),
        vol.Required(ATTR_CHAT_ID): vol.Coerce(int),
    }
)
SERVICE_SCHEMA_EDIT_CAPTION = vol.Schema(
    {
        vol.Required(ATTR_MESSAGEID): vol.Any(
            cv.positive_int, vol.All(cv.string, "last")
        ),
        vol.Required(ATTR_CHAT_ID): vol.Coerce(int),
        vol.Required(ATTR_CAPTION): cv.template,
        vol.Optional(ATTR_KEYBOARD_INLINE): cv.ensure_list,
    },
    extra=vol.ALLOW_EXTRA,
)
SERVICE_SCHEMA_EDIT_REPLYMARKUP = vol.Schema(
    {
        vol.Required(ATTR_MESSAGEID): vol.Any(
            cv.positive_int, vol.All(cv.string, "last")
        ),
        vol.Required(ATTR_CHAT_ID): vol.Coerce(int),
        vol.Required(ATTR_KEYBOARD_INLINE): cv.ensure_list,
    },
    extra=vol.ALLOW_EXTRA,
)
SERVICE_SCHEMA_ANSWER_CALLBACK_QUERY = vol.Schema(
    {
        vol.Required(ATTR_MESSAGE): cv.template,
        vol.Required(ATTR_CALLBACK_QUERY_ID): vol.Coerce(int),
        vol.Optional(ATTR_SHOW_ALERT): cv.boolean,
    },
    extra=vol.ALLOW_EXTRA,
)
SERVICE_SCHEMA_DELETE_MESSAGE = vol.Schema(
    {
        vol.Required(ATTR_CHAT_ID): vol.Coerce(int),
        vol.Required(ATTR_MESSAGEID): vol.Any(
            cv.positive_int, vol.All(cv.string, "last")
        ),
    },
    extra=vol.ALLOW_EXTRA,
)
SERVICE_SCHEMA_LEAVE_CHAT = vol.Schema({vol.Required(ATTR_CHAT_ID): vol.Coerce(int)})
# Service name -> validation schema; iterated at setup to register every service.
SERVICE_MAP = {
    SERVICE_SEND_MESSAGE: SERVICE_SCHEMA_SEND_MESSAGE,
    SERVICE_SEND_PHOTO: SERVICE_SCHEMA_SEND_FILE,
    SERVICE_SEND_STICKER: SERVICE_SCHEMA_SEND_FILE,
    SERVICE_SEND_ANIMATION: SERVICE_SCHEMA_SEND_FILE,
    SERVICE_SEND_VIDEO: SERVICE_SCHEMA_SEND_FILE,
    SERVICE_SEND_VOICE: SERVICE_SCHEMA_SEND_FILE,
    SERVICE_SEND_DOCUMENT: SERVICE_SCHEMA_SEND_FILE,
    SERVICE_SEND_LOCATION: SERVICE_SCHEMA_SEND_LOCATION,
    SERVICE_EDIT_MESSAGE: SERVICE_SCHEMA_EDIT_MESSAGE,
    SERVICE_EDIT_CAPTION: SERVICE_SCHEMA_EDIT_CAPTION,
    SERVICE_EDIT_REPLYMARKUP: SERVICE_SCHEMA_EDIT_REPLYMARKUP,
    SERVICE_ANSWER_CALLBACK_QUERY: SERVICE_SCHEMA_ANSWER_CALLBACK_QUERY,
    SERVICE_DELETE_MESSAGE: SERVICE_SCHEMA_DELETE_MESSAGE,
    SERVICE_LEAVE_CHAT: SERVICE_SCHEMA_LEAVE_CHAT,
}
def load_data(
    hass,
    url=None,
    filepath=None,
    username=None,
    password=None,  # BUG FIX: default was the corrupted placeholder `<PASSWORD>` (a syntax error)
    authentication=None,
    num_retries=5,
    verify_ssl=None,
):
    """Load data into ByteIO/File container from a source.

    Exactly one of `url` or `filepath` should be given. URLs are fetched with
    optional basic/digest auth, retrying up to `num_retries` times; local paths
    are only opened when hass.config.is_allowed_path allows them.
    Returns a file-like object positioned at 0, or None on failure.
    """
    try:
        if url is not None:
            # Load data from URL
            params = {"timeout": 15}
            if username is not None and password is not None:
                if authentication == HTTP_DIGEST_AUTHENTICATION:
                    params["auth"] = HTTPDigestAuth(username, password)
                else:
                    params["auth"] = HTTPBasicAuth(username, password)
            if verify_ssl is not None:
                params["verify"] = verify_ssl
            retry_num = 0
            while retry_num < num_retries:
                req = requests.get(url, **params)
                if not req.ok:
                    _LOGGER.warning(
                        "Status code %s (retry #%s) loading %s",
                        req.status_code,
                        retry_num + 1,
                        url,
                    )
                else:
                    data = io.BytesIO(req.content)
                    # Only accept non-empty payloads; rewind before returning.
                    if data.read():
                        data.seek(0)
                        data.name = url
                        return data
                    _LOGGER.warning("Empty data (retry #%s) in %s)", retry_num + 1, url)
                retry_num += 1
            _LOGGER.warning("Can't load data in %s after %s retries", url, retry_num)
        elif filepath is not None:
            if hass.config.is_allowed_path(filepath):
                return open(filepath, "rb")
            _LOGGER.warning("'%s' are not secure to load data from!", filepath)
        else:
            _LOGGER.warning("Can't load data. No data found in params!")
    except (OSError, TypeError) as error:
        _LOGGER.error("Can't load data into ByteIO: %s", error)
    return None
async def async_setup(hass, config):
    """Set up the Telegram bot component.

    For each entry under `telegram_bot:`, imports the matching platform module
    (broadcast / polling / webhooks), initializes it, builds the Bot plus a
    TelegramNotificationService, then registers every service in SERVICE_MAP.
    NOTE(review): the service handler closes over the `notify_service` created
    in the loop, so only the last configured platform's service is used —
    presumably one platform per install; confirm before adding multi-platform
    support.
    """
    if not config[DOMAIN]:
        return False
    for p_config in config[DOMAIN]:
        p_type = p_config.get(CONF_PLATFORM)
        # Platform implementations live in sibling modules named after CONF_PLATFORM.
        platform = importlib.import_module(
            ".{}".format(p_config[CONF_PLATFORM]), __name__
        )
        _LOGGER.info("Setting up %s.%s", DOMAIN, p_type)
        try:
            receiver_service = await platform.async_setup_platform(hass, p_config)
            if receiver_service is False:
                _LOGGER.error("Failed to initialize Telegram bot %s", p_type)
                return False
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Error setting up platform %s", p_type)
            return False
        bot = initialize_bot(p_config)
        notify_service = TelegramNotificationService(
            hass, bot, p_config.get(CONF_ALLOWED_CHAT_IDS), p_config.get(ATTR_PARSER)
        )
    async def async_send_telegram_message(service):
        """Handle sending Telegram Bot message service calls."""
        def _render_template_attr(data, attribute):
            # Render a template-valued service attribute in place; plain
            # floats/ints/strings pass through untouched.
            attribute_templ = data.get(attribute)
            if attribute_templ:
                if any(
                    isinstance(attribute_templ, vtype) for vtype in [float, int, str]
                ):
                    data[attribute] = attribute_templ
                else:
                    attribute_templ.hass = hass
                    try:
                        data[attribute] = attribute_templ.async_render(
                            parse_result=False
                        )
                    except TemplateError as exc:
                        _LOGGER.error(
                            "TemplateError in %s: %s -> %s",
                            attribute,
                            attribute_templ.template,
                            exc,
                        )
                        # Fall back to the raw template text on render failure.
                        data[attribute] = attribute_templ.template
        msgtype = service.service
        kwargs = dict(service.data)
        for attribute in [
            ATTR_MESSAGE,
            ATTR_TITLE,
            ATTR_URL,
            ATTR_FILE,
            ATTR_CAPTION,
            ATTR_LONGITUDE,
            ATTR_LATITUDE,
        ]:
            _render_template_attr(kwargs, attribute)
        _LOGGER.debug("New telegram message %s: %s", msgtype, kwargs)
        # Dispatch by service name; blocking telegram calls run in the executor.
        if msgtype == SERVICE_SEND_MESSAGE:
            await hass.async_add_executor_job(
                partial(notify_service.send_message, **kwargs)
            )
        elif msgtype in [
            SERVICE_SEND_PHOTO,
            SERVICE_SEND_STICKER,
            SERVICE_SEND_ANIMATION,
            SERVICE_SEND_VIDEO,
            SERVICE_SEND_VOICE,
            SERVICE_SEND_DOCUMENT,
        ]:
            await hass.async_add_executor_job(
                partial(notify_service.send_file, msgtype, **kwargs)
            )
        elif msgtype == SERVICE_SEND_LOCATION:
            await hass.async_add_executor_job(
                partial(notify_service.send_location, **kwargs)
            )
        elif msgtype == SERVICE_ANSWER_CALLBACK_QUERY:
            await hass.async_add_executor_job(
                partial(notify_service.answer_callback_query, **kwargs)
            )
        elif msgtype == SERVICE_DELETE_MESSAGE:
            await hass.async_add_executor_job(
                partial(notify_service.delete_message, **kwargs)
            )
        else:
            # Remaining services are the edit_* family.
            await hass.async_add_executor_job(
                partial(notify_service.edit_message, msgtype, **kwargs)
            )
    # Register notification services
    for service_notif, schema in SERVICE_MAP.items():
        hass.services.async_register(
            DOMAIN, service_notif, async_send_telegram_message, schema=schema
        )
    return True
def initialize_bot(p_config):
    """Initialize telegram bot with proxy support."""
    api_key = p_config.get(CONF_API_KEY)
    proxy_url = p_config.get(CONF_PROXY_URL)
    proxy_params = p_config.get(CONF_PROXY_PARAMS)
    if proxy_url is None:
        request = Request(con_pool_size=8)
    else:
        # Route all bot traffic through the configured proxy.
        request = Request(
            con_pool_size=8, proxy_url=proxy_url, urllib3_proxy_kwargs=proxy_params
        )
    return Bot(token=api_key, request=request)
class TelegramNotificationService:
"""Implement the notification services for the Telegram Bot domain."""
def __init__(self, hass, bot, allowed_chat_ids, parser):
"""Initialize the service."""
self.allowed_chat_ids = allowed_chat_ids
self._default_user = self.allowed_chat_ids[0]
self._last_message_id = {user: None for user in self.allowed_chat_ids}
self._parsers = {PARSER_HTML: ParseMode.HTML, PARSER_MD: ParseMode.MARKDOWN}
self._parse_mode = self._parsers.get(parser)
self.bot = bot
self.hass = hass
def _get_msg_ids(self, msg_data, chat_id):
"""Get the message id to edit.
This can be one of (message_id, inline_message_id) from a msg dict,
returning a tuple.
**You can use 'last' as message_id** to edit
the message last sent in the chat_id.
"""
message_id = inline_message_id = None
if ATTR_MESSAGEID in msg_data:
message_id = msg_data[ATTR_MESSAGEID]
if (
isinstance(message_id, str)
and (message_id == "last")
and (self._last_message_id[chat_id] is not None)
):
message_id = self._last_message_id[chat_id]
else:
inline_message_id = msg_data["inline_message_id"]
return message_id, inline_message_id
def _get_target_chat_ids(self, target):
"""Validate chat_id targets or return default target (first).
:param target: optional list of integers ([12234, -12345])
:return list of chat_id targets (integers)
"""
if target is not None:
if isinstance(target, int):
target = [target]
chat_ids = [t for t in target if t in self.allowed_chat_ids]
if chat_ids:
return chat_ids
_LOGGER.warning(
"Disallowed targets: %s, using default: %s", target, self._default_user
)
return [self._default_user]
    def _get_msg_kwargs(self, data):
        """Get parameters in message data kwargs.

        Builds the dict of sending options (parser, notification flags,
        timeout, tag, reply id, reply_markup keyboard) from service-call
        `data`, falling back to instance defaults.
        """
        def _make_row_inline_keyboard(row_keyboard):
            """Make a list of InlineKeyboardButtons.
            It can accept:
            - a list of tuples like:
                `[(text_b1, data_callback_b1),
                (text_b2, data_callback_b2), ...]
            - a string like: `/cmd1, /cmd2, /cmd3`
            - or a string like: `text_b1:/cmd1, text_b2:/cmd2`
            """
            buttons = []
            if isinstance(row_keyboard, str):
                for key in row_keyboard.split(","):
                    if ":/" in key:
                        # commands like: 'Label:/cmd' become ('Label', '/cmd')
                        label = key.split(":/")[0]
                        command = key[len(label) + 1 :]
                        buttons.append(
                            InlineKeyboardButton(label, callback_data=command)
                        )
                    else:
                        # commands like: '/cmd' become ('CMD', '/cmd')
                        label = key.strip()[1:].upper()
                        buttons.append(InlineKeyboardButton(label, callback_data=key))
            elif isinstance(row_keyboard, list):
                for entry in row_keyboard:
                    text_btn, data_btn = entry
                    buttons.append(
                        InlineKeyboardButton(text_btn, callback_data=data_btn)
                    )
            else:
                raise ValueError(str(row_keyboard))
            return buttons
        # Defaults
        params = {
            ATTR_PARSER: self._parse_mode,
            ATTR_DISABLE_NOTIF: False,
            ATTR_DISABLE_WEB_PREV: None,
            ATTR_REPLY_TO_MSGID: None,
            ATTR_REPLYMARKUP: None,
            ATTR_TIMEOUT: None,
            ATTR_MESSAGE_TAG: None,
        }
        if data is not None:
            # Per-call overrides for the plain options.
            if ATTR_PARSER in data:
                params[ATTR_PARSER] = self._parsers.get(
                    data[ATTR_PARSER], self._parse_mode
                )
            if ATTR_TIMEOUT in data:
                params[ATTR_TIMEOUT] = data[ATTR_TIMEOUT]
            if ATTR_DISABLE_NOTIF in data:
                params[ATTR_DISABLE_NOTIF] = data[ATTR_DISABLE_NOTIF]
            if ATTR_DISABLE_WEB_PREV in data:
                params[ATTR_DISABLE_WEB_PREV] = data[ATTR_DISABLE_WEB_PREV]
            if ATTR_REPLY_TO_MSGID in data:
                params[ATTR_REPLY_TO_MSGID] = data[ATTR_REPLY_TO_MSGID]
            if ATTR_MESSAGE_TAG in data:
                params[ATTR_MESSAGE_TAG] = data[ATTR_MESSAGE_TAG]
            # Keyboards:
            if ATTR_KEYBOARD in data:
                # Reply keyboard: rows of comma-separated labels; an empty list
                # removes the current keyboard.
                keys = data.get(ATTR_KEYBOARD)
                keys = keys if isinstance(keys, list) else [keys]
                if keys:
                    params[ATTR_REPLYMARKUP] = ReplyKeyboardMarkup(
                        [[key.strip() for key in row.split(",")] for row in keys]
                    )
                else:
                    params[ATTR_REPLYMARKUP] = ReplyKeyboardRemove(True)
            elif ATTR_KEYBOARD_INLINE in data:
                keys = data.get(ATTR_KEYBOARD_INLINE)
                keys = keys if isinstance(keys, list) else [keys]
                params[ATTR_REPLYMARKUP] = InlineKeyboardMarkup(
                    [_make_row_inline_keyboard(row) for row in keys]
                )
        return params
def _send_msg(self, func_send, msg_error, message_tag, *args_msg, **kwargs_msg):
"""Send one message."""
try:
out = func_send(*args_msg, **kwargs_msg)
if not isinstance(out, bool) and hasattr(out, ATTR_MESSAGEID):
chat_id = out.chat_id
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 27 23:08:38 2021
@author: zengke
"""
from sklearn.base import BaseEstimator,TransformerMixin
import numpy as np
import pandas as pd
import warnings
import re
from BDMLtools.base import Base
from BDMLtools.selector.bin_fun import binFreq,binPretty,binTree,binChi2,binKmeans
from BDMLtools.report.report import varGroupsReport,varReportSinge
import matplotlib.pyplot as plt
from pandas.api.types import is_string_dtype,is_numeric_dtype
from BDMLtools.plotter.base import BaseWoePlotter
#from joblib import Parallel,delayed
#from pandas.api.types import is_numeric_dtype
class binSelector(Base,BaseEstimator,TransformerMixin):
    """
    Automatic binning with IV-based feature filtering.

    Supported binning methods (see `method`):
        + 'freq'        : equal-frequency cuts for numeric columns; categorical
                          columns are binned by their levels.
        + 'freq-kmeans' : equal-frequency pre-cuts refined with KMeans, merging
                          bins with similar bad rates.
        + 'pretty'      : pretty breakpoints (cf. R's `pretty`), nicer for
                          reports and plots.
        + 'tree'        : decision-tree cuts, recursively splitting on the
                          point with the highest iv/ks gain.
        + 'chi2'        : equal-frequency pre-bins merged when a chi-square
                          test finds no significant difference.

    Numeric inputs should be float64 (float32 gives approximate results and
    may hit precision issues); `inf` is not supported. Categorical levels must
    not collide with the reserved values '', 'missing', 'special' or None.

    Parameters
    ----------
    method : str, one of 'freq','freq-kmeans','pretty','tree','chi2'.
    max_bin : int, number of pre-bins (not used when method='freq'); more
        pre-bins may find better cut points at a higher computational cost.
    distr_limit : float, minimum sample share of a final bin (not used for 'freq').
    bin_num_limit : int, final bin count ('freq': exact; other methods: upper
        bound; for 'freq-kmeans' only effective when bin_num_limit < max_bin).
    coerce_monotonic : bool, force the bad-probability of numeric bins to be
        monotonic. Works with every method, but is only advisable when x and y
        are in fact monotonically related.
    sample_weight : array or None, non-zero sample weights; affects
        count/bad/good/bad_prob/iv/ks in the report, and the weighted
        iv/ks gain or chi2 used by 'tree'/'chi2'.
    special_values : None, list or dict, values to treat as special; strings
        are replaced with 'missing' and numerics with np.nan
        (a list applies to all columns, a dict maps column name -> values).
    iv_limit : float, features whose total IV is below this threshold are dropped.
    keep : list or None, column names always kept in `keep_col` (no report is
        produced for them).
    b_dtype : str, dtype used for the binning computations.
    n_jobs : int, parallel jobs over columns (-1 = all cores).
    verbose : int, verbosity level of the parallel computation.

    Attributes
    ----------
    keep_col : list, columns surviving binning + IV filtering (plus `keep`).
    breaks_list : dict, column -> break points produced by the chosen method.
    bins : dict, column -> bin-level report DataFrame.
    iv_info : pd.Series, total IV per column.
    ks_info : pd.Series, max KS per column.
    """
    def __init__(self,method='freq',max_bin=50,distr_limit=0.05,bin_num_limit=8,special_values=None,
                 iv_limit=0.02,keep=None,sample_weight=None,coerce_monotonic=False,b_dtype='float64',n_jobs=-1,verbose=0):
        self.method=method
        self.max_bin=max_bin
        self.distr_limit=distr_limit
        self.bin_num_limit=bin_num_limit
        self.iv_limit=iv_limit
        self.keep=keep
        self.special_values=special_values
        self.coerce_monotonic=coerce_monotonic
        self.b_dtype=b_dtype
        self.sample_weight=sample_weight
        self.n_jobs=n_jobs
        self.verbose=verbose
        self._is_fitted=False
    def transform(self,X,y=None):
        """Return X restricted to the columns kept after fit (self.keep_col)."""
        self._check_is_fitted()
        self._check_X(X)
        return X[self.keep_col]
    def fit(self,X,y):
        """Bin every column of X against binary target y, then filter by IV.

        Parameters
        ----------
        X : pd.DataFrame, features to bin.
        y : pd.Series, binary target; must carry a name (used in the report).
        """
        self._check_data(X, y)
        self._check_colname(X)
        if y.name:
            self.target=y.name
        else:
            raise ValueError('name y using pd.Series(y,name=yname)')
        if self.method == 'freq':
            #using freq cut
            self.breaks_list,bin_res=binFreq(X,y,
                                             bin_num_limit=self.bin_num_limit,
                                             special_values=self.special_values,
                                             ws=self.sample_weight,
                                             coerce_monotonic=self.coerce_monotonic
                                             )
        elif self.method == 'freq-kmeans':
            #using freq-kmeans to combine bins with similar badprob after freq cut
            breaks_list_freq,_=binFreq(X,y,
                                       bin_num_limit=self.max_bin,
                                       special_values=self.special_values,
                                       ws=self.sample_weight,
                                       coerce_monotonic=self.coerce_monotonic
                                       )
            res_Kmeans=binKmeans(breaks_list=breaks_list_freq,
                                 combine_ratio=0.1,
                                 bin_limit=self.bin_num_limit,
                                 seed=123,
                                 sample_weight=self.sample_weight,
                                 special_values=self.special_values,
                                 n_jobs=self.n_jobs,
                                 verbose=self.verbose).fit(X,y)
            self.breaks_list=res_Kmeans.breaks_list
            bin_res=res_Kmeans.bins
        elif self.method == 'pretty':
            #using pretty-cuts
            res_pretty=binPretty(max_bin=self.max_bin,distr_limit=self.distr_limit,bin_num_limit=self.bin_num_limit,
                                 coerce_monotonic=self.coerce_monotonic,ws=self.sample_weight,
                                 special_values=self.special_values,n_jobs=self.n_jobs,verbose=self.verbose).fit(X,y)
            self.breaks_list=res_pretty.breaks_list
            bin_res=res_pretty.bins
        elif self.method == 'tree':
            #using treecut
            res_tree=binTree(max_bin=self.max_bin,criteria='iv',distr_limit=self.distr_limit,
                             bin_num_limit=self.bin_num_limit,ws=self.sample_weight,
                             coerce_monotonic=self.coerce_monotonic,
                             special_values=self.special_values,n_jobs=self.n_jobs,
                             verbose=self.verbose).fit(X,y)
            self.breaks_list=res_tree.breaks_list
            bin_res=res_tree.bins
        elif self.method == 'chi2':
            #using chi2merge
            res_chi2=binChi2(max_bin=self.max_bin,tol=0.1,distr_limit=self.distr_limit,bin_num_limit=self.bin_num_limit,
                             coerce_monotonic=self.coerce_monotonic,ws=self.sample_weight,
                             special_values=self.special_values,n_jobs=self.n_jobs,verbose=self.verbose).fit(X,y)
            self.breaks_list=res_chi2.breaks_list
            bin_res=res_chi2.bins
        else:
            # BUG FIX: the original message listed the unsupported value
            # 'pretty-kmeans' instead of the actual method name 'freq-kmeans'.
            raise ValueError("method in ('freq','freq-kmeans','pretty','tree','chi2')")
        #get iv and ks
        optbindf_ks=pd.concat(bin_res.values())
        self.iv_info=optbindf_ks.groupby('variable')['bin_iv'].sum().rename('total_iv')
        self.ks_info=optbindf_ks.groupby('variable')['ks'].max().rename('ks_max')
        #fliter by iv
        self.keep_col=self.iv_info[self.iv_info>=self.iv_limit].index.tolist()
        if not self.keep_col:
            warnings.warn('iv_limit too high to keep any variables,reset iv_limit')
        #keep user-defined columns
        if self.keep:
            if not np.isin(self.keep,X.columns.tolist()).all():
                raise ValueError("keep columns not in X")
            self.keep_col=list(set(self.keep_col+self.keep))
        #keep bin info and breaks info for checking and rebinning
        self.bins={column:bin_res.get(column) for column in self.keep_col}
        self.breaks_list={column:self.breaks_list.get(column) for column in self.keep_col}
        self._is_fitted=True
        return self
class binAdjuster(Base,BaseWoePlotter):
"""
交互式分箱,支持单特征、组特征的交互式分箱及分箱调整
Parameters:
----------
breaks_list_dict:dict,需要调整的特征分箱字典
column:str,来自X的组变量,用于分组调整,且只支持单个组特征
sort_column:list,排序组变量水平,必须涵盖组的所有的水平
psi_base:str,若column不为None时,进行组分箱调整时的psi基准,需符合X.query(str)语法
+ 'all':以特征在全量数据的分布为基准
+ user-define:用户输入支持X.query的表达式以确定base
special_values:特殊值指代值
+ None,保持数据默认
+ list=[value1,value2,...],数据中所有列的值在[value1,value2,...]中都会被替换,字符被替换为'missing',数值被替换为np.nan
+ dict={col_name1:[value1,value2,...],...},数据中指定列替换,被指定的列的值在[value1,value2,...]中都会被替换,字符被替换为'missing',数值被替换为np.nan
sample_weight:numpy.array or pd.Series(...,index=X.index) or None,样本权重,若数据是经过抽样获取的,则可加入样本权重以计算加权的badrate,woe,iv,ks等指标以还原抽样对分析影响
b_dtype:可选float32与float64,breaks的数据精度类型,breaks与x的数据精度类型应保持一致,否则会导致在极端条件下的分箱出现错误结果
+ 若x的数据为np.float32类型,请设定为float32以保证breaks和x的精度类型一致
+ 若x的数据为np.float64类型,请保持默认
+ 请不要在原始数据中共用不同的数值精度格式,例如float32与float64并存..,请使用bm.dtypeAllocator统一数据的精度格式
figure_size:tuple,特征分析图的图形大小
Attribute:
----------
breaks_list_adj:dict,经调整后的分箱结果
vtabs_dict_adj:dict,经调整后的特征分析报告
Method:
----------
fit(X,y):给定X,y并开始分箱调整
transform(X):给定X并根据调整结果进行特征选择
binAdjuster的交互内容
1: next:当前特征分箱完毕,跳转到下个特征
2: yes:调整当前特征分箱:
输入需调整的分箱:
+ 连续:输入[数值1,数值2,...]调整分段继续,
- 分段中不用写最大/最小值
- 若输入空白则会在全数据上进行最优分箱
+ 分类:输入[字符1,字符2,...]调整分段继续,
- 其中若合并分类特征写成“字符3%,%字符4”
- 其中字符必须涵盖该分类特征的所有水平,若有遗漏则将被转换为missing
3: back :返回前一个特征并进行调整
4: remove :当前特征分箱无法调整至合理水平,在调整最终结果中剔除该特征信息
+ 只要某特征被选择为remove,那么该特征无论调整了多少次分箱都会被最终从结果中剔除
0: exit:终止分箱程序
+ 输入"y"终止,其他则继续
"""
def __init__(self,breaks_list_dict,column=None,sort_column=None,psi_base='all',
special_values=None,sample_weight=None,b_dtype='float64',figure_size=None):
self.breaks_list_dict=breaks_list_dict
self.column=column
self.sort_column=sort_column
self.psi_base=psi_base
self.special_values=special_values
self.b_dtype=b_dtype
self.sample_weight=sample_weight
self.figure_size=figure_size
self._is_fitted=False
def fit(self,X,y):
self._check_param_dtype(self.b_dtype)
self._check_data(X,y)
self._check_colname(X)
if not np.all(np.isin(list(self.breaks_list_dict),X.columns)):
raise ValueError("breaks_list_dict contains colname not in X")
if self.column is None:
breaks_list_adj,vtabs_dict_adj=self._get_breaks_adj(self.breaks_list_dict,
X,y,
sample_weight=self.sample_weight,
special_values=self.special_values,
b_dtype=self.b_dtype,
figure_size=self.figure_size)
else:
breaks_list_adj,vtabs_dict_adj=self._get_breaks_adj_g(self.breaks_list_dict,
X,y,
column=self.column,
sort_column=self.sort_column,
psi_base=self.psi_base,
sample_weight=self.sample_weight,
special_values=self.special_values,
b_dtype=self.b_dtype,
figure_size=self.figure_size)
self.breaks_list_adj=breaks_list_adj
self.vtabs_dict_adj=vtabs_dict_adj
self._is_fitted=True
return self
def transform(self,X,y=None):
self._check_is_fitted()
self._check_X(X)
return X[self.breaks_list_dict.keys()]
    def _split_by_re(self,string,pattern):
        """Split `string` at every match of `pattern`, dropping the single
        delimiter character at each split point.

        Used by the interactive loop with pattern '[,][^%]' so that commas
        belonging to a '%'-joined category group are NOT treated as separators.
        """
        # start offset of every match; each offset marks a split point
        indices=[i.start() for i in re.finditer(pattern,string)]
        # ensure the offsets cover the whole string: prepend 0, append len
        indices=[0]+indices+[len(string)] if 0 not in indices else indices+[len(string)]
        # consecutive offset pairs, skipping the first pair (i == 0)
        ind_range=[indices[i:i + 2] for i in range(len(indices) - 1) if i]
        # first token keeps its leading char; later tokens skip the delimiter char
        res=[string[:indices[1]]]+[string[i[0]+1:i[1]] for i in ind_range]
        return res
def _menu(self,i, xs_len, x_i):
print('>>> Adjust breaks for ({}/{}) {}?'.format(i, xs_len, x_i))
print('1: next \n2: yes \n3: back \n4: remove \n0: exit')
adj_brk = input("Selection: ")
while isinstance(adj_brk,str):
if str(adj_brk).isdigit():
adj_brk = int(adj_brk)
if adj_brk not in [0,1,2,3,4]:
warnings.warn('Enter an item from the menu, or 0 to exit.')
adj_brk = input("Selection: ")
else:
warnings.warn('input 1,2,3,4,0')
adj_brk = input("Selection: ")
return adj_brk
def _is_numeric(self,strung):
try:
float(strung)
return True
except:
return False
def _get_breaks_adj(self,br_adj,X,y,
sample_weight=None,special_values=None,b_dtype='float64',
figure_size=None):
global breaks_list_adj,vtabs_dict_adj
# set param
adj_count=0
var_sum=len(br_adj)
var_dict=dict(zip(range(len(br_adj)),br_adj.keys()))
adj_status=False
# set output
breaks_list_adj={}
vtabs_dict_adj={}
# colname_del
colname_del=set()
while True:
# default binning and plotting using given breaks
if not adj_status:
colname=var_dict[adj_count]
breaks=br_adj[colname]
print('----Adjusting {}...----'.format(colname))
print('Current breaks: {}...'.format(breaks))
binx=varReportSinge().report(X[colname],y,breaks,sample_weight=sample_weight,
special_values=special_values,b_dtype=b_dtype)
fig,_=self._get_plot_single(binx,figure_size=None,show_plot=True)
plt.show(fig)
# interactive options
option=self._menu(adj_count+1,var_sum,colname)
#opt==1:no adjustion,go next variable
if option==1:
adj_count+=1
breaks_list_adj[colname]=breaks
vtabs_dict_adj[colname]=binx
adj_status=False
print('----Adjusting {} finish.----'.format(colname))
#opt==2:adjusting breaks and re-binning variable
elif option==2:
if is_numeric_dtype(X[colname]):
breaks = input(">>> Enter modified breaks: ")
breaks = re.sub("^[,\.]+|[,\.]+$|\s", "", breaks).split(',')
while True:
if breaks==['']:
breaks=binTree(n_jobs=1,coerce_monotonic=True).fit(X[[colname]],y).breaks_list[colname]
break
elif all([self._is_numeric(i) for i in breaks]):
break
else:
warnings.warn('Breaks could not be converted to number.')
breaks = input(">>> Enter modified breaks: ")
breaks = re.sub("^[,\.]+|[,\.]+$|\s", "", breaks).split(',')
#check break dtype
if b_dtype=='float64':
breaks = np.float64(breaks).tolist()
else:
breaks = np.float32(breaks).tolist()
elif is_string_dtype(X[colname]):
breaks = input(">>> Enter modified breaks: ")
breaks = re.sub("^[,\.]+|[,\.]+$|\s", "", breaks)
if not breaks:
breaks=binTree(n_jobs=1,coerce_monotonic=True).fit(X[[colname]],y).breaks_list[colname]
else:
breaks = self._split_by_re(breaks,'[,][^%]')
else:
raise ValueError("{}'s dtype in ('number' or 'object')".format(colname))
adj_status=True
#opt==3:roll back to previous variable
elif option==3:
adj_count+=-1 if adj_count else adj_count
print('Roll back to previous variable.')
adj_status=False
#opt==4:remove current variable and go next
elif option==4:
adj_count+=1
colname_del.add(colname)
print('variable {} will be removed and go next.'.format(colname))
adj_status=False
#opt==0:stop adjustion by user
elif option==0:
print('Adjustion has not been completed yet,are you sure?')
adj_status=False
if_exit = input("Input 'y' to exit or other to continue :")
# stop condition (1/2):user defined
if if_exit=='y':
print('Stop adjusting...,result store in global variables "breaks_list_adj" and "vtabs_dict_adj"')
break
else:
raise ValueError('option not in (0,1,2,3,4)')
# stop condition (2/2):all variables done
if adj_count==var_sum:
print('Adjustion complete...')
break
if colname_del:
for key in colname_del:
if key in breaks_list_adj:
del breaks_list_adj[key]
if key in vtabs_dict_adj:
del vtabs_dict_adj[key]
return breaks_list_adj,vtabs_dict_adj
def _get_breaks_adj_g(self,br_adj,X,y,column,sort_column=None,psi_base='all',
sample_weight=None,special_values=None,b_dtype='float64',
figure_size=None):
global breaks_list_adj,vtabs_dict_adj
# set param
adj_count=0
var_sum=len(br_adj)
var_dict=dict(zip(range(len(br_adj)),br_adj.keys()))
adj_status=False
# set output
breaks_list_adj={}
vtabs_dict_adj={}
colname_del=set()
while True:
# default binning and plotting using given breaks
if not adj_status:
colname=var_dict[adj_count]
breaks=br_adj[colname]
print('----Adjusting {}...----'.format(colname))
print('Current breaks: {}...'.format(breaks))
bins=varGroupsReport({colname:breaks},target=y.name,
columns=[column],
sort_columns={column:sort_column} if sort_column else sort_column,
output_psi=True,
psi_base=psi_base,
sample_weight=sample_weight,
b_dtype=b_dtype,
row_limit=0,n_jobs=1).fit(X[[colname]+[column]].join(y))
binx_g=pd.concat(bins.report_dict_raw,axis=1).droplevel(0)
binx_psi=bins.report_dict['report_psi']
psi_col=sort_column if sort_column else X[column].astype('str').unique()
psi_info=[(i,round(binx_psi.loc[binx_psi.bin=='psi'][i]['count_distr'].values[0],4)) for i in psi_col]
print('PSI at current breaks:{}'.format(psi_info))
fig,_=self._get_plot_single_group(binx_g,
sort_column=sort_column,
figure_size=figure_size,
show_plot=True)
plt.show(fig)
# interactive options
option=self._menu(adj_count+1,var_sum,colname)
#opt==1:no adjustion,go next variable
if option==1:
adj_count+=1
breaks_list_adj[colname]=breaks
vtabs_dict_adj[colname]=binx_g
adj_status=False
print('----Adjusting {} finish.----'.format(colname))
#opt==2:adjusting breaks and re-binning variable
elif option==2:
if is_numeric_dtype(X[colname]):
breaks = input(">>> Enter modified breaks: ")
breaks = re.sub("^[,\.]+|[,\.]+$|\s", "", breaks).split(',')
while True:
if breaks==['']:
breaks=binTree(n_jobs=1,coerce_monotonic=True).fit(X[[colname]],y).breaks_list[colname]
break
elif all([self._is_numeric(i) for i in breaks]):
break
else:
warnings.warn('Breaks could not be converted to number.')
breaks = input(">>> Enter modified breaks: ")
breaks = re.sub("^[,\.]+|[,\.]+$|\s", "", breaks).split(',')
#check break dtype
if b_dtype=='float64':
| |
# source repo: zhengxiawu/XNAS
import numpy as np
import copy
import time
import torch
from sklearn.ensemble import RandomForestClassifier
import xnas.algorithms.RMINAS.sampler.sampling as sampling
def softmax(x):
    """Row-wise softmax: exponentiate and normalise along the last axis."""
    exps = np.exp(x)
    return exps / exps.sum(axis=-1, keepdims=True)
class RF_suggest():
def __init__(self, space, logger, api=None, thres_rate=0.05, batch=1000, seed=10):
np.random.seed(seed)
self.sampled_history = [] # list[arch_index] / list[arch.ravel()]
self.trained_arch = [] # list[dict{'arch':arch, 'loss':loss}]
self.trained_arch_index = []
self.thres_rate = thres_rate
self.loss_thres = 0.
self.batch = batch
self.space = space
self.logger = logger
self.times_suggest = 0 # without warmup
if self.space == 'nasbench201':
self.api = api
self.max_space = 15625
self.num_estimator = 30
elif self.space == 'darts':
self.num_estimator = 98
elif self.space == 'mb':
self.num_estimator = 140
elif self.space == 'nasbenchmacro':
self.api = api
self.max_space = int(3**8)
self.num_estimator = 30
self.spaces = list(api.keys())
elif self.space == 'proxyless':
self.num_estimator = 100
self.model = RandomForestClassifier(n_estimators=self.num_estimator,random_state=seed)
def _update_lossthres(self):
losses = [i['loss'] for i in self.trained_arch]
# losses_wo_inf = []
# for i in losses:
# if not np.isinf(i):
# losses_wo_inf.append(i)
self.loss_thres = np.quantile(losses, self.thres_rate) + 1e-9
self.logger.info("CKA loss_thres: {}".format(self.loss_thres))
good_arch = (np.array(losses) < self.loss_thres).tolist()
assert np.sum(good_arch) > 1, "no enough good architectures"
def _index2arch_nb201(self, index):
assert self.space == 'nasbench201', 'api dismatch'
_arch_str = self.api.arch(index)
_arch_arr = sampling.nb201genostr2array(_arch_str)
return _arch_arr
def _trainedarch2xy(self):
features = []
labels = []
for i in self.trained_arch:
features.append(i['arch'].ravel())
labels.append(i['loss'] < self.loss_thres if self.loss_thres else False)
return features, labels
def warmup_samples(self, num_warmup):
if self.space == 'nasbench201':
sampled = list(np.random.choice(self.max_space, size=num_warmup, replace=False))
self.sampled_history = copy.deepcopy(sampled)
return sampled
elif self.space == 'darts':
return [self._single_sample() for _ in range(num_warmup)]
elif self.space == 'mb':
return [self._single_sample() for _ in range(num_warmup)]
elif self.space == 'nasbenchmacro':
return [self._single_sample() for _ in range(num_warmup)]
elif self.space == 'proxyless':
return [self._single_sample() for _ in range(num_warmup)]
    def _single_sample(self, unique=True):
        """Draw one random architecture encoding for the current search space.

        With unique=True, rejection-sample until the arch (keyed by its index,
        str() form, or bytes) has not been seen before, recording the key in
        sampled_history.
        """
        if self.space == 'nasbench201':
            assert len(self.sampled_history) < self.max_space, "error: oversampled"
            while True:
                sample = np.random.randint(self.max_space)
                if sample not in self.sampled_history:
                    self.sampled_history.append(sample)
                    return sample
        elif self.space == 'darts':
            if unique:
                while True:
                    sample = np.zeros((14, 7)) # 14 edges, 7 ops
                    node_ids = np.asarray([np.random.choice(range(x,x+i+2), size=2, replace=False) for i, x in enumerate((0,2,5,9))]).ravel() # pick the 8 active edges (2 per node)
                    op = np.random.multinomial(1,[1/7.]*7, size=8) # one-hot op for each of the 8 chosen edges
                    sample[node_ids] = op
                    if str(sample) not in self.sampled_history:
                        self.sampled_history.append(str(sample))
                        return sample
            else:
                sample = np.zeros((14, 7)) # 14 edges, 7 ops
                node_ids = np.asarray([np.random.choice(range(x,x+i+2), size=2, replace=False) for i, x in enumerate((0,2,5,9))]).ravel() # pick the 8 active edges (2 per node)
                op = np.random.multinomial(1,[1/7.]*7, size=8) # one-hot op for each of the 8 chosen edges
                sample[node_ids] = op
                return sample
        elif self.space == 'mb':
            # one-hot choice among 7 ops for each of 20 layers
            if unique:
                while True:
                    c = np.zeros((20, 7))
                    for i in range(20):
                        j = np.random.randint(7)
                        c[i, j] = True
                    if str(c) not in self.sampled_history:
                        self.sampled_history.append(str(c))
                        return c
            else:
                c = np.zeros((20, 7))
                for i in range(20):
                    j = np.random.randint(7)
                    c[i, j] = True
                return c
        elif self.space == 'nasbenchmacro':
            # 8 positions, each with 3 numeric choices
            if unique:
                while True:
                    numeric_choice = np.random.randint(3,size=8)
                    if str(numeric_choice) not in self.sampled_history:
                        self.sampled_history.append(str(numeric_choice))
                        return numeric_choice
            else:
                numeric_choice = np.random.randint(3,size=8)
                return numeric_choice
        elif self.space == 'proxyless':
            def gen_sample():
                # per-stage depth in 1..4 for 5 stages, plus a fixed final unit
                depth = np.array(np.random.randint(1, 4+1, size=5).tolist() + [1])
                anchors = depth+[0,4,8,12,16,20]
                ks = np.random.choice([3,5,7], size=21)
                expand_ratios = np.random.choice([3,6], size=21)
                ed = 4
                # zero out kernel/expand choices for blocks beyond each stage's depth
                for anchor in anchors:
                    ks[anchor:ed] = 0
                    expand_ratios[anchor:ed] = 0
                    ed += 4
                sample = np.concatenate([depth, ks, expand_ratios])
                return sample
            if unique:
                while True:
                    sample = gen_sample()
                    # bytes key: arrays are unhashable, str() would be lossy here
                    if sample.tobytes() not in self.sampled_history:
                        self.sampled_history.append(sample.tobytes())
                        return sample
            else:
                sample = gen_sample()
                return sample
def Warmup(self):
self._update_lossthres()
features, labels = self._trainedarch2xy()
self.model.fit(np.asarray(features, dtype='float'), np.asarray(labels, dtype='float'))
    def fitting_samples(self):
        """Propose the next architecture to evaluate.

        Samples a batch of candidates not yet trained on and returns the one
        the forest scores most likely to be 'good' (highest P(class 1)).
        Return type is an index (nasbench201) or an encoded array (other spaces).
        """
        self.times_suggest += 1
        start_time = time.time()
        if self.space == 'nasbench201':
            _sample_indexes = np.random.choice(self.max_space, size=self.batch, replace=False)
            _sample_archs = []
            _sample_archs_idx = []
            for i in _sample_indexes:
                if i not in self.trained_arch_index:
                    _sample_archs.append(self._index2arch_nb201(i).ravel())
                    _sample_archs_idx.append(i)
            # print("sample {} archs/batch, cost time: {}".format(len(_sample_archs), time.time()-start_time))
            _sample_archs = np.array(_sample_archs)
            best_id = np.argmax(self.model.predict_proba(_sample_archs)[:,1])
            best_arch_id = _sample_archs_idx[best_id]
            return best_arch_id
        elif self.space == 'darts':
            _sample_batch = np.array([self._single_sample(unique=False).ravel() for _ in range(self.batch)])
            # dedup against already-trained archs via their str() form
            _tmp_trained_arch = [str(i['arch'].ravel()) for i in self.trained_arch]
            _sample_archs = []
            for i in _sample_batch:
                if str(i) not in _tmp_trained_arch:
                    _sample_archs.append(i)
            # print("sample {} archs/batch, cost time: {}".format(len(_sample_archs), time.time()-start_time))
            best_id = np.argmax(self.model.predict_proba(_sample_archs)[:,1])
            best_arch = _sample_archs[best_id].reshape((14, 7))
            return best_arch
        elif self.space == 'mb':
            _sample_batch = np.array([self._single_sample(unique=False).ravel() for _ in range(self.batch)])
            _tmp_trained_arch = [str(i['arch'].ravel()) for i in self.trained_arch]
            _sample_archs = []
            for i in _sample_batch:
                if str(i) not in _tmp_trained_arch:
                    _sample_archs.append(i)
            # print("sample {} archs/batch, cost time: {}".format(len(_sample_archs), time.time()-start_time))
            best_id = np.argmax(self.model.predict_proba(_sample_archs)[:,1])
            best_arch = _sample_archs[best_id].reshape((20, 7))
            return best_arch
        elif self.space == 'nasbenchmacro':
            _sample_indexes = np.random.choice(self.max_space, size=self.batch, replace=False)
            # NOTE(review): 'chace_table' (sic) keys are str(arch.ravel()) while
            # the membership test below uses the raw space-key string -- these
            # formats look inconsistent; confirm the dedup actually triggers.
            chace_table = set(str(i['arch'].ravel()) for i in self.trained_arch)
            _sample_archs = []
            for i in _sample_indexes:
                if self.spaces[i] not in chace_table:
                    _sample_archs.append(np.array(list(self.spaces[i])).astype(int))
            # no return here: nasbenchmacro falls through to the shared tail below
        elif self.space == 'proxyless':
            _sample_batch = np.array([self._single_sample(unique=False).ravel() for _ in range(self.batch)])
            _tmp_trained_arch = [(i['arch'].tobytes()) for i in self.trained_arch]
            _sample_archs = []
            for i in _sample_batch:
                if (i).tobytes() not in _tmp_trained_arch:
                    _sample_archs.append(i)
            # print("sample {} archs/batch, cost time: {}".format(len(_sample_archs), time.time()-start_time))
            best_id = np.argmax(self.model.predict_proba(_sample_archs)[:,1])
            best_arch = _sample_archs[best_id]
            return best_arch
        # _sample_batch = np.array([self._single_sample(unique=True).ravel() for _ in range(self.batch)])
        # _tmp_trained_arch = [str(i['arch'].ravel()) for i in self.trained_arch]
        # _sample_archs = []
        # for i in _sample_batch:
        #     if str(i) not in _tmp_trained_arch:
        #         _sample_archs.append(i)
        # print("sample {} archs/batch, cost time: {}".format(len(_sample_archs), time.time()-start_time))
        # shared tail: only reached by the nasbenchmacro branch above
        best_id = np.argmax(self.model.predict_proba(_sample_archs)[:,1])
        best_arch = _sample_archs[best_id]
        return best_arch
def Fitting(self):
# Called after adding data into trained_arch list.
loss = self.trained_arch[-1]['loss']
features, labels = self._trainedarch2xy()
self.model.fit(np.asarray(features, dtype='float'), np.asarray(labels, dtype='float'))
return loss < self.loss_thres if self.loss_thres else False
def optimal_arch(self, method, top=300, use_softmax=True):
assert method in ['sum', 'greedy'], 'method error.'
# with open('RF_sampling.pkl', 'wb') as f:
# pickle.dump((self.loss_thres, self.trained_arch, self.sampled_history), f)
self.logger.info("#times suggest: {}".format(self.times_suggest))
_tmp_trained_arch = [i['arch'].ravel() for i in self.trained_arch]
# self.logger.info("Unique archs {} in total archs {}".format(len(np.unique(_tmp_trained_arch, axis=0)), len(self.trained_arch)))
estimate_archs_tmp = []
for i in self.trained_arch:
if (i['loss'] < self.loss_thres if self.loss_thres else False):
estimate_archs_tmp.append(i)
self.logger.info("#arch < CKA loss_thres: {}".format(len(estimate_archs_tmp)))
_est_archs_sort = sorted(estimate_archs_tmp, key=lambda d: d['loss'])
estimate_archs = []
if top>len(_est_archs_sort):
self.logger.info('top>all, using all archs.')
for i in range(min(top, len(_est_archs_sort))):
estimate_archs.append(_est_archs_sort[i]['arch'])
if self.space == 'nasbench201':
result = []
if method == 'sum':
all_sum = estimate_archs[0]
for i in estimate_archs[1:]:
all_sum = np.add(all_sum, i)
# print(all_sum)
sum_max = list(np.argmax(all_sum, axis=1))
result = copy.deepcopy(sum_max)
elif method == 'greedy':
path_info =[[[0 for _ in range(5)] for _ in range(5)] for _ in range(6)]
for i in estimate_archs:
for j in range(1, 6):
path_info[j][np.argmax(i[j-1])][np.argmax(i[j])] += 1
_esti_arch_0 = [0]*5
for i in estimate_archs:
_esti_arch_0 = np.add(i[0], _esti_arch_0)
startindex = np.argmax(_esti_arch_0)
path_max = [startindex]
for i in range(1, 6):
# path_max.append(np.argmax(path_info[i][path_max[i-1]]))
# one more step
max_op_sum = np.max(path_info[i][path_max[i-1]])
_tmp_max_idx = []
for j in range(5):
if path_info[i][path_max[i-1]][j] == max_op_sum:
_tmp_max_idx.append(j)
if len(_tmp_max_idx) == 1 or i==5:
path_max.append(np.argmax(path_info[i][path_max[i-1]]))
else:
_next_step = np.array([np.sum(path_info[i+1][j]) for j in _tmp_max_idx])
_chosen_op = _tmp_max_idx[np.argmax(_next_step)]
path_max.append(_chosen_op)
self.logger.info("path info:\n{}".format(str(path_info)))
result = copy.deepcopy(path_max)
_tmp_np = np.array(result)
op_arr = np.zeros((_tmp_np.size, 5))
op_arr[np.arange(_tmp_np.size),_tmp_np] = 1
return op_arr
elif self.space == 'darts':
assert method == 'sum', 'only sum is supported in darts.'
all_sum = estimate_archs[0]
for i in estimate_archs[1:]:
all_sum = np.add(all_sum, i)
if use_softmax:
all_sum = softmax(all_sum)
sum_max = np.argmax(all_sum, axis=1)
start_index = 0
end_index = 0
for i in range(2, 6):
end_index += i
_, top_index = torch.topk(torch.from_numpy(sum_max[start_index:end_index]), 2)
mask = list(set(range(i)) - set(list(top_index.numpy())))
for j in mask:
sum_max[start_index+j] = 7
start_index = end_index
# print(sum_max)
_tmp_np = np.array(sum_max)
op_arr = np.zeros((_tmp_np.size, 8))
op_arr[np.arange(_tmp_np.size),_tmp_np] = 1
return op_arr
elif self.space == 'mb':
assert method == 'sum', 'only sum is supported in mb.'
all_sum = estimate_archs[0]
for i in estimate_archs[1:]:
all_sum = np.add(all_sum, i)
print(all_sum)
if use_softmax:
all_sum = softmax(all_sum)
sum_max = np.argmax(all_sum, axis=1)
print(sum_max)
_tmp_np = np.array(sum_max)
op_arr = np.zeros((_tmp_np.size, 7))
op_arr[np.arange(_tmp_np.size),_tmp_np] = 1
return op_arr
elif self.space == 'nasbenchmacro':
assert method == 'sum', 'only sum is supported in mb.'
estimate_archs = np.eye(3)[estimate_archs]
all_sum = estimate_archs.sum(0)
print(all_sum)
if use_softmax:
all_sum = softmax(all_sum)
sum_max = np.argmax(all_sum, axis=1)
print(sum_max)
_tmp_np = np.array(sum_max)
op_arr = np.zeros((_tmp_np.size, 3))
op_arr[np.arange(_tmp_np.size),_tmp_np] = 1
return op_arr.argmax(-1)
elif self.space == 'proxyless':
assert method == 'sum', 'only sum is supported in mb.'
depth = estimate_archs[:, :6]
best_depth = np.eye(4)[depth].argmax(-1)+1
ks = estimate_archs[:, 6:27]//2 # {3, 5, 7}
best_ks = np.eye(3)[ks].argmax(-1) * 2 + 3
er = estimate_archs[:, 27:]//3 # {3, 6}
best_er = np.eye(2)[er].agrmax(-1) * 3 | |
    def showToolTips(self):
        """Attach hover tool-tips to the logger's widgets."""
        #self .setToolTip('This GUI is for browsing log messages')
        self.box_txt .setToolTip('Window for log messages')
        self.but_close .setToolTip('Close this window')
        self.but_save .setToolTip('Save current content of the GUI Logger\nin work directory file: '+os.path.basename(self.fname_log))
        self.tit_status .setToolTip('The file name, where this log \nwill be saved at the end of session')
        self.box_level .setToolTip('Click on this button and \nselect the level of messages \nwhich will be displayed')
    def setFrame(self):
        """Create a thin sunken box frame that covers the whole widget area."""
        self.frame = QFrame(self)
        self.frame.setFrameStyle(QFrame.Box | QFrame.Sunken ) #Box, Panel | Sunken, Raised
        self.frame.setLineWidth(0)
        self.frame.setMidLineWidth(1)
        self.frame.setGeometry(self.rect())
        #self.frame.setVisible(False)
    def setStyle(self):
        """Apply the application-wide style sheets (from config `cp`) to all widgets."""
        self. setStyleSheet (cp.styleBkgd)
        #self.tit_title.setStyleSheet (cp.styleTitleBold)
        self.tit_status.setStyleSheet (cp.styleTitle)
        self.tit_level .setStyleSheet (cp.styleTitle)
        self.but_close .setStyleSheet (cp.styleButton)
        self.but_save .setStyleSheet (cp.styleButton)
        self.box_level .setStyleSheet (cp.styleButton)
        # the log view is read-only; text is appended programmatically
        self.box_txt .setReadOnly(True)
        self.box_txt .setStyleSheet (cp.styleWhiteFixed)
        #self.box_txt .ensureCursorVisible()
        #self.tit_title.setAlignment(QtCore.Qt.AlignCenter)
        #self.titTitle.setBold()
    def setParent(self,parent) :
        """Remember the logical parent object.

        NOTE(review): this shadows QWidget.setParent() and the `parent`
        attribute, and only stores the reference without reparenting the
        widget -- confirm that is intended.
        """
        self.parent = parent
    def resizeEvent(self, e):
        """Keep the frame geometry in sync with the widget on resize."""
        #logger.debug('resizeEvent', self.name)
        self.frame.setGeometry(self.rect())
    def moveEvent(self, e):
        """Move handler; position tracking is currently disabled."""
        #logger.debug('moveEvent', self.name)
        #cp.posGUIMain = (self.pos().x(),self.pos().y())
        pass
    def closeEvent(self, event):
        """Log the close; the total log is saved later by GUIMain, not here."""
        logger.debug('closeEvent', self.name)
        #self.saveLogTotalInFile() # It will be saved at closing of GUIMain
        #try : del cp.guilogger # GUILogger
        #except : pass
        #if cp.guilogger!=None :
        #    del cp.guilogger
        #    cp.guilogger = None
    def onClose(self):
        """Close the window (triggered by the Close button)."""
        logger.debug('onClose', self.name)
        self.close()
    def onSave(self):
        """Save the current log content (triggered by the Save button)."""
        logger.debug('onSave:', self.name)
        self.saveLogInFile()
    def onBox(self):
        """React to a new log-level selection: persist it and re-render the log."""
        level_selected = self.box_level.currentText()
        cp.log_level.setValue( level_selected )
        logger.info('onBox - selected ' + self.tit_level.text() + ' ' + cp.log_level.value(), self.name)
        logger.setLevel(cp.log_level.value())
        # re-display the whole buffer filtered at the new level
        self.box_txt.setText( logger.getLogContent() )
    def saveLogInFile(self):
        """Ask for a file name via a save dialog and write the session log there.

        Remembers the chosen path in fname_log / cp.log_file and shows it in
        the status line; does nothing if the dialog is cancelled.
        """
        logger.info('saveLogInFile ' + self.fname_log, self.name)
        path,filt = QFileDialog.getSaveFileName(self,
                                                caption   = 'Select the file to save log',
                                                directory = self.fname_log,
                                                filter    = '*.txt'
                                                )
        if path == '' :
            logger.debug('Saving is cancelled.', self.name)
            return
        logger.info('Output file: ' + path, self.name)
        logger.saveLogInFile(path)
        self.fname_log = path
        cp.log_file.setValue(path)
        self.setStatus(0, 'Log-file: ' + os.path.basename(self.fname_log))
    def saveLogTotalInFile(self):
        """Save the accumulated total log into the preconfigured file."""
        logger.info('saveLogTotalInFile' + self.fname_log_total, self.name)
        logger.saveLogTotalInFile(self.fname_log_total)
    def getConfirmation(self):
        """Pop-up box for confirmation.

        Returns the QMessageBox button code the user clicked
        (Save / Discard / Cancel)."""
        msg = QMessageBox(self, windowTitle='Confirm closing!',
                          text='You are about to close GUI Logger...\nIf the log-file is not saved it will be lost.',
                          standardButtons=QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel)
        msg.setDefaultButton(msg.Save)
        clicked = msg.exec_()
        if clicked == QMessageBox.Save :
            logger.info('Saving is requested', self.name)
        elif clicked == QMessageBox.Discard :
            logger.info('Discard is requested', self.name)
        else :
            logger.info('Cancel is requested', self.name)
        return clicked
    def onShow(self):
        """Placeholder for a future 'show' action."""
        logger.info('onShow - is not implemented yet...', self.name)
    def startGUILog(self) :
        """Hook this widget up as the logger's GUI sink and show the current buffer."""
        #self.fname_log = cp.log_file.value()
        self.fname_log = logger.fname
        #self.fname_log_total = cp.log_file_total.value()
        self.setStatus(0, 'Log-file: ' + os.path.basename(self.fname_log))
        logger.setLevel(cp.log_level.value())
        self.box_txt.setText(logger.getLogContent())
        # from now on the logger pushes new messages to appendGUILog()
        logger.setGUILogger(self)
        logger.debug('GUILogger is open', self.name)
        self.box_txt.moveCursor(QTextCursor.End)
    def appendGUILog(self, msg='...'):
        """Append one message to the view and keep the latest line visible."""
        self.box_txt.append(msg)
        self.scrollDown()
    def scrollDown(self):
        """Scroll the log view to the bottom by moving the text cursor to the end."""
        #print('scrollDown')
        #scrol_bar_v = self.box_txt.verticalScrollBar() # QScrollBar
        #scrol_bar_v.setValue(scrol_bar_v.maximum())
        self.box_txt.moveCursor(QTextCursor.End)
        self.box_txt.repaint()
        #self.raise_()
        #self.box_txt.update()
def setStatus(self, status_index=0, msg=''):
list_of_states = ['Good','Warning','Alarm']
if status_index == 0 : self.tit_status.setStyleSheet(cp.styleStatusGood)
if status_index == 1 : self.tit_status.setStyleSheet(cp.styleStatusWarning)
if status_index == 2 : self.tit_status.setStyleSheet(cp.styleStatusAlarm)
#self.tit_status.setText('Status: ' + list_of_states[status_index] + msg)
self.tit_status.setText(msg)
#-----------------------------
def test_GUILogger() :
    """Manual smoke test: run the logger GUI as a standalone application."""
    app = QApplication(sys.argv)
    widget = GUILogger()
    widget.show()
    app.exec_()
#-----------------------------
#-----------------------------
#-----------------------------
#-----------------------------
class GUIImage(QLabel) :
"""Main GUI of the Screen Grabber
@see BaseClass
@see OtherClass
"""
name = 'GUIImage'
    def __init__ (self, parent=None, app=None) :
        """Image display label with rubber-band selection/zoom for the grabber."""
        self.myapp = app
        QLabel.__init__(self, parent)
        self.setGeometry(200, 100, 100, 100)
        self.setWindowTitle('Image For Grabber')
        self.palette = QPalette()
        self.resetColorIsSet = False
        #self.grview = QtGui.QGraphicsView()
        #self.setCentralWidget(self.grview)
        #self.setWidget(self.grview)
        self.setFrame()
        # two corners of the rubber-band selection, in widget coordinates
        self.poi1 = QtCore.QPoint(0,0)
        self.poi2 = QtCore.QPoint(0,0)
        self.rect1 = QtCore.QRect()
        self.rect2 = QtCore.QRect()
        # contrasting dashed pens so the band is visible on any background
        self.pen1 = QPen(QtCore.Qt.black)
        self.pen2 = QPen(QtCore.Qt.white)
        self.pen1.setStyle(QtCore.Qt.DashLine)
        self.pen2.setStyle(QtCore.Qt.DashLine)
        self.pen1.setWidthF(1)
        self.pen2.setWidthF(1)
        self.o_pixmap_list = [] # list of old pixmap
        self.r_pixmap = None # raw pixmap
        self.s_pixmap = None # scailed for image pixmap
        self.qp = QPainter()
        #self.pixmap_item = None
        self.counter = 0
        #self.vbox = QtGui.QVBoxLayout()
        #self.vbox.addWidget(self.grview)
        ##self.vbox.addStretch(1)
        ##self.vbox.addWidget(self.wbutbar)
        #self.setLayout(self.vbox)
        #self.connect(self.butFiles , QtCore.SIGNAL('clicked()'), self.onFiles )
        self.showToolTips()
        self.setStyle()
        #self.grabImage()
        #self.show()
        #cp.guiimage = self
#-------------------
# Private methods --
#-------------------
    def grabImage(self):
        """Grab a user-selected screen region into the raw pixmap.

        Uses ImageMagick's `import` CLI writing into a temporary XPM file.
        NOTE(review): shell command built via os.system with an interpolated
        path; fname comes from tempfile so it is not attacker-controlled here.
        """
        fname = tempfile.NamedTemporaryFile(mode='r+b',suffix='.xpm')
        #print(fname.name)
        #logger.info('Use temporary file: %s' % (fname.name), self.name)
        if( 0 == os.system('import -trim -frame -border %s' % (fname.name))) :
            self.r_pixmap = QPixmap(QImage(fname.name,'XPM'))
            self.setPixmapForImage()
    def grabEntireWindow(self):
        """Grab the whole desktop into the raw pixmap and display it."""
        self.r_pixmap = QPixmap.grabWindow(QApplication.desktop().winId())
        self.setPixmapForImage()
    def resetImage(self):
        """Drop the current image and clear the label."""
        self.r_pixmap = None
        self.setPixmapForImage()
    def loadImageFromFile(self, fname) : #Read formats: bmp, jpg, jpeg, png, ppm, xbm, xpm + gif, pbm, pgm,
        """Load an image file into the raw pixmap and display it."""
        self.r_pixmap = QPixmap(QImage(fname))
        self.setPixmapForImage()
def setPixmapForImage(self):
if self.r_pixmap == None :
self.s_pixmap = None
self.clear()
else :
#self.s_pixmap = self.r_pixmap.scaled(self.size())
self.s_pixmap = self.r_pixmap.scaled(self.size(), QtCore.Qt.KeepAspectRatio)
self.setPixmap(self.s_pixmap)
self.setAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
self.setScailedMask()
    def setScailedMask(self):
        """Apply an all-zero monochrome mask to the scaled pixmap.

        NOTE(review): a fully 0-filled Format_Mono mask affects the whole
        pixmap uniformly -- confirm the intended transparency effect.
        (Method name keeps the historical 'Scailed' spelling; callers use it.)
        """
        size = self.s_pixmap.size()
        #print('Scaled pixmap size: %d x %d' % (size.width(), size.height()))
        #==================================
        self.qimage_mask = QImage(size, QImage.Format_Mono)
        self.qimage_mask.fill(0)
        self.qbitmap_mask = QBitmap.fromImage(self.qimage_mask)
        self.s_pixmap.setMask(self.qbitmap_mask)
        #==================================
    def saveImageInFile(self, fname='test.png'): #Write formats: bmp, jpg, jpeg, png, pbm, pgm, ppm, xbm, xpm
        """Save the raw (unscaled) pixmap to `fname`; format inferred from the suffix."""
        if self.r_pixmap is not None :
            self.r_pixmap.save(fname, format=None)
    def showToolTips(self):
        """Set the hover tool-tip for the image area."""
        self.setToolTip('Window for image')
    def setFrame(self):
        """Create a transparent sunken frame covering the label."""
        self.frame = QFrame(self)
        self.frame.setFrameStyle(QFrame.Box | QFrame.Sunken ) #Box, Panel | Sunken, Raised
        self.frame.setLineWidth(0)
        self.frame.setMidLineWidth(1)
        self.frame.setGeometry(self.rect())
        #self.frame.setVisible(False)
        self.frame.setStyleSheet('background: transparent;')
    def setStyle(self):
        """White background, expanding size policy, modest minimum size."""
        self .setStyleSheet(cp.styleWhite)
        #self.titControl .setStyleSheet(cp.styleTitle)
        #self.butFiles .setStyleSheet(cp.styleButton)
        #self.butLogger .setStyleSheet(cp.styleGreenish)
        #self.titControl .setAlignment(QtCore.Qt.AlignCenter)
        #self.setMinimumWidth(600)
        #self.setMinimumHeight(300)
        self.setMinimumSize(150, 150)
        self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        #self.setMaximumSize(900, 900)
    def resizeEvent(self, e):
        """Re-fit the frame and rescale the displayed pixmap to the new size."""
        s = self.size()
        self.frame.setGeometry(QtCore.QRect(0,0,s.width(),s.height()))
        self.setPixmapForImage()
        #self.update()
        #print('resizeEvent')
    def moveEvent(self, e):
        """Move handler; position tracking is currently disabled."""
        #logger.debug('moveEvent', self.name)
        #self.position = self.mapToGlobal(self.pos())
        #self.position = self.pos()
        #logger.debug('moveEvent - pos:' + str(self.position), self.name)
        pass
    def closeEvent(self, event):
        """No-op; kept as a hook for saving logs / closing child GUIs on exit."""
        #print('closeEvent')
        #logger.info('closeEvent', self.name)
        #if cp.res_save_log :
        #    logger.saveLogInFile ( fnm.log_file() )
        #    logger.saveLogTotalInFile( fnm.log_file_total() )
        #try : cp.guifiles.close()
        #except : pass
        pass
    def onExit(self):
        """Close the widget (intended as an application exit hook)."""
        #logger.debug('onExit', self.name)
        self.close()
#-----------------------------
    def mouseMoveEvent(self, e):
        """While dragging, keep the selection end point (poi2) under the cursor."""
        #print('mouseMoveEvent: x, y = %d, %d' % (e.pos().x(), e.pos().y()))
        self.poi2.setX(e.pos().x())
        self.poi2.setY(e.pos().y())
        #self.line.setLine( 0, 0, e.pos().x(), e.pos().y())
        #self.line.setP2(pos)
        #self.rect.setCoords( 5, 5, e.pos().x(), e.pos().y())
        #self.update()
    def mousePressEvent(self, e):
        """Middle button undoes the last zoom; any press (re)starts a selection.

        button() value 4 corresponds to the Qt middle mouse button.
        """
        if e.button() == 4 and len(self.o_pixmap_list)>0 : # Undo last zoom-in
            self.r_pixmap = self.o_pixmap_list.pop()
            self.setPixmapForImage()
        #else : self.o_pixmap_list = []
        # collapse the rubber band to the press point (poi1 == poi2)
        self.poi1.setX(e.pos().x())
        self.poi1.setY(e.pos().y())
        self.poi2.setX(e.pos().x())
        self.poi2.setY(e.pos().y())
        #print('mousePressEvent: e.x, e.y, e.button =', str(e.x()), str(e.y()), str(e.button()))
    def mouseReleaseEvent(self, e):
        """Finalize the selection at the release point and zoom into it."""
        self.poi2.setX(e.pos().x())
        self.poi2.setY(e.pos().y())
        #print('mouseReleaseEvent: e.x, e.y, e.button =', str(e.x()), str(e.y()), str(e.button()))
        self.zoomInImage()
def zoomInImage(self):
if self.r_pixmap == None:
self.resetRectPoints()
return
s_size = self.s_pixmap.size()
r_size = self.r_pixmap.size()
sw, sh = s_size.width(), s_size.height()
rw, rh = r_size.width(), r_size.height()
sclx, scly = float(rw)/sw, float(rh)/sh
#print('='*50)
#print('zoomInImage: s_size: w, h = %d, %d' % (sw, sh))
#print('zoomInImage: r_size: w, h = %d, %d' % (rw, rh))
p1x, p1y = self.poi1.x(), self.poi1.y()
p2x, p2y = self.poi2.x(), self.poi2.y()
self.resetRectPoints()
if p2x < 0 : p2x = 0
if p2y < 0 : p2y = 0
if p2x > sw : p2x = sw
if p2y > sh : p2y = sh
R=10
if abs(p2x-p1x) < R : return
if abs(p2y-p1y) < R : return
#print('zoomInImage: p1: x, y = %d, %d' % (p1x, p1y))
#print('zoomInImage: p2: x, y = %d, %d' % (p2x, p2y))
x1, y1, x2, y2 = int(p1x*sclx), int(p1y*scly), int(p2x*sclx), int(p2y*scly)
#print('zoomInImage: x1, y1, x2, y2 = %d, %d, %d, %d' % (x1, y1, x2, y2))
xmin, xmax = min(x1, x2), max(x1, x2)
ymin, ymax = min(y1, y2), max(y1, y2)
self.o_pixmap_list.append(self.r_pixmap)
self.r_pixmap = self.r_pixmap.copy(xmin, ymin, xmax-xmin, ymax-ymin)
self.setPixmapForImage()
# return and remove the oldest list, Keeps 10 latest images only.
if len(self.o_pixmap_list) > 10 : list = self.o_pixmap_list.pop(0)
def resetRectPoints(self):
self.poi1.setX(0)
self.poi1.setY(0)
self.poi2.setX(0)
self.poi2.setY(0)
    def paintEvent(self, e):
        """Let QLabel paint the pixmap, then overlay the rubber-band rectangle."""
        super(GUIImage,self).paintEvent(e)
        #self.counter+=1
        #print(self.counter)
        #qp = QtGui.QPainter()
        qp = self.qp
        qp.begin(self)
        #self.drawPixmap(qp)
        self.drawRect(qp)
        qp.end()
        # NOTE(review): calling update() from inside paintEvent schedules another
        # repaint immediately — this looks like a continuous repaint loop; confirm intended.
        self.update()
    def setPen(self, qp):
        """Configure self.pen as a thin dashed pen.

        NOTE(review): drawRect() uses self.pen1/self.pen2, not self.pen, so this
        method appears unused by the paint path — verify before removing.
        """
        self.pen.setStyle(QtCore.Qt.DashLine)
        self.pen.setWidthF(1)
def drawRect(self, qp):
if self.r_pixmap == None:
return
p1x, p1y = self.poi1.x(), self.poi1.y()
p2x, p2y = self.poi2.x(), self.poi2.y()
R=1
if abs(p2x-p1x) < R : return
if abs(p2y-p1y) < R : return
self.rect1.setCoords( p1x, p1y, p2x, p2y)
self.rect2.setCoords( p1x+1, p1y+1, p2x-1, p2y-1)
qp.setPen (self.pen1)
qp.drawRect(self.rect1);
qp.setPen (self.pen2)
qp.drawRect(self.rect2);
def drawPixmap(self, qp):
if self.r_pixmap != None:
qp.drawPixmap(0,0,self.s_pixmap)
#-----------------------------
#def mousePressEvent(self, event):
# print('event.x, event.y, event.button =', str(event.x()), str(event.y()), str(event.button()))
#def mouseReleaseEvent(self, event):
# print('event.x, event.y, event.button =', str(event.x()), str(event.y()), str(event.button()))
#http://doc.qt.nokia.com/4.6/qt.html#Key-enum
def keyPressEvent(self, event):
#print('event.key() = %s' % (event.key()))
if event.key() == QtCore.Qt.Key_Escape:
#self.close()
self.SHowIsOn = False
pass
if event.key() == QtCore.Qt.Key_B:
#print('event.key() = %s' % (QtCore.Qt.Key_B))
pass
if event.key() == QtCore.Qt.Key_Return:
#print('event.key() = Return')
pass
if event.key() == QtCore.Qt.Key_Home:
#print('event.key() = Home')
pass
#-----------------------------
# In case someone decides to run this module
#
def test_GUIImage():
    """Manual smoke test: create the widget, grab the screen, run the event loop."""
    app = QApplication(sys.argv)
    ex = GUIImage()
    ex.grabEntireWindow()
    ex.show()
    app.exec_()
#-----------------------------
#-----------------------------
#-----------------------------
#-----------------------------
class LocalParameter () :
"""This helper class allows to access local parameters through the reference in the list."""
_val=None
    def __init__ ( self, val=None ) :
        """Store the initial value in the private holder."""
        self._val = val
def setValue ( self, val ) : | |
# herders/models.py
import uuid
from collections import OrderedDict
from math import floor, ceil
from django.contrib.auth.models import User
from django.contrib.postgres.fields import ArrayField, JSONField
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Q, Count
from django.utils.safestring import mark_safe
from timezone_field import TimeZoneField
from bestiary.models import Monster, Building, Level, Rune, RuneCraft
# Individual user/monster collection models
class Summoner(models.Model):
    """Game-account profile attached one-to-one to a Django auth user."""
    SERVER_GLOBAL = 0
    SERVER_EUROPE = 1
    SERVER_ASIA = 2
    SERVER_KOREA = 3
    SERVER_JAPAN = 4
    SERVER_CHINA = 5
    SERVER_CHOICES = [
        (SERVER_GLOBAL, 'Global'),
        (SERVER_EUROPE, 'Europe'),
        (SERVER_ASIA, 'Asia'),
        (SERVER_KOREA, 'Korea'),
        (SERVER_JAPAN, 'Japan'),
        (SERVER_CHINA, 'China'),
    ]
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    summoner_name = models.CharField(max_length=256, null=True, blank=True)
    com2us_id = models.BigIntegerField(default=None, null=True, blank=True)
    server = models.IntegerField(choices=SERVER_CHOICES, default=SERVER_GLOBAL, null=True, blank=True)
    following = models.ManyToManyField("self", related_name='followed_by', symmetrical=False)
    public = models.BooleanField(default=False, blank=True)
    timezone = TimeZoneField(default='America/Los_Angeles')
    notes = models.TextField(null=True, blank=True)
    preferences = JSONField(default=dict)
    last_update = models.DateTimeField(auto_now=True)

    def get_rune_counts(self):
        """Return {rune type display name: number owned} for this summoner.

        NOTE(review): issues one COUNT query per rune type; if this becomes hot,
        a single annotated queryset would do it in one query.
        """
        counts = {}
        for type_id, type_name in RuneInstance.TYPE_CHOICES:
            counts[type_name] = RuneInstance.objects.filter(owner=self, type=type_id).count()
        return counts

    def save(self, *args, **kwargs):
        """Save the profile and lazily create the 1:1 Storage row on first save."""
        super(Summoner, self).save(*args, **kwargs)
        if not hasattr(self, 'storage'):
            # Fix: objects.create() already persists the row; the explicit
            # .save() the old code performed afterwards was a redundant write.
            Storage.objects.create(owner=self)

    def __str__(self):
        return self.user.username
def _default_storage_data():
return [0, 0, 0]
class Storage(models.Model):
    """Per-summoner inventory of elemental essences and crafting materials."""
    ESSENCE_LOW = 0
    ESSENCE_MID = 1
    ESSENCE_HIGH = 2
    ESSENCE_SIZES = [
        (ESSENCE_LOW, 'Low'),
        (ESSENCE_MID, 'Mid'),
        (ESSENCE_HIGH, 'High'),
    ]
    # Field-name registries used for bulk clamping in save()
    ESSENCE_FIELDS = ['magic_essence', 'fire_essence', 'water_essence', 'wind_essence', 'light_essence', 'dark_essence']
    # 'symbol_transcendance' spelling kept as-is: it is an existing DB column name
    CRAFT_FIELDS = ['wood', 'leather', 'rock', 'ore', 'mithril', 'cloth', 'rune_piece', 'dust', 'symbol_harmony', 'symbol_transcendance', 'symbol_chaos', 'crystal_water', 'crystal_fire', 'crystal_wind', 'crystal_light', 'crystal_dark', 'crystal_magic', 'crystal_pure']
    owner = models.OneToOneField(Summoner, on_delete=models.CASCADE)
    # Elemental Essences: [low, mid, high] quantities per element
    magic_essence = ArrayField(models.IntegerField(default=0), size=3, default=_default_storage_data, help_text='Magic Essence')
    fire_essence = ArrayField(models.IntegerField(default=0), size=3, default=_default_storage_data, help_text='Fire Essence')
    water_essence = ArrayField(models.IntegerField(default=0), size=3, default=_default_storage_data, help_text='Water Essence')
    wind_essence = ArrayField(models.IntegerField(default=0), size=3, default=_default_storage_data, help_text='Wind Essence')
    light_essence = ArrayField(models.IntegerField(default=0), size=3, default=_default_storage_data, help_text='Light Essence')
    dark_essence = ArrayField(models.IntegerField(default=0), size=3, default=_default_storage_data, help_text='Dark Essence')
    # Crafting materials
    wood = models.IntegerField(default=0, help_text='Hard Wood')
    leather = models.IntegerField(default=0, help_text='Tough Leather')
    rock = models.IntegerField(default=0, help_text='Solid Rock')
    ore = models.IntegerField(default=0, help_text='Solid Iron Ore')
    mithril = models.IntegerField(default=0, help_text='Shining Mythril')
    cloth = models.IntegerField(default=0, help_text='Thick Cloth')
    rune_piece = models.IntegerField(default=0, help_text='Rune Piece')
    dust = models.IntegerField(default=0, help_text='Magic Dust')
    symbol_harmony = models.IntegerField(default=0, help_text='Symbol of Harmony')
    symbol_transcendance = models.IntegerField(default=0, help_text='Symbol of Transcendance')
    symbol_chaos = models.IntegerField(default=0, help_text='Symbol of Chaos')
    crystal_water = models.IntegerField(default=0, help_text='Frozen Water Crystal')
    crystal_fire = models.IntegerField(default=0, help_text='Flaming Fire Crystal')
    crystal_wind = models.IntegerField(default=0, help_text='Whirling Wind Crystal')
    crystal_light = models.IntegerField(default=0, help_text='Shiny Light Crystal')
    crystal_dark = models.IntegerField(default=0, help_text='Pitch-black Dark Crystal')
    crystal_magic = models.IntegerField(default=0, help_text='Condensed Magic Crystal')
    crystal_pure = models.IntegerField(default=0, help_text='Pure Magic Crystal')

    def get_storage(self):
        """Nested OrderedDict {element: {'low'/'mid'/'high': qty}} for display."""
        storage = OrderedDict()
        for element in ('magic', 'fire', 'water', 'wind', 'light', 'dark'):
            essence = getattr(self, element + '_essence')
            sizes = OrderedDict()
            sizes['low'] = essence[Storage.ESSENCE_LOW]
            sizes['mid'] = essence[Storage.ESSENCE_MID]
            sizes['high'] = essence[Storage.ESSENCE_HIGH]
            storage[element] = sizes
        return storage

    @staticmethod
    def _min_zero(x):
        """Clamp a single quantity to zero or above."""
        return max(x, 0)

    def save(self, *args, **kwargs):
        """Clamp every stored quantity to >= 0 before persisting.

        Bug fix: the old hand-written clamping covered magic/fire/wind/light/dark
        but silently skipped water_essence; iterating the declared field lists
        covers all essences and all craft materials.
        """
        for field_name in self.ESSENCE_FIELDS:
            setattr(self, field_name, list(map(self._min_zero, getattr(self, field_name))))
        for field_name in self.CRAFT_FIELDS:
            setattr(self, field_name, max(getattr(self, field_name), 0))
        super(Storage, self).save(*args, **kwargs)
class MonsterTag(models.Model):
    """User-defined label that can be attached to monster instances."""
    name = models.CharField(max_length=100)
    class Meta:
        ordering = ['name']
    def __str__(self):
        # mark_safe: the tag name is rendered as-is in templates
        return mark_safe(self.name)
class MonsterInstance(models.Model):
PRIORITY_DONE = 0
PRIORITY_LOW = 1
PRIORITY_MED = 2
PRIORITY_HIGH = 3
PRIORITY_CHOICES = [
(PRIORITY_LOW, 'Low'),
(PRIORITY_MED, 'Medium'),
(PRIORITY_HIGH, 'High'),
]
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
owner = models.ForeignKey(Summoner, on_delete=models.CASCADE)
monster = models.ForeignKey(Monster, on_delete=models.CASCADE)
com2us_id = models.BigIntegerField(blank=True, null=True)
created = models.DateTimeField(blank=True, null=True)
stars = models.IntegerField()
level = models.IntegerField()
skill_1_level = models.IntegerField(blank=True, default=1)
skill_2_level = models.IntegerField(blank=True, default=1)
skill_3_level = models.IntegerField(blank=True, default=1)
skill_4_level = models.IntegerField(blank=True, default=1)
fodder = models.BooleanField(default=False)
in_storage = models.BooleanField(default=False)
ignore_for_fusion = models.BooleanField(default=False)
priority = models.IntegerField(choices=PRIORITY_CHOICES, blank=True, null=True)
tags = models.ManyToManyField(MonsterTag, blank=True)
notes = models.TextField(null=True, blank=True, help_text=mark_safe('<a href="https://daringfireball.net/projects/markdown/syntax" target="_blank">Markdown syntax</a> enabled'))
custom_name = models.CharField(default='', max_length=20, blank=True)
# Calculated fields (on save)
base_hp = models.IntegerField(blank=True, default=0)
rune_hp = models.IntegerField(blank=True, default=0)
base_attack = models.IntegerField(blank=True, default=0)
rune_attack = models.IntegerField(blank=True, default=0)
base_defense = models.IntegerField(blank=True, default=0)
rune_defense = models.IntegerField(blank=True, default=0)
base_speed = models.IntegerField(blank=True, default=0)
rune_speed = models.IntegerField(blank=True, default=0)
base_crit_rate = models.IntegerField(blank=True, default=0)
rune_crit_rate = models.IntegerField(blank=True, default=0)
base_crit_damage = models.IntegerField(blank=True, default=0)
rune_crit_damage = models.IntegerField(blank=True, default=0)
base_resistance = models.IntegerField(blank=True, default=0)
rune_resistance = models.IntegerField(blank=True, default=0)
base_accuracy = models.IntegerField(blank=True, default=0)
rune_accuracy = models.IntegerField(blank=True, default=0)
avg_rune_efficiency = models.FloatField(blank=True, null=True)
class Meta:
ordering = ['-stars', '-level', 'monster__name']
    def is_max_level(self):
        """True when the level equals the cap for the current star rating."""
        return self.level == self.monster.max_level_from_stars(self.stars)
    def max_level_from_stars(self):
        """Level cap for the current star rating (delegates to the base monster)."""
        return self.monster.max_level_from_stars(self.stars)
def skill_ups_to_max(self):
skill_ups_remaining = self.monster.skill_ups_to_max or 0
skill_levels = [self.skill_1_level, self.skill_2_level, self.skill_3_level, self.skill_4_level]
for idx in range(0, self.monster.skills.count()):
skill_ups_remaining -= skill_levels[idx] - 1
return skill_ups_remaining
def get_rune_set_summary(self):
sets = []
# Determine rune sets
rune_counts = self.runeinstance_set.values('type').order_by().annotate(count=Count('type'))
num_equipped = self.runeinstance_set.count()
for rune_count in rune_counts:
type_name = RuneInstance.TYPE_CHOICES[rune_count['type'] - 1][1]
required = RuneInstance.RUNE_SET_COUNT_REQUIREMENTS[rune_count['type']]
present = rune_count['count']
if present >= required:
num_equipped -= required * (present // required)
sets += [type_name] * (present // required)
if num_equipped:
# Some runes are present that aren't in a set
sets.append('Broken')
# Summarize slot 2/4/6 main stats
stats = []
for x in [2, 4, 6]:
try:
stats.append(self.runeinstance_set.get(slot=x).get_main_stat_display())
except:
continue
return '/'.join(sets) + ' - ' + '/'.join(stats)
    def get_rune_set_bonuses(self):
        """List of human-readable set-bonus strings, one per completed rune set."""
        rune_counts = self.runeinstance_set.values('type').order_by().annotate(count=Count('type'))
        rune_bonuses = []
        for rune_count in rune_counts:
            # TYPE_CHOICES is 1-indexed by rune type id
            type_name = RuneInstance.TYPE_CHOICES[rune_count['type'] - 1][1]
            required = RuneInstance.RUNE_SET_COUNT_REQUIREMENTS[rune_count['type']]
            present = rune_count['count']
            bonus_text = RuneInstance.RUNE_SET_BONUSES[rune_count['type']]['description']
            if present >= required:
                # one entry per complete set (e.g. a 2-piece set equipped twice counts twice)
                rune_bonuses.extend([type_name + ' ' + bonus_text] * (present // required))
        return rune_bonuses
    def get_avg_rune_efficiency(self):
        """Average rune efficiency over all 6 slots.

        Divides by 6 regardless of how many runes are equipped, so empty slots
        count as zero efficiency.
        """
        efficiencies = sum(self.runeinstance_set.filter(efficiency__isnull=False).values_list('efficiency', flat=True))
        return efficiencies / 6
# Stat callables. Base = monster's own stat. Rune = amount gained from runes. Stat by itself is combined total
    def calc_base_hp(self):
        """Monster's own HP at the current stars/level, without runes."""
        return self.monster.actual_hp(self.stars, self.level)
    def hp(self):
        """Total HP: cached base plus cached rune contribution."""
        return self.base_hp + self.rune_hp
    def calc_base_attack(self):
        """Monster's own attack at the current stars/level, without runes."""
        return self.monster.actual_attack(self.stars, self.level)
    def attack(self):
        """Total attack: cached base plus cached rune contribution."""
        return self.base_attack + self.rune_attack
    def calc_base_defense(self):
        """Monster's own defense at the current stars/level, without runes."""
        return self.monster.actual_defense(self.stars, self.level)
    def defense(self):
        """Total defense: cached base plus cached rune contribution."""
        return self.base_defense + self.rune_defense
    def calc_base_speed(self):
        """Monster's speed (level-independent on the base model)."""
        return self.monster.speed
    def speed(self):
        """Total speed: cached base plus cached rune contribution."""
        return self.base_speed + self.rune_speed
    def calc_base_crit_rate(self):
        """Monster's crit rate (level-independent on the base model)."""
        return self.monster.crit_rate
    def crit_rate(self):
        """Total crit rate: cached base plus cached rune contribution."""
        return self.base_crit_rate + self.rune_crit_rate
    def calc_base_crit_damage(self):
        """Monster's crit damage (level-independent on the base model)."""
        return self.monster.crit_damage
    def crit_damage(self):
        """Total crit damage: cached base plus cached rune contribution."""
        return self.base_crit_damage + self.rune_crit_damage
    def calc_base_resistance(self):
        """Monster's resistance (level-independent on the base model)."""
        return self.monster.resistance
    def resistance(self):
        """Total resistance: cached base plus cached rune contribution."""
        return self.base_resistance + self.rune_resistance
    def calc_base_accuracy(self):
        """Monster's accuracy (level-independent on the base model)."""
        return self.monster.accuracy
    def accuracy(self):
        """Total accuracy: cached base plus cached rune contribution."""
        return self.base_accuracy + self.rune_accuracy
    def get_base_stats(self):
        """Map rune stat identifiers to the monster's cached base stat values.

        Flat and percent variants of a stat share the same base value; percent
        rune bonuses are computed against these bases.
        NOTE(review): STAT_ATK/STAT_ATK_PCT entries are absent from this map —
        presumably handled elsewhere; confirm this is intentional.
        """
        return {
            RuneInstance.STAT_HP: self.base_hp,
            RuneInstance.STAT_HP_PCT: self.base_hp,
            RuneInstance.STAT_DEF: self.base_defense,
            RuneInstance.STAT_DEF_PCT: self.base_defense,
            RuneInstance.STAT_SPD: self.base_speed,
            RuneInstance.STAT_CRIT_RATE_PCT: self.base_crit_rate,
            RuneInstance.STAT_CRIT_DMG_PCT: self.base_crit_damage,
            RuneInstance.STAT_RESIST_PCT: self.base_resistance,
            RuneInstance.STAT_ACCURACY_PCT: self.base_accuracy,
        }
    def get_max_level_stats(self):
        """HP/ATK/DEF projected at 6 stars level 40, with rune bonuses and the
        percent delta versus current totals.

        NOTE(review): the delta computation divides by self.hp()/attack()/defense()
        — assumes the cached current totals are non-zero; confirm save() always
        populates them first.
        """
        max_base_hp = self.monster.actual_hp(6, 40)
        max_base_atk = self.monster.actual_attack(6, 40)
        max_base_def = self.monster.actual_defense(6, 40)
        # rune bonuses recomputed as if the monster were at max level
        max_rune_stats = self.get_rune_stats(at_max_level=True)
        stats = {
            'base': {
                'hp': max_base_hp,
                'attack': max_base_atk,
                'defense': max_base_def,
            },
            'rune': {
                'hp': max_rune_stats[RuneInstance.STAT_HP] + max_rune_stats[RuneInstance.STAT_HP_PCT],
                'attack': max_rune_stats[RuneInstance.STAT_ATK] + max_rune_stats[RuneInstance.STAT_ATK_PCT],
                'defense': max_rune_stats[RuneInstance.STAT_DEF] + max_rune_stats[RuneInstance.STAT_DEF_PCT],
            },
        }
        # percent change of the projected totals relative to current totals
        stats['deltas'] = {
            'hp': int(round(float(stats['base']['hp'] + stats['rune']['hp']) / self.hp() * 100 - 100)),
            'attack': int(round(float(stats['base']['attack'] + stats['rune']['attack']) / self.attack() * 100 - 100)),
            'defense': int(round(float(stats['base']['defense'] + stats['rune']['defense']) / self.defense() * 100 - 100)),
        }
        return stats
    def get_building_stats(self, area=Building.AREA_GENERAL):
        """Stat bonuses from the owner's buildings in `area` applying to this monster.

        Element-specific buildings only count when they match the monster's
        element. Flat HP/ATK/DEF/SPD bonuses are a ceil'd percentage of the
        cached base stat; crit/resist/accuracy are returned as raw percents.
        """
        owned_bldgs = BuildingInstance.objects.filter(
            Q(building__element__isnull=True) | Q(building__element=self.monster.element),
            owner=self.owner,
            building__area=area,
        ).select_related('building')
        bonuses = {
            Building.STAT_HP: 0,
            Building.STAT_ATK: 0,
            Building.STAT_DEF: 0,
            Building.STAT_SPD: 0,
            Building.STAT_CRIT_RATE_PCT: 0,
            Building.STAT_CRIT_DMG_PCT: 0,
            Building.STAT_RESIST_PCT: 0,
            Building.STAT_ACCURACY_PCT: 0,
        }
        for b in owned_bldgs:
            # stat_bonus is indexed by building level; level 0 means not built
            if b.building.affected_stat in bonuses.keys() and b.level > 0:
                bonuses[b.building.affected_stat] += b.building.stat_bonus[b.level - 1]
        return {
            # round(..., 3) first to strip float noise, then ceil to a whole point
            'hp': int(ceil(round(self.base_hp * (bonuses[Building.STAT_HP] / 100.0), 3))),
            'attack': int(ceil(round(self.base_attack * (bonuses[Building.STAT_ATK] / 100.0), 3))),
            'defense': int(ceil(round(self.base_defense * (bonuses[Building.STAT_DEF] / 100.0), 3))),
            'speed': int(ceil(round(self.base_speed * (bonuses[Building.STAT_SPD] / 100.0), 3))),
            'crit_rate': bonuses[Building.STAT_CRIT_RATE_PCT],
            'crit_damage': bonuses[Building.STAT_CRIT_DMG_PCT],
            'resistance': bonuses[Building.STAT_RESIST_PCT],
            'accuracy': bonuses[Building.STAT_ACCURACY_PCT],
        }
    def get_guild_stats(self):
        """Building bonuses from the guild area."""
        return self.get_building_stats(Building.AREA_GUILD)
    def get_possible_skillups(self):
        """Collect the owner's skill-up resources for this monster.

        Returns Devilmon count, same-family monsters (excluding this instance),
        same-family monster pieces, and a 'none' flag when nothing is available.
        """
        devilmon = MonsterInstance.objects.filter(owner=self.owner, monster__name='Devilmon').count()
        family = MonsterInstance.objects.filter(owner=self.owner, monster__family_id=self.monster.family_id).exclude(pk=self.pk).order_by('ignore_for_fusion')
        pieces = MonsterPiece.objects.filter(owner=self.owner, monster__family_id=self.monster.family_id)
        return {
            'devilmon': devilmon,
            'family': family,
            'pieces': pieces,
            'none': devilmon + family.count() + pieces.count() == 0,
        }
def get_rune_stats(self, at_max_level=False):
if at_max_level:
base_stats = {
RuneInstance.STAT_HP: self.monster.actual_hp(6, 40),
RuneInstance.STAT_HP_PCT: self.monster.actual_hp(6, 40),
RuneInstance.STAT_ATK: self.monster.actual_attack(6, 40),
RuneInstance.STAT_ATK_PCT: self.monster.actual_attack(6, 40),
RuneInstance.STAT_DEF: self.monster.actual_defense(6, 40),
RuneInstance.STAT_DEF_PCT: self.monster.actual_defense(6, | |
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (c), Forschungszentrum Jülich GmbH, IAS-1/PGI-1, Germany. #
# All rights reserved. #
# This file is part of the Masci-tools package. #
# (Material science tools) #
# #
# The code is hosted on GitHub at https://github.com/judftteam/masci-tools. #
# For further information on the license, see the LICENSE.txt file. #
# For further information please visit http://judft.de/. #
# #
###############################################################################
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from numpy import array, sum, sqrt, log, abs, loadtxt, zeros_like, shape
from matplotlib.pyplot import plot, figure, subplot, show, ion, title, suptitle, legend, gca, ioff, axvline, gcf
import subprocess, sys, os, time
from six.moves import range
from builtins import str
print()
print(' ######## start script rms_tracker ########')
print(' please give input path and some options:')
print(' available options are: 0 = rms')
print(' 1 = total energy')
print(' 2 = charge neutrality')
print(' 3 = total moment')
print()
print(' if last given option is -1 then no refreshing is done and plots are only generated once')
print()
print(
" after the options there is the possibility to give the 'nodos' option which prevents the plotting of the dos if 'dos.atom*' files are there"
)
print()
print(' input might look like this:')
print(' for self refreshing plot of rms charge neutrality and moment: ./rms_tracker.py ./path/ 0 2 3')
print(' for non refreshing plot of rms and total energy without dos plot: ./rms_tracker.py ./path/ 0 1 -1 nodos')
print()
print(' default values: ./rms_tracker.py ./path/ 0 1 2 3')
# Parse command-line arguments: sys.argv[1] is a comma-separated list of run
# directories; the remaining tokens are plot options (0-3), an optional -1 to
# disable refreshing, and an optional trailing 'nodos' to suppress DOS plots.
if len(sys.argv) > 1:
    allpaths = sys.argv[1]
    # create list of paths from input
    allpath = []
    for path0 in allpaths.split(','):
        outfile_found = False
        # NOTE(review): 'file' shadows the builtin of the same name
        for file in ['out', 'out_kkr']:
            if file in os.listdir(path0):
                outfile_found = True
                outfile = file
        if outfile_found:
            # normalize '.' to an absolute path and ensure a trailing slash
            if path0 == '.' or path0 == './':
                path0 = os.getcwd()
            if path0[-1] != '/':
                path0 += '/'
            allpath.append(path0)
        else:
            print("WARNING: file 'out' not found in", path0)
    if allpath == []:
        print("no file 'out' found")
        sys.exit()
    if len(sys.argv) > 2:
        nopt = len(sys.argv[2:])
        opt = sys.argv[2:]
        # A final token longer than 2 chars (e.g. 'nodos') disables DOS plotting
        # and is stripped from the option list.
        # NOTE(review): 'path0' here is whatever the path loop left behind, so
        # DOS detection only inspects the LAST given path.
        if len(opt[-1]) <= 2:
            dos = 'dos.atom1' in os.listdir(path0)
        else:
            dos = False
            nopt = nopt - 1
            opt = opt[:-1]
        opt = [int(i) for i in opt]
        print(opt)
        # trailing -1 means: plot once, no self-refreshing
        if opt[-1] == -1:
            refresh = False
            nopt = nopt - 1
            opt = opt[:-1]
        else:
            refresh = True
    else:
        # defaults: all four quantities, refreshing enabled
        nopt = 4
        opt = [0, 1, 2, 3]
        refresh = True
        if len(sys.argv[-1]) <= 2:
            dos = 'dos.atom1' in os.listdir(path0)
        else:
            dos = False
else:
    print('Please give input path')
    sys.exit()
# consistency check and names
names = []
for i in opt:
    if i not in [0, 1, 2, 3]:
        print('Found non existing option:', i)
        sys.exit()
    else:
        if i == 0:
            names.append('rms error')
        elif i == 1:
            names.append('total energy')
        elif i == 2:
            names.append('neutrality')
        elif i == 3:
            names.append('total moment')
# print input
print()
print(' your input options:', opt, '; refresh=', refresh, '; DOS=', dos)
print(' path(s):', allpath)
print()
def read_rms_data(path0):
    """Grep convergence data (rms, neutrality, moment, total energy) from the
    KKR output file in ``path0`` and return them as arrays.

    Mutates the module-level ``opt``/``nopt``/``names`` lists when a requested
    quantity turns out to be unavailable in the output file.
    """
    global opt, nopt
    ### rms
    # NOTE(review): the bare 'except:' clauses below map ANY failure
    # (missing file, grep miss, malformed line) to "no data".
    try:
        f = subprocess.check_output('grep aver ' + path0 + outfile, shell=True).decode('utf-8').split('\n')
        # Fortran 'D' exponents are converted to 'e' before parsing
        rms = array([float((i.split()[-1]).replace('D', 'e').replace('\n', '')) for i in f if i != ''])
    except:
        rms = []
    ### charge neutrality
    try:
        f = subprocess.check_output('grep neutr ' + path0 + outfile, shell=True).decode('utf-8').split('\n')
        neut = array([float((i.split()[-1]).replace('D', 'e').replace('\n', '')) for i in f if i != ''])
    except:
        neut = []
    # check if neutrality info is available (not there in case of impurity code output)
    if len(neut) < len(rms) and 2 in opt:
        nopt = nopt - 1
        j = 0
        for i in range(nopt + 1):
            if opt[i] == 2:
                j = i
        print('removing option', opt.pop(j), names.pop(j))
    ### read total energy
    try:
        f = subprocess.check_output("grep 'TOTAL ENERGY' " + path0 + outfile, shell=True).decode('utf-8').split('\n')
        etot = array([float((i.split()[-1]).replace('D', 'e').replace('\n', '')) for i in f if i != ''])
    except:
        etot = []
    ### moment
    try:
        f = subprocess.check_output("grep 'L m' " + path0 + outfile, shell=True).decode('utf-8').split('\n')
        mom = array([float((i.split()[-1]).replace('D', 'e').replace('\n', '')) for i in f if i != ''])
    except:
        mom = []
    # check if moment info is available
    if len(mom) < len(rms) and 3 in opt:
        nopt = nopt - 1
        # NOTE(review): 'j' is only assigned when opt[i] == 3 matches; relies on
        # the '3 in opt' guard above to guarantee a match within range(nopt + 1).
        for i in range(nopt + 1):
            if opt[i] == 3:
                j = i
        print('removing option', opt.pop(j), names.pop(j))
    else:
        # moment data present: try to decompose into x/y/z components
        try:
            f = subprocess.check_output("grep 'ITERATION ' " + path0 + outfile, shell=True).decode('utf-8').split('\n')
            tmp = array(
                [float((i.split()[1]).replace('D', 'e').replace('\n', '')) for i in f if 'SCF' not in i and i != ''])
        except:
            tmp = []
        # NOTE(review): 'tmp != []' on a numpy array is an elementwise
        # comparison; its truth value can raise/behave unexpectedly — confirm.
        if tmp != [] and 'out_magneticmoments' in os.listdir(path0):
            it = int(tmp[-1])
            tmp = loadtxt(path0 + 'out_magneticmoments')
            # one row per atom per iteration; keep the first atom of each iteration
            ncls = len(tmp) // it
            tmp = tmp[::ncls, :3]
            tmpmomx = tmp[:, 0]
            tmpmomy = tmp[:, 1]
            tmpmomz = tmp[:, 2]
            tmpmom = mom
            mom = array([tmpmom, tmpmomx, tmpmomy, tmpmomz])
        else:
            # fall back to per-atom moments parsed from 'm_spin' blocks;
            # the number of atoms comes from NATYP (or NAEZ) in the inputcard
            inp = open(path0 + 'inputcard').readlines()
            natyp = -1
            for iline in range(len(inp)):
                if 'NATYP' in inp[iline]:
                    natyp = iline
            if natyp == -1:
                for iline in range(len(inp)):
                    if 'NAEZ' in inp[iline]:
                        natyp = iline
                natyp = int(inp[natyp].split('NAEZ=')[1])
            else:
                natyp = int(inp[natyp].split('NATYP=')[1])
            try:
                tmp2 = subprocess.check_output("grep 'm_spin' " + path0 + outfile + ' -A' + str(natyp),
                                               shell=True).decode('utf-8').replace('TOT', '').split('\n')
                # NOTE(review): tmp2 is a list, so "tmp2 != ''" is always True
                if tmp2 != '':
                    tmp2 = tmp2 #[-natyp-1:]
                    tmp2 = array([float(i.split()[2]) for i in tmp2 if i != '' and 'dn' not in i and i != '--'])
                    tmp2 = tmp2.reshape(-1, natyp)
                    mom = array([mom] + [tmp2[:, i] for i in range(len(tmp2[0]))])
            except:
                tmpmom, tmpmomx, tmpmomy, tmpmomz = mom, mom, mom, mom
                mom = array([tmpmom, tmpmomx, tmpmomy, tmpmomz])
    return rms, neut, mom, etot
# turn interactive mode on (needed for figures to appear immediately in while loop)
ion()
# per-path histories of each tracked quantity; istart flags the first pass per path
rms = [[] for i in range(len(allpath))]
neut = [[] for i in range(len(allpath))]
etot = [[] for i in range(len(allpath))]
mom = [[] for i in range(len(allpath))]
istart = [True for i in range(len(allpath))]
while (True):
for ipath0 in range(len(allpath)):
path0 = allpath[ipath0]
l0 = len(rms[ipath0])
rms[ipath0], neut[ipath0], mom[ipath0], etot[ipath0] = read_rms_data(path0)
if len(rms[ipath0]) > l0:
### fig 1 for plots of values from option
figure(ipath0)
figure(ipath0).clf()
# give path name to figure
gcf().canvas.set_window_title(allpath[ipath0])
for i in range(nopt):
# choose data according to option
if opt[i] == 0:
data = rms[ipath0]
elif opt[i] == 1:
data = etot[ipath0]
elif opt[i] == 2:
data = neut[ipath0]
elif opt[i] == 3:
data = mom[ipath0]
# choose appropriate subplot according to number of options
if nopt == 1:
subplot(1, 2, 1 + i)
elif nopt == 2:
subplot(2, 2, 1 + i)
elif nopt == 3:
subplot(2, 3, 1 + i)
elif nopt == 4:
subplot(2, 4, 1 + i)
# plot data in linear scale
title(names[i])
if opt[i] == 3:
clrs = ['b', 'g', 'r', 'y', 'm', 'c', 'k']
for j in range(len(data) - 1, -1, -1):
plot(data[j], 'x--') #+clrs[j])
else:
plot(data, 'x--')
# substract reference for Etot and mom
if len(data) > 1 and opt[i] in [1, 3]:
if opt[i] == 3 and len(data[0]) > 1:
datanew = zeros_like(data)[:, :-1]
for j in range(len(data[:, 0])):
datanew[j, :] = (data[j, :] - data[j, -1])[:-1]
data = datanew
else:
data = (data - data[-1])[:-1]
# plot log scale if data is not always 0
if (abs(data).any() > 0):
if nopt == 1:
subplot(1, 2, 1 + i + nopt)
elif nopt == 2:
subplot(2, 2, 1 + i + nopt)
elif nopt == 3:
subplot(2, 3, 1 + i + nopt)
elif nopt == 4:
subplot(2, 4, 1 + i + nopt)
if opt[i] in [1, 3]:
name = 'change in ' + names[i]
else:
name = names[i] + ', log scale'
title(name)
if opt[i] == 3:
for j in range(len(data) - 1, -1, -1):
plot(abs(data[j]), 'x--') #+clrs[j])
else:
plot(abs(data), 'x--')
gca().set_yscale('log')
try:
t_iter = subprocess.check_output('grep Iter ' + path0 + 'out_timing.000.txt',
shell=True).decode('utf-8').split('\n')[-2].split()[-1]
t_iter = f'{float(t_iter) / 60:5.2f} min'
except:
t_iter = 'no time info for last iteration'
suptitle(time.ctime() + ', time in last iteration: ' + t_iter)
### fig 2 for DOS plots
if dos:
figure(ipath0 + 1000)
figure(ipath0 + 1000).clf()
j = 0
for i in os.listdir(path0):
if len(i) > 8 and 'dos.atom' == i[:8]:
tmpi = int(i.replace('dos.atom', ''))
| |
"""
helper.py
Copyright 2012 <NAME>
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from __future__ import print_function
import os
import re
import copy
import time
import pprint
import urllib2
import unittest
import tempfile
import httpretty
from functools import wraps
from nose.plugins.skip import SkipTest
from nose.plugins.attrib import attr
import w3af.core.data.kb.knowledge_base as kb
import w3af.core.controllers.output_manager as om
from w3af.core.controllers.w3afCore import w3afCore
from w3af.core.controllers.misc.homeDir import W3AF_LOCAL_PATH
from w3af.core.controllers.misc.decorators import retry
from w3af.core.data.fuzzer.utils import rand_alnum
from w3af.core.data.options.opt_factory import opt_factory
from w3af.core.data.options.option_types import URL_LIST
from w3af.core.data.options.option_list import OptionList
from w3af.core.data.parsers.doc.url import URL
from w3af.core.data.kb.read_shell import ReadShell
from w3af.core.data.kb.info_set import InfoSet
os.chdir(W3AF_LOCAL_PATH)
RE_COMPILE_TYPE = type(re.compile(''))
@attr('moth')
class PluginTest(unittest.TestCase):
    """
    Base class for w3af plugin integration tests.

    These tests can be configured using two environment variables:

        * HTTP_PROXY=127.0.0.1:8080 , route HTTP traffic through a local proxy
        * HTTPS_PROXY=127.0.0.1:8080 , route HTTPS traffic through a local proxy
        * DEBUG=1 , enable logging

    For example:

        HTTP_PROXY=127.0.0.1:8080 nosetests -s w3af/plugins/tests/infrastructure/test_allowed_methods.py

    Remember that nosetests can't find test generators in unittest.TestCase,
    http://stackoverflow.com/questions/6689537/nose-test-generators-inside-class
    """
    # Subclasses fill MOCK_RESPONSES to have httpretty serve canned HTTP
    # responses instead of hitting a live target
    MOCK_RESPONSES = []
    runconfig = {}
    kb = kb.kb
    # Subclasses must set these two when using MOCK_RESPONSES / _scan_assert
    target_url = None
    base_path = None

    def setUp(self):
        """Reset the KB, create a fresh w3af core and, when MOCK_RESPONSES
        is set, enable the httpretty HTTP mocking layer."""
        self.kb.cleanup()
        self.w3afcore = w3afCore()

        # Counters updated by __internal_request_callback, useful for
        # asserting how many mocked requests were served/matched
        self.request_callback_call_count = 0
        self.request_callback_match = 0

        if self.MOCK_RESPONSES:
            httpretty.reset()
            httpretty.enable()

            try:
                url = URL(self.target_url)
            except ValueError, ve:
                msg = ('When using MOCK_RESPONSES you need to set the'
                       ' target_url attribute to a valid URL, exception was:'
                       ' "%s".')
                raise Exception(msg % ve)

            domain = url.get_domain()
            proto = url.get_protocol()
            port = url.get_port()

            self._register_httpretty_uri(proto, domain, port)

    def _register_httpretty_uri(self, proto, domain, port):
        """Register a catch-all URI regex with httpretty for every HTTP
        method used by MOCK_RESPONSES, routed to the internal callback."""
        assert isinstance(port, int), 'Port needs to be an integer'

        # Default ports are omitted from the URL regex
        if (port == 80 and proto == 'http') or \
           (port == 443 and proto == 'https'):
            re_str = "%s://%s/(.*)" % (proto, domain)
        else:
            re_str = "%s://%s:%s/(.*)" % (proto, domain, port)

        all_methods = set(mock_resp.method for mock_resp in self.MOCK_RESPONSES)
        for http_method in all_methods:
            httpretty.register_uri(http_method,
                                   re.compile(re_str),
                                   body=self.__internal_request_callback)

    def tearDown(self):
        """Shutdown the core, clean the KB and disable HTTP mocking."""
        self.w3afcore.quit()
        self.kb.cleanup()

        # Fails the test if any InfoSet can't render its description
        self.assert_all_get_desc_work()

        if self.MOCK_RESPONSES:
            httpretty.disable()
            httpretty.reset()

    def assert_all_get_desc_work(self):
        """
        Since the InfoSet does some custom rendering at get_desc(), I want to
        make sure that any InfoSets render properly, some of my tests might not
        be calling it implicitly, so we call it here.
        """
        for info in self.kb.get_all_findings():
            if isinstance(info, InfoSet):
                info.get_desc()

    def assertAllVulnNamesEqual(self, vuln_name, vulns):
        """Assert `vulns` is non-empty and every item is named `vuln_name`."""
        if not vulns:
            self.assertTrue(False, 'No vulnerabilities found to match')

        for vuln in vulns:
            self.assertEqual(vuln.get_name(), vuln_name)

    def assertExpectedVulnsFound(self, expected, found_vulns):
        """Assert the (file name, token name) pairs of `found_vulns` match
        the `expected` set exactly."""
        found_tokens = [(v.get_url().get_file_name(),
                         v.get_token_name()) for v in found_vulns]

        self.assertEquals(
            set(found_tokens),
            set(expected)
        )

    def tokenize_kb_vulns(self):
        """Return a set of (name, path, token name) triples for every
        finding currently stored in the KB."""
        all_info = self.kb.get_all_findings()
        info_tokens = set()

        for info in all_info:
            # Both URL and token may legitimately be missing on an Info
            url = None if info.get_url() is None else info.get_url().get_path()
            token_name = None if info.get_token() is None else info.get_token_name()

            info_tokens.add((info.get_name(), url, token_name))

        return info_tokens

    def assertMostExpectedVulnsFound(self, expected, percentage=0.85):
        """
        Assert that at least :percentage: of the expected vulnerabilities were
        found during the current scan.
        """
        len_exp_found = len(expected.intersection(self.tokenize_kb_vulns()))
        found_perc = float(len_exp_found) / len(expected)
        # NOTE: strictly greater than, so percentage=1.0 can never pass
        self.assertGreater(found_perc, percentage)

    def assertAllExpectedVulnsFound(self, expected):
        """Assert the KB findings are exactly the `expected` token set."""
        self.assertEqual(expected, self.tokenize_kb_vulns())

    def assertAllURLsFound(self, expected):
        """Assert the path (+ query string) of every known fuzzable request
        matches `expected` exactly."""
        frs = self.kb.get_all_known_fuzzable_requests()

        found = []

        for fr in frs:
            uri = fr.get_uri()
            path = uri.get_path()
            qs = str(uri.get_querystring())

            if qs:
                data = path + '?' + qs
            else:
                data = path

            found.append(data)

        self.assertEquals(set(found),
                          set(expected))

    def __internal_request_callback(self, http_request, uri, headers):
        """httpretty body callback: serve the first MOCK_RESPONSES entry
        that matches the request, or a 404 when none does."""
        self.request_callback_call_count += 1

        match = None

        for mock_response in self.MOCK_RESPONSES:
            if mock_response.matches(http_request, uri, headers):
                match = mock_response
                break

        if match is not None:
            self.request_callback_match += 1

            fmt = (uri, match)
            om.out.debug('[request_callback] URI %s matched %s' % fmt)

            return match.get_response(http_request, uri, headers)
        else:
            om.out.debug('[request_callback] URI %s will return 404' % uri)
            # NOTE: MockResponse is defined elsewhere in this module
            return MockResponse.get_404(http_request, uri, headers)

    @retry(tries=3, delay=0.5, backoff=2)
    def _verify_targets_up(self, target_list):
        """Fail the test when any target URL is unreachable; 404/403/401
        responses still count as "up"."""
        msg = 'The target site "%s" is down: "%s"'

        for target in target_list:
            try:
                response = urllib2.urlopen(target.url_string)
                response.read()
            except urllib2.URLError, e:
                if hasattr(e, 'code'):
                    # pylint: disable=E1101
                    if e.code in (404, 403, 401):
                        continue
                    else:
                        no_code = 'Unexpected code %s' % e.code
                        self.assertTrue(False, msg % (target, no_code))
                    # pylint: enable=E1101

                self.assertTrue(False, msg % (target, e.reason))
            except Exception, e:
                self.assertTrue(False, msg % (target, e))

    def _scan(self, target, plugins, debug=False, assert_exceptions=True,
              verify_targets=True):
        """
        Setup env and start scan. Typically called from children's
        test methods.

        :param target: The target to scan (string or tuple of strings).
        :param plugins: PluginConfig objects to activate and setup before
                        the test runs.
        :param debug: Enable the text_file output plugin for this scan.
        :param assert_exceptions: Fail if the scan caught any exception.
        :param verify_targets: Ping the targets before scanning.
        """
        if not isinstance(target, (basestring, tuple)):
            raise TypeError('Expected basestring or tuple in scan target.')

        if isinstance(target, tuple):
            target = tuple([URL(u) for u in target])
        elif isinstance(target, basestring):
            target = (URL(target),)

        # No point in pinging a mocked target
        if verify_targets and not self.MOCK_RESPONSES:
            self._verify_targets_up(target)

        # create_target_option_list is defined elsewhere in this module
        target_opts = create_target_option_list(*target)
        self.w3afcore.target.set_options(target_opts)

        # Enable plugins to be tested
        for ptype, plugincfgs in plugins.items():
            self.w3afcore.plugins.set_plugins([p.name for p in plugincfgs],
                                              ptype)

            for pcfg in plugincfgs:
                if pcfg.name == 'all':
                    continue

                plugin_instance = self.w3afcore.plugins.get_plugin_inst(ptype,
                                                                        pcfg.name)
                default_option_list = plugin_instance.get_options()
                unit_test_options = pcfg.options

                # Fill in the options the test did not override with the
                # plugin defaults
                for option in default_option_list:
                    if option.get_name() not in unit_test_options:
                        unit_test_options.add(option)

                self.w3afcore.plugins.set_plugin_options(ptype, pcfg.name,
                                                         unit_test_options)

        # Enable text output plugin for debugging
        environ_debug = os.environ.get('DEBUG', '0') == '1'
        if debug or environ_debug:
            self._configure_debug()

        # Set a special user agent to be able to grep the logs and identify
        # requests sent by each test
        custom_test_agent = self.get_custom_agent()
        self.w3afcore.uri_opener.settings.set_user_agent(custom_test_agent)

        # Verify env and start the scan
        self.w3afcore.plugins.init_plugins()
        self.w3afcore.verify_environment()
        self.w3afcore.start()

        #
        # I want to make sure that we don't have *any hidden* exceptions in our
        # tests. This was in tearDown before, but moved here because I was
        # getting failed assertions in my test code that were because of
        # exceptions in the scan and they were hidden.
        #
        if assert_exceptions:
            caught_exceptions = self.w3afcore.exception_handler.get_all_exceptions()
            tracebacks = [e.get_details() for e in caught_exceptions]
            self.assertEqual(len(caught_exceptions), 0, tracebacks)

    def _scan_assert(self, config, expected_path_param, ok_to_miss,
                     kb_addresses, skip_startwith=(), debug=False):
        """Run a scan and assert that exactly `expected_path_param`
        (path, token) pairs were reported, tolerating `ok_to_miss` paths."""
        # Make sure the subclass is properly configured
        self.assertIsNotNone(self.target_url)
        self.assertIsNotNone(self.base_path)

        # Scan
        self._scan(self.target_url, config, debug=debug)

        # Get the results
        vulns = []
        for kb_address in kb_addresses:
            vulns.extend(self.kb.get(*kb_address))

        found_path_param = set()
        for vuln in vulns:
            path = vuln.get_url().get_path().replace(self.base_path, '')
            found_path_param.add((path, vuln.get_token_name()))

        self.assertEqual(expected_path_param, found_path_param)

        #
        # Now we assert the unknowns
        #
        all_known_urls = self.kb.get_all_known_urls()
        all_known_files = [u.get_path().replace(self.base_path, '') for u in all_known_urls]

        expected = [path for path, param in expected_path_param]

        missing = []

        for path in all_known_files:
            should_continue = False

            for skip_start in skip_startwith:
                if path.startswith(skip_start):
                    should_continue = True
                    break

            if should_continue:
                continue

            if path == u'':
                continue

            if path in ok_to_miss:
                continue

            if path in expected:
                # Already checked this one
                continue

            missing.append(path)

        missing.sort()
        self.assertEqual(missing, [])

    def get_custom_agent(self):
        """
        :return: The test agent for easier log grep
        """
        return 'Mozilla/4.0 (compatible; w3af.org; TestCase: %s)' % self.id()

    def _formatMessage(self, msg, standardMsg):
        """Honour the longMessage attribute when generating failure messages.
        If longMessage is False this means:
        * Use only an explicit message if it is provided
        * Otherwise use the standard message for the assert

        If longMessage is True:
        * Use the standard message
        * If an explicit message is provided, plus ' : ' and the explicit
          message
        """
        if msg:
            data = '%s:\n%s' % (standardMsg, pprint.pformat(msg))
            return data.replace('\\n', '\n')
        return standardMsg

    def _configure_debug(self):
        """
        Configure debugging for the scans to be run.
        """
        ptype = 'output'
        pname = 'text_file'

        enabled_output = self.w3afcore.plugins.get_enabled_plugins(ptype)
        enabled_output += [pname]
        self.w3afcore.plugins.set_plugins(enabled_output, ptype)

        # Now we configure the output file to point to CircleCI's artifact
        # directory (when run on circle) and /tmp/ when run on our
        # workstation
        output_dir = os.environ.get('CIRCLE_ARTIFACTS', tempfile.gettempdir())
        rnd = rand_alnum(6)
        text_output = os.path.join(output_dir, 'output-%s.txt' % rnd)
        http_output = os.path.join(output_dir, 'output-http-%s.txt' % rnd)

        text_file_inst = self.w3afcore.plugins.get_plugin_inst(ptype, pname)

        default_opts = text_file_inst.get_options()
        default_opts['output_file'].set_value(text_output)
        default_opts['http_output_file'].set_value(http_output)
        default_opts['verbose'].set_value(True)

        print('Logging to %s' % text_output)

        self.w3afcore.plugins.set_plugin_options(ptype, pname, default_opts)
class PluginConfig(object):
BOOL = 'boolean'
STR = 'string'
LIST = 'list'
INT = 'integer'
URL = 'url'
INPUT_FILE = 'input_file'
QUERY_STRING = 'query_string'
HEADER = 'header'
def __init__(self, name, *opts):
self._name = name
self._options = OptionList()
for optname, optval, optty in opts:
self._options.append(opt_factory(optname, str(optval), '', optty))
@property
| |
Collision: {}'.format(
arm, rew, col))
if col == 0:
# We care about reward only before starting exploiting/exploring or if we
# are the leader
self.arms_nselected[arm - 1] += 1
self.arms_reward[arm - 1] += rew
self.arms_emp_avg_reward[
arm -
1] = self.arms_reward[arm - 1] / self.arms_nselected[arm - 1]
if self.cdpe and not self.is_leader:
if not self.leader or not self.players:
self.players, self.leader = self.cdpe.centralized()
# If the leader is not present, just update all the other players also
if not self.leader:
self.backlog[0][arm - 1] += 1
self.backlog[1][arm - 1] += rew
else:
if not self.backlog_check:
self.backlog_check = True
for i in range(self.K):
self.leader.arms_nselected[i] += self.backlog[0][i]
self.leader.arms_reward[i] += self.backlog[1][i]
if self.leader.arms_nselected[i] > 0:
self.leader.arms_emp_avg_reward[
i] = self.leader.arms_reward[
i] / self.leader.arms_nselected[i]
self.leader.arms_nselected[arm - 1] += 1
self.leader.arms_reward[arm - 1] += rew
self.leader.arms_emp_avg_reward[
arm - 1] = self.leader.arms_reward[
arm - 1] / self.leader.arms_nselected[arm - 1]
def _kl_index_update(self):
# KL Indexes update
if not self.disable_comp:
f_t = np.log(
self.state_round) + 4 * np.log(np.log(self.state_round))
self._add_log(
'Starting update of KL-UCB idxs, f(t): {}'.format(f_t))
for k in range(self.K):
if self.arms_nselected[k] > 0:
f_t_k = f_t / self.arms_nselected[k]
self.arms_kl_index[k] = klucbBern(
self.arms_emp_avg_reward[k], f_t_k)
else:
self.arms_kl_index[k] = np.inf
self._add_log('Updating KL-UCB index for arm {}. b: {}'.format(
k, self.arms_kl_index[k]))
def _B_update(self, arms):
# Update set of arms to explore
if not self.disable_comp:
if not self.dpe2:
self.arms_to_explore = []
for ra in arms:
if self.arms_kl_index[
ra - 1] >= self.best_arms_min_emp_reward[1]:
self.arms_to_explore.append(ra)
else:
self.arms_to_explore = [[] for i in range(self.J)]
for s in range(self.J):
for ra in arms:
if (s - 1) * self.J <= ra and ra < self.J * s:
if self.arms_kl_index[
ra -
1] >= self.best_arms_min_emp_reward[1]:
self.arms_to_explore[s].append(ra)
def _update_best_arms_min_emp_rew(self):
# Used to obtain mu_Mhat
if not self.disable_comp:
self.best_arms_min_emp_reward = [1, np.inf]
for arm in self.best_arms_set:
if self.arms_emp_avg_reward[
arm - 1] < self.best_arms_min_emp_reward[1]:
self.best_arms_min_emp_reward[0] = arm
self.best_arms_min_emp_reward[
1] = self.arms_emp_avg_reward[arm - 1]
    def _best_arms_update(self):
        """
        Recompute the estimated best-arm set from the empirical average
        rewards and derive the arms to remove/add w.r.t. the previous set.

        :return: True when the set changed and we are NOT in the
                 centralized (cdpe) setting, i.e. a communication phase
                 must be started; False otherwise.
        """
        if not self.disable_comp:
            M_minus = set(self.best_arms_set)
            # Get a list of the sorted arms according to the empirical avg reward
            sorted_arms = np.argsort(self.arms_emp_avg_reward)
            # The new set of best arms has the first M elements of the list of sorted arms
            new_best_arms = [i + 1 for i in sorted_arms[-self.num_players:]]
            M_plus = set(new_best_arms)
            # Compute arms to be removed/added
            self.arms_to_remove = list(M_minus - M_plus)
            self.arms_to_add = list(M_plus - M_minus)
            L = len(self.arms_to_remove)
            # Both sets have size M, so removals and additions must pair up
            if L != len(self.arms_to_add):
                self._add_log(
                    'Arms to remove and arms to add have different size!'
                    'To remove: {} - To add: {}'.format(
                        self.arms_to_remove, self.arms_to_add))
                raise Exception(
                    'Arms to remove and arms to add have different size!'
                    'To remove: {} - To add: {}'.format(
                        self.arms_to_remove, self.arms_to_add))
            if L > 0:
                self.changes_in_best_arms_set.append((self.total_time, L))
                self._add_log(
                    'Leader: arms to remove {} - arms to add {}'.format(
                        self.arms_to_remove, self.arms_to_add))
                if self.cdpe:
                    # Centralized case: propagate the change right away
                    # instead of returning True to trigger communication
                    self._add_log(
                        'Leader is communicating immediately the change of arm (CENTRALIZED CASE).'
                    )
                    self._centralized_update()
            # Update set B with the arms that are NOT currently best
            self._B_update([i + 1 for i in sorted_arms[:-self.num_players]])
            return True if (L > 0 and not self.cdpe) else False
    def _centralized_update(self):
        """
        Leader-only: apply the pending best-arm swaps and push the new
        best-arm set to every player through the CDPE coordinator.

        Raises Exception when CDPE is disabled or when invoked by a
        follower.
        """
        if not self.disable_comp:
            if not self.cdpe:
                raise Exception(
                    'Tried to do a centralized update, but CDPE is not enabled!.'
                )
            if self.is_leader:
                # Swap each removed arm for an added one, in order, then
                # keep the set sorted
                while len(self.arms_to_remove) > 0:
                    ar = self.best_arms_set.index(self.arms_to_remove.pop(0))
                    self.best_arms_set[ar] = self.arms_to_add.pop(0)
                self.best_arms_set = np.sort(self.best_arms_set).tolist()
                # Lazily resolve the player list from the coordinator
                if not self.players:
                    self.players, _ = self.cdpe.centralized()
                # Broadcast the new best-arm set to every player
                for p in self.players:
                    p.best_arms_set = self.best_arms_set
                self._add_log('Centralized update finished')
            else:
                self._add_log('A follower tried to do a centralized update!')
                raise Exception('A follower tried to do a centralized update!')
def update(self, play, obs):
self.total_time += 1
arm = play + 1
rew, col = obs
self.total_collisions += col
if col > 0 and self.phase >= self.EXPLOIT:
self._add_log('Had a collision! Playing {}'.format(arm))
self._update_arm_stats(arm, rew, col)
# If we are in Sampling phase, we move to verification and
# assign state 'arm' if no collision happened
if self.phase == self.INIT_ORTHOG_SAMPLE:
self.phase = self.INIT_ORTHOG_VERIFICATION
self.collisions = 0
self.state_round = 1
if self.id == 0:
self.id = arm if col == 0 else 0
self._add_log('ID is: {}'.format(self.id))
# In verification phase we count the number of collisions we make
# If at the end we have no collisions we move to the next phase,
# otherwise we restart the process from the sampling phase
elif self.phase == self.INIT_ORTHOG_VERIFICATION:
self.collisions += col
if self.state_round == self.K:
if self.collisions == 0:
self.phase = self.INIT_RANK_ASSIGN
self.state_round = 1
self.collisions = 0
else:
self.phase = self.INIT_ORTHOG_SAMPLE
self.state_round = 1
self.collisions = 0
else:
self.state_round += 1
elif self.phase == self.INIT_RANK_ASSIGN:
# Increase number of collisions, this will be equal to M at the end of
# the round.
self.collisions += col
if self.state_round == 2 * self.id - 1:
self.relative_position = self.collisions + 1
self._add_log('Identified relative position: {}'.format(
self.relative_position))
if self.collisions == 0:
self.is_leader = True
self._add_log('We are leader')
self.state_round += 1
# Finished all the rounds, move to next phase
if self.state_round == 2 * self.K - 2:
# At the end of our block we check how many collisions we
# had. That number is the number of players
self.num_players = self.collisions + 1
self.collisions = 0
self._add_log('Identified number of players: {}'.format(
self.num_players))
self.best_arms_set = [i + 1 for i in range(self.num_players)]
# If we are the leader, we need to update the kl-indexes in order to understand
# which arms to explore
if self.is_leader:
self._kl_index_update()
self._B_update([
self.num_players + 1 + i
for i in range(self.K - self.num_players)
])
self.phase = self.EXPLOIT
elif self.phase == self.EXPLOIT:
self.state_round += 1
if self.is_leader:
# Check if we have to do update of the best arms
m = self.state_round % self.num_players
if self.dpe2:
j = ((self.state_round - m) / self.num_players % self.J)
m = m == j
else:
m = m == 0
if m:
self._kl_index_update()
if self._best_arms_update():
self.phase = self.COMMUNICATION
self.stats[self.START_OF_COMMUNICATION] += 1
self.t0 = self.state_round - 1
self.t1 = self.t0 + self.num_players - 1
self.t2 = self.t1 + self.num_players
self.t3 = self.t2 + self.K
self._add_log(
'Leader started communication: t0 {} t1 {} t2 {}'.
format(self.t0, self.t1, self.t2))
elif not self.is_leader and col > 0:
# Leader is starting communication
self.t0 = self.state_round - (
2 + self.num_players - self.relative_position)
self.t1 = self.t0 + self.num_players - 1
self.t2 = self.t1 + self.num_players
self.t3 = self.t2 + self.K
self._add_log(
'Leader started communication: t0 {} t1 {} t2 {}'.format(
self.t0, self.t1, self.t2))
self.phase = self.COMMUNICATION
elif not self.is_leader and self.cdpe and col > 0:
self._add_log(
'There has been a collision, even though this is a follower and we are in the centralized case!'
)
raise Exception(
'There has been a collision, even though this is a follower and we are in the centralized case!'
)
elif self.phase == self.COMMUNICATION:
if self.cdpe:
self._add_log(
'We switched to communication phase though this is the centralized case! Mrel: {}'
.format(self.relative_position))
raise Exception(
'We switched to communication phase though this is the centralized case! Mrel: {}'
.format(self.relative_position))
self.state_round += 1
if not self.is_leader and col > 0:
if self.state_round - 1 >= self.t1 and self.state_round - 1 <= self.t2:
self._add_log('Arm to remove: {}'.format(
self.is_leader, arm))
self.arms_to_remove = [arm]
if arm not in self.best_arms_set:
raise Exception('Arm to be removed not present!')
elif self.state_round - 1 > self.t2 and self.state_round - 1 <= self.t3:
self._add_log('Arm to add:{}'.format(self.is_leader, arm))
self.arms_to_add = [arm]
if self.state_round == self.t3 + 1:
if len(self.arms_to_remove) > 0:
ar = self.best_arms_set.index(self.arms_to_remove.pop(0))
self.best_arms_set[ar] = self.arms_to_add.pop(0)
self.best_arms_set = np.sort(self.best_arms_set).tolist()
self._add_log('Ended communication, new set of arms {}'.format(
self.best_arms_set))
if not self.is_leader:
self.phase = self.EXPLOIT
elif self.is_leader:
# Compute the min empirical average reward from the best arms
self._update_best_arms_min_emp_rew()
if len(self.arms_to_add) == 0:
self.phase = self.EXPLOIT
else:
# Leader is starting communication again
self.t0 = self.state_round - 1
self.t1 = self.t0 + self.num_players - 1
self.t2 = self.t1 + self.num_players
self.t3 = self.t2 + self.K
self._add_log(
'Leader started communication (again): t0 {} t1 {} t2 {}'
.format(self.t0, self.t1, self.t2))
self.phase = self.COMMUNICATION
self.stats[self.START_OF_COMMUNICATION] += 1
class CDPE(object):
# Used for centralized coordination
def __init__(self):
self.players = None
self.leader = None
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements classes for detecting stars in an astronomical
image. The convention is that all star-finding classes are subclasses of
an abstract base class called ``StarFinderBase``. Each star-finding
class should define a method called ``find_stars`` that finds stars in
an image.
"""
import abc
import math
import warnings
from astropy.stats import gaussian_fwhm_to_sigma
from astropy.table import Table
from astropy.utils import lazyproperty
import numpy as np
from .core import find_peaks
from ..utils._convolution import _filter_data
from ..utils._moments import _moments, _moments_central
from ..utils.exceptions import NoDetectionsWarning
__all__ = ['StarFinderBase', 'DAOStarFinder', 'IRAFStarFinder']
class _StarFinderKernel:
    """
    Class to calculate a 2D Gaussian density enhancement kernel.

    The kernel has negative wings and sums to zero.  It is used by both
    `DAOStarFinder` and `IRAFStarFinder`.

    Parameters
    ----------
    fwhm : float
        The full-width half-maximum (FWHM) of the major axis of the
        Gaussian kernel in units of pixels.  Must be strictly positive.

    ratio : float, optional
        The ratio of the minor and major axis standard deviations of the
        Gaussian kernel.  ``ratio`` must be strictly positive and less
        than or equal to 1.0.  The default is 1.0 (i.e., a circular
        Gaussian kernel).

    theta : float, optional
        The position angle (in degrees) of the major axis of the
        Gaussian kernel, measured counter-clockwise from the positive x
        axis.

    sigma_radius : float, optional
        The truncation radius of the Gaussian kernel in units of sigma
        (standard deviation) [``1 sigma = FWHM /
        2.0*sqrt(2.0*log(2.0))``].  The default is 1.5.

    normalize_zerosum : bool, optional
        Whether to normalize the Gaussian kernel to have zero sum, The
        default is `True`, which generates a density-enhancement kernel.

    Notes
    -----
    The class attributes include the dimensions of the elliptical kernel
    and the coefficients of a 2D elliptical Gaussian function expressed
    as:

        ``f(x,y) = A * exp(-g(x,y))``

    where

        ``g(x,y) = a*(x-x0)**2 + 2*b*(x-x0)*(y-y0) + c*(y-y0)**2``

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Gaussian_function
    """

    def __init__(self, fwhm, ratio=1.0, theta=0.0, sigma_radius=1.5,
                 normalize_zerosum=True):
        # BUGFIX: the original check was ``fwhm < 0``, which let fwhm == 0
        # through even though the message says "positive"; a zero FWHM
        # yields xsigma == 0 and a division by zero below.
        if fwhm <= 0:
            raise ValueError('fwhm must be positive.')

        if ratio <= 0 or ratio > 1:
            raise ValueError('ratio must be positive and less or equal '
                             'than 1.')

        if sigma_radius <= 0:
            raise ValueError('sigma_radius must be positive.')

        self.fwhm = fwhm
        self.ratio = ratio
        self.theta = theta
        self.sigma_radius = sigma_radius
        self.xsigma = self.fwhm * gaussian_fwhm_to_sigma
        self.ysigma = self.xsigma * self.ratio

        theta_radians = np.deg2rad(self.theta)
        cost = np.cos(theta_radians)
        sint = np.sin(theta_radians)
        xsigma2 = self.xsigma**2
        ysigma2 = self.ysigma**2

        # Coefficients of the quadratic form g(x, y) above
        self.a = (cost**2 / (2.0 * xsigma2)) + (sint**2 / (2.0 * ysigma2))
        # CCW
        self.b = 0.5 * cost * sint * ((1.0 / xsigma2) - (1.0 / ysigma2))
        self.c = (sint**2 / (2.0 * xsigma2)) + (cost**2 / (2.0 * ysigma2))

        # find the extent of an ellipse with radius = sigma_radius*sigma;
        # solve for the horizontal and vertical tangents of an ellipse
        # defined by g(x,y) = f
        self.f = self.sigma_radius**2 / 2.0
        denom = (self.a * self.c) - self.b**2

        # nx and ny are always odd
        self.nx = 2 * int(max(2, math.sqrt(self.c * self.f / denom))) + 1
        self.ny = 2 * int(max(2, math.sqrt(self.a * self.f / denom))) + 1
        self.xc = self.xradius = self.nx // 2
        self.yc = self.yradius = self.ny // 2

        # define the kernel on a 2D grid
        yy, xx = np.mgrid[0:self.ny, 0:self.nx]
        self.circular_radius = np.sqrt((xx - self.xc)**2 + (yy - self.yc)**2)
        self.elliptical_radius = (self.a * (xx - self.xc)**2 +
                                  2.0 * self.b * (xx - self.xc) *
                                  (yy - self.yc) +
                                  self.c * (yy - self.yc)**2)

        # footprint: inside the truncation ellipse OR within 2 pixels of
        # the center (keeps a minimal core for very elongated kernels)
        self.mask = np.where(
            (self.elliptical_radius <= self.f) |
            (self.circular_radius <= 2.0), 1, 0).astype(int)
        self.npixels = self.mask.sum()

        # NOTE: the central (peak) pixel of gaussian_kernel has a value of 1.
        self.gaussian_kernel_unmasked = np.exp(-self.elliptical_radius)
        self.gaussian_kernel = self.gaussian_kernel_unmasked * self.mask

        # denom = variance * npixels
        denom = ((self.gaussian_kernel**2).sum() -
                 (self.gaussian_kernel.sum()**2 / self.npixels))
        self.relerr = 1.0 / np.sqrt(denom)

        # normalize the kernel to zero sum
        if normalize_zerosum:
            self.data = ((self.gaussian_kernel -
                          (self.gaussian_kernel.sum() / self.npixels)) /
                         denom) * self.mask
        else:
            self.data = self.gaussian_kernel

        self.shape = self.data.shape
class _StarCutout:
"""
Class to hold a 2D image cutout of a single star for the star finder
classes.
Parameters
----------
data : 2D array_like
The cutout 2D image from the input unconvolved 2D image.
convdata : 2D array_like
The cutout 2D image from the convolved 2D image.
slices : tuple of two slices
A tuple of two slices representing the minimal box of the cutout
from the original image.
xpeak, ypeak : float
The (x, y) pixel coordinates of the peak pixel.
kernel : `_StarFinderKernel`
The convolution kernel. The shape of the kernel must match that
of the input ``data``.
threshold_eff : float
The absolute image value above which to select sources. This
threshold should be the threshold value input to the star finder
class multiplied by the kernel relerr.
"""
def __init__(self, data, convdata, slices, xpeak, ypeak, kernel,
threshold_eff):
self.data = data
self.convdata = convdata
self.slices = slices
self.xpeak = xpeak
self.ypeak = ypeak
self.kernel = kernel
self.threshold_eff = threshold_eff
self.shape = data.shape
self.nx = self.shape[1] # always odd
self.ny = self.shape[0] # always odd
self.cutout_xcenter = int(self.nx // 2)
self.cutout_ycenter = int(self.ny // 2)
self.xorigin = self.slices[1].start # in original image
self.yorigin = self.slices[0].start # in original image
self.mask = kernel.mask # kernel mask
self.npixels = kernel.npixels # unmasked pixels
self.data_masked = self.data * self.mask
class _DAOFindProperties:
"""
Class to calculate the properties of each detected star, as defined
by `DAOFIND`_.
Parameters
----------
star_cutout : `_StarCutout`
A `_StarCutout` object containing the image cutout for the star.
kernel : `_StarFinderKernel`
The convolution kernel. The shape of the kernel must match that
of the input ``star_cutout``.
sky : float, optional
The local sky level around the source. ``sky`` is used only to
calculate the source peak value, flux, and magnitude. The
default is 0.
.. _DAOFIND: https://iraf.net/irafhelp.php?val=daofind
"""
def __init__(self, star_cutout, kernel, sky=0.):
if not isinstance(star_cutout, _StarCutout):
raise ValueError('data must be an _StarCutout object')
if star_cutout.data.shape != kernel.shape:
raise ValueError('cutout and kernel must have the same shape')
self.cutout = star_cutout
self.kernel = kernel
self.sky = sky # DAOFIND has no sky input -> same as sky=0.
self.data = star_cutout.data
self.data_masked = star_cutout.data_masked
self.npixels = star_cutout.npixels # unmasked pixels
self.nx = star_cutout.nx
self.ny = star_cutout.ny
self.xcenter = star_cutout.cutout_xcenter
self.ycenter = star_cutout.cutout_ycenter
    @lazyproperty
    def data_peak(self):
        """Value of the central (peak) pixel in the unconvolved cutout."""
        return self.data[self.ycenter, self.xcenter]
    @lazyproperty
    def conv_peak(self):
        """Value of the central (peak) pixel in the convolved cutout."""
        return self.cutout.convdata[self.ycenter, self.xcenter]
    @lazyproperty
    def roundness1(self):
        """
        Roundness statistic based on the 4-fold symmetry of the convolved
        cutout around the peak pixel.

        Returns 0. when the signed quadrant sum cancels exactly, and
        `None` when the total absolute flux (excluding the peak) is not
        positive.
        """
        # set the central (peak) pixel to zero
        cutout_conv = self.cutout.convdata.copy()
        cutout_conv[self.ycenter, self.xcenter] = 0.0  # for sum4

        # calculate the four roundness quadrants.
        # the cutout size always matches the kernel size, which have odd
        # dimensions.
        # quad1 = bottom right
        # quad2 = bottom left
        # quad3 = top left
        # quad4 = top right
        # 3 3 4 4 4
        # 3 3 4 4 4
        # 3 3 x 1 1
        # 2 2 2 1 1
        # 2 2 2 1 1
        quad1 = cutout_conv[0:self.ycenter + 1, self.xcenter + 1:]
        quad2 = cutout_conv[0:self.ycenter, 0:self.xcenter + 1]
        quad3 = cutout_conv[self.ycenter:, 0:self.xcenter]
        quad4 = cutout_conv[self.ycenter + 1:, self.xcenter:]

        # alternating-sign sum: vanishes for a 4-fold symmetric source
        sum2 = -quad1.sum() + quad2.sum() - quad3.sum() + quad4.sum()
        if sum2 == 0:
            return 0.

        sum4 = np.abs(cutout_conv).sum()
        if sum4 <= 0:
            return None

        return 2.0 * sum2 / sum4
    @lazyproperty
    def sharpness(self):
        """
        Peak pixel value minus the mean of the remaining (masked) pixels,
        normalized by the convolved peak value.
        """
        npixels = self.npixels - 1  # exclude the peak pixel
        data_mean = (np.sum(self.data_masked) - self.data_peak) / npixels

        return (self.data_peak - data_mean) / self.conv_peak
def daofind_marginal_fit(self, axis=0):
"""
Fit 1D Gaussians, defined from the marginal x/y kernel
distributions, to the marginal x/y distributions of the original
(unconvolved) image.
These fits are used calculate the star centroid and roundness
("GROUND") properties.
Parameters
----------
axis : {0, 1}, optional
The axis for which the marginal fit is performed:
* 0: for the x axis
* 1: for the y axis
Returns
-------
dx : float
The fractional shift in x or y (depending on ``axis`` value)
of the image centroid relative to the maximum pixel.
hx : float
The height of the best-fitting Gaussian to the marginal x or
y (depending on | |
tmp1.ppf = lambda x: np.where((0 <= x) & (x <= 1), 1, 0)
with pytest.raises(ValueError, match="cdf"):
Cont.from_rv(tmp1)
tmp2 = Tmp()
tmp2.cdf = lambda x: np.where((0 <= x) & (x <= 1), 1, 0)
with pytest.raises(ValueError, match="ppf"):
Cont.from_rv(tmp2)
    def test_from_rv_options(self):
        """Check `from_rv()` against the package options `small_prob`,
        `n_grid`, and `cdf_tolerance`."""
        norm = distrs.norm

        # Finite support detection and usage of `small_prob` option
        with config.context({"small_prob": 1e-6}):
            rv_norm = Cont.from_rv(norm)
            assert_array_almost_equal(
                rv_norm.support(), norm.ppf([1e-6, 1 - 1e-6]), decimal=DECIMAL
            )

        # Partially supplied support: only the missing edge is detected
        with config.context({"small_prob": 1e-6}):
            rv_norm_right = Cont.from_rv(norm, supp=(-1, None))
            assert_array_almost_equal(
                rv_norm_right.support(), [-1, norm.ppf(1 - 1e-6)], decimal=DECIMAL
            )

        with config.context({"small_prob": 1e-6}):
            rv_norm_left = Cont.from_rv(norm, supp=(None, 1))
            assert_array_almost_equal(
                rv_norm_left.support(), [norm.ppf(1e-6), 1], decimal=DECIMAL
            )

        # Usage of `n_grid` option
        with config.context({"n_grid": 11}):
            rv_norm_small = Cont.from_rv(norm)
            assert len(rv_norm_small.x) <= 20

        # Usage of `cdf_tolerance` option
        with config.context({"cdf_tolerance": 1e-4}):
            rv_norm_1 = Cont.from_rv(norm)
        with config.context({"cdf_tolerance": 1e-1}):
            rv_norm_2 = Cont.from_rv(norm)
        ## Increasing CDF tolerance should lead to decrease of density grid
        assert len(rv_norm_1.x) > len(rv_norm_2.x)
def test_from_sample_basic(self):
norm = distrs.norm()
rng = np.random.default_rng(101)
x = norm.rvs(100, random_state=rng)
rv = Cont.from_sample(x)
assert isinstance(rv, Cont)
    def test_from_sample_errors(self):
        """Input validation of `from_sample()`: numeric and 1d only."""
        with pytest.raises(TypeError, match="numpy array with float"):
            Cont.from_sample(["a"])

        with pytest.raises(ValueError, match="1d"):
            Cont.from_sample([[1], [2]])
    def test_from_sample_options(self):
        """Check `from_sample()` against the package options
        `estimator_cont`, `density_mincoverage`, `n_grid`, and
        `cdf_tolerance`."""
        norm = distrs.norm()
        rng = np.random.default_rng(101)
        x = norm.rvs(100, random_state=rng)

        # "estimator_cont"
        def uniform_estimator(x):
            # Returns a flat density over the sample range
            x_min, x_max = x.min(), x.max()

            def res(x):
                return np.where((x >= x_min) & (x <= x_max), 1 / (x_max - x_min), 0)

            return res

        with config.context({"estimator_cont": uniform_estimator}):
            rv = Cont.from_sample(x)
            assert len(rv.y) == 2
            assert np.allclose(rv.y, rv.y[0], atol=1e-13)

        # "estimator_cont" which returns allowed classes
        ## `Rand` class should be forwarded to `from_rv()` method
        _test_from_sample_rand(
            cls=Cont,
            sample=x,
            estimator_option="estimator_cont",
        )

        ## "Scipy" distribution should be forwarded to `Cont.from_rv()`
        rv_norm = distrs.norm()
        with config.context({"estimator_cont": lambda x: rv_norm}):
            rv = Cont.from_sample(np.asarray([0, 1, 2]))
            rv_ref = Cont.from_rv(rv_norm)
            _test_equal_rand(rv, rv_ref)

        # "density_mincoverage"
        with config.context({"density_mincoverage": 0.0}):
            rv = Cont.from_sample(x)
            ## With minimal density mincoverage output range should be equal to
            ## sample range
            assert_array_equal(rv.x[[0, -1]], [x.min(), x.max()])

        # "n_grid"
        with config.context({"n_grid": 11}):
            rv = Cont.from_sample(x)
            assert len(rv.x) <= 22

        # "cdf_tolerance"
        with config.context({"cdf_tolerance": 2.0}):
            rv = Cont.from_sample(x)
            ## With very high CDF tolerance downgridding should result into grid
            ## with three elements. That is because CDF is approximated with
            ## simplest quadratic spline with single segment. That requires three
            ## knots.
            assert len(rv.x) == 3
    @pytest.mark.slow
    def test_from_sample_single_value(self):
        """How well `from_sample()` handles single unique value in sample

        Main problem here is how density range is initialized during estimation.
        """
        zero_vec = np.zeros(10)

        # Default density estimator can't handle situation with single unique
        # sample value (gives `LinAlgError: singular matrix`).

        # Case when sample width is zero but density is not zero
        density_centered_interval = make_circ_density([(-1, 1)])
        with config.context({"estimator_cont": lambda x: density_centered_interval}):
            assert from_sample_cdf_max_error(zero_vec) <= 1e-4

        # Case when both sample width and density are zero
        density_shifted_interval = make_circ_density([(10, 20)])
        with config.context({"estimator_cont": lambda x: density_shifted_interval}):
            # Here currently the problem is that support is estimated way to
            # wide with very small (~1e-9) non-zero density outside of [10,
            # 20]. However, CDFs are still close.
            assert from_sample_cdf_max_error(zero_vec) <= 2e-4
def test_pdf(self):
    """`pdf()` of a piecewise-linear density: values, coercion, edge cases."""
    distr = Cont([0, 1, 3], [0.5, 0.5, 0])

    # Values at regular points
    points = np.array([-1, 0, 0.5, 1, 2, 3, 4])
    assert_array_equal(distr.pdf(points), np.array([0, 0.5, 0.5, 0.5, 0.25, 0, 0]))

    # Non-ndarray input should be coerced
    _test_input_coercion(distr.pdf, points)

    # Input just inside/outside the support edges
    near_edges = np.array([0 - 1e-10, 0 + 1e-10, 3 - 1e-10, 3 + 1e-10])
    assert_array_almost_equal(
        distr.pdf(near_edges), np.array([0, 0.5, 0.25e-10, 0]), decimal=DECIMAL
    )

    # Non-finite and missing input
    bad_input = np.array([-np.inf, np.nan, np.inf])
    assert_array_equal(distr.pdf(bad_input), np.array([0, np.nan, 0]))

    # Dirac-like random variable
    dirac = Cont([10 - h, 10, 10 + h], [0, 1, 0])
    near_spike = np.array([10 - h, 10 - 0.5e-8, 10, 10 + 0.5e-8, 10 + h])
    # Accuracy is of order of 10 due to extreme magnitudes of values
    assert_array_almost_equal(
        dirac.pdf(near_spike), np.array([0, 0.5e8, 1e8, 0.5e8, 0]), decimal=-1
    )

    # 2d input should be evaluated elementwise
    matrix_input = np.array([[-1, 0.5], [2, 4]])
    assert_array_equal(distr.pdf(matrix_input), np.array([[0.0, 0.5], [0.25, 0.0]]))

    # Scalar input
    _test_one_value_input(distr.pdf, 0.5)
    _test_one_value_input(distr.pdf, -1)
    _test_one_value_input(distr.pdf, np.nan)
def test_logpdf(self):
    """`logpdf()` should agree with the logarithm of `pdf()`."""
    distr = Cont([0, 1, 3], [0.5, 0.5, 0])
    _test_log_fun(distr.logpdf, distr.pdf, x_ref=[-1, 0.1, 3, np.inf, np.nan])
def test_pmf(self):
    """A continuous RV has no `pmf()`; the error should point to `pdf()`."""
    distr = Cont([0, 1, 3], [0.5, 0.5, 0])
    with pytest.raises(AttributeError, match=r"Use `pdf\(\)`"):
        distr.pmf(0)
def test_logpmf(self):
    """A continuous RV has no `logpmf()`; the error should point to `logpdf()`."""
    distr = Cont([0, 1, 3], [0.5, 0.5, 0])
    with pytest.raises(AttributeError, match=r"Use `logpdf\(\)`"):
        distr.logpmf(0)
def test_cdf(self):
    """`cdf()` of a triangular density: values, coercion, edge cases."""
    distr = Cont([0, 1, 2], [0, 1, 0])

    # Values at regular points
    points = np.array([-1, 0, 0.5, 1, 1.5, 2, 3])
    assert_array_equal(distr.cdf(points), np.array([0, 0, 0.125, 0.5, 0.875, 1, 1]))

    # Non-ndarray input should be coerced
    _test_input_coercion(distr.cdf, points)

    # Non-finite and missing input
    bad_input = np.array([-np.inf, np.nan, np.inf])
    assert_array_equal(distr.cdf(bad_input), np.array([0, np.nan, 1]))

    # Dirac-like random variable
    dirac = Cont([10 - h, 10, 10 + h], [0, 1, 0])
    near_spike = np.array([10 - h, 10 - 0.5e-8, 10, 10 + 0.5e-8, 10 + h])
    assert_array_almost_equal(
        dirac.cdf(near_spike), np.array([0, 0.125, 0.5, 0.875, 1]), decimal=DECIMAL
    )

    # 2d input should be evaluated elementwise
    matrix_input = np.array([[-1, 0.5], [2, 4]])
    assert_array_equal(distr.cdf(matrix_input), np.array([[0.0, 0.125], [1.0, 1.0]]))

    # Scalar input
    _test_one_value_input(distr.cdf, 0.5)
    _test_one_value_input(distr.cdf, -1)
    _test_one_value_input(distr.cdf, np.nan)
def test_logcdf(self):
    """`logcdf()` should agree with the logarithm of `cdf()`."""
    distr = Cont([0, 1, 3], [0.5, 0.5, 0])
    _test_log_fun(distr.logcdf, distr.cdf, x_ref=[-1, 0.1, 3, np.inf, np.nan])
def test_sf(self):
    """Survival function should equal one minus the CDF."""
    distr = Cont([0, 1, 3], [0.5, 0.5, 0])
    probe = [-1, 0.1, 3, np.inf, np.nan]
    assert_array_equal(distr.sf(probe), 1 - distr.cdf(probe))
def test_logsf(self):
    """`logsf()` should agree with the logarithm of `sf()`."""
    distr = Cont([0, 1, 3], [0.5, 0.5, 0])
    _test_log_fun(distr.logsf, distr.sf, x_ref=[-1, 0.1, 3, np.inf, np.nan])
def test_ppf(self):
    """`ppf()` should be inverse to `cdf()` for every sensible input."""
    distr = Cont([0, 1, 2], [0, 1, 0])

    # Values at regular probabilities
    probs = np.array([0, 0.125, 0.5, 0.875, 1])
    assert_array_equal(distr.ppf(probs), np.array([0, 0.5, 1, 1.5, 2]))

    # Non-ndarray input should be coerced
    _test_input_coercion(distr.ppf, probs)

    # Probabilities outside [0, 1] (and NaN) should all map to NaN
    bad_probs = np.array([-np.inf, -h, np.nan, 1 + h, np.inf])
    assert_array_equal(
        distr.ppf(bad_probs), np.array([np.nan, np.nan, np.nan, np.nan, np.nan])
    )

    # Dirac-like random variable
    dirac = Cont([10 - h, 10, 10 + h], [0, 1, 0])
    probs = np.array([0, 0.125, 0.5, 0.875, 1])
    assert_array_almost_equal(
        dirac.ppf(probs),
        np.array([10 - h, 10 - 0.5e-8, 10, 10 + 0.5e-8, 10 + h]),
        decimal=DECIMAL,
    )

    # 2d input should be evaluated elementwise
    matrix_probs = np.array([[0, 0.5], [0.0, 1.0]])
    assert_array_equal(distr.ppf(matrix_probs), np.array([[0.0, 1.0], [0.0, 2.0]]))

    # Scalar input
    _test_one_value_input(distr.ppf, 0.25)
    _test_one_value_input(distr.ppf, -1)
    _test_one_value_input(distr.ppf, np.nan)

    # Should return the smallest x-value in case of zero-density interval(s)
    flat_region = Cont([0, 1, 2, 3, 4, 5, 6], [0, 0.5, 0, 0, 0, 0.5, 0])
    assert flat_region.ppf(0.5) == 2
def test_isf(self):
    """`isf()` should be the inverse of the survival function."""
    distr = Cont([0, 1, 2], [0, 1, 0])
    probs = np.array([0, 0.125, 0.5, 0.875, 1])
    assert_array_equal(distr.sf(distr.isf(probs)), probs)
def test_rvs(self):
    """Sampling via `rvs()` should pass the shared RVS test helper."""
    distr = Cont([0, 1, 2], [0, 1, 0])
    _test_rvs_method(distr)
def test__cdf_spline(self):
    """The internal CDF spline should match the public `cdf()`."""
    distr = Cont([0, 1, 2], [0, 1, 0])
    probe = [-10, 0, 0.5, 1, 1.5, 2, 10]
    assert_array_equal(distr._cdf_spline(probe), distr.cdf(probe))
def test_integrate_cdf(self):
    """`integrate_cdf()` should agree with numeric quadrature of `cdf()`."""
    distr = Cont([0, 1, 2], [0, 1, 0])
    assert np.allclose(distr.integrate_cdf(-10, 10), quad(distr.cdf, -10, 10)[0])
def test_convert(self):
    """`convert()` should map Cont into each supported RV class."""
    import randomvars._boolean as bool
    import randomvars._discrete as disc
    import randomvars._mixture as mixt

    distr = Cont([0, 1, 2], [0, 1, 0])

    # No argument or `None` should return the object itself
    assert distr.convert() is distr
    assert distr.convert(None) is distr

    # To Bool: probability of `False` is 0, because the probability of a
    # continuous RV being exactly zero is 0
    as_bool = distr.convert("Bool")
    assert isinstance(as_bool, bool.Bool)
    assert as_bool.prob_true == 1.0

    # To own class: should return the object itself
    assert distr.convert("Cont") is distr

    # To Disc: discrete RV with the same `x` values as input's xy-grid
    as_disc = distr.convert("Disc")
    assert isinstance(as_disc, disc.Disc)
    assert_array_equal(as_disc.x, distr.x)

    # To Mixt: degenerate mixture with only the continuous component
    as_mixt = distr.convert("Mixt")
    assert isinstance(as_mixt, mixt.Mixt)
    assert as_mixt.cont is distr
    assert as_mixt.weight_cont == 1.0

    # Any other target class should result into error
    with pytest.raises(ValueError, match="one of"):
        distr.convert("aaa")
class TestFromRVAccuracy:
"""Accuracy of `Cont.from_rv()`"""
# Output of `from_rv()` should have CDF that differs from original CDF by
# no more than `thres`
@pytest.mark.slow
@pytest.mark.parametrize(
"distr_dict,thres",
[
(DISTRIBUTIONS_COMMON, 1e-4),
(DISTRIBUTIONS_INF_DENSITY, 1e-3),
(DISTRIBUTIONS_HEAVY_TAILS, 5e-3),
],
)
def test_cdf_maxerror(self, distr_dict, thres):
test_passed = {
name: TestFromRVAccuracy.from_rv_cdf_maxerror(distr) <= thres
| |
#if piece between src and dst, return false
return True
elif(dx==0 and dy != 0):
#move from source x to destination x, ignoring itself (hence the src[1] +- 1)
for y in range(src[0]+dyDir,dst[0],dyDir):
if(board[y][src[1]] != ""):
return False #if piece between src and dst, return false
return True
elif(abs(dy)==abs(dx)):
for i in range (1,abs(dx)):
if(board[src[0]+i*dyDir][src[1]+i*dxDir] != ""):
return False
return True
return False
def isOpponentPiece(src: tuple, dst: tuple):
    """Return True when the piece on `dst` belongs to the side opposite the piece on `src`."""
    mover = board[src[0]][src[1]]
    target = board[dst[0]][dst[1]]
    if mover in whitepieces:
        return target in blackpieces
    if mover in blackpieces:
        return target in whitepieces
    # Source square holds neither side's piece.
    return False
def movePiece(msg: str):
    """Apply a move given as "<src> <dst>" coordinates, mutating `board` in place."""
    parts = msg.split(" ")
    origin = parseMove(parts[0])
    target = parseMove(parts[1])
    # Copy the piece onto the target square, then clear the origin square.
    board[target[0]][target[1]] = board[origin[0]][origin[1]]
    board[origin[0]][origin[1]] = ""
def checkPlayerMove(msg: str, castlingDict: dict):
    """Validate a player's move request and return a status message.

    A message starting with "Turn" means the move was accepted; any other
    return value is an error description shown to the player.
    """
    coords = msg.split(" ")
    if len(coords) != 2:
        return "Please give 2 coordinates separated by spaces. Ex: a2 a4"
    src = parseMove(coords[0])
    dst = parseMove(coords[1])
    # `parseMove` signals a malformed coordinate with a None row index.
    if src[0] is None:
        return "The first coordinate entered is in an invalid format (a-h)(1-8). Ex: A5 or a5"
    if dst[0] is None:
        return "The second coordinate entered is in an invalid format (a-h)(1-8). Ex: A5 or a5"
    movesOpponentPiece = (currentPlayerId == 2 and board[src[0]][src[1]] in whitepieces) or (
        currentPlayerId == 1 and board[src[0]][src[1]] in blackpieces
    )
    if movesOpponentPiece:
        return "You can not move your opponent's pieces"
    if validateMove(src, dst, castlingDict):
        return f"Turn {turn}: {currentPlayer} moved from {coords[0].upper()} to {coords[1].upper()}\n{otherPlayer}, Type two coordinates to move"
    # Re-read the board here (after validateMove) to mirror the original flow.
    if board[src[0]][src[1]] == "":
        return "You did not select a valid piece"
    return "That piece can not move there"
def inCheck(src: tuple, dst: tuple, player=None):
    """Placeholder check-detection hook; currently always reports "not in check"."""
    if player is None:
        # Infer the side to test from the piece on `src` (not implemented yet).
        pass
    elif player == player1:
        # Explicit request: test whether white is in check (not implemented yet).
        pass
    elif player == player2:
        # Explicit request: test whether black is in check (not implemented yet).
        pass
    return False  # placeholder until check detection is implemented
### Send Message
boardMessage = None #the message so that it can be deleted and altered when a move is made
# Create Message
em = discord.Embed()
em.title = f'{player2} challenged {player1} to a game of chess'
em.description = f"{self.etDisplay()}"
em.color = 0x444444
em.add_field(name=f"{player1}", value=f"Type two coordinates (piece -> destination), or type 'decline' to refuse\nYou are playing white", inline=False)
em.add_field(name="Example", value="a2 a3", inline=False)
await ctx.send(embed=em)
# Add message to edit later
async for x in ctx.channel.history(limit = 1):
boardMessage = x
for x in range(4):
try:
em = discord.Embed()
em.title = f'{player2} challenged {player1} to a game of chess'
msg = await self.client.wait_for('message',check=lambda message: message.author.name == player1, timeout=30)
if(msg.content=='decline'):
em.description = f"{self.etDisplay()}"
em.add_field(name=f"{player1}", value="Challenge refused", inline=False)
await boardMessage.edit(embed=em)
return
gameMsg = checkPlayerMove(msg.content,castlingDict)
if(gameMsg[0:4]!="Turn"):
player1badInput+=1
em.description = f"{self.etDisplay()}"
em.color = 0xFF0000
em.add_field(name="Error", value=f"{gameMsg}", inline=False)
await boardMessage.edit(embed=em)
continue
await ctx.channel.delete_messages(await self.getMessages(ctx,1))
turn += 1
movePiece(msg.content)
em.color = 0x00FF00
em.description = f"{self.etDisplay()}"
em.add_field(name=f"{otherPlayer}'s turn:", value=f"{gameMsg}", inline=False)
await boardMessage.edit(embed=em)
gameLoop = True
currentPlayer,otherPlayer = otherPlayer,currentPlayer
currentPlayerId = 2 if (currentPlayerId == 1) else 1
player1badInput = 0
prevMove = msg.content
break;
except asyncio.exceptions.TimeoutError:
em.description = f"{self.etDisplay()}"
em.color = 0xFF0000
em.add_field(name=f"{player1}", value="Game timed out", inline=False)
await boardMessage.edit(embed=em)
return
if(player1badInput==3):
em.description = f"{self.etDisplay()}"
em.color = 0xFF0000
em.add_field(name=f"{player1}", value="Did not enter a valid move in 3 tries. Game ended.", inline=False)
await boardMessage.edit(embed=em)
return
#Main game loop
while gameLoop:
try:
em = discord.Embed()
em.title = f'Chess match between {player2} and {player1}'
em.add_field(name="Moves:", value=f"Type the 2 coordinates for the piece you want to move and the spot to move to, or type 'quit' to stop the game.", inline=False)
msg = await self.client.wait_for('message',check=lambda message: message.author.name == currentPlayer, timeout=30)
gameMsg = checkPlayerMove(msg.content,castlingDict)
if(msg.content[0:4]=="quit"):
em.color = 0x770000
em.description = f"{self.etDisplay()}"
em.add_field(name=f"{currentPlayer} Quits", value=f"{otherPlayer} wins!", inline=False)
await boardMessage.edit(embed=em)
return
elif(gameMsg == "That piece can not move there"):
coords = msg.content.split(" ")
if(inCheck(parseMove(coords[0]),parseMove(coords[1]))):
em.color = 0xFF0000
em.description = f"{self.etDisplay()}"
em.add_field(name="Error", value=f"Can not move into check", inline=False)
else:
em.color = 0x770000
em.description = f"{self.etDisplay()}"
em.add_field(name="Invalid Move", value=f"{gameMsg}", inline=False)
await boardMessage.edit(embed=em)
continue
elif(gameMsg[0:4]!="Turn"):
if(currentPlayer == player1):
player1badInput+=1
else:
player2badInput+=1
em.color = 0x770000
em.description = f"{self.etDisplay()}"
em.add_field(name="Invalid Move", value=f"{gameMsg}", inline=False)
await boardMessage.edit(embed=em)
continue
await ctx.channel.delete_messages(await self.getMessages(ctx,1))
turn += 1
movePiece(msg.content)
em.description = f"{self.etDisplay()}"
em.color = 0x00FF00
em.add_field(name=f"{otherPlayer}'s turn:", value=f"{gameMsg}", inline=False)
if(currentPlayerId == 1):
player1badInput = 0
elif(currentPlayerId == 2):
player2badInput = 0
currentPlayer,otherPlayer = otherPlayer,currentPlayer
currentPlayerId = 2 if (currentPlayerId == 1) else 1
prevMove = msg.content
await boardMessage.edit(embed=em)
except asyncio.exceptions.TimeoutError:
em.description = f"{self.etDisplay()}"
em.color = 0x770000
em.add_field(name=f"{currentPlayer} Forfeit", value="Didn't make a move within 30 seconds", inline=False)
await boardMessage.edit(embed=em)
return
if(player1badInput==3):
em.description = f"{self.etDisplay()}"
em.color = 0x770000
em.add_field(name=f"{player1} Forfeit", value="Did not enter a valid move in 3 tries. Game ended.", inline=False)
await boardMessage.edit(embed=em)
return
if(player2badInput==3):
em.description = f"{self.etDisplay()}"
em.color = 0x770000
em.add_field(name=f"{player2} Forfeit", value="Did not enter a valid move in 3 tries. Game ended.", inline=False)
await boardMessage.edit(embed=em)
return
#TODO
#Finish castling (move the rook)
#Implement check detection
@commands.command(aliases = ['t'])
@commands.cooldown(3, 30, commands.BucketType.channel)
async def trivia(self, ctx):
    """Run one Open Trivia DB question in the channel.

    Posts the question as an embed, waits up to 30 seconds for a message
    that matches one of the accepted answer forms (letter, 'true'/'false',
    or the full answer text), then edits the question message to show
    whether that first valid answer was correct.
    """
    data = requests.get(f'https://opentdb.com/api.php?amount=1').json()
    results = data['results'][0]
    embed = discord.Embed(
        title = ":question: Trivia",
        description = f"Category: {results['category']} | Difficulty: {results['difficulty'].capitalize()}",
        color = ctx.author.color
    )
    # BUG FIX: the original `embed2 = embed` aliased a single object, so the
    # later `embed2.clear_fields()` also stripped the posted question embed.
    # An independent copy keeps the question message intact.
    embed2 = embed.copy()
    def decode(answers):
        # The API returns HTML-escaped text; unescape every answer.
        return [html.unescape(i) for i in answers]
    if results['type'] == 'boolean':
        # Keep answers in [True, False] display order.
        if results['correct_answer'] == "False":
            answers = results['incorrect_answers'] + [results['correct_answer']]
        else:
            answers = [results['correct_answer']] + results['incorrect_answers']
        answers = decode(answers)
        embed.add_field(name = html.unescape(results['question']), value = f"True or False")
        available_commands = ['true', 'false', 't', 'f']
    else:
        # Insert the correct answer at a random position among the three
        # incorrect ones; `pos` is reused below to grade letter answers.
        pos = random.randint(0, 3)
        if pos == 3:
            answers = results['incorrect_answers'] + [results['correct_answer']]
        else:
            answers = results['incorrect_answers'][0:pos] + [results['correct_answer']] + results['incorrect_answers'][pos:]
        answers = decode(answers)
        embed.add_field(name = html.unescape(results['question']), value = f"A) {answers[0]}\nB) {answers[1]}\nC) {answers[2]}\nD) {answers[3]}")
        available_commands = ['a', 'b', 'c', 'd'] + [x.lower() for x in answers]
    question = await ctx.send(embed = embed)
    correct_answer = html.unescape(results['correct_answer'])
    def check(m):
        # Accept only non-bot messages in this channel that look like answers.
        return m.channel == ctx.channel and m.content.lower() in available_commands and not m.author.bot
    try:
        msg = await self.client.wait_for('message', timeout = 30.0, check = check)
    except asyncio.TimeoutError:
        # Nobody answered in time; leave the question as-is.
        return
    correct = False
    if results['type'] == 'boolean':
        # Accept the full word or its first letter ('t'/'f').
        if msg.content.lower() == correct_answer.lower() or msg.content.lower() == correct_answer.lower()[0]:
            correct = True
        answer_string = f"The answer was **{correct_answer}**"
    else:
        letters = ['a', 'b', 'c', 'd']
        # Accept the full answer text or its letter.
        if msg.content.lower() == correct_answer.lower() or msg.content.lower() == letters[pos]:
            correct = True
        answer_string = f"The answer was **{letters[pos].upper()}) {correct_answer}**"
    name = ":white_check_mark: Correct" if correct else ":x: Incorrect"
    embed2.clear_fields()
    embed2.add_field(name = name, value = answer_string)
    await question.edit(embed = embed2)
@trivia.error
async def trivia_error(self, ctx, error):
    """Report any error raised by the trivia command (e.g. cooldown hit) in chat."""
    await ctx.send(error)
#@commands.max_concurrency(1, commands.BucketType.channel, wait = False)
@commands.command(aliases = ['hang', 'hm'])
async def hangman(self, ctx):
    """Play one game of hangman with the invoking user.

    A random word is drawn from txt/words.txt; the player guesses one
    letter per message and the originally posted embed is edited in place
    after every guess. The game ends on a completed word, a fully drawn
    gallows, or 120 seconds without a guess.
    """
    with open('txt/words.txt') as f:
        word = random.choice(f.readlines()).rstrip("\n")
    # ASCII gallows drawing, one list entry per display row.
    hang = [
        "**``` ____",
        " | |",
        " | ",
        " | ",
        " | ",
        " | ",
        "___|__________```**"
    ]
    empty = '\n'.join(hang)
    # Body parts drawn in order as wrong guesses accumulate:
    # [characters, row index] appends to the row; [characters, row, column]
    # overwrites one character at a fixed column.
    man = [['@', 2], [' |', 3], ['\\', 3, 7], ['/', 3], ['|', 4], ['/', 5], [' \\', 5]]
    # One placeholder square per letter of the hidden word.
    string = [':blue_square:' for i in word]
    embed = discord.Embed(
        title = "Hangman",
        color = ctx.author.color,
        description = f"Type a letter in chat to guess.\n\n**{' '.join(string)}**\n\n{empty}",
    )
    incorrect = 0
    original = await ctx.send(embed = embed)
    guessed = []
    incorrect_guessed = []
    already_guessed = None
    def check(m):
        # Single alphabetic character from the game starter, in this channel.
        return m.channel == ctx.channel and m.content.isalpha() and len(m.content) == 1 and m.author == ctx.author
    # Loop until the man is fully drawn or every letter is revealed.
    while incorrect < len(man) and ':blue_square:' in string:
        try:
            msg = await self.client.wait_for('message', timeout = 120.0, check = check)
            letter = msg.content.lower()
        except asyncio.TimeoutError:
            await ctx.send("Game timed out.")
            return
        # Drop the previous "already guessed" notice, if any.
        if already_guessed:
            await already_guessed.delete()
            already_guessed = None
        if letter in guessed:
            already_guessed = await ctx.send("You have already guessed that letter.")
            await msg.delete()
            continue
        guessed += letter
        if letter not in word:
            incorrect_guessed += letter
            # First wrong guess adds the field; later ones update it in place.
            if embed.fields:
                embed.set_field_at(0, name = "Incorrect letters:", value = ', '.join(incorrect_guessed))
            else:
                embed.add_field(name = "Incorrect letters:", value = ', '.join(incorrect_guessed))
            # Draw the next body part on the gallows.
            part = man[incorrect]
            if len(part) > 2:
                hang[part[1]] = hang[part[1]][0:part[2]] + part[0] + hang[part[1]][part[2] + 1:]
            else:
                hang[part[1]] += part[0]
            incorrect += 1
        else:
            # Reveal every occurrence of the guessed letter.
            for j in range(len(word)):
                if letter == word[j]:
                    string[j] = word[j]
        new = '\n'.join(hang)
        if ':blue_square:' not in string:
            embed.description = f"You guessed the word!\n\n**{' '.join(string)}**\n\n{new}"
        elif incorrect == len(man):
            embed.description = f"You've been hanged! The word was \n\n**{' '.join([k for k in word])}**\n\n{new}"
        else:
            embed.description = f"Type a letter in chat to guess.\n\n**{' '.join(string)}**\n\n{new}"
        await msg.delete()
        await original.edit(embed = embed)
@hangman.error
async def | |
<reponame>medialab/bibliotools3.0
#! /usr/bin/env python
"""
Author : <NAME> (http://www.sebastian-grauwin.com/)
Copyright (C) 2012
All rights reserved.
BSD license.
"""
import os
import sys
import glob
import numpy
import argparse
## ##################################################
## ##################################################
## ##################################################
class Wosline:
    """A single record (line) of a Web of Science tab-separated export file.

    One attribute per WoS two-letter field tag; `parse_line()` fills only
    the subset of fields this pipeline actually uses. (Python 2 source.)
    """
    def __init__(self):
        self.PT = "" ## Publication Type (J=Journal; B=Book; S=Series)
        self.AU = "" ## Authors
        self.BA = "" ## Book Authors
        self.BE = "" ## Book Editor
        self.GP = "" ## Book Group Authors
        self.AF = "" ## Author Full Name
        self.CA = "" ## Group Authors
        self.TI = "" ## Document Title
        self.SO = "" ## Publication Name
        self.SE = "" ## Book Series Title
        self.LA = "" ## Language
        self.DT = "" ## Document Type
        self.CT = "" ## Conference Title
        self.CY = "" ## Conference Date
        self.CL = "" ## Conference Location
        self.SP = "" ## Conference Sponsors
        self.FO = "" ## Funding Organization
        self.DE = "" ## Author Keywords
        self.ID = "" ## Keywords Plus
        self.AB = "" ## Abstract
        self.C1 = "" ## Author Address
        self.RP = "" ## Reprint Address
        self.EM = "" ## E-mail Address
        self.FU = "" ## Funding Agency and Grant Number
        self.FX = "" ## Funding Text
        self.CR = "" ## Cited References
        self.NR = "" ## Cited Reference Count
        self.TC = "" ## Times Cited
        self.Z9 = "" ##
        self.PU = "" ## Publisher
        self.PI = "" ## Publisher City
        self.PA = "" ## Publisher Address
        self.SN = "" ## ISSN
        self.BN = "" ## ISBN
        self.J9 = "" ## 29-Character Source Abbreviation
        self.JI = "" ## ISO Source Abbreviation
        self.PD = "" ## Publication Date
        self.PY = 0 ## Year Published
        self.VL = "" ## Volume
        self.IS = "" ## Issue
        self.PN = "" ## Part Number
        self.SU = "" ## Supplement
        self.SI = "" ## Special Issue
        self.BP = "" ## Beginning Page
        self.EP = "" ## Ending Page
        self.AR = "" ## Article Number
        self.DI = "" ## Digital Object Identifier (DOI)
        self.D2 = "" ##
        self.PG = "" ## Page Count
        self.P2 = "" ##
        self.WC = "" ## Web of Science Category
        self.SC = "" ## Subject Category
        self.GA = "" ## Document Delivery Number
        self.UT = "" ## Unique Article Identifier
    def parse_line(self, line, defCols, numCols):
        """
        Parse one line of the WoS txt export file.

        `defCols` maps field tags to column indices and `numCols` is the
        expected number of tab-separated columns (both produced by
        `defColumns()`). Lines with a different column count are reported
        on stdout and left unparsed.
        """
        s = line.split("\t")
        if len(s)==numCols:
            if(s[defCols['PT']]=='J'): self.PT = 'Journal' ## Publication Type (J=Journal; B=Book; S=Series)
            if(s[defCols['PT']]=='B'): self.PT = 'Book'
            if(s[defCols['PT']]=='S'): self.PT = 'Series'
            self.AU = s[defCols['AU']] ## Authors
            self.TI = s[defCols['TI']] ## Document Title
            self.SO = s[defCols['SO']] ## Publication Name
            self.DT = s[defCols['DT']] ## Document Type
            self.DE = s[defCols['DE']] ## Author Keywords
            self.ID = s[defCols['ID']] ## Keywords Plus
            self.C1 = s[defCols['C1']] ## Author Address
            self.CR = s[defCols['CR']] ## Cited References
            self.TC = s[defCols['TC']] ## Times Cited
            self.J9 = s[defCols['J9']] ## 29-Character Source Abbreviation
            self.PD = s[defCols['PD']] ## Publication Date
            ## Year Published: non-numeric years fall back to 0
            if s[defCols['PY']].isdigit(): self.PY = int(s[defCols['PY']])
            else: self.PY = 0 ## Year Published
            self.VL = s[defCols['VL']] ## Volume
            self.IS = s[defCols['IS']] ## Issue
            self.BP = s[defCols['BP']] ## Beginning Page
            self.WC = s[defCols['WC']] ## Web of Science Category
            self.UT = s[defCols['UT']] ## Unique Article Identifier
        else:
            print "ARG %s != %s"%(len(s),numCols)
## ##################################################
def defColumns(line):
    """Map known WoS field tags in a header line to their column indices.

    Returns a tuple `(defCols, numCols)` where `defCols` maps each
    recognised two-letter tag to its column position and `numCols` is the
    total number of tab-separated columns in the header.
    """
    known_tags = ['PT', 'AU', 'TI', 'SO', 'DT', 'DE', 'ID', 'C1', 'CR', 'TC',
                  'J9', 'PD', 'PY', 'VL', 'IS', 'BP', 'WC', 'UT']
    # Strip a possible UTF-8 BOM before splitting the header on tabs.
    cells = line.replace('\xef\xbb\xbf', '').split('\t')
    column_of = dict()
    for index, tag in enumerate(cells):
        if tag in known_tags:
            column_of[tag] = index
    return (column_of, len(cells))
## ##################################################
## ##################################################
## ##################################################
## ##################################################
class ArticleList:
    """List of `Wosline` records read from a raw WoS export file."""
    def __init__(self):
        self.articles = [] # articles list
    def read_file(self,filename):
        """Read WoS records from `filename` ('stdin' reads standard input).

        The first non-empty line is treated as the header and used to map
        field tags to columns; every later non-empty line becomes one
        `Wosline`. On IOError the list is left empty.
        """
        articles_list = []
        try:
            # open
            if filename != 'stdin':
                fd = open(filename)
            else:
                fd = sys.stdin
            # read
            aux = 0
            for line in fd.readlines():
                line = line.strip("\n") # removes \n
                if (line != ""):
                    if (aux == 1): # do not take 1st line into account!
                        wline = Wosline()
                        wline.parse_line(line, defCols, numCols)
                        articles_list.append( wline )
                    if (aux == 0): # define columns thanks to 1st line
                        (defCols, numCols) = defColumns( line )
                        aux = 1
            # close
            if filename != 'stdin':
                fd.close()
        except IOError:
            print "file does not exist"
        self.articles = articles_list
## ##################################################
## ##################################################
## ##################################################
class Article:
    """One pre-parsed article record from the pipeline's tab-separated files."""
    def __init__(self):
        self.id = 0            # numeric article identifier
        self.firstAU = ""      # first author
        self.year = 0          # publication year
        self.journal = ""      # journal abbreviation
        self.volume = ""
        self.page = ""
        self.doi = ""
        self.pubtype = ""
        self.doctype = ""
        self.times_cited = ""
        self.title = ""
        self.uniqueID = ""
        self.articles = [] # list of articles
    def read_file(self,filename):
        """
        Read article records from `filename` ('stdin' reads standard input).

        Each non-empty tab-separated line becomes one `Article`; trailing
        fields may be absent, hence the per-field length checks.
        """
        articles_list = []
        try:
            # open
            if filename != 'stdin':
                fd = open(filename)
            else:
                fd = sys.stdin
            # read
            aux = 0
            for line in fd.readlines():
                line = line.strip() # removes \n
                if (line != ""):
                    s = line.split("\t")
                    aline = Article()
                    aline.id = int(s[0])
                    if(len(s)>1): aline.firstAU = s[1]
                    if(len(s)>2): aline.year = int(s[2])
                    if(len(s)>3): aline.journal = s[3]
                    if(len(s)>4): aline.volume = s[4]
                    if(len(s)>5): aline.page = s[5]
                    if(len(s)>6): aline.doi = s[6]
                    if(len(s)>7): aline.pubtype = s[7]
                    if(len(s)>8): aline.doctype = s[8]
                    if(len(s)>9): aline.times_cited = s[9]
                    if(len(s)>10): aline.title = s[10]
                    if(len(s)>11): aline.uniqueID = s[11]
                    articles_list.append( aline )
            # close
            if filename != 'stdin':
                fd.close()
        except IOError:
            print "file does not exist"
        self.articles = articles_list
## ##################################################
## ##################################################
## ##################################################
class Author:
    """One (article id, author rank, author name) record."""
    def __init__(self):
        self.id = 0        # article identifier
        self.rank = 0      # author position within the article's author list
        self.author = ""   # author name
        self.authors = [] # list of author records
    def read_file(self,filename):
        """
        Read author records from `filename` ('stdin' reads standard input).

        Lines missing the author column are reported and kept with the
        placeholder name "name missing".
        """
        alines_list = []
        try:
            # open
            if filename != 'stdin':
                fd = open(filename)
            else:
                fd = sys.stdin
            # read
            lncnt=0
            for line in fd.readlines():
                line = line.strip() # removes \n
                if (line != ""):
                    s = line.split("\t")
                    aline = Author()
                    aline.id = int(s[0])
                    aline.rank = int(s[1])
                    if len(s)<3:
                        print "missing author in : %s %s"%(lncnt,s)
                        aline.author = "name missing"
                    else:
                        aline.author = s[2]
                    alines_list.append( aline )
                lncnt+=1
            # close
            if filename != 'stdin':
                fd.close()
        except IOError:
            print "file does not exist"
        self.authors = alines_list
## ##################################################
## ##################################################
## ##################################################
class Country:
    """One (article id, rank, country) affiliation record."""
    def __init__(self):
        self.id = 0         # article identifier
        self.rank = 0       # affiliation rank within the article
        self.country = ""   # country name, normalised to "Xxxx" capitalisation
        self.countries = [] # list of country records
    def read_file(self,filename):
        """
        Read country records from `filename` ('stdin' reads standard input).
        """
        clines_list = []
        try:
            # open
            if filename != 'stdin':
                fd = open(filename)
            else:
                fd = sys.stdin
            # read
            for line in fd.readlines():
                line = line.strip() # removes \n
                if (line != ""):
                    s = line.split("\t")
                    cline = Country()
                    cline.id = int(s[0])
                    cline.rank = int(s[1])
                    # Normalise e.g. "FRANCE"/"france" to "France"
                    cline.country = s[2].lower().capitalize()
                    clines_list.append( cline )
            # close
            if filename != 'stdin':
                fd.close()
        except IOError:
            print "file does not exist"
        self.countries = clines_list
## ##################################################
## ##################################################
## ##################################################
class Institution:
    """One (article id, rank, institution) affiliation record."""
    def __init__(self):
        self.id = 0            # article identifier
        self.rank = 0          # affiliation rank within the article
        self.institution = ""  # institution name, upper-cased on read
        self.institutions = [] # list of institution records
    def read_file(self,filename):
        """
        Read institution records from `filename` ('stdin' reads standard input).

        Only lines with exactly three tab-separated fields are kept.
        """
        ilines_list = []
        try:
            # open
            if filename != 'stdin':
                fd = open(filename)
            else:
                fd = sys.stdin
            # read
            for line in fd.readlines():
                line = line.strip() # removes \n
                if (line != ""):
                    s = line.split("\t")
                    iline = Institution()
                    if len(s)==3:
                        iline.id = int(s[0])
                        iline.rank = int(s[1])
                        iline.institution = s[2].upper()
                        ilines_list.append( iline )
            # close
            if filename != 'stdin':
                fd.close()
        except IOError:
            print "file does not exist"
        self.institutions = ilines_list
## ##################################################
## ##################################################
## ##################################################
class Keyword:
def __init__(self):
self.id = 0
self.ktype = ""
self.keyword = ""
self.keywords = [] # liste
def read_file(self,filename):
"""
Lecture des articles
"""
klines_list = []
try:
# open
if filename != 'st.lower().capitalize()din':
fd = open(filename)
else:
fd = sys.stdin
# read
for line in fd.readlines():
line = line.strip() # removes \n
if (line != ""):
s = line.split("\t")
kline = Keyword()
kline.id = int(s[0])
kline.ktype = s[1]
kline.keyword = s[2].upper()
klines_list.append( kline )
# close
if filename != 'stdin':
fd.close()
except IOError:
print "file does not exist"
| |
<gh_stars>0
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Qiskit Aer qasm simulator backend.
"""
import copy
import logging
from warnings import warn
from qiskit.providers.options import Options
from qiskit.providers.models import QasmBackendConfiguration
from ..version import __version__
from ..aererror import AerError
from .aerbackend import AerBackend
from .backend_utils import (cpp_execute, available_methods,
MAX_QUBITS_STATEVECTOR,
LEGACY_METHOD_MAP,
map_legacy_method_options)
# pylint: disable=import-error, no-name-in-module
from .controller_wrappers import aer_controller_execute
logger = logging.getLogger(__name__)
class QasmSimulator(AerBackend):
"""
Noisy quantum circuit simulator backend.
**Configurable Options**
The `QasmSimulator` supports multiple simulation methods and
configurable options for each simulation method. These may be set using the
appropriate kwargs during initialization. They can also be set of updated
using the :meth:`set_options` method.
Run-time options may also be specified as kwargs using the :meth:`run` method.
These will not be stored in the backend and will only apply to that execution.
They will also override any previously set options.
For example, to configure a density matrix simulator with a custom noise
model to use for every execution
.. code-block:: python
noise_model = NoiseModel.from_backend(backend)
backend = QasmSimulator(method='density_matrix',
noise_model=noise_model)
**Simulating an IBMQ Backend**
The simulator can be automatically configured to mimic an IBMQ backend using
the :meth:`from_backend` method. This will configure the simulator to use the
basic device :class:`NoiseModel` for that backend, and the same basis gates
and coupling map.
.. code-block:: python
backend = QasmSimulator.from_backend(backend)
**Simulation Method Option**
The simulation method is set using the ``method`` kwarg.
Supported simulation methods are
* ``"statevector"``: A dense statevector simulation that can sample
measurement outcomes from *ideal* circuits with all measurements at
end of the circuit. For noisy simulations each shot samples a
randomly sampled noisy circuit from the noise model.
``"statevector_cpu"`` is an alias of ``"statevector"``.
* ``"statevector_gpu"``: A dense statevector simulation that provides
the same functionalities with ``"statevector"``. GPU performs the computation
to calculate probability amplitudes as CPU does. If no GPU is available,
a runtime error is raised.
* ``"density_matrix"``: A dense density matrix simulation that may
sample measurement outcomes from *noisy* circuits with all
measurements at end of the circuit. It can only simulate half the
number of qubits as the statevector method.
* ``"density_matrix_gpu"``: A dense density matrix simulation that provides
the same functionalities with ``"density_matrix"``. GPU performs the computation
to calculate probability amplitudes as CPU does. If no GPU is available,
a runtime error is raised.
* ``"stabilizer"``: An efficient Clifford stabilizer state simulator
that can simulate noisy Clifford circuits if all errors in the noise model are also
Clifford errors.
    * ``"extended_stabilizer"``: An approximate simulator based on a
ranked-stabilizer decomposition that decomposes circuits into stabilizer
state terms. The number of terms grows with the number of
non-Clifford gates.
* ``"matrix_product_state"``: A tensor-network statevector simulator that
uses a Matrix Product State (MPS) representation for the state.
* ``"automatic"``: The default behavior where the method is chosen
automatically for each circuit based on the circuit instructions,
number of qubits, and noise model.
**Additional Backend Options**
The following simulator specific backend options are supported
* ``method`` (str): Set the simulation method (Default: ``"automatic"``).
      Use :meth:`available_methods` to return a list of all available methods.
* ``device`` (str): Set the simulation device (Default: ``"CPU"``).
Use :meth:`available_devices` to return a list of devices supported
on the current system.
* ``precision`` (str): Set the floating point precision for
certain simulation methods to either ``"single"`` or ``"double"``
precision (default: ``"double"``).
* ``executor`` (futures.Executor): Set a custom executor for
asynchronous running of simulation jobs (Default: None).
* ``max_job_size`` (int or None): If the number of run circuits
exceeds this value simulation will be run as a set of of sub-jobs
on the executor. If ``None`` simulation of all circuits are submitted
to the executor as a single job (Default: None).
* ``enable_truncation`` (bool): If set to True this removes unnecessary
qubits which do not affect the simulation outcome from the simulated
circuits (Default: True).
* ``zero_threshold`` (double): Sets the threshold for truncating
small values to zero in the result data (Default: 1e-10).
* ``validation_threshold`` (double): Sets the threshold for checking
if initial states are valid (Default: 1e-8).
* ``max_parallel_threads`` (int): Sets the maximum number of CPU
cores used by OpenMP for parallelization. If set to 0 the
maximum will be set to the number of CPU cores (Default: 0).
* ``max_parallel_experiments`` (int): Sets the maximum number of
qobj experiments that may be executed in parallel up to the
max_parallel_threads value. If set to 1 parallel circuit
execution will be disabled. If set to 0 the maximum will be
automatically set to max_parallel_threads (Default: 1).
* ``max_parallel_shots`` (int): Sets the maximum number of
shots that may be executed in parallel during each experiment
execution, up to the max_parallel_threads value. If set to 1
parallel shot execution will be disabled. If set to 0 the
maximum will be automatically set to max_parallel_threads.
Note that this cannot be enabled at the same time as parallel
experiment execution (Default: 0).
* ``max_memory_mb`` (int): Sets the maximum size of memory
to store a state vector. If a state vector needs more, an error
is thrown. In general, a state vector of n-qubits uses 2^n complex
values (16 Bytes). If set to 0, the maximum will be automatically
set to the system memory size (Default: 0).
These backend options only apply when using the ``"statevector"``
simulation method:
* ``statevector_parallel_threshold`` (int): Sets the threshold that
the number of qubits must be greater than to enable OpenMP
parallelization for matrix multiplication during execution of
an experiment. If parallel circuit or shot execution is enabled
this will only use unallocated CPU cores up to
max_parallel_threads. Note that setting this too low can reduce
performance (Default: 14).
* ``statevector_sample_measure_opt`` (int): Sets the threshold that
the number of qubits must be greater than to enable a large
qubit optimized implementation of measurement sampling. Note
      that setting this too low can reduce performance (Default: 10)
These backend options only apply when using the ``"stabilizer"``
simulation method:
* ``stabilizer_max_snapshot_probabilities`` (int): set the maximum
qubit number for the
`~qiskit.providers.aer.extensions.SnapshotProbabilities`
instruction (Default: 32).
These backend options only apply when using the ``"extended_stabilizer"``
simulation method:
* ``extended_stabilizer_sampling_method`` (string): Choose how to simulate
measurements on qubits. The performance of the simulator depends
significantly on this choice. In the following, let n be the number of
qubits in the circuit, m the number of qubits measured, and S be the
number of shots (Default: resampled_metropolis).
- ``"metropolis"``: Use a Monte-Carlo method to sample many output
strings from the simulator at once. To be accurate, this method
requires that all the possible output strings have a non-zero
probability. It will give inaccurate results on cases where
the circuit has many zero-probability outcomes.
This method has an overall runtime that scales as n^{2} + (S-1)n.
- ``"resampled_metropolis"``: A variant of the metropolis method,
where the Monte-Carlo method is reinitialised for every shot. This
gives better results for circuits where some outcomes have zero
probability, but will still fail if the output distribution
is sparse. The overall runtime scales as Sn^{2}.
- ``"norm_estimation"``: An alternative sampling method using
      random state inner products to estimate outcome probabilities. This
method requires twice as much memory, and significantly longer
runtimes, but gives accurate results on circuits with sparse
output distributions. The overall runtime scales as Sn^{3}m^{3}.
* ``extended_stabilizer_metropolis_mixing_time`` (int): Set how long the
monte-carlo method runs before performing measurements. If the
output distribution is strongly peaked, this can be decreased
alongside setting extended_stabilizer_disable_measurement_opt
to True (Default: 5000).
* ``"extended_stabilizer_approximation_error"`` (double): Set the error
in the approximation for | |
<reponame>ledatelescope/bifrost_tcc_wrapper
#!/usr/bin/env python
# Copyright (c) 2019-2021, The Bifrost Authors. All rights reserved.
# Copyright (c) 2019-2021, The University of New Mexico. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import os
import re
import sys
import glob
import warnings
import argparse
from textwrap import fill as tw_fill
# Class vs function diviner: names of the form "<class>_<method>" - where the
# method is one of create/init/execute/destroy - are treated as class methods;
# anything else matches the 'function' group and is wrapped as a free function.
_CLASS_RE = re.compile(r'((?P<class>.+)_(?P<method>(create|init|execute|destroy))|(?P<function>.+))')
# Python wrapper template: module preamble emitted at the top of every
# generated wrapper ({libname} is substituted with the wrapped library name)
_WRAPPER_TEMPLATE = r"""
from bifrost.libbifrost import _check, _get, BifrostObject
from bifrost.ndarray import asarray
import {libname}_generated as _gen
"""
def _patch_bifrost_objects(filename, includes):
"""
Given a wrapper generated by ctypesgen, clean up the file to make sure
that it uses the "official" Bifrost objects.
"""
# Deal with the includes by converting a string to a
# list.
if isinstance(includes, str):
includes = includes.split(None)
# Load it in a process it
wrapper = []
with open(filename, 'r') as fh:
inside_ignore = False
first_post_ignore = True
for line in fh:
# "# No modules" is the last valid line before we hit the internal Bifrost
# definitions that we need to cut out
if line.startswith("# No modules") or line.startswith("# End modules"):
inside_ignore = line
# If we are inside the internal Bifrost definitions, look for a sign that
# we are done by finding one of our include files mentioned in a comment
if inside_ignore:
for inclue in includes:
if line[0] == '#' and line.find(inclue) != -1:
if first_post_ignore:
wrapper.append(inside_ignore)
wrapper.append("\nfrom bifrost.libbifrost_generated import *\n\n")
first_post_ignore = True
inside_ignore = False
break
if not inside_ignore:
wrapper.append(line)
# A warning, if necessary
if not first_post_ignore:
warnings.warn(RuntimeWarning, "File may not have been wrapped correctly")
# Write it back out
with open(filename, 'w') as fh:
fh.write("".join(wrapper))
def _normalize_function_name(function):
"""
Given a C++ function name, convert it to a nicely formatted Python
function name. For example "SetPositions" becomes "set_positions".
"""
name = function[0].lower()
for i,l in enumerate(function[1:]):
if l.isupper():
name += '_'
name += l.lower()
return name
def _reverse_normalize_function_name(function):
"""
Given a Python funciton name, convert it into a C++ function name. For
example "set_positions" becomes "SetPositions".
"""
name = function[0].upper()
next_upper = False
for l in function[1:]:
if l == '_':
next_upper = True
continue
name += l.upper() if next_upper else l
next_upper = False
return name
def _split_and_clean_args(args):
"""
Given a string of arguments from a ctypesgen-generated wrapper, parse
them into a list of something we can use later.
"""
args = args.replace('[', '').replace(']', '')
args = args.split(',')
args = [arg.strip().rstrip() for arg in args]
for i in range(len(args)):
# Deal with pointers
if args[i].startswith('POINTER'):
if args[i].find('BFarray') != -1:
args[i] = 'BFarray'
elif args[i] == 'POINTER(None)':
args[i] = 'ptr_generic'
elif args[i] == 'POINTER(POINTER(None))':
args[i] = 'ptr_ptr_generic'
else:
ctype = args[i].split('(', 1)[1]
ctype = ctype.replace(')', '')
args[i] = "ptr_%s" % ctype
return args
def _extract_calls(filename, libname):
    """
    Given a wrapper generated by ctypesgen, extract all of the function call
    information and return it as a dictionary.

    The result maps a normalized Python function name to a dict with keys
    'c_name', 'arguments' (type tokens), 'names' (argument names) and
    'results' (return type tokens).
    """
    # Load the file in
    with open(filename, 'r') as fh:
        wrapper = fh.readlines()
    # Pass 1: Find the functions and defintion locations
    functions = {}
    locations = {}
    for i, line in enumerate(wrapper):
        if line.find('if not hasattr(_lib') != -1 or line.find('if hasattr(_lib') != -1:
            function = line.split(None)[-1][1:]
            function = function.replace("'):", '')
            py_name = function.split(_reverse_normalize_function_name(libname), 1)[-1]
            if py_name == '':
                ## Catch for when the library name is the same as the function
                py_name = function
            py_name = _normalize_function_name(py_name)
            functions[py_name] = function
            # ctypesgen leaves a "# <file>: <line>" comment just above the
            # hasattr() guard - remember it so the declaration can be re-read
            # in pass 3 to recover the argument names
            if wrapper[i-1][0] == '#' and wrapper[i-1].find(':') != -1:
                locations[py_name] = wrapper[i-1].split(None, 1)[1]
            elif wrapper[i-2][0] == '#' and wrapper[i-2].find(':') != -1:
                locations[py_name] = wrapper[i-2].split(None, 1)[1]
    # Pass 2: Find the argument and return types
    arguments, results = {}, {}
    for line in wrapper:
        for py_name in functions:
            c_name = functions[py_name]
            if line.find("%s.argtypes" % c_name) != -1:
                value = line.split('=', 1)[-1]
                arguments[py_name] = _split_and_clean_args(value)
            elif line.find("%s.restype" % c_name) != -1:
                value = line.split('=', 1)[-1]
                results[py_name] = _split_and_clean_args(value)
    # Pass 3: Find the argument names by reading the original declaration
    # (decl_filename deliberately does not shadow this function's 'filename')
    names = {}
    for py_name in locations:
        decl_filename, line_no = locations[py_name].split(':', 1)
        line_no = int(line_no, 10)
        definition = ''
        with open(decl_filename, 'r') as fh:
            for i, line in enumerate(fh, 1):
                if i < line_no:
                    continue
                definition += line.strip()
                if line.find(')') != -1:
                    break
        definition = definition.split('(', 1)[1]
        definition = definition.split(')', 1)[0]
        args = definition.split(',')
        args = [arg.rsplit(None, 1)[1].replace('*', '') for arg in args]
        names[py_name] = args
    # Combine and done
    calls = {}
    for py_name in functions:
        calls[py_name] = {}
        calls[py_name]['c_name'] = functions[py_name]
        calls[py_name]['arguments'] = arguments[py_name]
        try:
            calls[py_name]['names'] = names[py_name]
        except KeyError:
            ## Fallback in case we didn't find anything useful: synthesize
            ## positional names ('arg0', 'arg1', ...).  The '%' formatting
            ## operator was missing in the original, yielding literal 'arg%i'.
            calls[py_name]['names'] = ['arg%i' % i for i in range(len(arguments[py_name]))]
        calls[py_name]['results'] = results[py_name]
    return calls
def _class_or_functions(calls):
    """
    Given a dictionary of call signatures extracted by _extract_calls(),
    determine what kind of wrapper to build. Options are 'class' for
    something that looks like it should be a class or 'functions' for a
    collection of functions.
    """
    methods = set(_CLASS_RE.match(call).group('method') for call in calls)
    # a full create/init/execute/destroy lifecycle marks a class-style API
    if {'create', 'destroy', 'init', 'execute'}.issubset(methods):
        return "class"
    return "functions"
def _check_get_or_return(py_name, call):
"""
Given a call signature extracted by _extract_calls(), figure out how
to deal with the outcome of the function call. Valid options are
'return' to just return it, 'check' to just call the Bifrost _check()
function, or 'get' to call the Bifrost _get() function.
"""
args = call['arguments']
ress = call['results']
if ress[0] != 'BFstatus':
return 'return'
else:
if args[-1].startswith('ptr') and not py_name.startswith('set_'):
return 'get'
else:
return 'check'
def _convert_call_args(call, for_method=True):
"""
Convert a set of arguments associated with a call signature extracted by
_extract_calls() into two strings: one for Python calls and one for
wrapped C calls.
"""
args = call['arguments']
names = call['names']
py_args, c_args = [], []
for arg,name in zip(args, names):
py_args.append("%s_%s" % (name, arg))
c_args.append(py_args[-1])
# Make sure we wrap arguments of type BFarray with
# "asarray().as_BFarray()".
if c_args[-1].find('_BFarray') != -1:
c_args[-1] = "asarray(%s).as_BFarray()" % c_args[-1]
if for_method:
py_args[0] = 'self'
c_args[0] = 'self.obj'
py_args = ', '.join(py_args)
c_args = ', '.join(c_args)
return py_args, c_args
def _render_call(py_name, call, for_method=False, indent=0):
"""
Given a Python function name and its associated call signature, return
a string corresponding to full Python function definition.
"""
c_name = call['c_name']
py_args, c_args = _convert_call_args(call, for_method=for_method)
call_base = "_gen.{name}({args})".format(name=c_name, args=c_args)
return_type = _check_get_or_return(py_name, call)
tw_padding = indent + 4 + len(c_name) + 5 + 1
if return_type == 'check':
tw_padding += 7
elif return_type == 'get':
tw_padding += 5
else:
tw_padding += 7
call_base = tw_fill(call_base,
subsequent_indent=' '*tw_padding,
break_long_words=False,
| |
# Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
"""
packit started as source-git and we're making a source-git module after such a long time, weird
"""
import logging
import os
import shutil
import tarfile
import tempfile
from pathlib import Path
from typing import Optional, Tuple
from git import GitCommandError
from packit.config.common_package_config import CommonPackageConfig
from packit.config.config import Config
from packit.config.package_config import PackageConfig
from packit.constants import (
RPM_MACROS_FOR_PREP,
FEDORA_DOMAIN,
CENTOS_DOMAIN,
CENTOS_STREAM_GITLAB,
)
from packit.distgit import DistGit
from packit.exceptions import PackitException
from packit.local_project import LocalProject
from packit.utils.commands import run_command
from packit.utils.repo import (
clone_centos_8_package,
clone_centos_9_package,
get_default_branch,
)
logger = logging.getLogger(__name__)
class CentOS8DistGit(DistGit):
    """
    CentOS dist-git layout implementation for 8: CentOS Linux 8 and CentOS Stream 8
    which lives in git.centos.org
    """
    # spec files are stored in this dir in dist-git
    spec_dir_name = "SPECS"
    # sources are stored in this dir in dist-git
    # this applies to CentOS Stream 8 and CentOS Linux 7 and 8
    source_dir_name = "SOURCES"
    @classmethod
    def clone(
        cls,
        config: Config,
        package_config: CommonPackageConfig,
        path: Path,
        branch: Optional[str] = None,
    ) -> "CentOS8DistGit":
        """
        Clone the dist-git repo for the configured package into *path*
        (optionally checking out *branch*) and wrap the local checkout
        in a CentOS8DistGit instance.
        """
        clone_centos_8_package(
            package_config.downstream_package_name, path, branch=branch
        )
        lp = LocalProject(working_dir=path)
        return cls(config, package_config, local_project=lp)
class CentOS9DistGit(DistGit):
    """
    CentOS dist-git layout implementation for CentOS Stream 9
    which lives in gitlab.com/redhat/centos-stream/rpms
    """
    # spec files are stored in this dir in dist-git
    # (empty string: specfile lives in the repository root)
    spec_dir_name = ""
    # sources are stored in this dir in dist-git
    # (empty string: sources live in the repository root)
    source_dir_name = ""
    @classmethod
    def clone(
        cls,
        config: Config,
        package_config: CommonPackageConfig,
        path: Path,
        branch: Optional[str] = None,
    ) -> "CentOS9DistGit":
        """
        Clone the dist-git repo for the configured package into *path*
        (optionally checking out *branch*) and wrap the local checkout
        in a CentOS9DistGit instance.
        """
        clone_centos_9_package(
            package_config.downstream_package_name, path, branch=branch
        )
        lp = LocalProject(working_dir=path)
        return cls(config, package_config, local_project=lp)
def get_distgit_kls_from_repo(
    repo_path: Path, config: Config
) -> Tuple[DistGit, Optional[str], Optional[str]]:
    """
    Pick the right DistGit flavour for a local dist-git clone based on its
    remote URL.
    :return: DistGit instance, centos package name, fedora package name
    """
    repo_dir = Path(repo_path)
    package_config = PackageConfig(downstream_package_name=repo_dir.name)
    project = LocalProject(working_dir=repo_dir)
    url = project.git_url
    if FEDORA_DOMAIN in url:
        return DistGit(config, package_config, local_project=project), None, repo_dir.name
    if CENTOS_DOMAIN in url:
        return CentOS8DistGit(config, package_config, local_project=project), repo_dir.name, None
    if CENTOS_STREAM_GITLAB in url:
        return CentOS9DistGit(config, package_config, local_project=project), repo_dir.name, None
    raise PackitException(
        f"Dist-git URL {url} not recognized, we expected one of: "
        f"{FEDORA_DOMAIN}, {CENTOS_DOMAIN} or {CENTOS_STREAM_GITLAB}"
    )
def get_tarball_comment(tarball_path: str) -> Optional[str]:
"""Return the comment header for the tarball
If written by git-archive, this contains the Git commit ID.
Return None if the file is invalid or does not contain a comment.
shamelessly stolen:
https://pagure.io/glibc-maintainer-scripts/blob/master/f/glibc-sync-upstream.py#_75
"""
try:
with tarfile.open(tarball_path) as tar:
return tar.pax_headers["comment"]
except Exception as ex:
logger.debug(f"Could not get 'comment' header from the tarball: {ex}")
return None
class SourceGitGenerator:
"""
generate a source-git repo from provided upstream repo
and a corresponding package in Fedora/CentOS ecosystem
"""
    def __init__(
        self,
        local_project: LocalProject,
        config: Config,
        upstream_url: Optional[str] = None,
        upstream_ref: Optional[str] = None,
        dist_git_path: Optional[Path] = None,
        dist_git_branch: Optional[str] = None,
        fedora_package: Optional[str] = None,
        centos_package: Optional[str] = None,
        tmpdir: Optional[Path] = None,
    ):
        """
        :param local_project: this source-git repo
        :param config: global configuration
        :param upstream_url: upstream repo URL we want to use as a base
        :param upstream_ref: upstream git-ref to use as a base
        :param dist_git_path: path to a local clone of a dist-git repo
        :param dist_git_branch: branch in dist-git to use
        :param fedora_package: pick up specfile and downstream sources from this fedora package
        :param centos_package: pick up specfile and downstream sources from this centos package
        :param tmpdir: path to a directory where temporary repos (upstream,
            dist-git) will be cloned
        :raises PackitException: when no dist-git path and no package name
            is provided, so the downstream package cannot be identified
        """
        self.local_project = local_project
        self.config = config
        # all temporary clones (upstream, dist-git) live under this directory
        self.tmpdir = tmpdir or Path(tempfile.mkdtemp(prefix="packit-sg-"))
        self._dist_git: Optional[DistGit] = None
        self._primary_archive: Optional[Path] = None
        self._upstream_ref: Optional[str] = upstream_ref
        self.dist_git_branch = dist_git_branch
        logger.info(
            f"The source-git repo is going to be created in {local_project.working_dir}."
        )
        # Either reuse an existing local dist-git clone, or derive the
        # package config from the given package name and clone later.
        if dist_git_path:
            (
                self._dist_git,
                self.centos_package,
                self.fedora_package,
            ) = get_distgit_kls_from_repo(dist_git_path, config)
            self.dist_git_path = dist_git_path
            self.package_config = self.dist_git.package_config
        else:
            self.centos_package = centos_package
            self.fedora_package = fedora_package
            if centos_package:
                self.package_config = PackageConfig(
                    downstream_package_name=centos_package
                )
            elif fedora_package:
                # NOTE(review): fedora_package is truthy in this branch, so the
                # working-dir fallback below looks unreachable - confirm intent
                self.fedora_package = (
                    self.fedora_package or local_project.working_dir.name
                )
                self.package_config = PackageConfig(
                    downstream_package_name=fedora_package
                )
            else:
                raise PackitException(
                    "Please tell us the name of the package in the downstream."
                )
            # dist-git will be cloned into the temp dir on first access
            self.dist_git_path = self.tmpdir.joinpath(
                self.package_config.downstream_package_name
            )
        # Resolve where the upstream sources come from: an existing local
        # directory, a remote URL to clone, or the current repo itself.
        if upstream_url:
            if Path(upstream_url).is_dir():
                self.upstream_repo_path: Path = Path(upstream_url)
                self.upstream_lp: LocalProject = LocalProject(
                    working_dir=self.upstream_repo_path
                )
            else:
                self.upstream_repo_path = self.tmpdir.joinpath(
                    f"{self.package_config.downstream_package_name}-upstream"
                )
                self.upstream_lp = LocalProject(
                    git_url=upstream_url, working_dir=self.upstream_repo_path
                )
        else:
            # $CWD is the upstream repo and we just need to pick
            # downstream stuff
            self.upstream_repo_path = self.local_project.working_dir
            self.upstream_lp = self.local_project
    @property
    def primary_archive(self) -> Path:
        """Path to the upstream source archive, downloaded lazily on first access."""
        if not self._primary_archive:
            self._primary_archive = self.dist_git.download_upstream_archive()
        return self._primary_archive
    @property
    def dist_git(self) -> DistGit:
        """Lazily cloned dist-git repo with its remote sources already downloaded."""
        if not self._dist_git:
            self._dist_git = self._get_dist_git()
            # we need to parse the spec twice
            # https://github.com/rebase-helper/rebase-helper/issues/848
            self._dist_git.download_remote_sources()
            self._dist_git.specfile.reload()
        return self._dist_git
    @property
    def upstream_ref(self) -> str:
        """
        Upstream git-ref the source-git repo is based on.

        Resolution order: the explicitly provided ref, then the git-archive
        comment stored in the upstream tarball, then the current HEAD commit.
        Raises PackitException when none of these can be determined.
        """
        if self._upstream_ref is None:
            self._upstream_ref = get_tarball_comment(str(self.primary_archive))
            if self._upstream_ref:
                logger.info(
                    "upstream base ref was not set, "
                    f"discovered it from the archive: {self._upstream_ref}"
                )
            else:
                # fallback to HEAD
                try:
                    self._upstream_ref = self.local_project.commit_hexsha
                except ValueError as ex:
                    raise PackitException(
                        "Current branch seems to be empty - we cannot get the hash of "
                        "the top commit. We need to set upstream_ref in packit.yaml to "
                        "distinct between upstream and downstream changes. "
                        "Please set --upstream-ref or pull the upstream git history yourself. "
                        f"Error: {ex}"
                    )
                logger.info(
                    "upstream base ref was not set, "
                    f"falling back to the HEAD commit: {self._upstream_ref}"
                )
        return self._upstream_ref
    @property
    def specfile_path(self) -> Path:
        """Where the specfile will live inside the source-git repo's downstream dir."""
        return self.dist_git.get_root_downstream_dir_for_source_git(
            self.local_project.working_dir
        ).joinpath(self.dist_git.absolute_specfile_path.name)
    def _get_dist_git(
        self,
    ) -> DistGit:
        """
        For given package names, clone the dist-git repo in the given directory
        and return the DistGit class
        :return: DistGit instance
        """
        if self.centos_package:
            # default to CentOS Stream 9 when no branch was requested
            self.dist_git_branch = self.dist_git_branch or "c9s"
            # let's be sure to cover anything 9 related,
            # even though "c9" will probably never be a thing
            if "c9" in self.dist_git_branch:
                return CentOS9DistGit.clone(
                    config=self.config,
                    package_config=self.package_config,
                    path=self.dist_git_path,
                    branch=self.dist_git_branch,
                )
            return CentOS8DistGit.clone(
                config=self.config,
                package_config=self.package_config,
                path=self.dist_git_path,
                branch=self.dist_git_branch,
            )
        else:
            # If self.dist_git_branch is None we will checkout/store repo's default branch
            dg = DistGit.clone(
                config=self.config,
                package_config=self.package_config,
                path=self.dist_git_path,
                branch=self.dist_git_branch,
            )
            self.dist_git_branch = (
                self.dist_git_branch or dg.local_project.git_project.default_branch
            )
            return dg
    def _pull_upstream_ref(self):
        """
        Pull the base ref from upstream to our source-git repo
        """
        # fetch operation is pretty intense
        # if upstream_ref is a commit, we need to fetch everything
        # if it's a tag or branch, we can only fetch that ref
        self.local_project.fetch(
            str(self.upstream_lp.working_dir), "+refs/heads/*:refs/remotes/origin/*"
        )
        self.local_project.fetch(
            str(self.upstream_lp.working_dir),
            "+refs/remotes/origin/*:refs/remotes/origin/*",
        )
        # probe for at least one commit: an empty repo needs `reset`
        # instead of `rebase` (rebase would fail without a HEAD)
        try:
            next(self.local_project.get_commits())
        except GitCommandError as ex:
            logger.debug(f"Can't get next commit: {ex}")
            # the repo is empty, rebase would fail
            self.local_project.reset(self.upstream_ref)
        else:
            self.local_project.rebase(self.upstream_ref)
    def _run_prep(self):
        """
        run `rpmbuild -bp` in the dist-git repo to get a git-repo
        in the %prep phase so we can pick the commits in the source-git repo

        :raises PackitException: when the `_packitpatch` helper (shipped with
            the packit RPM) is not available on PATH
        """
        _packitpatch_path = shutil.which("_packitpatch")
        if not _packitpatch_path:
            raise PackitException(
                "We are trying to unpack a dist-git archive and lay patches on top "
                'by running `rpmbuild -bp` but we cannot find "_packitpatch" command on PATH: '
                "please install packit as an RPM."
            )
        logger.info(
            f"expanding %prep section in {self.dist_git.local_project.working_dir}"
        )
        # point all rpmbuild dirs at the dist-git checkout so %prep unpacks
        # in place rather than in the system-wide build root
        rpmbuild_args = [
            "rpmbuild",
            "--nodeps",
            "--define",
            f"_topdir {str(self.dist_git.local_project.working_dir)}",
            "-bp",
            "--define",
            f"_specdir {str(self.dist_git.absolute_specfile_dir)}",
            "--define",
            f"_sourcedir {str(self.dist_git.absolute_source_dir)}",
        ]
        rpmbuild_args += RPM_MACROS_FOR_PREP
        if logger.level <= logging.DEBUG:  # -vv can be super-duper verbose
            rpmbuild_args.append("-v")
        rpmbuild_args.append(str(self.dist_git.absolute_specfile_path))
        run_command(
            rpmbuild_args,
            cwd=self.dist_git.local_project.working_dir,
            print_live=True,
        )
    def _put_downstream_sources(self):
        """
        place sources from the downstream into the source-git repository

        Copies the specfile and all spec sources into the downstream dir of
        the source-git repo, then stages and commits them.
        """
        if self.dist_git_branch:
            self.dist_git.checkout_branch(self.dist_git_branch)
        self.dist_git.download_upstream_archive()
        root_downstream_dir = self.dist_git.get_root_downstream_dir_for_source_git(
            self.local_project.working_dir
        )
        os.makedirs(root_downstream_dir, exist_ok=True)
        shutil.copy2(self.dist_git.absolute_specfile_path, root_downstream_dir)
        logger.info(
            f"Copy all sources from {self.dist_git.absolute_source_dir} to {root_downstream_dir}."
        )
        # we may not want to copy the primary archive - it's worth a debate
        for source in self.dist_git.specfile.get_sources():
            source_dest = root_downstream_dir / Path(source).name
            logger.debug(f"copying {source} to {source_dest}")
            shutil.copy2(source, source_dest)
        self.local_project.stage(self.dist_git.source_git_downstream_suffix)
        self.local_project.commit(message="add downstream distribution sources")
    def _add_packit_config(self):
        """Write a minimal .packit.yaml into the source-git repo and commit it."""
        packit_yaml_path = self.local_project.working_dir.joinpath(".packit.yaml")
        packit_yaml_path.write_text(
            "---\n"
            f'specfile_path: "{self.specfile_path.relative_to(self.local_project.working_dir)}"\n'
            f'upstream_ref: "{self.upstream_ref}"\n'
            f'patch_generation_ignore_paths: ["{self.dist_git.source_git_downstream_suffix}"]\n\n'
        )
        self.local_project.stage(".packit.yaml")
        self.local_project.commit("add packit.yaml")
def get_BUILD_dir(self):
path = self.dist_git.local_project.working_dir
build_dirs = [d for d in (path / "BUILD").iterdir() if d.is_dir()]
if len(build_dirs) > 1:
raise RuntimeError(f"More than one directory found in {path / 'BUILD'}")
if len(build_dirs) < 1:
raise RuntimeError(f"No subdirectory found in {path / 'BUILD'}")
return build_dirs[0]
def _rebase_patches(self, from_branch):
"""Rebase current branch against the from_branch """
to_branch = "dist-git-commits" # temporary branch to store the dist-git history
logger.info(f"Rebase patches from dist-git {from_branch}.")
BUILD_dir = self.get_BUILD_dir()
self.local_project.fetch(BUILD_dir, f"+{from_branch}:{to_branch}")
# shorter format for better readability in case of an error
commits_to_cherry_pick = [
| |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import base64
import json
import logging
import re
import sys
from datetime import datetime
from functools import wraps
from io import BytesIO
import boto3
import flask
from flask import request, g
from waitress import serve
from werkzeug.exceptions import HTTPException
from werkzeug.serving import WSGIRequestHandler
from werkzeug.urls import url_encode
import research_pacs.shared.dicom_json as rpacs_dicom_json
import research_pacs.shared.util as rpacs_util
from research_pacs.shared.database import DB, DBDicomJson, DBExportTasks
from research_pacs.shared.orthanc import OrthancClient
from research_pacs.website.env import get_env
from research_pacs.website.log import AccessLogger
from research_pacs.website.permission import PermissionsManager, PermissionError
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# Don't display waitress logs
logger_waitress = logging.getLogger('waitress.queue')
logger_waitress.disabled = True
app = flask.Flask(__name__)
# Populated by main() at startup: environment settings and shared service clients
env = None
client = None
def main():
    """Initialize the shared clients and serve the Flask app with Waitress.

    Exits the process with status 1 when any client fails to initialize.
    """
    logger.info('Starting website')
    try:
        global env
        env = get_env()
        # Create the clients
        global client
        client = rpacs_util.ClientList()
        client.add('access_logger', AccessLogger(env.access_log_file, env.log_excluded_prefixes, env.log_excluded_suffixes))
        client.add('orthanc', OrthancClient(env.orthanc_host, env.orthanc_user, env.orthanc_pwd))
        client.add('permissions', PermissionsManager(env.permissions_file, env.region))
    # Exit if any of the previous steps failed
    except Exception as e:
        logger.fatal(f'Failed to initialize the program - {e}')
        sys.exit(1)
    # Enable HTTP/1.1 and run the Flask application with Waitress
    WSGIRequestHandler.protocol_version = "HTTP/1.1"
    serve(app, host='0.0.0.0', port=8080, threads=4, _quiet=True)
    # Before the program exits
    logger.info('Stopping website')
def login_required(f):
    """
    Retrieve the user name and the user groups from the JWT tokens passed by the Application Load
    Balancer via HTTP headers. The user name will be available in `g.user` and the groups in
    `g.groups`. The request is aborted with a 401 response if no user name is found.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        g.user = None
        g.groups = []
        # Parse the ID Token and Access Token to find the user name and groups
        for header in ('X-Amzn-Oidc-Data', 'X-Amzn-Oidc-Accesstoken'):
            if header in request.headers:
                # JWT payload is the second dot-separated segment
                encoded_payload = request.headers[header].split('.')[1]
                # Add a padding that may be missing in the JWT value (extra '='
                # characters are ignored by b64decode)
                encoded_payload_padding = encoded_payload + "====="
                decoded_payload = base64.b64decode(encoded_payload_padding).decode('utf-8')
                payload = json.loads(decoded_payload)
                if env.claim_user in payload:
                    g.user = payload[env.claim_user]
                if env.claim_groups in payload:
                    g.groups = payload[env.claim_groups]
        # Abort unless a user name was found. The original code used `assert`,
        # but assertions are stripped when Python runs with optimizations
        # enabled (-O), which would silently disable this authentication check.
        if g.user is None:
            flask.abort(401)
        return f(*args, **kwargs)
    return wrapper
@app.before_request
def before_request_func():
    """Log the incoming request for debugging purposes"""
    logger.debug(f'Received a {request.method} request to {request.path}')
@app.after_request
def after_request_func(response):
    """Log the HTTP request and response in the access log file, and close the
    per-request database connection if a view opened one (stored in `g.db`).
    """
    logger.debug(f'Sent the response - StatusCode={response.status_code}')
    client.access_logger.log_http_request(response)
    # Identity comparison with None (PEP 8) instead of `!= None`
    if g.get('db') is not None:
        g.db.close()
    return response
@app.errorhandler(Exception)
def errorhandler_func(e):
    """Render the generic error page for any unhandled exception, with a
    message and status code that depend on the exception type."""
    # The user has no profiles associated with their user name or groups
    if isinstance(e, PermissionError):
        return flask.render_template('error.html', error_message="You don't have profiles associated to your user or your groups. Please check with your administrator."), 401
    # Exceptions that are not Flask HTTP errors are unexpected failures
    if not isinstance(e, HTTPException):
        logger.error(f'Failed to process the request - Path={request.path} Error={e}')
        return flask.render_template('error.html', error_message='Something went wrong.'), 500
    # Flask HTTP errors: unauthorized gets a dedicated message
    if e.code == 401:
        return flask.render_template('error.html', error_message='You are not authorized to view this page, or this page does not exist.'), 401
    logger.warning(f'Failed to process the request - Path={request.path} ResponseCode={e.code}')
    return flask.render_template('error.html', error_message=f'Something went wrong. Error code: {e.code}'), e.code
@app.route('/<path:path>', methods=['GET', 'POST', 'PUT', 'DELETE'])
@login_required
def proxy_orthanc_func(*args, **kwargs):
    """By default, forward the request to the Orthanc server"""
    # Abort if the user is not authorized to view this page
    if not client.permissions.is_orthanc_request_allowed(request.method, request.path):
        flask.abort(401)
    # Forward the request and its content to the Orthanc server
    response = client.orthanc._request(
        method=request.method,
        full_path=request.full_path[1:],
        raise_error=False,
        headers={key: value for (key, value) in request.headers if key != 'Host'},
        data=request.get_data(),
        cookies=request.cookies,
        allow_redirects=False
    )
    # Reconstruct the original host URL as requested by the user.
    # BUG FIX: str.replace returns a new string, so the result must be assigned
    # back; the original code discarded it, leaving `Location` headers rewritten
    # with the wrong scheme behind a TLS-terminating load balancer.
    initial_host_url = request.host_url[:-1]
    if 'X-Forwarded-Proto' in request.headers:
        initial_scheme = request.headers['X-Forwarded-Proto'].lower()
        initial_host_url = initial_host_url.replace(request.scheme, initial_scheme)
    # Exclude the following headers from the Orthanc response, because they should not be
    # forwarded as-is to the original user
    excluded_headers = ['content-encoding', 'content-length', 'transfer-encoding', 'connection', 'keep-alive']
    headers = [
        (name, value) if (name.lower() != 'location')
        else (name, value.replace(client.orthanc._host, initial_host_url))
        for (name, value) in response.raw.headers.items() if name.lower() not in excluded_headers
    ]
    # Return the Orthanc response to the user
    return flask.Response(response.content, response.status_code, headers)
@app.route('/healthcheck')
def healthcheck_page():
    """Respond to the heartbeat request from the Application Load Balancer.

    Deliberately not decorated with `login_required` so the load balancer can
    reach it without authentication.
    """
    return flask.render_template('healthcheck.html')
@app.route('/', methods=['GET'])
def home_page():
    """Home page"""
    # Whether the current user's profiles grant access to the Orthanc server;
    # passed to the template along with the user guide URL
    orthanc_access = client.permissions.has_access_to_orthanc()
    return flask.render_template('home.html', user_guide_url=env.user_guide_url, orthanc_access=orthanc_access)
@app.route('/aws/logout')
def logout_page():
    """Logout page. Expire the ALB Authentication cookies and redirect to the Cognito logout page"""
    response = flask.make_response(flask.redirect(env.sign_out_url))
    # The ALB splits the session across four numbered cookies; expire them all
    for cookie_index in range(4):
        response.set_cookie(f'AWSELBAuthSessionCookie-{cookie_index}', '', expires=0)
    return response
@app.route('/aws/me')
@login_required
def me_page():
    """Page My Permissions: display the profiles associated with the current user"""
    profiles = client.permissions.get_profiles_description()
    return flask.render_template('me.html', profiles=profiles)
def can_access_instance_or_series(f):
    """Decorator: abort with 401 unless the current user is allowed to access the
    DICOM instance or series identified by the route kwargs `instance_id` or
    `series_id`."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        # Generate a JSONPath query that corresponds to the current user's instance access
        # permissions. If `jsonpath_query` is not empty, the current user does not have full
        # access to all DICOM instances, so we check if the user is authorized to access this
        # instance
        jsonpath_query = client.permissions.get_jsonpath_query('')
        if jsonpath_query == '':
            # Empty query means unrestricted access
            return f(*args, **kwargs)
        else:
            # Open a per-request DB connection; it is closed in after_request_func
            g.db = DB(env.pg_host, env.pg_port, env.pg_user, env.pg_pwd, env.pg_db)
            db_dicom_json = DBDicomJson(g.db)
            if 'instance_id' in kwargs and db_dicom_json.has_access_to_instance(jsonpath_query, kwargs['instance_id']):
                return f(*args, **kwargs)
            if 'series_id' in kwargs and db_dicom_json.has_access_to_series(jsonpath_query, kwargs['series_id']):
                return f(*args, **kwargs)
        # Abort if the current user is not authorized to access this instance or series
        flask.abort(401)
    return wrapper
@app.route('/aws/instances/<instance_id>/preview')
@login_required
@can_access_instance_or_series
def preview_instance_page(instance_id):
    """Page Preview a DICOM Instance.

    Initially shows at most 10 frames (`nb_frames_to_show`) out of the total.
    """
    nb_frames = client.orthanc.count_instance_frames(instance_id)
    return flask.render_template('preview.html', instance_id=instance_id, nb_frames_to_show=min(10,nb_frames), nb_frames_total=nb_frames)
@app.route('/aws/instances/<instance_id>/frames/<int:frame>/preview')
@login_required
@can_access_instance_or_series
def preview_instance_func(instance_id, frame):
    """Serve one frame of a DICOM instance, returned with an image/png mimetype"""
    img_bytes = client.orthanc.download_instance_frame(instance_id, frame=frame)
    return flask.send_file(BytesIO(img_bytes), mimetype='image/png')
@app.route('/aws/instances/<instance_id>/download')
@login_required
@can_access_instance_or_series
def download_instance_func(instance_id):
    """Download a DICOM Instance as DCM"""
    file_bytes = client.orthanc.download_instance_dicom(instance_id)
    # NOTE(review): `attachment_filename` was renamed `download_name` in Flask 2.2 —
    # confirm the pinned Flask version before upgrading.
    return flask.send_file(BytesIO(file_bytes), mimetype='application/dicom', as_attachment=True, attachment_filename=f'{instance_id}.dcm')
@app.route('/aws/series/<series_id>/download')
@login_required
@can_access_instance_or_series
def download_series_func(series_id):
    """Download a DICOM Series as ZIP"""
    file_bytes = client.orthanc.download_series_zip(series_id)
    # NOTE(review): `attachment_filename` was renamed `download_name` in Flask 2.2 —
    # confirm the pinned Flask version before upgrading.
    return flask.send_file(BytesIO(file_bytes), mimetype='application/zip', as_attachment=True, attachment_filename=f'{series_id}.zip')
@app.route('/aws/search')
@login_required
def search_page():
"""Page Search DICOM Instances"""
def get_unique_series_ids(instances):
"""Retrieve the list of series ID related to the instances"""
result = []
for instance in instances:
series_id = instance[1]
if not series_id in result:
result.append(series_id)
return result
# Retrieve the parameters from the query string
query = request.args.get('query', default='')
display_action = 'display' in request.args
export_action = 'export' in request.args
offset = int(request.args.get('offset', default=0))
# Display the page with no results if the form was not submitted
if display_action is False and export_action is False:
return flask.render_template('search.html')
# Translate the query and the user's instance access permissions into a JSON Path query
try:
jsonpath_query = client.permissions.get_jsonpath_query(query)
logger.debug(f'Search - JSON Path Query: {jsonpath_query}')
except ValueError:
return flask.render_template('search.html', error_message='Your query is invalid.')
g.db = DB(env.pg_host, env.pg_port, env.pg_user, env.pg_pwd, env.pg_db)
db_dicom_json = DBDicomJson(g.db)
# If the "Display" button was pressed
if display_action is True:
def generate_header(header_keywords, dicom_json):
"""Generate the accodion headers"""
lines = []
for keyword in header_keywords.split(','):
value = rpacs_dicom_json.get_top_level_elem_value_from_dict(dicom_json, keyword)
lines.append(f'{keyword}: <strong>{value}</strong>')
return '<br>'.join(lines)
def rewrite_full_path_new_offset(new_offset):
args = request.args.copy()
args['offset'] = new_offset
return f'{request.path}?{url_encode(args)}'
# Retrieve the number of instances and series that match the query, and associated details
# for up to `env.results_per_page` from the offset `offset`
try:
total_instances, total_series = db_dicom_json.count_instances(jsonpath_query)
instances_in_page = db_dicom_json.search_instances_with_series(jsonpath_query, limit=env.results_per_page, offset=offset)
series_ids_in_page = get_unique_series_ids(instances_in_page)
except:
logger.warning(f'Page {request.path} - Query: {query}')
return flask.render_template('search.html', error_message='Failed to query the database. Please check your query and retry.')
# Prepare a dict `results` that is used by the Jinja template to display the instances and
# series for the current page
results = []
for series_id in series_ids_in_page:
series = {
'SeriesId': series_id,
'Instances': []
}
for instance in instances_in_page:
instance_id = instance[0]
instance_series_id = instance[1]
index_in_series = instance[2]
instance_json = instance[3]
if instance_series_id == series_id:
instance_json_keywords = rpacs_dicom_json.add_keywords_to_dicom_json(instance_json)
series['Instances'].append({
'InstanceId': instance_id,
'IndexInSeries': index_in_series,
'InstanceHeader': generate_header(env.instance_header_keywords, instance_json),
'InstanceJSON': json.dumps(instance_json_keywords, indent=4, sort_keys=True)
})
if not 'SeriesHeader' in series:
series['SeriesHeader'] = generate_header(env.series_header_keywords, instance_json)
series['Instances'] = sorted(series['Instances'], key=lambda k: k['IndexInSeries'])
results.append(series)
# Calculate the pagination information
pagination = {
'TotalInstances': total_instances,
'TotalSeries': total_series,
}
if offset > 0:
pagination['PreviousEnabled'] = True
left_new_offset = max(0, offset - env.results_per_page)
pagination['PreviousLink'] = rewrite_full_path_new_offset(left_new_offset)
pagination['PreviousRange'] = f'{left_new_offset+1} - {left_new_offset+env.results_per_page}'
if offset + env.results_per_page < total_instances:
pagination['NextEnabled'] = True
right_new_offset = offset + env.results_per_page
pagination['NextLink'] = rewrite_full_path_new_offset(right_new_offset)
pagination['NextRange'] = f'{right_new_offset+1} - {min(right_new_offset+env.results_per_page, total_instances)}'
orthanc_access = client.permissions.has_access_to_orthanc()
response = flask.render_template('search.html', pagination=pagination, results=results, orthanc_access=orthanc_access)
client.access_logger.log_search("Display", query, jsonpath_query, total_instances, total_series)
# If the "Export" button was pressed, return a formatted JSON document for each of | |
i
f
i
e
r
.
Type `int`. """
self.servicedDate = None
"""
D
a
t
e
o
r
d
a
t
e
s
o
f
s
e
r
v
i
c
e
o
r
p
r
o
d
u
c
t
d
e
l
i
v
e
r
y
.
Type `FHIRDate` (represented as `str` in JSON). """
self.servicedPeriod = None
"""
D
a
t
e
o
r
d
a
t
e
s
o
f
s
e
r
v
i
c
e
o
r
p
r
o
d
u
c
t
d
e
l
i
v
e
r
y
.
Type `Period` (represented as `dict` in JSON). """
self.subSite = None
"""
A
n
a
t
o
m
i
c
a
l
s
u
b
-
l
o
c
a
t
i
o
n
.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.udi = None
"""
U
n
i
q
u
e
d
e
v
i
c
e
i
d
e
n
t
i
f
i
e
r
.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.unitPrice = None
"""
F
e
e
,
c
h
a
r
g
e
o
r
c
o
s
t
p
e
r
i
t
e
m
.
Type `Money` (represented as `dict` in JSON). """
super(ExplanationOfBenefitItem, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
    """Return the FHIR element property table for this resource.

    Extends the superclass table with this resource's own elements. Each entry
    appears to follow the fhirclient convention:
    (attribute name, JSON name, type, is_list, one-of-many group, required)
    — TODO confirm against the fhirclient base class.
    """
    js = super(ExplanationOfBenefitItem, self).elementProperties()
    js.extend([
        ("adjudication", "adjudication", ExplanationOfBenefitItemAdjudication, True, None, False),
        ("bodySite", "bodySite", codeableconcept.CodeableConcept, False, None, False),
        ("careTeamSequence", "careTeamSequence", int, True, None, False),
        ("category", "category", codeableconcept.CodeableConcept, False, None, False),
        ("detail", "detail", ExplanationOfBenefitItemDetail, True, None, False),
        ("diagnosisSequence", "diagnosisSequence", int, True, None, False),
        ("encounter", "encounter", fhirreference.FHIRReference, True, None, False),
        ("factor", "factor", float, False, None, False),
        ("informationSequence", "informationSequence", int, True, None, False),
        ("locationAddress", "locationAddress", address.Address, False, "location", False),
        ("locationCodeableConcept", "locationCodeableConcept", codeableconcept.CodeableConcept, False, "location", False),
        ("locationReference", "locationReference", fhirreference.FHIRReference, False, "location", False),
        ("modifier", "modifier", codeableconcept.CodeableConcept, True, None, False),
        ("net", "net", money.Money, False, None, False),
        ("noteNumber", "noteNumber", int, True, None, False),
        ("procedureSequence", "procedureSequence", int, True, None, False),
        ("productOrService", "productOrService", codeableconcept.CodeableConcept, False, None, True),
        ("programCode", "programCode", codeableconcept.CodeableConcept, True, None, False),
        ("quantity", "quantity", quantity.Quantity, False, None, False),
        ("revenue", "revenue", codeableconcept.CodeableConcept, False, None, False),
        ("sequence", "sequence", int, False, None, True),
        ("servicedDate", "servicedDate", fhirdate.FHIRDate, False, "serviced", False),
        ("servicedPeriod", "servicedPeriod", period.Period, False, "serviced", False),
        ("subSite", "subSite", codeableconcept.CodeableConcept, True, None, False),
        ("udi", "udi", fhirreference.FHIRReference, True, None, False),
        ("unitPrice", "unitPrice", money.Money, False, None, False),
    ])
    return js
class ExplanationOfBenefitItemAdjudication(backboneelement.BackboneElement):
    """ Adjudication details.

    If this item is a group then the values here are a summary of the
    adjudication of the detail items. If this item is a simple product or
    service then this is the result of the adjudication of this item.
    """

    resource_type = "ExplanationOfBenefitItemAdjudication"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.amount = None
        """ Monetary amount.
        Type `Money` (represented as `dict` in JSON). """

        self.category = None
        """ Type of adjudication information.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.reason = None
        """ Explanation of adjudication outcome.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.value = None
        """ Non-monetary value.
        Type `float`. """

        super(ExplanationOfBenefitItemAdjudication, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(ExplanationOfBenefitItemAdjudication, self).elementProperties()
        js.extend([
            ("amount", "amount", money.Money, False, None, False),
            ("category", "category", codeableconcept.CodeableConcept, False, None, True),
            ("reason", "reason", codeableconcept.CodeableConcept, False, None, False),
            ("value", "value", float, False, None, False),
        ])
        return js
class ExplanationOfBenefitItemDetail(backboneelement.BackboneElement):
"""
A
d
d
i
t
i
o
n
a
l
i
t
e
m
s
.
S
e
c
o
n
d
-
t
i
e
r
o
f
g
o
o
d
s
a
n
d
s
e
r
v
i
c
e
s
.
"""
resource_type = "ExplanationOfBenefitItemDetail"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.adjudication = None
"""
D
e
t
a
i
l
l
e
v
e
l
a
d
j
u
d
i
c
a
t
i
o
n
d
e
t
a
i
l
s
.
List of `ExplanationOfBenefitItemAdjudication` items (represented as `dict` in JSON). """
self.category = None
"""
B
e
n
e
f
i
t
c
l
a
s
s
i
f
i
c
a
t
i
o
n
.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.factor = None
"""
P
r
i
c
e
s
c
a
l
i
n
g
f
a
c
t
o
r
.
Type `float`. """
self.modifier = None
"""
S
e
r
v
i
c
e
/
P
r
o
d
u
c
t
b
i
l
l
i
n
g
m
o
d
i
f
i
e
r
s
.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.net = None
"""
T
o
t
a
l
i
t
e
m
c
o
s
t
.
Type `Money` (represented as `dict` in JSON). """
self.noteNumber = None
"""
A
p
p
l
i
c
a
b
l
e
n
o
t
e
n
u
m
b
e
r
s
.
List of `int` items. """
self.productOrService = None
"""
B
i
l
l
i
n
g
,
s
e
r
v
i
c
e
,
p
r
o
d
u
c
t
,
o
r
d
r
u
g
c
o
d
e
.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.programCode = None
"""
P
r
o
g
r
a
m
t
h
e
p
r
o
d
u
c
t
o
r
s
e
r
v
i
c
e
i
s
p
r
o
v
i
d
e
d
u
n
d
e
r
.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.quantity = None
"""
C
o
u
n
t
o
f
p
r
o
d
u
c
t
s
o
r
s
e
r
v
i
c
| |
#%%
############################################################################
# IMPORTS
############################################################################
import pandas as pd
import numpy as np
from utils import model_zoo, data_transformer
import argparse
import pickle
import os
#%%
############################################################################
# CONSTANTS & PARAMETERS
############################################################################
# Default file locations and model name; these module-level values are
# overwritten by process_arguments() from the command line.
MODEL_NAME = "KERAS_LENET5"
PICKLE_PATH = "C:/kaggle/kaggle_keypoints/pickle"
MODEL_PATH = "C:/kaggle/kaggle_keypoints/models"
# Processing behavior (parameters)
NORMALIZE_LABELS = False
VERBOSE = True
USE30 = True
# Processing behavior (constants)
AVAILABLE_MODELS = ["KERAS_LENET5", "KERAS_INCEPTION", "KERAS_KAGGLE1", "KERAS_NAIMISHNET", "KERAS_CONVNET5", "KERAS_INCEPTIONV3", "KERAS_KAGGLE2", "KERAS_RESNET50", "KERAS_RESNET", "KERAS_RESNEXT50", "KERAS_RESNEXT101"]
TEST_DATA_FILE = "cleandata_naive_test.pkl"
TEST_IDS_FILE = "raw_id_lookup.pkl"
OVERLAP_FILE = "cleandata_naive_overlap.pkl"
TEST8_DATA_FILE = "cleandata_test8.pkl"
TEST30_DATA_FILE = "cleandata_test30.pkl"
#%%
############################################################################
# ARGUMENT SPECIFICATION
############################################################################
parser = argparse.ArgumentParser(description = "Performs predictions for the Kaggle Facial Keypoints Detection challenge.")
# Commandline arguments
parser.add_argument('-nv', '--no_verbose', action = 'store_true', help = 'Disables verbose output mode for more detailed descriptions of process.')
parser.add_argument('-pp', '--pickle_path', type = str, default = "C:/kaggle/kaggle_keypoints/pickle", help = "Path to location of output pickle files (post processing files).")
parser.add_argument('-mp', '--model_path', type = str, default = "C:/kaggle/kaggle_keypoints/models", help = "Path to location of output model files.")
parser.add_argument('-m', '--model_name', type = str, default = "KERAS_LENET5", help = "Name of the model to train.")
parser.add_argument('-pa', '--partial', action = 'store_true', help = 'Trains only using the 8-value dataset (vs. the full 30-value dataset)')
parser.add_argument('-nl', '--normalize_labels', action = 'store_true', help = "Enables the normalization of prediction label values prior to training.")
############################################################################
# ARGUMENT PARSING
############################################################################
def process_arguments(parsed_args, display_args = False):
    """Parse the command-line arguments and assign them to the module-level
    configuration globals, validating paths and the model name.

    :param parsed_args: unused; kept for backward compatibility with callers
    :param display_args: if True, print the arguments in use
    :raises RuntimeError: if a path does not exist or the model name is invalid
    """
    global VERBOSE, PICKLE_PATH, MODEL_PATH, MODEL_NAME, NORMALIZE_LABELS, USE30
    args = vars(parser.parse_args())
    if display_args:
        # BUG FIX: the banner used "\P", an invalid escape that printed a literal
        # backslash; the intended character was a newline. Also reuse the
        # already-parsed `args` dict instead of re-parsing argv per argument.
        print("".join(["\nPREDICT Arguments in use:\n", "-" * 30, "\n"]))
        for arg in args:
            print("Parameter '%s' == %s" % (arg, str(args[arg])))
        print("\n")
    # Assign arguments to globals
    VERBOSE = not args['no_verbose']
    USE30 = not args['partial']
    MODEL_NAME = args['model_name']
    NORMALIZE_LABELS = args['normalize_labels']
    MODEL_PATH = str(args['model_path']).lower().strip().replace('\\', '/')
    PICKLE_PATH = str(args['pickle_path']).lower().strip().replace('\\', '/')
    # validate the presence of the paths
    for p, v, l in zip([MODEL_PATH, PICKLE_PATH], ['model_path', 'pickle_path'], ['Model file path', 'Pickle file path']):
        if not os.path.exists(p):
            raise RuntimeError(" ".join([l, "'%s'" % p, "specified in parameter `%s` does not exist." % v]))
    # validate the parameters entered
    if not MODEL_NAME in AVAILABLE_MODELS:
        raise RuntimeError("Parameter `model_name` value of '%s' is invalid. Must be in list: %s" % (MODEL_NAME, str(AVAILABLE_MODELS)))
#%%
############################################################################
# LOAD DATA
############################################################################
# load the data for training
def load_data(pickle_path, test_file, id_file, overlap_file, verbose = True):
    """Load the test, test-ID and overlap pickle files used for prediction.

    :param pickle_path: directory containing the pickle files
    :param test_file: file name of the test data pickle
    :param id_file: file name of the test IDs pickle
    :param overlap_file: file name of the overlap pickle
    :param verbose: if True, print progress information (assumes the unpickled
        objects expose a `.shape` attribute, e.g. DataFrames/arrays)
    :returns: tuple (test, ids, overlap) of the unpickled objects
    :raises RuntimeError: if any of the three files does not exist
    """
    if verbose: print("".join(["-" * 50, "\n>>> BEGIN LOAD DATA <<<\n", "-" * 50, "\n"]))
    if not pickle_path.endswith("/"): pickle_path = "".join([pickle_path, "/"])
    test_file = "".join([pickle_path, test_file])
    id_file = "".join([pickle_path, id_file])
    overlap_file = "".join([pickle_path, overlap_file])
    for f, l in zip([test_file, id_file, overlap_file], ['Test', 'Test IDs', 'Overlap']):
        if not os.path.isfile(f):
            raise RuntimeError("%s file '%s' not found - training cancelled." % (l, f))
    # Use context managers so file handles are closed deterministically;
    # pickle.load(open(...)) leaks the handle until garbage collection.
    with open(test_file, "rb") as fh:
        test = pickle.load(fh)
    if verbose: print("Test file '%s' loaded; shape: %s" % (test_file, str(test.shape)))
    with open(id_file, "rb") as fh:
        ids = pickle.load(fh)
    if verbose: print("Test IDs file '%s' loaded; shape: %s" % (id_file, str(ids.shape)))
    with open(overlap_file, "rb") as fh:
        overlap = pickle.load(fh)
    if verbose: print("Overlap file '%s' loaded; shape: %s" % (overlap_file, str(overlap.shape)))
    if verbose: print("".join(["\n", "-" * 50, "\n>>> END LOAD DATA <<<\n", "-" * 50, "\n"]))
    return test, ids, overlap
# %%
############################################################################
# PREDICT MODEL (GENERIC HANDLER)
############################################################################
def predict_model(model_path, pickle_path, model_name, normalize_labels, test, ids, overlap, predict_file, skip_output = False, skip_overlap = False, full = True, verbose = True):
    """Generic prediction dispatcher: run inference with the model named by
    `model_name` and write the results.

    Dispatches to the per-architecture predict_model_* function, then writes
    either the normal TEST prediction output (output_prediction) or, when
    `skip_output` is True, the STACK cross-validation output (output_stack).

    :param model_path: directory holding trained model files
    :param pickle_path: directory holding pickle data files
    :param model_name: one of AVAILABLE_MODELS
    :param normalize_labels: whether labels were normalized during training
    :param test, ids, overlap: data loaded by load_data()
    :param predict_file: output file for the predictions
    :param skip_output: if True, write stacking output instead of predictions
    :param skip_overlap: passed through to the output routine
    :param full: True for the 30-value dataset, False for the 8-value one
    :param verbose: print progress information
    :returns: the raw prediction object returned by the dispatched function
    :raises RuntimeError: if `model_name` is not recognized
    """
    if verbose: print("".join(["-" * 50, "\n>>> BEGIN PREDICT ON %s <<<\n" % model_name, "-" * 50, "\n"]))
    # load helper modules for models and data transformation
    # NOTE(review): these use the module-level MODEL_PATH/PICKLE_PATH/VERBOSE
    # globals, not the `model_path`/`pickle_path`/`verbose` parameters — confirm
    # this is intended.
    models = model_zoo.Models(model_path = MODEL_PATH)
    xform = data_transformer.Xform(pickle_path = PICKLE_PATH, verbose = VERBOSE)
    # validate the existence of the model output path; if it doesn't exist, create it
    if model_path.endswith("/"): sep_add = ""
    else: sep_add = "/"
    validate_path = "".join([model_path, sep_add, model_name])
    if not os.path.exists(validate_path):
        if verbose: print("Model output path '%s' does not yet exist, creating it." % validate_path)
        os.makedirs(validate_path)
    # call the prediction module specific to the requested architecture
    if model_name == "KERAS_LENET5":
        feature_name = "ALL_FEATURES"
        pred = predict_model_lenet5(models = models, xform = xform, test = test, ids = ids,
            feature_name = feature_name, full = full, verbose = verbose)
    elif model_name == "KERAS_INCEPTIONV3":
        feature_name = "ALL_FEATURES"
        pred, _ = predict_model_inceptionv3(models = models, xform = xform, test = test, ids = ids,
            feature_name = feature_name, full = full, verbose = verbose)
    elif model_name == "KERAS_RESNET50":
        feature_name = "ALL_FEATURES"
        pred = predict_model_resnet50(models = models, xform = xform, test = test, ids = ids,
            feature_name = feature_name, full = full, verbose = verbose)
    elif model_name == "KERAS_RESNEXT50":
        feature_name = "ALL_FEATURES"
        pred = predict_model_resnext50(models = models, xform = xform, test = test, ids = ids,
            feature_name = feature_name, full = full, verbose = verbose)
    elif model_name == "KERAS_RESNEXT101":
        feature_name = "ALL_FEATURES"
        pred = predict_model_resnext101(models = models, xform = xform, test = test, ids = ids,
            feature_name = feature_name, full = full, verbose = verbose)
    elif model_name == "KERAS_RESNET":
        feature_name = "ALL_FEATURES"
        pred = predict_model_resnet(models = models, xform = xform, test = test, ids = ids,
            feature_name = feature_name, full = full, verbose = verbose)
    elif model_name == "KERAS_INCEPTION":
        # Inception returns three output heads plus their column lists
        feature_name = "ALL_FEATURES"
        Y_main, Y_aux1, Y_aux2, Y_main_cols, Y_aux1_cols, Y_aux2_cols = predict_model_inception(models = models,
            xform = xform, test = test, ids = ids, feature_name = feature_name, full = full, verbose = verbose)
        pred = [Y_main, Y_aux1, Y_aux2, Y_main_cols, Y_aux1_cols, Y_aux2_cols]
    elif model_name == "KERAS_KAGGLE1":
        feature_name = "ALL_FEATURES"
        pred = predict_model_kaggle1(models = models, xform = xform, test = test, ids = ids,
            feature_name = feature_name, full = full, verbose = verbose)
    elif model_name == "KERAS_KAGGLE2":
        feature_name = "ALL_FEATURES"
        pred = predict_model_kaggle2(models = models, xform = xform, test = test, ids = ids,
            feature_name = feature_name, full = full, verbose = verbose)
    elif model_name == "KERAS_CONVNET5":
        feature_name = "ALL_FEATURES"
        pred = predict_model_convnet5(models = models, xform = xform, test = test, ids = ids,
            feature_name = feature_name, full = full, verbose = verbose)
    elif model_name == "KERAS_NAIMISHNET":
        # NaimishNet trains one model per keypoint, so it needs the keypoint list
        if full:
            feature_name = ['left_eye_center', 'right_eye_center', 'left_eye_inner_corner', 'left_eye_outer_corner',
                'right_eye_inner_corner', 'right_eye_outer_corner', 'left_eyebrow_inner_end', 'left_eyebrow_outer_end',
                'right_eyebrow_inner_end', 'right_eyebrow_outer_end', 'nose_tip', 'mouth_left_corner', 'mouth_right_corner',
                'mouth_center_top_lip', 'mouth_center_bottom_lip']
        else:
            feature_name = ['left_eye_center', 'right_eye_center', 'nose_tip', 'mouth_center_bottom_lip']
        pred = predict_model_naimishnet(models = models, xform = xform, test = test, ids = ids, feature_name = feature_name,
            normalize_labels = normalize_labels, full = full, verbose = verbose)
    else:
        raise RuntimeError("Model name '%s' not understood; cancelling training." % model_name)
    if not skip_output:
        # this branch for normal output against TEST
        output_prediction(model_path = model_path, model_name = model_name, Y = pred, test = test, ids = ids, feature_name = feature_name,
            predict_file = predict_file, xform = xform, overlap = overlap, normalize_labels = normalize_labels, skip_overlap = skip_overlap, full = full, verbose = verbose)
    else:
        # this branch for output of STACK cross validation
        output_stack(model_path = model_path, model_name = model_name, Y = pred, test = test, ids = ids, feature_name = feature_name,
            predict_file = predict_file, xform = xform, overlap = overlap, normalize_labels = normalize_labels, skip_overlap = skip_overlap, full = full, verbose = verbose)
    if verbose: print("".join(["-" * 50, "\n>>> END PREDICT ON %s <<<\n" % model_name, "-" * 50, "\n"]))
    return pred
# %%
############################################################################
# PREDICT MODEL NAIMISHNET
############################################################################
def predict_model_naimishnet(models, xform, test, ids, feature_name, normalize_labels, full = True, verbose = True):
    """Run inference with the per-keypoint NaimishNet models.

    Returns a melted DataFrame with columns ['image_id', 'feature_name',
    'location'], one row per (image, keypoint coordinate).

    :param models: model zoo providing predict_keras_naimishnet
    :param xform: data transformer providing PrepareTest / UnNormalize_Labels
    :param test, ids: test data and ID lookup
    :param feature_name: list of keypoint names (one model each)
    :param normalize_labels: if True, predictions are un-normalized back to pixels
    :param full: True for the 30-value dataset, False for the 8-value one
    :param verbose: print progress information
    """
    # Collect one melted frame per keypoint and concatenate once at the end.
    # (The previous implementation used DataFrame.append in a loop, which was
    # deprecated in pandas 1.4 and removed in 2.0, and was quadratic anyway.)
    melted_frames = []
    for keypoint in feature_name:
        X, subset = xform.PrepareTest(test, ids, keypoint, verbose = verbose)
        subset = subset[['image_id']]
        Y = models.predict_keras_naimishnet(X = X, feature_name = keypoint, full = full, verbose = verbose)
        mod_subset = subset.copy()
        # Column 0 is the x coordinate, column 1 the y coordinate
        for i, lbl in zip(range(Y.shape[1]), ['_x', '_y']):
            if normalize_labels:
                # un-normalize the predictions back to pixel space
                Y[:, i] = xform.UnNormalize_Labels(Y[:, i])
            # ensure pixel boundaries are clipped between 0.0 and 96.0
            Y[:, i] = np.clip(Y[:, i], 0.0, 96.0)
            col = "".join([keypoint, lbl])
            mod_subset[col] = Y[:, i]
        # unpivot the x/y columns into (image_id, variable, value) rows
        melted_frames.append(pd.melt(mod_subset, id_vars = ['image_id']))
    if melted_frames:
        submission = pd.concat(melted_frames, ignore_index = True)
    else:
        # defensive: no keypoints requested
        submission = pd.DataFrame({'image_id': [], 'variable': [], 'value': []})
    submission.columns = ['image_id', 'feature_name', 'location']
    return submission
#%%
############################################################################
# PREDICT MODEL LENET5
############################################################################
def predict_model_lenet5(models, xform, test, ids, feature_name, full = True, verbose = True):
    """Prepare the test inputs and return the LeNet5 model's predictions."""
    prepared_inputs, _ = xform.PrepareTest(test = test, ids = ids, feature_name = feature_name, verbose = verbose)
    return models.predict_keras_lenet5(X = prepared_inputs, feature_name = feature_name, full = full, verbose = verbose)
#%%
############################################################################
# PREDICT MODEL KAGGLE1
############################################################################
def predict_model_kaggle1(models, xform, test, ids, feature_name, full = True, verbose = True):
X, _ = xform.PrepareTest(test = test, | |
<reponame>opensafely-core/stata-docker
'''
*! version 1.0.0 06may2019
'''
__all__ = [
"Characteristic", "Data", "Datetime", "Frame", "FrameError",
"Macro", "Mata", "Matrix", "Missing", "Platform", "Preference",
"Scalar", "SFIError", "SFIToolkit", "StrLConnector", "ValueLabel"]
__version__ = '0.1.0'
__author__ = 'StataCorp LLC'
from math import ceil, log, floor
from platform import platform
import sys
import datetime
import stata_plugin as _stp
class Characteristic:
    """
    This class provides access to Stata characteristics.

    All methods are static; the class is a namespace over the `stata_plugin`
    (_stp) C bindings.
    """
    def __init__(self):
        # No instance state; instantiation is allowed but unnecessary.
        pass
    @staticmethod
    def getDtaChar(name):
        """
        Get a characteristic for the current dataset.
        Parameters
        ----------
        name : str
            The name of the characteristic to retrieve.
        Returns
        -------
        str
            Value of the characteristic. Returns an empty string if the
            characteristic is not found.
        """
        return _stp._st_getdtachar(name)
    @staticmethod
    def getVariableChar(var, name):
        """
        Get a characteristic for a variable in the current dataset.
        Parameters
        ----------
        var : str or int
            Name or index of the variable.
        name : str
            Name of the characteristic.
        Returns
        -------
        str
            Value of the characteristic. Returns an empty string if the
            characteristic is not found.
        Raises
        ------
        ValueError
            If `var` is not found or is out of :ref:`range <ref-datarange>`.
        """
        # Resolve `var` (name or index) to an index and back to the canonical
        # variable name; _get_var_index_single validates it and raises
        # ValueError for unknown or out-of-range variables.
        oindex = _get_var_index_single(var)
        var = _stp._st_getvarname(oindex)
        return _stp._st_getvariablechar(var, name)
    @staticmethod
    def setDtaChar(name, value):
        """
        Set a characteristic for the current dataset.
        Parameters
        ----------
        name : str
            Name of the characteristic.
        value : str
            Value to set.
        """
        return _stp._st_setdtachar(name, value)
    @staticmethod
    def setVariableChar(var, name, value):
        """
        Set a characteristic for a variable in the current dataset.
        Parameters
        ----------
        var : str or int
            Name or index of the variable.
        name : str
            Name of the characteristic.
        value : str
            Value to set.
        Raises
        ------
        ValueError
            If `var` is not found or is out of :ref:`range <ref-datarange>`.
        """
        # Same name resolution as getVariableChar (validates `var` first).
        oindex = _get_var_index_single(var)
        var = _stp._st_getvarname(oindex)
        return _stp._st_setvariablechar(var, name, value)
def _check_all(iterable):
for element in iterable:
if not element:
return False
return True
def _check_any(iterable):
for element in iterable:
if element:
return True
return False
def _check_var(var, nvars=None):
if nvars is None:
nvars = _stp._st_getvarcount()
if var<-nvars or var>=nvars:
return True
else:
return False
def _check_obs(obs, nobs=None):
if nobs is None:
nobs = _stp._st_getobstotal()
if obs<-nobs or obs>=nobs:
return True
else:
return False
def _get_var_index_all(ovar):
if isinstance(ovar, int):
if _check_var(ovar):
raise ValueError("%d: var out of range" % (ovar))
return [ovar]
elif isinstance(ovar, str):
oret = []
ovar = ovar.split()
for o in ovar:
ovari = _stp._st_getvarindex(o)
if ovari<0:
raise ValueError("variable %s not found" % (o))
oret.append(ovari)
return oret
elif isinstance(ovar, list):
if _check_all(isinstance(o, int) for o in ovar):
oret = []
nvars = _stp._st_getvarcount()
for o in ovar:
if _check_var(o, nvars):
raise ValueError("%d: var out of range" % (o))
oret.append(o)
return oret
elif _check_all(isinstance(o, str) for o in ovar):
oret = []
for o in ovar:
ovari = _stp._st_getvarindex(o)
if ovari<0:
raise ValueError("variable %s not found" % (o))
oret.append(ovari)
return oret
else:
raise TypeError("all values for variable input must be a string or an integer")
elif isinstance(ovar, tuple):
if _check_all(isinstance(o, int) for o in ovar):
oret = []
nvars = _stp._st_getvarcount()
for o in ovar:
if _check_var(o, nvars):
raise ValueError("%d: var out of range" % (o))
oret.append(o)
return oret
elif _check_all(isinstance(o, str) for o in ovar):
oret = []
for o in ovar:
ovari = _stp._st_getvarindex(o)
if ovari<0:
raise ValueError("variable %s not found" % (o))
oret.append(ovari)
return oret
else:
raise TypeError("all values for variable input must be a string or an integer")
elif hasattr(ovar, "__iter__"):
ovar = tuple(ovar)
if _check_all(isinstance(o, int) for o in ovar):
oret = []
nvars = _stp._st_getvarcount()
for o in ovar:
if _check_var(o, nvars):
raise ValueError("%d: var out of range" % (o))
oret.append(o)
return oret
elif _check_all(isinstance(o, str) for o in ovar):
oret = []
for o in ovar:
ovari = _stp._st_getvarindex(o)
if ovari<0:
raise ValueError("variable %s not found" % (o))
oret.append(ovari)
return oret
else:
raise TypeError("all values for variable input must be a string or an integer")
else:
raise TypeError("unsupported operand type(s) for variable input")
def _get_var_index_single(ovar):
if isinstance(ovar, int):
if _check_var(ovar):
raise ValueError("%d: var out of range" % (ovar))
return ovar
elif isinstance(ovar, str):
ovari = _stp._st_getvarindex(ovar)
if ovari<0:
raise ValueError("variable %s not found" % (ovar))
return ovari
else:
raise TypeError("unsupported operand type(s) for variable input")
def _get_var_index(ovar):
if isinstance(ovar, int):
if _check_var(ovar):
raise ValueError("%d: var out of range" % (ovar))
return ovar
else:
raise TypeError("unsupported operand type(s) for variable input")
def _get_var_name(ovar):
if isinstance(ovar, str):
ovari = _stp._st_getvarindex(ovar)
if ovari<0:
raise ValueError("variable %s not found" % (ovar))
return ovar
else:
raise TypeError("unsupported operand type(s) for variable input")
def _get_obs_index(oobs):
if isinstance(oobs, int):
if _check_obs(oobs):
raise ValueError("%d: obs out of range" % (oobs))
return [oobs]
elif isinstance(oobs, list):
if _check_all(isinstance(o, int) for o in oobs):
nobs = _stp._st_getobstotal()
if _check_all(o>-nobs and o<nobs for o in oobs):
return oobs
raise ValueError("obs out of range")
else:
raise TypeError("all values for observation index must be an integer")
elif isinstance(oobs, tuple):
if _check_all(isinstance(o, int) for o in oobs):
nobs = _stp._st_getobstotal()
if _check_all(o>-nobs and o<nobs for o in oobs):
return list(oobs)
raise ValueError("obs out of range")
else:
raise TypeError("all values for observation index must be an integer")
elif hasattr(oobs, "__iter__"):
oobs = tuple(oobs)
if _check_all(isinstance(o, int) for o in oobs):
nobs = _stp._st_getobstotal()
if _check_all(o>-nobs and o<nobs for o in oobs):
return list(oobs)
raise ValueError("obs out of range")
else:
raise TypeError("all values for observation index must be an integer")
else:
raise TypeError("unsupported operand type(s) for variable input")
def _get_obs_max(oobs):
    """Return the larger effective observation index in *oobs*.

    Negative indices are translated by the dataset's observation count
    before comparison, mirroring Python-style negative indexing.
    """
    hi = max(oobs)
    lo = min(oobs)
    nobs = _stp._st_getobstotal()
    if hi < 0:
        hi += nobs
    if lo < 0:
        lo += nobs
    # After translation the nominal max may fall below the nominal min;
    # return whichever effective index is larger.
    return hi if hi >= lo else lo
class _DefaultMissing:
def __repr__(self):
return "_DefaultMissing()"
class Data:
"""
This class provides access to the current Stata dataset. All variable
and observation numbering begins at 0. The allowed values for the
variable index `var` and the observation index `obs` are
.. _ref-datarange:
.. centered:: **-nvar** `<=` `var` `<` **nvar**
and
.. centered:: **-nobs** `<=` `obs` `<` **nobs**
Here **nvar** is the number of variables defined in the dataset
currently loaded in Stata, which is returned by :meth:`getVarCount()`.
**nobs** is the number of observations defined in the dataset
currently loaded in Stata, which is returned by :meth:`getObsTotal()`.
Negative values for `var` and `obs` are allowed and are interpreted
in the usual way for Python indexing. In all functions that
take `var` as an argument, `var` can be specified as either the
variable index or the variable name. Note that passing the
variable index will be more efficient because looking up the index
for the specified variable name is avoided for each function call.
"""
    def __init__(self):
        # The class only exposes static methods; instances carry no state.
        pass
    @staticmethod
    def addObs(n, nofill=False):
        """
        Add `n` observations to the current Stata dataset. By default,
        the added observations are filled with the appropriate
        missing-value code. If `nofill` is specified and equal to True,
        the added observations are not filled, which speeds up the process.
        Setting `nofill` to True is not recommended. If you choose this setting,
        it is your responsibility to ensure that the added observations are
        ultimately filled in or removed before control is returned to Stata.
        There need not be any variables defined to add observations.
        If you are attempting to create a dataset from nothing, you can
        add the observations first and then add the variables.
        Parameters
        ----------
        n : int
            Number of observations to add.
        nofill : bool, optional
            Do not fill the added observations. Default is False.
        Raises
        ------
        ValueError
            If the number of observations to add, `n`, exceeds the limit of observations.
        TypeError
            If `nofill` is not exactly True or False.
        """
        # Convert the Python bool to the int flag the plugin expects,
        # rejecting anything that is not literally True or False.
        if nofill is True:
            bnofill = 1
        elif nofill is False:
            bnofill = 0
        else:
            raise TypeError("nofill must be a boolean value")
        return _stp._st_addobs(n, bnofill)
@staticmethod
def addVarByte(name):
"""
Add a variable of type **byte** to the current Stata dataset.
Parameters
----------
name : str
Name of | |
<filename>build/env/lib/python2.7/site-packages/ipython-0.10-py2.7.egg/IPython/Extensions/ipy_completers.py<gh_stars>10-100
""" Implementations for various useful completers
See Extensions/ipy_stock_completers.py on examples of how to enable a completer,
but the basic idea is to do:
ip.set_hook('complete_command', svn_completer, str_key = 'svn')
"""
import IPython.ipapi
import glob,os,shlex,sys
import inspect
from time import time
from zipimport import zipimporter
ip = IPython.ipapi.get()
# Compatibility shim: `set` became a builtin in Python 2.4; on older
# interpreters fall back to the sets module. The bare `except:` has been
# narrowed to the specific NameError this lookup can raise.
try:
    set
except NameError:
    from sets import Set as set
TIMEOUT_STORAGE = 3  # Time in seconds after which the rootmodules will be stored
TIMEOUT_GIVEUP = 20  # Time in seconds after which we give up
def quick_completer(cmd, completions):
    """ Easily create a trivial completer for a command.

    *completions* is either a list of candidate strings, or a single
    string holding whitespace-separated candidates, e.g.::

        ipy_completers.quick_completer('foo', ['bar', 'baz'])
        foo b<TAB>    ->    bar baz
    """
    if isinstance(completions, basestring):
        # A bare string means a whitespace-separated candidate list.
        completions = completions.split()

    def complete(self, event):
        return completions

    ip.set_hook('complete_command', complete, str_key=cmd)
def getRootModules():
    """
    Returns a list containing the names of all the modules available in the
    folders of the pythonpath.
    """
    # Results are cached in IPython's persistent db under 'rootmodules'
    # once the initial scan proves slow (>= TIMEOUT_STORAGE seconds).
    modules = []
    if ip.db.has_key('rootmodules'):
        return ip.db['rootmodules']
    t = time()
    store = False
    for path in sys.path:
        modules += moduleList(path)
        # Scan is slow: warn once and remember to cache the result.
        if time() - t >= TIMEOUT_STORAGE and not store:
            store = True
            print "\nCaching the list of root modules, please wait!"
            print "(This will only be done once - type '%rehashx' to " + \
                "reset cache!)"
            print
        # Bail out entirely on a pathologically slow scan; cache the empty
        # list so subsequent completions do not retry the full scan.
        if time() - t > TIMEOUT_GIVEUP:
            print "This is taking too long, we give up."
            print
            ip.db['rootmodules'] = []
            return []
    modules += sys.builtin_module_names
    # Deduplicate and drop package-internal '__init__' entries.
    modules = list(set(modules))
    if '__init__' in modules:
        modules.remove('__init__')
    modules = list(set(modules))
    if store:
        ip.db['rootmodules'] = modules
    return modules
def moduleList(path):
    """
    Return the list containing the names of the modules available in the given
    folder.

    *path* may be a directory or a zip/egg archive; anything else (including
    a nonexistent path) yields an empty list.
    """
    if os.path.isdir(path):
        folder_list = os.listdir(path)
    elif path.endswith('.egg'):
        try:
            folder_list = [f for f in zipimporter(path)._files]
        except Exception:
            # Unreadable or malformed archive: treat as containing nothing.
            # (Narrowed from a bare `except:` so KeyboardInterrupt etc.
            # still propagate.)
            folder_list = []
    else:
        folder_list = []
    # Keep packages (contain __init__.py) plus python/extension modules.
    folder_list = [p for p in folder_list \
        if os.path.exists(os.path.join(path, p,'__init__.py'))\
            or p[-3:] in ('.py','.so')\
            or p[-4:] in ('.pyc','.pyo','.pyd')]
    # Strip directories/extensions down to bare module names.
    folder_list = [os.path.basename(p).split('.')[0] for p in folder_list]
    return folder_list
def moduleCompletion(line):
    """
    Returns a list containing the completion possibilities for an import line.
    The line looks like this :
    'import xml.d'
    'from xml.dom import'
    """
    def tryImport(mod, only_modules=False):
        """Import *mod* and list its importable attributes (only submodules
        when *only_modules* is True)."""
        def isImportable(module, attr):
            if only_modules:
                return inspect.ismodule(getattr(module, attr))
            else:
                return not(attr[:2] == '__' and attr[-2:] == '__')
        try:
            m = __import__(mod)
        except Exception:
            # Importing arbitrary user input can fail in any number of
            # ways; offer no completions rather than crash the completer.
            # (Narrowed from a bare `except:`.)
            return []
        mods = mod.split('.')
        for module in mods[1:]:
            m = getattr(m,module)
        # BUG FIX: initialize the list so the extend() below cannot hit a
        # NameError when the condition is false (e.g. only_modules=True on
        # a plain, non-package module).
        completion_list = []
        if (not hasattr(m, '__file__')) or (not only_modules) or\
            (hasattr(m, '__file__') and '__init__' in m.__file__):
            completion_list = [attr for attr in dir(m) if isImportable(m, attr)]
        completion_list.extend(getattr(m,'__all__',[]))
        if hasattr(m, '__file__') and '__init__' in m.__file__:
            # m is a package: also offer its on-disk submodules.
            completion_list.extend(moduleList(os.path.dirname(m.__file__)))
        completion_list = list(set(completion_list))
        if '__init__' in completion_list:
            completion_list.remove('__init__')
        return completion_list

    words = line.split(' ')
    if len(words) == 3 and words[0] == 'from':
        # 'from xml.dom ' -> suggest the 'import' keyword itself.
        return ['import ']
    if len(words) < 3 and (words[0] in ['import','from']) :
        if len(words) == 1:
            return getRootModules()
        mod = words[1].split('.')
        if len(mod) < 2:
            return getRootModules()
        completion_list = tryImport('.'.join(mod[:-1]), True)
        completion_list = ['.'.join(mod[:-1] + [el]) for el in completion_list]
        return completion_list
    if len(words) >= 3 and words[0] == 'from':
        mod = words[1]
        return tryImport(mod)
def vcs_completer(commands, event):
    """ utility to make writing typical version control app completers easier

    VCS command line apps typically have the format:
    [sudo ]PROGNAME [help] [command] file file...
    """
    args = event.line.split()
    if event.line.endswith(' '):
        args.append('')
    # A leading sudo is transparent to completion.
    if args[0] == 'sudo':
        args = args[1:]
    # Still typing the subcommand, or asking for help: offer subcommands.
    if len(args) == 2 or 'help' in args:
        return commands.split()
    # Otherwise the remaining arguments are (most likely) file names.
    return ip.IP.Completer.file_matches(event.symbol)
pkg_cache = None
def module_completer(self,event):
    """ Give completions after user has typed 'import ...' or 'from ...'"""
    # This works in all versions of python. While 2.5 has
    # pkgutil.walk_packages(), that particular routine is fairly dangerous,
    # since it imports *EVERYTHING* on sys.path. That is: a) very slow b) full
    # of possibly problematic side effects.
    # This search the folders in the sys.path for available modules.
    return moduleCompletion(event.line)
svn_commands = """\
add blame praise annotate ann cat checkout co cleanup commit ci copy
cp delete del remove rm diff di export help ? h import info list ls
lock log merge mkdir move mv rename ren propdel pdel pd propedit pedit
pe propget pget pg proplist plist pl propset pset ps resolved revert
status stat st switch sw unlock update
"""
def svn_completer(self,event):
    """ Completer for subversion (svn) commands """
    return vcs_completer(svn_commands, event)
hg_commands = """
add addremove annotate archive backout branch branches bundle cat
clone commit copy diff export grep heads help identify import incoming
init locate log manifest merge outgoing parents paths pull push
qapplied qclone qcommit qdelete qdiff qfold qguard qheader qimport
qinit qnew qnext qpop qprev qpush qrefresh qrename qrestore qsave
qselect qseries qtop qunapplied recover remove rename revert rollback
root serve showconfig status strip tag tags tip unbundle update verify
version
"""
def hg_completer(self,event):
    """ Completer for mercurial commands """
    # Delegates to the generic VCS completer with hg's command list.
    return vcs_completer(hg_commands, event)
__bzr_commands = None
def bzr_commands():
    """Return (and memoize in __bzr_commands) the list of bzr subcommands.

    The list is obtained by parsing `bzr help commands` output; the
    subprocess is only spawned on the first call.
    """
    global __bzr_commands
    if __bzr_commands is not None:
        return __bzr_commands
    out = os.popen('bzr help commands')
    # First whitespace-delimited token of each line is the command name.
    # Blank lines are skipped -- previously they raised IndexError on
    # `l.split()[0]`.
    __bzr_commands = [l.split()[0] for l in out if l.strip()]
    return __bzr_commands
def bzr_completer(self, event):
    """ Completer for bazaar commands """
    args = event.line.split()
    if event.line.endswith(' '):
        args.append('')
    if len(args) > 2:
        cmd = args[1]
        last = args[-1]
        wants_output = (last == '--output=')
        if cmd == 'help':
            return bzr_commands()
        if cmd in ('bundle-revisions', 'conflicts',
                   'deleted', 'nick', 'register-branch',
                   'serve', 'unbind', 'upgrade', 'version',
                   'whoami') and not wants_output:
            # These subcommands take no file arguments.
            return []
        # the rest are probably file names
        return ip.IP.Completer.file_matches(event.symbol)
    return bzr_commands()
def shlex_split(x):
    """Split *x* like a shell, tolerating incomplete syntax.

    shlex.split raises ValueError on syntactically incomplete input (for
    example an unmatched ``"``). We repeatedly move the final character of
    the line into a tail buffer until the remainder parses, then re-attach
    the tail as one extra segment, e.g.::

        %run "c:/python  ->  ['%run', '"c:/python']
    """
    tail = []
    while x != "":
        try:
            parts = shlex.split(x)
        except ValueError:
            # Shift the offending last character onto the tail and retry.
            tail = [x[-1:]] + tail
            x = x[:-1]
        else:
            if len(tail) >= 1:
                parts.append("".join(tail))
            return parts
    return ["".join(tail)]
def runlistpy(self, event):
    """Completer for %run: offers directories plus .py/.ipy/.pyw files."""
    comps = shlex_split(event.line)
    # Last segment typed so far, stripped of surrounding quotes.
    relpath = (len(comps) > 1 and comps[-1] or '').strip("'\"")
    lglob = glob.glob
    isdir = os.path.isdir
    if relpath.startswith('~'):
        relpath = os.path.expanduser(relpath)
    dirs = [f.replace('\\','/') + "/" for f in lglob(relpath+'*')
            if isdir(f)]
    # Find if the user has already typed the first filename, after which we
    # should complete on all files, since after the first one other files may
    # be arguments to the input script.
    # NOTE(review): under Python 3 `filter(...)` returns a lazy iterator
    # that is always truthy, so this branch would always be taken; this
    # module predates Python 3 (where filter returned a list) -- confirm
    # before porting.
    if filter(lambda f: f.endswith('.py') or f.endswith('.ipy') or
              f.endswith('.pyw'),comps):
        pys = [f.replace('\\','/') for f in lglob('*')]
    else:
        pys = [f.replace('\\','/')
               for f in lglob(relpath+'*.py') + lglob(relpath+'*.ipy') +
               lglob(relpath + '*.pyw')]
    return dirs + pys
greedy_cd_completer = False
def cd_completer(self, event):
relpath = event.symbol
#print event # dbg
if '-b' in event.line:
# return only bookmark completions
bkms = self.db.get('bookmarks',{})
return bkms.keys()
if event.symbol == '-':
width_dh = str(len(str(len(ip.user_ns['_dh']) + 1)))
# jump in directory history by number
fmt = '-%0' + width_dh +'d [%s]'
ents = [ fmt % (i,s) for i,s in enumerate(ip.user_ns['_dh'])]
if len(ents) > 1:
return ents
return []
if event.symbol.startswith('--'):
return ["--" + os.path.basename(d) for d in ip.user_ns['_dh']]
if relpath.startswith('~'):
relpath = os.path.expanduser(relpath).replace('\\','/')
found = []
for d in [f.replace('\\','/') + '/' for f in glob.glob(relpath+'*')
if os.path.isdir(f)]:
if ' ' in d:
# we don't want to deal with any of that, complex code
# for this is elsewhere
raise IPython.ipapi.TryNext
found.append( d )
if not found:
if os.path.isdir(relpath):
return [relpath]
# if no completions so far, try bookmarks
bks = self.db.get('bookmarks',{}).keys()
bkmatches = [s for s in bks if s.startswith(event.symbol)]
if bkmatches:
return bkmatches
raise IPython.ipapi.TryNext
def single_dir_expand(matches):
"Recursively expand match lists containing a single dir."
if len(matches) == 1 and os.path.isdir(matches[0]):
# Takes care of links to directories also. Use '/'
# explicitly, even under Windows, so that name completions
# don't end up escaped.
d = matches[0]
if d[-1] in ['/','\\']:
d = d[:-1]
subdirs = [p for p in os.listdir(d) if os.path.isdir( d + '/' + p) and not p.startswith('.')]
if subdirs:
matches = [ (d + '/' + p) for p in subdirs ]
return single_dir_expand(matches)
else:
return matches
else:
return matches
if greedy_cd_completer:
| |
<filename>train.py
import os
import gc
import random
import pprint
import warnings
from six.moves import range
from time import gmtime, strftime
from timeit import default_timer as timer
from torch.autograd import Variable
import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler
from torch.utils.data import DataLoader
import options
from misc.dataloader import VQAPoolDataset
from misc.cub_dataloader import CUBPoolDataset
from misc.awa_dataloader import AWAPoolDataset
import misc.utilities as utils
from misc.eval_questioner import evalQBot
from models.vqg import VQGModel
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
import numpy as np
import pdb
# Parse CLI options and derive the dataset / model sub-configurations.
params = options.readCommandLine()
data_params = options.data_params(params)
model_params = options.model_params(params)
# Seed rng for reproducibility
random.seed(params['randomSeed'])
torch.manual_seed(params['randomSeed'])
if params['useGPU']:
    torch.cuda.manual_seed_all(params['randomSeed'])
# Setup dataloader
# VQA splits its validation data in two (val1/val2); CUB and AWA only
# support random pools and use a standard train/val/test split.
if params.get('dataset', 'VQA') == 'VQA':
    splits = ['train', 'val1', 'val2']
    val_split = 'val1'
    dataset = VQAPoolDataset(data_params, splits)
elif params['dataset'] == 'CUB':
    assert params['poolType'] == 'random'
    splits = ['train', 'val', 'test']
    val_split = 'val'
    dataset = CUBPoolDataset(data_params, splits)
elif params['dataset'] == 'AWA':
    assert params['poolType'] == 'random'
    splits = ['train', 'val', 'test']
    val_split = 'val'
    dataset = AWAPoolDataset(data_params, splits)
# used for evaluation even when not mixing
if params['trainMode'] == 'fine-qbot' and params.get('mixing', 0) != 0:
    mixing_data_params = data_params.copy()
    mixing_data_params['poolType'] = 'contrast'
    mixing_data_params['poolSize'] = 2
    # BUG FIX: the contrastive pool settings above were previously
    # discarded because the unmodified `data_params` was passed here.
    mixing_dataset = VQAPoolDataset(mixing_data_params, splits)
    pre_eval_dataset = mixing_dataset
else:
    mixing_dataset = None
    pre_eval_dataset = dataset
# Params to transfer from dataset
# Vocabulary/answer-space sizes come from the dataset when available.
transfer = ['vocabSize', 'startToken', 'endToken', 'unkToken', 'num_ans_candidates']
for key in transfer:
    if hasattr(dataset, key):
        model_params[key] = getattr(dataset, key)
# Create save path and checkpoints folder
if not os.path.exists('checkpoints'): os.makedirs('checkpoints')
os.makedirs(params['savePath'], exist_ok=True)
# Create/Load Modules
parameters = []
aBot = None
qBot = None
# load qbot
# Either resume from a checkpoint file, or build a fresh model (the
# latter only when we are not exclusively training an abot).
if params['qstartFrom']:
    qBot = utils.loadModelFromFile(params['qstartFrom'], 'qbot',
                                   params['useGPU'], params['speakerStartFrom'])
    qBot.train()
elif 'abot' not in params['trainMode']:
    qBot = utils.loadModel(model_params, 'qbot',
                           params['useGPU'], params['speakerStartFrom'])
if params['ewcLossCoeff'] > 0:
    # Snapshot the current weights; the EWC penalty (compute_ewc_loss)
    # discourages drift away from this prior.
    prior_qBot_parameters = {n: p.clone().detach() for n, p in qBot.named_parameters()}
def compute_ewc_loss(scores, qBot, prior_qBot_parameters):
    """Elastic Weight Consolidation penalty for qBot.

    For each per-example score, backpropagates to estimate a diagonal
    Fisher information (squared gradients) and penalizes squared drift of
    each parameter from its value in `prior_qBot_parameters`.

    Relies on the module-level `optimizer` to zero gradients between
    per-example backward passes. Returns the (scalar tensor) penalty.

    NOTE: debug leftovers (`print(i)` and `pdb.set_trace()`, which halted
    training on every example) have been removed.
    """
    N = scores.shape[0]
    ewc = 0
    # NOTE: This single forward pass with many backwards deals with modules
    # that use batch statistics like BatchNorm. The loss value should probably
    # depend on the entire batch. That doesn't happen if examples are forwarded
    # individually, as in some other implementations.
    for i in range(N):
        optimizer.zero_grad()
        scores[i].backward(retain_graph=True)
        for n, p in qBot.named_parameters():
            # the 2nd can happen for unused encoder parameters
            if not p.requires_grad or p.grad is None:
                continue
            Fi = p.grad.pow(2)  # diagonal Fisher estimate from squared grads
            pp = prior_qBot_parameters[n]
            ewc += (Fi * (p - pp)**2).sum()
    return ewc / 2
# load abot
# Same pattern as qbot: resume from file, or build fresh when the train
# mode involves an abot (or fine-tuning, which needs one for rollouts).
if params['astartFrom']:
    aBot = utils.loadModelFromFile(params['astartFrom'], 'abot', params['useGPU'])
    aBot.train()
elif 'abot' in params['trainMode'] or 'fine' in params['trainMode']:
    aBot = utils.loadModel(model_params, 'abot', params['useGPU'])
vqg = None
# load vqg model
# Only needed when a question-cycle loss is active (or explicitly requested).
if (params.get('vqgstartFrom') is not None or
    params.get('qcycleLossCoeff', 0) != 0):
    # TODO: proper checkpointing and loading
    # TODO: proper decoder sharing.... what's the right way to do that?
    vqg = VQGModel(model_params, qBot.decoder)
    vqg.cuda()
    aBot.tracking = True
def fix_params(freeze_mode):
    """Freeze/unfreeze qBot parameter groups according to *freeze_mode*.

    A parameter is frozen when any of the selected group-name substrings
    occurs in its dotted parameter name; everything else is made trainable.
    Operates on the module-level `qBot` (no-op when it is None).

    Returns
    -------
    (exclude_params, include_params, inc_tensors)
        Names of frozen params, names of trainable params, and the
        trainable tensors themselves (what the optimizer should receive).
    """
    # define some parameter groups
    decoder_params = ['z2hdec', 'gumbel_codebook', 'decoder']
    predict_params = ['predict_']
    dialog_params = ['dialogRNN']
    policy_pt1_params = ['ansEmebed', 'quesStartEmbed', 'quesEncoder',
                         'ctx_coder', 'query_embed']
    policy_pt2_params = ['hc2z', 'henc2z', 'encoder']
    policy_params = policy_pt1_params + policy_pt2_params
    # decide which params
    fix_params_names = ['speaker'] # speaker is always fixed
    if freeze_mode == 'decoder':
        fix_params_names += decoder_params
    elif freeze_mode == 'predict':
        fix_params_names += decoder_params + predict_params
    elif freeze_mode == 'all_but_policy':
        fix_params_names += decoder_params + predict_params + dialog_params
    elif freeze_mode == 'all_but_ctx':
        fix_params_names += decoder_params + predict_params + dialog_params + policy_pt2_params
    elif freeze_mode == 'policy':
        fix_params_names += decoder_params + policy_params
    # actually fix them
    # (a stray no-op `fix_params_names` expression statement was removed here)
    exclude_params = []
    include_params = []
    inc_tensors = []
    if qBot is not None:
        for key, value in dict(qBot.named_parameters()).items():
            # any() replaces the previous np.sum([...]) truthiness trick.
            if any(name in key for name in fix_params_names):
                value.requires_grad = False
                exclude_params.append(key)
            else:
                value.requires_grad = True
                include_params.append(key)
                inc_tensors.append(value)
    return exclude_params, include_params, inc_tensors
# decide which parameters to optimize
if 'fine-qbot' in params['trainMode']:
    # Fine-tuning: freeze the groups named by freezeMode, train the rest.
    exclude_params, include_params, inc_tensors = fix_params(params['freezeMode'])
    print(f'Params exclude from Training {exclude_params}')
    print(f'Params included in Training {include_params}')
    parameters = inc_tensors
    if params.get('freezeMode2') is not None:
        # A second freeze schedule alternates with the first (see batch_iter);
        # the optimizer must know the union of both trainable sets.
        assert 'freezeMode2Freq' in params
        exclude_params2, include_params2, inc_tensors2 = fix_params(params['freezeMode2'])
        print(f'Freeze Mode 2 Params exclude from Training {exclude_params2}')
        print(f'Freeze Mode 2 Params included in Training {include_params2}')
        parameters = list(set(inc_tensors + inc_tensors2))
elif 'qbot' in params['trainMode']:
    if params['freezeMode'] != 'none':
        warnings.warn('Attempting to freeze some parameters during initial '
                      'pre-training. Check to be sure this is what you want.')
    parameters.extend(filter(lambda p: p.requires_grad, qBot.parameters()))
if 'abot' in params['trainMode']:
    parameters.extend(filter(lambda p: p.requires_grad, aBot.parameters()))
if vqg is not None:
    # VQG training piggybacks on the same optimizer.
    assert params.get('freezeMode2') is None
    if params['freezeMode'] == 'all_but_ctx':
        parameters = []
    parameters.extend(filter(lambda p: p.requires_grad, vqg.parameters()))
dataset.split = 'train'
dataloader = DataLoader(
    dataset,
    batch_size=params['batchSize'],
    shuffle=True,
    num_workers=params['numWorkers'],
    drop_last=True,
    pin_memory=True)
# Optional second loader that interleaves contrastive-pool batches
# (consumed by batch_iter when params['mixing'] != 0).
if params.get('mixing', 0) != 0:
    mixing_dataset.split = 'train'
    mixing_dataloader = DataLoader(
        mixing_dataset,
        batch_size=params['batchSize'],
        shuffle=True,
        num_workers=params['numWorkers'],
        drop_last=True,
        pin_memory=True)
else:
    mixing_dataloader = None
viz = utils.TBlogger('logs', params['saveName'])
# Setup optimizer
if params['continue']:
    # Continuing from a loaded checkpoint restores the following
    startIterID = params['ckpt_iterid'] + 1  # Iteration ID
    lRate = params['ckpt_lRate']  # Learning rate
    print("Continuing training from iterId[%d]" % startIterID)
else:
    # Beginning training normally, without any checkpoint
    lRate = params['learningRate']
    startIterID = 0
optimizer = optim.Adam(parameters, lr=lRate, amsgrad=True)
if params['scheduler'] == 'plateau':
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           patience=params['lrPatience'],
                                                           cooldown=params['lrCooldown'],
                                                           factor=params['lrDecayRate'],
                                                           min_lr=params['minLRate'])
else:
    scheduler = None
    # Resuming is only supported together with plateau scheduling.
    assert not params['continue']
runningLoss = None
ce_criterion = nn.CrossEntropyLoss()
ce_criterion_nored = nn.CrossEntropyLoss(reduction='none')
# bce_criterion = nn.BCEWithLogitsLoss()
numIterPerEpoch = len(dataloader)
print('\n%d iter per epoch.' % numIterPerEpoch)
def batch_iter(dataloader, mixing_dataloader=None):
    """Yield (trainMode, freeze_mode, epochId, idx, batch) training tuples.

    freezeMode2Freq controls how often the alternate freeze schedule fires:
    a value like 'epN' switches freeze mode for every N-th epoch, a plain
    integer switches for every N-th iteration. When a mixing dataloader is
    supplied, an extra 'pre-qbot' batch is interleaved every
    params['mixing'] iterations, restarting that loader on exhaustion.
    """
    mode = params['trainMode']
    if mixing_dataloader is not None:
        mixing_iter = iter(mixing_dataloader)
    freeze_mode1 = params['freezeMode']
    freeze_mode2 = params.get('freezeMode2')
    freeze2_freq = params.get('freezeMode2Freq')
    # Normalize to a string so the 'ep' prefix check below is uniform.
    freeze2_freq = str(0 if freeze2_freq is None else freeze2_freq)
    freeze2_epoch = freeze2_freq.startswith('ep')
    freeze2_freq = int(freeze2_freq[2:] if freeze2_epoch else freeze2_freq)
    for epochId in range(params['numEpochs']):
        if freeze2_epoch:
            # Epoch-level switching: one freeze mode for the whole epoch.
            freeze_mode = freeze_mode1
            if freeze2_freq and (epochId+1) % freeze2_freq == 0:
                freeze_mode = freeze_mode2
            print(f'epoch {epochId}, mode {freeze_mode}')
        for idx, batch in enumerate(dataloader):
            if not freeze2_epoch:
                # Iteration-level switching.
                freeze_mode = freeze_mode1
                if freeze2_freq and (idx+1) % freeze2_freq == 0:
                    freeze_mode = freeze_mode2
            yield mode, freeze_mode, epochId, idx, batch
            if params.get('mixing', 0) != 0 and idx % params.get('mixing', 0) == 0:
                # Interleave a contrastive 'pre-qbot' batch.
                try:
                    batch = next(mixing_iter)
                except StopIteration:
                    mixing_iter = iter(mixing_dataloader)
                    batch = next(mixing_iter)
                yield 'pre-qbot', freeze_mode, epochId, idx, batch
start_t = timer()
# KL regularization weights for the encoder and the caption-conditional term.
kl_weight_encoder = params['klWeight']
kl_weight_cc = params['klWeightCC']
# Running totals: data-loading time, compute time, and logged-batch count.
load_diff = 0
compute_diff = 0
log_N = 0
for trainMode, freeze_mode, epochId, idx, batch in batch_iter(dataloader, mixing_dataloader):
# track load time
end_t = timer()
load_diff += end_t - start_t
iterId = startIterID + idx + (epochId * numIterPerEpoch)
epochFrac = iterId / numIterPerEpoch
fix_params(freeze_mode)
#############################################
# Evaluate and checkpoint (every epoch)
# ignore the pre-train iteration of mixed fine-tuning
if idx == 0 and params['trainMode'] == trainMode: # and iterId != 0:
# Set eval mode
if qBot: qBot.eval()
if aBot: aBot.eval()
# Mapping iteration count to epoch count
viz.linePlot(iterId, epochId, 'iter x epoch', 'epochs')
print('Performing validation...')
print('Validation:')
evalResult = evalQBot(params, qBot, dataset, val_split, aBot=aBot,
exampleLimit=params['evalLimit'], vqg=vqg)
viz.linePlot(iterId, epochId, 'iter x epoch', 'epochs')
if qBot:
val_metrics = ['encoder_nll', 'cc_nll', 'predict_loss', 'accuracy',
'question_matches', 'question_match_ratio', 'qcycle_loss']
elif aBot:
val_metrics = ['abot_accu', 'abot_rel_accu', 'abot_loss']
for metric in val_metrics:
value = evalResult[metric]
try:
value = float(value)
viz.linePlot(
epochId, value, 'val', metric, xlabel='Epochs')
except:
pass
for i in range(params['maxRounds']):
if 'R' + str(i) in evalResult:
viz.linePlot(iterId, float(evalResult['R' + str(i)]), 'val', 'round accuracy ' + str(i))
if qBot and 'langResult' in evalResult and 'metrics' in evalResult['langResult']:
for metric in evalResult['langResult']['metrics']:
for r, result in enumerate(evalResult['langResult']['byRound']):
value = result[metric]
viz.linePlot(
epochId, value, 'val - qBot - lang', 'R' + str(r) + '-' + metric, xlabel='Epochs')
if qBot and scheduler is not None:
scheduler.step(evalResult['predict_loss'])
lRate = min([pg['lr'] for pg in optimizer.param_groups])
# Also evaluate pre-training metrics
if params['trainMode'] == 'fine-qbot' and params.get('dataset', 'VQA') == 'VQA':
print('Evaluating pre-train metrics...')
preEvalResult = evalQBot(params, qBot, pre_eval_dataset, val_split,
aBot=aBot, exampleLimit=params['evalLimit'],
trainMode='pre-qbot')
val_metrics = ['encoder_nll', 'cc_nll', 'predict_loss', 'accuracy']
for metric in val_metrics:
value = preEvalResult[metric]
try:
value = float(value)
viz.linePlot(
epochId, value, 'pre-train val', metric, xlabel='Epochs')
except:
pass
start_t = timer()
# Save the model
params['ckpt_iterid'] = iterId
params['ckpt_lRate'] = lRate
if aBot:
saveFile = os.path.join(params['savePath'],
'abot_ep_%d.vd' % epochId)
print('Saving model: ' + saveFile)
utils.saveModel(aBot, optimizer, saveFile, params)
if qBot:
saveFile = os.path.join(params['savePath'],
'qbot_ep_%d.vd' % epochId)
print('Saving model: ' + saveFile)
utils.saveModel(qBot, optimizer, saveFile, params)
#############################################
# Train
# track compute time
start_t = timer()
if params['useGPU']:
batch = {key: v.cuda() if hasattr(v, 'cuda') \
else v for key, v in batch.items()}
if qBot: qBot.train()
if aBot: aBot.train()
log_N += 1
if 'pre' in trainMode:
numRounds = 1
else:
numRounds = params['maxRounds']
encoder_nll = 0
encoder_kl = 0
cc_nll = 0
cc_kl = 0
predict_loss = | |
"""Test cases for Chapter 07."""
import unittest
from os import path
from algs.table import DataTable, SKIP
try:
import networkx as nx
except ImportError:
import ch07.replacement as nx
from ch07.dependencies import tkinter_error
class TestChapter7(unittest.TestCase):
def assert_equal_edges(self, e1, e2):
"""Compare edges but ignore weights."""
if e1[0] != e2[0]:
self.fail('{} not same as {}'.format(e1,e2))
if e1[1] != e2[1]:
self.fail('{} not same as {}'.format(e1,e2))
    def assert_equal_edges_weights(self, e1, e2):
        """Compare edges and also edge weights."""
        from ch07.replacement import WEIGHT
        self.assert_equal_edges(e1, e2)
        # e[2] is the edge-attribute dict; compare its WEIGHT entry.
        if e1[2][WEIGHT] != e2[2][WEIGHT]:
            self.fail('{} not same as {}'.format(e1,e2))
    def test_distance_to(self):
        """distance_to on (row, col) cells: |2-3|+|2-4|=3 and |1-3|+|0-4|=6."""
        from ch07.maze import distance_to
        one = (2,2)
        two = (3,4)
        three = (1,0)
        self.assertEqual(3, distance_to(one, two))
        self.assertEqual(6, distance_to(three, two))
    def test_maze(self):
        """Maze construction is deterministic under a fixed random seed."""
        from ch07.maze import Maze, to_networkx
        import random
        random.seed(15)
        m = Maze(3,5)
        self.assertEqual((0,2), m.start())
        self.assertEqual((2,2), m.end())
        G = to_networkx(m)
        # Expected neighbors of the start cell for seed 15.
        self.assertEqual([(0, 1), (0, 3), (1, 2)], sorted(list(G[(0,2)])))
    def test_bfs_search(self):
        """BFS on a seeded maze: parent links, search-tree size, path length."""
        from ch07.maze import Maze, to_networkx, solution_graph
        from ch07.search import bfs_search, path_to, node_from_field
        import random
        random.seed(15)
        m = Maze(3,5)
        G = to_networkx(m)
        # BFS search solution
        node_from = bfs_search(G, m.start())
        self.assertEqual((1,0), node_from[(2,0)])
        # Create graph resulting from the BFS search results
        F = node_from_field(G, node_from)
        self.assertEqual(14, len(list(F.edges())))
        # The actual solution is a two-edge, three node straight path
        H = solution_graph(G, path_to(node_from, m.start(), m.end()))
        self.assertEqual(2, len(list(H.edges())))
    def test_guided_search(self):
        """Guided (distance-prioritized) search on the same seeded maze."""
        from ch07.maze import Maze, to_networkx, solution_graph, distance_to
        from ch07.search import guided_search, path_to, node_from_field
        import random
        random.seed(15)
        m = Maze(3,5)
        G = to_networkx(m)
        # Guided search solution (distance_to is the priority heuristic).
        node_from = guided_search(G, m.start(), m.end(), distance_to)
        self.assertEqual((1,2), node_from[(2,2)])
        # Create graph resulting from the search results
        F = node_from_field(G, node_from)
        self.assertEqual(14, len(list(F.edges())))
        # The actual solution is a two-edge, three node straight path
        H = solution_graph(G, path_to(node_from, m.start(), m.end()))
        self.assertEqual(2, len(list(H.edges())))
def test_allpairs_sp(self):
from ch07.all_pairs_sp import floyd_warshall, all_pairs_path_to
G = nx.Graph()
G.add_edge('a', 'b', weight=3)
G.add_edge('a', 'c', weight=5)
G.add_edge('b', 'c', weight=9)
G.add_edge('b', 'd', weight=2)
G.add_edge('d', 'c', weight=1)
G.add_edge('e', 'f', weight=1) # separate and disconnected edge...
(dist_to, node_from) = floyd_warshall(G)
path = all_pairs_path_to(node_from, 'b', 'c')
self.assertEqual(3, dist_to['b']['c'])
self.assertEqual(['b', 'd', 'c'], path)
path = all_pairs_path_to(node_from, 'a', 'd')
self.assertEqual(5, dist_to['a']['d'])
self.assertEqual(['a', 'b', 'd'], path)
with self.assertRaises(ValueError):
all_pairs_path_to(node_from, 'a', 'e')
tbl = DataTable([6,6,6,6,6], ['.', 'a', 'b', 'c', 'd'], output=False)
tbl.format('.','s')
for f in 'abcd':
tbl.format(f, 's')
for u in 'abcd':
row = [u]
for v in 'abcd':
if node_from[u][v]:
row.append(node_from[u][v])
else:
row.append(SKIP)
tbl.row(row)
self.assertEqual('d', tbl.entry('b', 'c'))
    def test_allpairs_directed_sp(self):
        """Floyd-Warshall on a directed graph, plus the debug table rendering."""
        from ch07.all_pairs_sp import floyd_warshall, all_pairs_path_to, debug_state
        DG = nx.DiGraph()
        DG.add_edge('a', 'b', weight=4)
        DG.add_edge('b', 'a', weight=2)
        DG.add_edge('a', 'c', weight=3)
        DG.add_edge('b', 'd', weight=5)
        DG.add_edge('c', 'b', weight=6)
        DG.add_edge('d', 'b', weight=1)
        DG.add_edge('d', 'c', weight=7)
        (dist_to, node_from) = floyd_warshall(DG)
        # b ~> c must go back through a (2 + 3 = 5) since there is no b->c edge.
        path = all_pairs_path_to(node_from, 'b', 'c')
        self.assertEqual(5, dist_to['b']['c'])
        self.assertEqual(['b', 'a', 'c'], path)
        # d ~> c via b and a (1 + 2 + 3 = 6) beats the direct d->c edge of 7.
        path = all_pairs_path_to(node_from, 'd', 'c')
        self.assertEqual(6, dist_to['d']['c'])
        self.assertEqual(['d', 'b', 'a', 'c'], path)
        # debug_state renders node_from and dist_to as tables without printing.
        (tbl, tbl_dist_to) = debug_state('test case', DG, node_from, dist_to, output=False)
        # Build a parallel table holding the full u ~> v path strings.
        tbl_path = DataTable([6,12,12,12,12], ['.', 'a', 'b', 'c', 'd'], output=False)
        tbl_path.format('.','s')
        for f in 'abcd':
            tbl_path.format(f, 's')
        for u in 'abcd':
            path_row = [u]
            for v in 'abcd':
                if u == v:
                    path_row.append(SKIP)
                else:
                    path_row.append('->'.join(all_pairs_path_to(node_from, u, v)))
            tbl_path.row(path_row)
        # edge on shortest path into 'c', when starting from 'd', came from 'a'
        self.assertEqual('d->b->a->c', tbl_path.entry('d', 'c'))
        self.assertEqual(6, tbl_dist_to.entry('d', 'c'))
        self.assertEqual('a', tbl.entry('d', 'c'))
def test_bellman_ford_negative_cycle_sp(self):
from ch07.single_source_sp import bellman_ford
from ch07.challenge import bellman_ford_returns_negative_cycle, NegativeCycleError
neg_cycle = nx.DiGraph()
neg_cycle.add_edge('a', 'b', weight=1)
neg_cycle.add_edge('b', 'd', weight=-3)
neg_cycle.add_edge('d', 'c', weight=5)
neg_cycle.add_edge('c', 'b', weight=-4)
with self.assertRaises(RuntimeError):
bellman_ford(neg_cycle, 'a')
with self.assertRaises(NegativeCycleError):
bellman_ford_returns_negative_cycle(neg_cycle, 'a')
# Validate semantic information in NegativeCycleError: Note that cycle returned is
# implementation-specific, so you could choose to only match regExp on weight=-2
with self.assertRaisesRegex(NegativeCycleError, 'c->d->b->c with weight=-2'):
bellman_ford_returns_negative_cycle(neg_cycle, 'a')
no_neg_cycle = nx.DiGraph()
no_neg_cycle.add_edge('a', 'b', weight=1)
no_neg_cycle.add_edge('b', 'd', weight=-3)
no_neg_cycle.add_edge('d', 'c', weight=5)
no_neg_cycle.add_edge('c', 'b', weight=-1)
bellman_ford(no_neg_cycle, 'a')
bellman_ford_returns_negative_cycle(no_neg_cycle, 'a')
def test_bad_dijkstra_sp(self):
from ch07.single_source_sp import dijkstra_sp
DG = nx.DiGraph()
DG.add_edge('a', 'b', weight=3)
DG.add_edge('a', 'c', weight=1)
DG.add_edge('c', 'd', weight=1)
DG.add_edge('b', 'd', weight=-2)
with self.assertRaises(ValueError):
dijkstra_sp(DG, 'a')
def test_dijkstra_sp(self):
from ch07.single_source_sp import dijkstra_sp, edges_path_to, bellman_ford
DG = nx.DiGraph()
DG.add_edge('a', 'b', weight=3)
DG.add_edge('a', 'c', weight=9)
DG.add_edge('b', 'c', weight=4)
DG.add_edge('b', 'd', weight=2)
DG.add_edge('d', 'c', weight=1)
DG.add_edge('e', 'f', weight=1) # separate and disconnected edge...
(dist_to, edge_to) = dijkstra_sp(DG, 'a')
path = edges_path_to(edge_to, 'a', 'c')
self.assertEqual(6, dist_to['c'])
self.assertEqual(['a', 'b', 'd', 'c'], path)
with self.assertRaises(ValueError): # NO PATH!
edges_path_to(edge_to, 'a', 'f')
(dist_to_bf, edge_to_bf) = bellman_ford(DG, 'a')
path = edges_path_to(edge_to_bf, 'a', 'c')
self.assertEqual(6, dist_to_bf['c'])
self.assertEqual(['a', 'b', 'd', 'c'], path)
def test_topological_example(self):
from ch07.book import topological_example
DG = nx.DiGraph()
topological_example(DG, 5)
print(list(nx.topological_sort(DG)))
from ch07.digraph_search import topological_sort
print(list(topological_sort(DG)))
def test_topological_figure(self):
DG = nx.DiGraph()
DG.add_edges_from([('a', 'b'), ('a', 'c'), ('b', 'c'), ('b', 'd')])
print(list(nx.topological_sort(DG)))
from ch07.digraph_search import topological_sort
print(list(topological_sort(DG)))
DG = nx.DiGraph()
from ch07.book import make_sample_directed_graph
DG = make_sample_directed_graph()
print(list(topological_sort(DG)))
    def small_example(self, G):
        """Common example used in chapter 07.

        Populates G (any networkx-compatible graph) with a linear chain
        A2-A3-A4-A5 plus a 'ladder' over B2..B5 / C2..C5, exercising the four
        node/edge insertion API variants, then sanity-checks the result.
        Returns G so callers can chain further assertions.
        """
        # Linear chain A2 - A3 - A4 - A5, built via single and bulk API calls.
        G.add_node('A2')
        G.add_nodes_from(['A3', 'A4', 'A5'])
        G.add_edge('A2', 'A3')
        G.add_edges_from([('A3', 'A4'), ('A4', 'A5')])
        # Ladder: rungs Bi-Ci for i in 2..5, plus partial rails B3-B4-B5
        # and C2-C3-C4-C5 along the sides.
        for i in range(2, 6):
            G.add_edge('B{}'.format(i), 'C{}'.format(i))
            if 2 < i < 5:
                G.add_edge('B{}'.format(i), 'B{}'.format(i+1))
            if i < 5:
                G.add_edge('C{}'.format(i), 'C{}'.format(i+1))
        # Totals and C3's neighborhood confirm the construction above.
        self.assertEqual(12, G.number_of_nodes())
        self.assertEqual(12, G.number_of_edges())
        self.assertEqual(sorted(['C2', 'B3', 'C4']), sorted(list(G['C3'])))
        self.assertEqual(sorted([('C3', 'C2'), ('C3', 'B3'), ('C3', 'C4')]),
                         sorted(list(G.edges('C3'))))
        return G
def test_small_example(self):
from ch07.search import dfs_search, path_to
from ch07.challenge import path_to_recursive
G = nx.Graph()
self.small_example(G)
node_from = dfs_search(G, 'A2')
self.assertEqual(['A2', 'A3', 'A4', 'A5'], path_to(node_from, 'A2', 'A5'))
self.assertEqual(['A2', 'A3', 'A4', 'A5'], list(path_to_recursive(node_from, 'A2', 'A5')))
with self.assertRaises(ValueError):
path_to(node_from, 'A2', 'B2') # No path exists
with self.assertRaises(ValueError):
# No path exists: force issue by list(...)
list(path_to_recursive(node_from, 'A2', 'B2'))
def test_small_example_stub_replacement(self):
import ch07.replacement
G = ch07.replacement.Graph()
self.small_example(G)
def test_list_stack(self):
from ch07.list_stack import Stack
stack = Stack()
self.assertTrue(stack.is_empty())
with self.assertRaises(RuntimeError):
stack.pop()
stack.push(5)
self.assertFalse(stack.is_empty())
self.assertEqual(5, stack.pop())
def test_representations(self):
from ch07.replacement import MatrixUndirectedGraph, UndirectedGraph
self.small_example(UndirectedGraph())
G = self.small_example(MatrixUndirectedGraph())
self.assertEqual(['A3'], list(G['A2']))
def test_dijkstra_replacement(self):
from ch07.replacement import WEIGHT, DiGraph
DG = DiGraph()
DG.add_edge('a', 'b', weight=6)
DG.add_edge('a', 'c', weight=10)
DG.add_edge('b', 'c', weight=2)
from ch07.single_source_sp import dijkstra_sp
(dist_to, edge_to) = dijkstra_sp(DG, 'a')
self.assertEqual(8, dist_to['c'])
self.assert_equal_edges_weights(('b', 'c', {WEIGHT:2}), edge_to['c'])
def test_indexed_min_heap(self):
from ch07.indexed_pq import IndexedMinPQ
impq = IndexedMinPQ(5)
with self.assertRaises(RuntimeError):
impq.peek()
with self.assertRaises(RuntimeError):
impq.dequeue()
impq.enqueue(3, 5)
# can't increase priority
with self.assertRaises(RuntimeError):
impq.decrease_priority(3, 999)
self.assertEqual(3, impq.peek())
impq.enqueue(1, 2)
self.assertEqual(2, len(impq))
self.assertFalse(impq.is_full())
self.assertTrue(3 in impq)
self.assertFalse(999 in impq)
self.assertEqual(1, impq.dequeue())
self.assertEqual(3, impq.dequeue())
self.assertTrue(impq.is_empty())
for i in range(5):
impq.enqueue(i, i+10)
self.assertTrue(impq.is_full())
with self.assertRaises(RuntimeError):
impq.enqueue(98, 999)
def test_imqp_example(self):
from ch07.single_source_sp import dijkstra_sp
G = nx.DiGraph()
G.add_edge('a', 'b', weight=6)
G.add_edge('a', 'c', weight=10)
G.add_edge('b', 'c', weight=2)
(dist_to, edge_to) = dijkstra_sp(G, 'a')
self.assertEqual(8, dist_to['c'])
self.assertEqual('b', edge_to['c'][0])
self.assertEqual('a', edge_to['b'][0])
    def test_cycle_detection(self):
        """Deal with inability to have replacement cycle detection.

        Drives the Spreadsheet example: the fibonacci formulas form a DAG, and
        setting B2 to depend on C5 (which transitively depends on B2) must be
        rejected as a cycle. Skips GUI parts when tkinter is unavailable and
        networkx-specific checks when the stub replacement module is in use.
        """
        from ch07.fibonacci_example import fibonacci_example
        # tkinter_error is a module-level flag; without tkinter there is no
        # Spreadsheet widget to drive, so the whole test is a no-op.
        if tkinter_error:
            pass
        else:
            import tkinter
            from ch07.spreadsheet import Spreadsheet
            ss = Spreadsheet(tkinter.Tk(), nx.DiGraph())
            fibonacci_example(ss)
            # The stub replacement module has no cycle-finding algorithms.
            if nx.__version__ == 'replacement':
                pass
            else:
                import networkx.algorithms.cycles
                # The fibonacci dependency graph is acyclic, so find_cycle
                # must raise NetworkXNoCycle here.
                try:
                    networkx.algorithms.cycles.find_cycle(ss.digraph)
                    self.fail('no cycle yet...')
                except networkx.exception.NetworkXNoCycle:
                    pass
            # Making B2 depend on C5 closes a dependency loop; the
            # spreadsheet must reject the assignment.
            try:
                ss.set('B2', '=C5')
                self.fail('should have detected cycle')
            except RuntimeError:
                pass
            # just grab the graph and hack it together
            ss.digraph.add_edge('C5', 'B2')
            if nx.__version__ == 'replacement':
                pass
            else:
                # With the edge forced in, find_cycle must now succeed.
                networkx.algorithms.cycles.find_cycle(ss.digraph)
                acycle = networkx.algorithms.cycles.find_cycle(ss.digraph)
                self.assertTrue(len(acycle) > 1)
    def test_has_cycle_none_exists(self):
        """Acyclic digraph: detectors report False and recovery returns empty."""
        from ch07.digraph_search import has_cycle, has_cycle_nr
        from ch07.digraph_search import recover_cycle, recover_cycle_nr
        G = nx.DiGraph()
        G.add_edge('a', 'b', weight=6)
        G.add_edge('a', 'c', weight=10)
        G.add_edge('b', 'c', weight=2)
        self.assertFalse(has_cycle(G))
        self.assertFalse(has_cycle_nr(G))
        # No cycle exists, so both recovery variants return an empty cycle.
        self.assertTrue(len(recover_cycle(G)) == 0)
        self.assertTrue(len(recover_cycle_nr(G)) == 0)
def test_has_cycle(self):
from ch07.digraph_search import has_cycle, has_cycle_nr
from ch07.digraph_search import recover_cycle, recover_cycle_nr
G = nx.DiGraph()
G.add_edge('a', 'b', weight=6)
G.add_edge('a', 'c', weight=10)
G.add_edge('b', 'c', weight=2)
G.add_edge('c', 'd', weight=1)
G.add_edge('d', 'e', weight=1)
self.assertFalse(has_cycle(G))
self.assertFalse(has_cycle_nr(G))
G.add_edge('e', 'a', weight=1)
self.assertTrue(has_cycle(G))
self.assertTrue(has_cycle_nr(G))
# There are multiple cycles, so no way to check with each other...
self.assertTrue(len(recover_cycle(G)) > 0)
self.assertTrue(len(recover_cycle_nr(G)) > 0)
# However, both cycles contain 'e'
self.assertTrue('e' in recover_cycle(G))
self.assertTrue('e' in recover_cycle_nr(G))
def test_topological_table(self):
from ch07.book import table_topological_example
tbl = table_topological_example(max_k=4, output=False)
self.assertEqual(336, tbl.entry(64, 'E'))
    def test_table_compare_graph_structures(self):
        """NetworkX should outperform the adjacency-matrix representation at n=2048."""
        from ch07.book import table_compare_graph_structures
        # NOTE(review): unlike the other table tests, this call does not pass
        # output=False — confirm whether table_compare_graph_structures accepts
        # it; as written this likely prints during the test run.
        tbl = table_compare_graph_structures(max_k=12)
        self.assertTrue(tbl.entry(2048, 'NetworkX') < tbl.entry(2048, 'Adjacency Matrix'))
def test_generate_guided_search_figure(self):
from ch07.book import generate_guided_search_figure
from ch07.tmg_load import tmg_load, highway_map, bounding_ids
from ch07.dependencies import plt_error
if not plt_error:
(G, positions, _) = tmg_load(highway_map())
(_, EAST, _, WEST) = bounding_ids(positions)
output_file = generate_guided_search_figure(G, positions, WEST, EAST)
self.assertTrue(path.isfile(output_file))
def test_bounding(self):
from ch07.tmg_load import tmg_load, highway_map, bounding_ids
(_, positions, _) = tmg_load(highway_map())
(NORTH, EAST, SOUTH, WEST) = bounding_ids(positions)
self.assertTrue(positions[NORTH][0] > positions[SOUTH][0]) # LAT Is higher for north
self.assertTrue(positions[EAST][1] > positions[WEST][1]) # LONG | |
# sdk/python/pulumi_azure_nextgen/security/v20150601preview/_inputs.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'JitNetworkAccessPolicyVirtualMachineArgs',
'JitNetworkAccessPortRuleArgs',
'JitNetworkAccessRequestArgs',
'JitNetworkAccessRequestPortArgs',
'JitNetworkAccessRequestVirtualMachineArgs',
'PathRecommendationArgs',
'ProtectionModeArgs',
'PublisherInfoArgs',
'UserRecommendationArgs',
'VmRecommendationArgs',
]
@pulumi.input_type
class JitNetworkAccessPolicyVirtualMachineArgs:
    """Input describing one virtual machine covered by a JIT network access policy."""

    def __init__(__self__, *,
                 id: pulumi.Input[str],
                 ports: pulumi.Input[Sequence[pulumi.Input['JitNetworkAccessPortRuleArgs']]],
                 public_ip_address: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] id: Resource ID of the virtual machine that is linked to this policy
        :param pulumi.Input[Sequence[pulumi.Input['JitNetworkAccessPortRuleArgs']]] ports: Port configurations for the virtual machine
        :param pulumi.Input[str] public_ip_address: Public IP address of the Azure Firewall that is linked to this policy, if applicable
        """
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "ports", ports)
        if public_ip_address is not None:
            pulumi.set(__self__, "public_ip_address", public_ip_address)

    @property
    @pulumi.getter
    def id(self) -> pulumi.Input[str]:
        """
        Resource ID of the virtual machine that is linked to this policy
        """
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: pulumi.Input[str]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter
    def ports(self) -> pulumi.Input[Sequence[pulumi.Input['JitNetworkAccessPortRuleArgs']]]:
        """
        Port configurations for the virtual machine
        """
        return pulumi.get(self, "ports")

    @ports.setter
    def ports(self, value: pulumi.Input[Sequence[pulumi.Input['JitNetworkAccessPortRuleArgs']]]):
        pulumi.set(self, "ports", value)

    @property
    @pulumi.getter(name="publicIpAddress")
    def public_ip_address(self) -> Optional[pulumi.Input[str]]:
        """
        Public IP address of the Azure Firewall that is linked to this policy, if applicable
        """
        return pulumi.get(self, "public_ip_address")

    @public_ip_address.setter
    def public_ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "public_ip_address", value)
@pulumi.input_type
class JitNetworkAccessPortRuleArgs:
    """A single port rule within a JIT network access policy."""

    def __init__(__self__, *,
                 max_request_access_duration: pulumi.Input[str],
                 number: pulumi.Input[int],
                 protocol: pulumi.Input[str],
                 allowed_source_address_prefix: Optional[pulumi.Input[str]] = None,
                 allowed_source_address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        :param pulumi.Input[str] max_request_access_duration: Maximum duration requests can be made for. In ISO 8601 duration format. Minimum 5 minutes, maximum 1 day
        :param pulumi.Input[int] number: The port number this rule applies to
        :param pulumi.Input[str] protocol: The network protocol of the rule (not documented in the generated schema; presumably TCP/UDP/'*' — verify against the Azure Security Center API)
        :param pulumi.Input[str] allowed_source_address_prefix: Mutually exclusive with the "allowedSourceAddressPrefixes" parameter. Should be an IP address or CIDR, for example "192.168.0.3" or "192.168.0.0/16".
        :param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_source_address_prefixes: Mutually exclusive with the "allowedSourceAddressPrefix" parameter.
        """
        pulumi.set(__self__, "max_request_access_duration", max_request_access_duration)
        pulumi.set(__self__, "number", number)
        pulumi.set(__self__, "protocol", protocol)
        if allowed_source_address_prefix is not None:
            pulumi.set(__self__, "allowed_source_address_prefix", allowed_source_address_prefix)
        if allowed_source_address_prefixes is not None:
            pulumi.set(__self__, "allowed_source_address_prefixes", allowed_source_address_prefixes)

    @property
    @pulumi.getter(name="maxRequestAccessDuration")
    def max_request_access_duration(self) -> pulumi.Input[str]:
        """
        Maximum duration requests can be made for. In ISO 8601 duration format. Minimum 5 minutes, maximum 1 day
        """
        return pulumi.get(self, "max_request_access_duration")

    @max_request_access_duration.setter
    def max_request_access_duration(self, value: pulumi.Input[str]):
        pulumi.set(self, "max_request_access_duration", value)

    @property
    @pulumi.getter
    def number(self) -> pulumi.Input[int]:
        """
        The port number this rule applies to
        """
        return pulumi.get(self, "number")

    @number.setter
    def number(self, value: pulumi.Input[int]):
        pulumi.set(self, "number", value)

    @property
    @pulumi.getter
    def protocol(self) -> pulumi.Input[str]:
        """
        The network protocol of the rule
        """
        return pulumi.get(self, "protocol")

    @protocol.setter
    def protocol(self, value: pulumi.Input[str]):
        pulumi.set(self, "protocol", value)

    @property
    @pulumi.getter(name="allowedSourceAddressPrefix")
    def allowed_source_address_prefix(self) -> Optional[pulumi.Input[str]]:
        """
        Mutually exclusive with the "allowedSourceAddressPrefixes" parameter. Should be an IP address or CIDR, for example "192.168.0.3" or "192.168.0.0/16".
        """
        return pulumi.get(self, "allowed_source_address_prefix")

    @allowed_source_address_prefix.setter
    def allowed_source_address_prefix(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "allowed_source_address_prefix", value)

    @property
    @pulumi.getter(name="allowedSourceAddressPrefixes")
    def allowed_source_address_prefixes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Mutually exclusive with the "allowedSourceAddressPrefix" parameter.
        """
        return pulumi.get(self, "allowed_source_address_prefixes")

    @allowed_source_address_prefixes.setter
    def allowed_source_address_prefixes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "allowed_source_address_prefixes", value)
@pulumi.input_type
class JitNetworkAccessRequestArgs:
    """A request to open ports under a JIT network access policy."""

    def __init__(__self__, *,
                 requestor: pulumi.Input[str],
                 start_time_utc: pulumi.Input[str],
                 virtual_machines: pulumi.Input[Sequence[pulumi.Input['JitNetworkAccessRequestVirtualMachineArgs']]],
                 justification: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] requestor: The identity of the person who made the request
        :param pulumi.Input[str] start_time_utc: The start time of the request in UTC
        :param pulumi.Input[Sequence[pulumi.Input['JitNetworkAccessRequestVirtualMachineArgs']]] virtual_machines: The virtual machines (and their ports) covered by this request
        :param pulumi.Input[str] justification: The justification for making the initiate request
        """
        pulumi.set(__self__, "requestor", requestor)
        pulumi.set(__self__, "start_time_utc", start_time_utc)
        pulumi.set(__self__, "virtual_machines", virtual_machines)
        if justification is not None:
            pulumi.set(__self__, "justification", justification)

    @property
    @pulumi.getter
    def requestor(self) -> pulumi.Input[str]:
        """
        The identity of the person who made the request
        """
        return pulumi.get(self, "requestor")

    @requestor.setter
    def requestor(self, value: pulumi.Input[str]):
        pulumi.set(self, "requestor", value)

    @property
    @pulumi.getter(name="startTimeUtc")
    def start_time_utc(self) -> pulumi.Input[str]:
        """
        The start time of the request in UTC
        """
        return pulumi.get(self, "start_time_utc")

    @start_time_utc.setter
    def start_time_utc(self, value: pulumi.Input[str]):
        pulumi.set(self, "start_time_utc", value)

    @property
    @pulumi.getter(name="virtualMachines")
    def virtual_machines(self) -> pulumi.Input[Sequence[pulumi.Input['JitNetworkAccessRequestVirtualMachineArgs']]]:
        """
        The virtual machines (and their ports) covered by this request
        """
        return pulumi.get(self, "virtual_machines")

    @virtual_machines.setter
    def virtual_machines(self, value: pulumi.Input[Sequence[pulumi.Input['JitNetworkAccessRequestVirtualMachineArgs']]]):
        pulumi.set(self, "virtual_machines", value)

    @property
    @pulumi.getter
    def justification(self) -> Optional[pulumi.Input[str]]:
        """
        The justification for making the initiate request
        """
        return pulumi.get(self, "justification")

    @justification.setter
    def justification(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "justification", value)
@pulumi.input_type
class JitNetworkAccessRequestPortArgs:
    """The state of a single port opened by a JIT network access request."""

    def __init__(__self__, *,
                 end_time_utc: pulumi.Input[str],
                 number: pulumi.Input[int],
                 status: pulumi.Input[str],
                 status_reason: pulumi.Input[str],
                 allowed_source_address_prefix: Optional[pulumi.Input[str]] = None,
                 allowed_source_address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 mapped_port: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[str] end_time_utc: The date & time at which the request ends in UTC
        :param pulumi.Input[int] number: The port number the request applies to
        :param pulumi.Input[str] status: The status of the port
        :param pulumi.Input[str] status_reason: A description of why the `status` has its value
        :param pulumi.Input[str] allowed_source_address_prefix: Mutually exclusive with the "allowedSourceAddressPrefixes" parameter. Should be an IP address or CIDR, for example "192.168.0.3" or "192.168.0.0/16".
        :param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_source_address_prefixes: Mutually exclusive with the "allowedSourceAddressPrefix" parameter.
        :param pulumi.Input[int] mapped_port: The port which is mapped to this port's `number` in the Azure Firewall, if applicable
        """
        pulumi.set(__self__, "end_time_utc", end_time_utc)
        pulumi.set(__self__, "number", number)
        pulumi.set(__self__, "status", status)
        pulumi.set(__self__, "status_reason", status_reason)
        if allowed_source_address_prefix is not None:
            pulumi.set(__self__, "allowed_source_address_prefix", allowed_source_address_prefix)
        if allowed_source_address_prefixes is not None:
            pulumi.set(__self__, "allowed_source_address_prefixes", allowed_source_address_prefixes)
        if mapped_port is not None:
            pulumi.set(__self__, "mapped_port", mapped_port)

    @property
    @pulumi.getter(name="endTimeUtc")
    def end_time_utc(self) -> pulumi.Input[str]:
        """
        The date & time at which the request ends in UTC
        """
        return pulumi.get(self, "end_time_utc")

    @end_time_utc.setter
    def end_time_utc(self, value: pulumi.Input[str]):
        pulumi.set(self, "end_time_utc", value)

    @property
    @pulumi.getter
    def number(self) -> pulumi.Input[int]:
        """
        The port number the request applies to
        """
        return pulumi.get(self, "number")

    @number.setter
    def number(self, value: pulumi.Input[int]):
        pulumi.set(self, "number", value)

    @property
    @pulumi.getter
    def status(self) -> pulumi.Input[str]:
        """
        The status of the port
        """
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: pulumi.Input[str]):
        pulumi.set(self, "status", value)

    @property
    @pulumi.getter(name="statusReason")
    def status_reason(self) -> pulumi.Input[str]:
        """
        A description of why the `status` has its value
        """
        return pulumi.get(self, "status_reason")

    @status_reason.setter
    def status_reason(self, value: pulumi.Input[str]):
        pulumi.set(self, "status_reason", value)

    @property
    @pulumi.getter(name="allowedSourceAddressPrefix")
    def allowed_source_address_prefix(self) -> Optional[pulumi.Input[str]]:
        """
        Mutually exclusive with the "allowedSourceAddressPrefixes" parameter. Should be an IP address or CIDR, for example "192.168.0.3" or "192.168.0.0/16".
        """
        return pulumi.get(self, "allowed_source_address_prefix")

    @allowed_source_address_prefix.setter
    def allowed_source_address_prefix(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "allowed_source_address_prefix", value)

    @property
    @pulumi.getter(name="allowedSourceAddressPrefixes")
    def allowed_source_address_prefixes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Mutually exclusive with the "allowedSourceAddressPrefix" parameter.
        """
        return pulumi.get(self, "allowed_source_address_prefixes")

    @allowed_source_address_prefixes.setter
    def allowed_source_address_prefixes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "allowed_source_address_prefixes", value)

    @property
    @pulumi.getter(name="mappedPort")
    def mapped_port(self) -> Optional[pulumi.Input[int]]:
        """
        The port which is mapped to this port's `number` in the Azure Firewall, if applicable
        """
        return pulumi.get(self, "mapped_port")

    @mapped_port.setter
    def mapped_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "mapped_port", value)
@pulumi.input_type
class JitNetworkAccessRequestVirtualMachineArgs:
    """One virtual machine, with its opened ports, inside a JIT access request."""

    def __init__(__self__, *,
                 id: pulumi.Input[str],
                 ports: pulumi.Input[Sequence[pulumi.Input['JitNetworkAccessRequestPortArgs']]]):
        """
        :param pulumi.Input[str] id: Resource ID of the virtual machine that is linked to this policy
        :param pulumi.Input[Sequence[pulumi.Input['JitNetworkAccessRequestPortArgs']]] ports: The ports that were opened for the virtual machine
        """
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "ports", ports)

    @property
    @pulumi.getter
    def id(self) -> pulumi.Input[str]:
        """
        Resource ID of the virtual machine that is linked to this policy
        """
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: pulumi.Input[str]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter
    def ports(self) -> pulumi.Input[Sequence[pulumi.Input['JitNetworkAccessRequestPortArgs']]]:
        """
        The ports that were opened for the virtual machine
        """
        return pulumi.get(self, "ports")

    @ports.setter
    def ports(self, value: pulumi.Input[Sequence[pulumi.Input['JitNetworkAccessRequestPortArgs']]]):
        pulumi.set(self, "ports", value)
@pulumi.input_type
class PathRecommendationArgs:
def __init__(__self__, *,
action: Optional[pulumi.Input[str]] = None,
common: Optional[pulumi.Input[bool]] = None,
configuration_status: Optional[pulumi.Input[str]] = None,
file_type: Optional[pulumi.Input[str]] = None,
path: Optional[pulumi.Input[str]] = None,
publisher_info: Optional[pulumi.Input['PublisherInfoArgs']] = None,
type: Optional[pulumi.Input[str]] = None,
user_sids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
usernames: Optional[pulumi.Input[Sequence[pulumi.Input['UserRecommendationArgs']]]] = None):
"""
Represents a path that is recommended to be allowed and its properties
:param pulumi.Input[str] action: The recommendation action of the VM/server or rule
:param pulumi.Input[bool] common: Whether the path is commonly run on the machine
:param pulumi.Input[str] configuration_status: The configuration status of the VM/server group or machine or rule on the machine
:param pulumi.Input[str] file_type: The type of the file (for Linux files - Executable is used)
:param pulumi.Input[str] path: The full path to whitelist
:param pulumi.Input['PublisherInfoArgs'] publisher_info: Represents the publisher information of a process/rule
:param pulumi.Input[str] type: The type of the rule to be allowed
"""
if action is not None:
pulumi.set(__self__, "action", action)
if common is not None:
pulumi.set(__self__, "common", common)
if configuration_status is not None:
pulumi.set(__self__, "configuration_status", configuration_status)
if file_type is not None:
pulumi.set(__self__, "file_type", file_type)
if path is not None:
pulumi.set(__self__, "path", path)
if publisher_info is not None:
pulumi.set(__self__, "publisher_info", publisher_info)
if type is not None:
pulumi.set(__self__, "type", type)
if user_sids is not None:
pulumi.set(__self__, "user_sids", user_sids)
if usernames is not None:
pulumi.set(__self__, "usernames", usernames)
@property
@pulumi.getter
def action(self) -> Optional[pulumi.Input[str]]:
"""
The recommendation action of the VM/server or rule
"""
return pulumi.get(self, "action")
@action.setter
| |
action="BUY" if o.isBuy() else "SELL",
exchange=c.exchange,
)
legs.append(leg)
return Bag(
symbol=contractUnderlying,
exchange=useExchange or exchange,
comboLegs=legs,
currency=currency,
)
    async def placeOrderForContract(
        self,
        sym: str,
        isLong: bool,
        contract: Contract,
        qty: float,
        price: float,
        orderType: str,
    ):
        """Place a BUY (isLong) or SELL (!isLong) for qualified 'contract' at qty/price.

        If 'qty' is negative we calculate a live quantity based
        on the live quote price (live hours only, obvs).

        If 'price' is zero, we snap to midpoint for the limit price.
        NOTE(review): the body below only replaces 'price' with the midpoint
        inside the qty < 0 branch; a zero price with positive qty is passed
        through unchanged — confirm whether that matches the docstring intent.

        Returns (order, trade) on success, or None when the contract is not
        qualified or no usable quote arrives in time.
        """
        # Immediately ask to add quote to live quotes for this trade positioning...
        # need to replace underlying if is "fake settled underlying"
        quotesym = (
            sym.replace("SPXW", "SPX").replace("RUTW", "RUT").replace("NDXP", "NDX")
        )
        await self.dispatch.runop("add", f'"{quotesym}"', self.opstate)
        if not contract.conId:
            # spread contracts don't have IDs, so only reject if NOT a spread here.
            if contract.tradingClass != "COMB":
                logger.error("Not submitting order because contract not qualified!")
                return None
        if isinstance(contract, (Option, Bag)) or contract.tradingClass == "COMB":
            # Purpose: don't trigger warning about "RTH option has no effect" with options...
            # TODO: check if RTH includes extended late 4:15 ending options SPY / SPX / QQQ / IWM / etc?
            if contract.localSymbol[0:3] in {"SPX", "VIX"}:
                # SPX and VIX options now trade basically 24/7 but anything not 0930-1600 (-1615?) is
                # considered "outside RTH"
                outsideRth = True
            else:
                outsideRth = False
        else:
            outsideRth = True
        if isinstance(contract, Crypto) and isLong:
            # Crypto can only use IOC or Minutes for tif BUY
            # (but for SELL, can use IOC, Minutes, Day, GTC)
            tif = "Minutes"
        else:
            tif = "GTC"
        if qty < 0:
            # we treat negative quantities as dollar amounts (because
            # we convert 'qty' to float, so we can't pass through $3000, so
            # instead we do -3000 for $3,000).
            # also note: negative quantites are not shorts, shorts are specified
            # by SELL QTY, not SELL -QTY, not BUY -QTY.
            quoteKey = lang.lookupKey(contract)
            # if this is a new quote just requested, we may need to wait
            # for the system to populate it...
            loopFor = 10
            while not (currentQuote := self.currentQuote(quoteKey)):
                logger.warning(
                    "[{} :: {}] Waiting for quote to populate...", quoteKey, loopFor
                )
                try:
                    # brief pause between polls; cancellation aborts the order
                    await asyncio.sleep(0.133)
                except:
                    logger.warning("Cancelled waiting for quote...")
                    return
                if (loopFor := loopFor - 1) == 0:
                    # if we exhausted the loop, we didn't get a usable quote so we can't
                    # do the requested price-based position sizing.
                    logger.error("Never received valid quote prices: {}", currentQuote)
                    return
            bid, ask = currentQuote
            # TODO: need customizable aggressiveness levels
            #   - midpoint (default)
            #   - ask + X% for aggressive time sensitive buys
            #   - bid - X% for aggressive time sensitive sells
            # TODO: need to create active management system to track growing/shrinking
            #       midpoint for buys (+price, -qty) or sell (-price) targeting.
            # calculate current midpoint of spread rounded to 2 decimals.
            mid = round((bid + ask) / 2, 2)
            price = mid
            # negative quantities are whole dollar amounts to use for
            # the buy/sell here.
            amt = abs(qty)
            qty = self.quantityForAmount(contract, amt, mid)
            if not isinstance(contract, Crypto):
                # only crypto orders support fractional quantities over the API.
                # TODO: if IBKR ever enables fractional shares over the API,
                #       we can make the above Crypto check for (Crypto, Stock).
                qty = math.floor(qty)
            # If integer, show integer, else show fractions.
            logger.info(
                "Ordering {:,} {} at ${:,.2f} for ${:,.2f}",
                qty,
                sym,
                price,
                qty * price,
            )
        order = orders.IOrder(
            "BUY" if isLong else "SELL", qty, price, outsiderth=outsideRth, tif=tif  # type: ignore
        ).order(orderType)
        logger.info("Ordering {} via {}", contract, order)
        trade = self.ib.placeOrder(contract, order)
        # TODO: add agent-like feature to modify order in steps for buys (+price, -qty)
        #       or for sells (-price)
        logger.info("Placed: {}", pp.pformat(trade))
        return order, trade
def amountForTrade(
self, trade: Trade
) -> tuple[float, float, float, Union[float, int]]:
"""Return dollar amount of trade given current limit price and quantity.
Also compensates for contract multipliers correctly.
Returns:
- calculated remaining amount
- calculated total amount
- current limit price
- current quantity remaining
"""
currentPrice = trade.order.lmtPrice
currentQty = trade.orderStatus.remaining
totalQty = currentQty + trade.orderStatus.filled
avgFillPrice = trade.orderStatus.avgFillPrice
# If contract has multiplier (like 100 underlying per option),
# calculate total spend with mul * p * q.
# The default "no multiplier" value is '', so this check should be fine.
mul = int(trade.contract.multiplier) if trade.contract.multiplier else 1
# use average price IF fills have happened, else use current limit price
return (
currentQty * currentPrice * mul,
totalQty * (avgFillPrice or currentPrice) * mul,
currentPrice,
currentQty,
)
def quantityForAmount(
self, contract: Contract, amount: float, limitPrice: float
) -> Union[int, float]:
"""Return valid quantity for contract using total dollar amount 'amount'.
Also compensates for limitPrice being a contract quantity.
Also compensates for contracts allowing fractional quantities (Crypto)
versus only integer quantities (everything else)."""
mul = int(contract.multiplier) if contract.multiplier else 1
qty = amount / (limitPrice * mul)
if not isinstance(contract, Crypto):
# only crypto orders support fractional quantities over the API.
# TODO: if IBKR ever enables fractional shares over the API,
# we can make the above Crypto check for (Crypto, Stock).
qty = math.floor(qty)
return qty
    def midpointBracketBuyOrder(
        self,
        isLong: bool,
        qty: int,
        ask: float,
        stopPct: float,
        profitPts: "float | None" = None,
        stopPts: "float | None" = None,
    ):
        """Place a 3-sided order:
        - Market with Protection to buy immediately (long)
        - Profit taker: TRAIL LIT with trailStopPrice = (current ask + profitPts)
        - Stop loss: STP PRT with trailStopPrice = (current ask - stopPts)

        NOTE(review): profitPts and stopPts are currently unused in the body;
        trigger levels are derived from stopPct via boundsByPercentDifference.
        Confirm whether the point-based parameters are still intended.
        """
        # Price band around the ask; the far side becomes the stop level.
        lower, upper = boundsByPercentDifference(ask, stopPct)
        if isLong:
            lossPrice = lower
            trailStop = makeQuarter(ask - lower)
            # Open 1 point through the ask so the entry fills quickly.
            openLimit = ask + 1
            openAction = "BUY"
            closeAction = "SELL"
        else:
            lossPrice = upper
            trailStop = makeQuarter(upper - ask)
            openLimit = ask - 1
            openAction = "SELL"
            closeAction = "BUY"
        # TODO: up/down One-Cancels-All brackets:
        # BUY if +5 pts, TRAIL STOP 3 PTS
        # SELL if -5 pts, TRAIL STOP 3 PTS
        if True:
            # Note: these orders require MANUAL order ID because by default,
            # the order ID is populated on .placeOrder(), but we need to
            # reference it here for the secondary order to reference
            # the parent order!
            parent = Order(
                orderId=self.ib.client.getReqId(),
                action=openAction,
                totalQuantity=qty,
                transmit=False,
                # orderType="MKT PRT",
                orderType="LMT",
                lmtPrice=openLimit,
                outsideRth=True,
                tif="GTC",
            )
            profit = Order(
                orderId=self.ib.client.getReqId(),
                action=closeAction,
                totalQuantity=qty,
                parentId=parent.orderId,
                transmit=True,
                orderType="TRAIL LIMIT",
                outsideRth=True,
                tif="GTC",
                trailStopPrice=lossPrice, # initial trigger level if price falls immediately
                lmtPriceOffset=0.75, # price offset for the limit order when stop triggers
                auxPrice=trailStop, # trailing amount before stop triggers
            )
            # NOTE(review): 'loss' is constructed but excluded from the returned
            # bracket (see commented-out return below) — confirm intentional.
            loss = Order(
                action=closeAction,
                totalQuantity=qty,
                parentId=parent.orderId,
                transmit=True,
                orderType="STP PRT",
                auxPrice=lossPrice,
            )
            return [parent, profit] # , loss]
def orderPriceForSpread(self, contracts: Sequence[Contract], positionSize: int):
"""Given a set of contracts, attempt to find the closing order."""
ot = self.ib.openTrades()
contractIds = set([c.conId for c in contracts])
# Use a list so we can collect multiple exit points for the same position.
ts = []
for t in ot:
if not isinstance(t.contract, Bag):
continue
legIds = set([c.conId for c in t.contract.comboLegs])
if legIds == contractIds:
qty, price = t.orderStatus.remaining, t.order.lmtPrice
ts.append((qty, price))
# if only one and it's the full position, return without formatting
if len(ts) == 1:
if abs(int(positionSize)) == abs(ts[0][0]):
return ts[0][1]
# else, break out by order size, sorted from smallest to largest exit prices
return sorted(ts, key=lambda x: abs(x[1]))
def orderPriceForContract(self, contract: Contract, positionSize: int):
"""Attempt to match an active closing order to an open position.
Works for both total quantity closing and partial scale-out closing."""
ot = self.ib.openTrades()
# Use a list so we can collect multiple exit points for the same position.
ts = []
for t in ot:
# t.order.action is "BUY" or "SELL"
opposite = "SELL" if positionSize > 0 else "BUY"
if (
t.order.action == opposite
and t.contract.localSymbol == contract.localSymbol
):
# Closing price is opposite sign of the holding quantity.
# (i.e. LONG positions | |
# Copyright (C) 2019-2021, TomTom (http://tomtom.com).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parse brief and detailed descriptions from Doxygen XML."""
import logging
import xml.etree.ElementTree as ET
from abc import ABC, abstractmethod
from typing import Iterator, List, Mapping, Optional, Tuple, Type, TypeVar, Union
logger = logging.getLogger(__name__)
def parse_description(xml_root: Optional[ET.Element], language_tag: str) -> "ParaContainer":
    """Parse a description from Doxygen XML.

    Expects either `briefdescription` or `detaileddescription`. In case of `detaileddescription` it
    can contain additional sections documenting parameters and return types.

    Args:
        xml_root: Element to start processing from.
        language_tag: Tag indicating the programming language.
    Returns:
        A container with description paragraphs and sections.
    """
    try:
        container = ParaContainer(language_tag)
        if xml_root is not None:
            for child in xml_root:
                _parse_description(child, container, language_tag)
        container.normalize()
        return container
    except AssertionError:
        # Dump the offending XML to ease debugging, then propagate.
        if xml_root is not None:
            ET.dump(xml_root)
        raise
def select_descriptions(brief: "ParaContainer", detailed: "ParaContainer") -> Tuple[str, str]:
    """Select the appropriate brief and detailed descriptions.

    Sometimes one of the descriptions is missing. This method makes sure there is always at least
    a brief description.

    Args:
        brief: Brief description as found in the XML.
        detailed: Detailed description as found in the XML.
    Returns:
        brief: Brief description to use.
        detailed: Detailed description to use.
    """
    rendered_brief = brief.to_asciidoc()
    if rendered_brief:
        return rendered_brief, detailed.to_asciidoc()
    # No usable brief description: promote the first detailed paragraph.
    if detailed.contents:
        brief.contents.append(detailed.contents.pop(0))
    return brief.to_asciidoc(), detailed.to_asciidoc()
###################################################################################################
# Core elements and interfaces
###################################################################################################
class AsciiDocContext:
    """Mutable state shared while generating AsciiDoc.

    Context-aware elements need to adapt to the elements they are nested in;
    they push and pop entries on these stacks while rendering.
    """

    def __init__(self):
        # Separators for nested tables and markers for nested lists.
        self.table_separators = []
        self.list_markers = []
class DescriptionElement(ABC):
    """A description in Doxygen XML is made up of several different XML elements. Each element
    requires its own conversion into AsciiDoc format.
    """
    # Tag indicating the programming language the description belongs to.
    language_tag: str
    def __init__(self, language_tag: str):
        self.language_tag = language_tag
    @abstractmethod
    def to_asciidoc(self, context: AsciiDocContext = None) -> str:
        """Convert the element, and all contained elements, to AsciiDoc format."""
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}"
    @classmethod
    def from_xml(cls, xml_element: ET.Element, language_tag: str) -> "DescriptionElement":
        """Generate a description element from its XML counterpart.
        Information and attributes from XML can be used, but the contained text and the tail text
        are handled separately.
        """
        return cls(language_tag)
    def update_from_xml(self, xml_element: ET.Element):
        """Update the current element with information from another XML element.
        By default this method does nothing. To be implemented only by subclasses that support or
        require information from some of its children, without adding a new element for the child.
        """
    def add_text(self, text: str) -> None:
        """Add the text inside the XML element.
        By default the text is ignored. To be implemented only by subclasses that use the text.
        """
    def add_tail(self, parent: "NestedDescriptionElement", text: str) -> None:
        """Add the text after the closing tag of the element.
        By default the tail is ignored. To be used by subclasses that support tail text. The parent
        can be used to create additional elements for the tail text after the current element.
        """
class NestedDescriptionElement(DescriptionElement):
    """A description element that contains additional description elements.

    Attributes:
        contents: Child description elements nested inside this element.
    """
    contents: List[DescriptionElement]

    def __init__(self, language_tag: str, *contents: DescriptionElement):
        super().__init__(language_tag)
        self.contents = list(contents)

    def append(self, content: DescriptionElement) -> None:
        # Falsy children are silently dropped.
        if content:
            self.contents.append(content)

    def to_asciidoc(self, context: AsciiDocContext = None) -> str:
        rendered = [child.to_asciidoc(context) for child in self.contents]
        return "".join(rendered)

    def normalize(self) -> None:
        # Recurse into nested containers only; leaf elements need no cleanup.
        for nested in self.children_of_type(NestedDescriptionElement):
            nested.normalize()

    ChildT = TypeVar("ChildT", bound="DescriptionElement")

    def children_of_type(self, child_type: Type[ChildT]) -> Iterator[ChildT]:
        return (child for child in self.contents if isinstance(child, child_type))

    def first_child_of_type(self, child_type: Type[ChildT]) -> Optional[ChildT]:
        return next(self.children_of_type(child_type), None)
class NamedSection:
    """Special paragraph indicating a section that can be retrieved by name.

    Attributes:
        name: Name of the section.
    """
    name: str

    def __init__(self, name: str = ""):
        self.name = name
###################################################################################################
# Simple elements without nested content
###################################################################################################
class PlainText(DescriptionElement):
    """Plain text without its own formatting.

    Formatting may be applied by parent elements.

    Attributes:
        text: The plain text.
    """
    text: str

    def __init__(self, language_tag: str, text: str = ""):
        super().__init__(language_tag)
        self.text = text

    def to_asciidoc(self, context: AsciiDocContext = None) -> str:
        # Surrounding newlines are XML layout artifacts, not content.
        return self.text.strip("\r\n")

    def add_text(self, text: str) -> None:
        self.text += text

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}: {repr(self.text)}"
class Formula(DescriptionElement):
    """Formula in LatexMath format.

    Attributes:
        text: Contents of the formula.
    """
    text: str

    def __init__(self, language_tag: str, text: str = ""):
        super().__init__(language_tag)
        self.text = text

    def to_asciidoc(self, context: AsciiDocContext = None) -> str:
        stripped_text = self.text.strip("\r\n")
        # Doxygen wraps display formulas in the 2-character delimiters \[ and \].
        # Slice exactly those off; the previous [3:-3] slice also consumed the
        # first and last formula characters when no padding spaces were present.
        if stripped_text.startswith(r"\[") and stripped_text.endswith(r"\]"):
            stripped_text = stripped_text[2:-2].strip()
        return f"latexmath:[{stripped_text}]"

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}: {repr(self.text)}"

    def add_text(self, text: str) -> None:
        self.text += text

    def add_tail(self, parent: NestedDescriptionElement, text: str):
        # Text after the formula belongs to the parent as plain text.
        parent.append(PlainText(self.language_tag, text))
class Image(DescriptionElement):
    """Insert an image.

    Attributes:
        output_type: Output document type the image is meant for. Only `html` is supported.
        file_name: Name of the image file. Must be available in the images of the package.
        alt_text: Alternative text when the image cannot be loaded, or for accessibility.
        width: Optional width to show the image with.
        height: Optional height to show the image with.
        inline: Yes if the image needs to be inlined in the text.
    """
    output_type: str
    file_name: str
    alt_text: str
    width: str
    height: str
    inline: str

    def __init__(self, language_tag: str, output_type: str, file_name: str, width: str, height: str,
                 inline: str):
        super().__init__(language_tag)
        self.output_type = output_type
        self.file_name = file_name
        self.alt_text = ""
        self.width = width
        self.height = height
        self.inline = inline

    @classmethod
    def from_xml(cls, xml_element: ET.Element, language_tag: str) -> "Image":
        return cls(language_tag, xml_element.get("type", ""), xml_element.get("name", ""),
                   xml_element.get("width", ""), xml_element.get("height", ""),
                   xml_element.get("inline", "no"))

    def to_asciidoc(self, context: AsciiDocContext = None) -> str:
        # Images for output types other than HTML are dropped entirely.
        if self.output_type != "html":
            return ""
        if self.width or self.height:
            options = f'"{self.alt_text}",{self.width},{self.height}'
        elif self.alt_text:
            options = f'"{self.alt_text}"'
        else:
            options = ""
        # AsciiDoc: one colon inlines the image, two colons create a block macro.
        separator = ":" if self.inline == "yes" else "::"
        return f"image{separator}{self.file_name}[{options}]"

    def add_text(self, text: str) -> None:
        # Element text becomes the alt text.
        self.alt_text += text

    def add_tail(self, parent: NestedDescriptionElement, text: str):
        parent.append(PlainText(self.language_tag, text))

    def __repr__(self) -> str:
        return (f"{self.__class__.__name__}: {self.output_type}->{self.file_name}, "
                f"{repr(self.alt_text)}, width={self.width}, height={self.height}, "
                f"inline={self.inline}")
class SpecialCharacter(PlainText):
"""Special character represented by an XML tag.
Attributes:
tag: Original XML tag.
"""
tag: str
SPECIAL_CHARACTERS = {
"sp": " ",
"linebreak": " +\n",
"nonbreakablespace": " ",
"iexcl": "",
"cent": "",
"pound": "",
"curren": "",
"yen": "",
"brvbar": "",
"sect": "",
"umlaut": "",
"copy": "",
"ordf": "",
"laquo": "",
"not": "",
"shy": "",
"registered": "",
"macr": "",
"deg": "",
"plusmn": "",
"sup2": "",
"sup3": "",
"acute": "",
"micro": "",
"middot": "",
"cedil": "",
"sup1": "",
"ordm": "",
"raquo": "",
"frac14": "",
"frac12": "",
"frac34": "",
"iquest": "",
"Agrave": "",
"Aacute": "",
"Acirc": "",
"Atilde": "",
"Aumlaut": "",
"Aring": "",
"AElig": "",
"Ccedil": "",
"Egrave": "",
"Eacute": "",
"Ecirc": "",
"Eumlaut": "",
"Igrave": "",
"Iacute": "",
"Icirc": "",
"Iumlaut": "",
"ETH": "",
"Ntilde": "",
"Ograve": "",
"Oacute": "",
"Ocirc": "",
"Otilde": "",
"Oumlaut": "",
"times": "",
"Oslash": "",
"Ugrave": "",
"Uacute": "",
"Ucirc": "",
"Uumlaut": "",
"Yacute": "",
"THORN": "",
"szlig": "",
"agrave": "",
"aacute": "",
"acirc": "",
"atilde": "",
"aumlaut": "",
"aring": "",
"aelig": "",
"ccedil": "",
"egrave": "",
"eacute": "",
"ecirc": "",
"eumlaut": "",
"igrave": "",
"iacute": "",
"icirc": "",
"iumlaut": "",
"eth": "",
"ntilde": "",
"ograve": "",
"oacute": "",
"ocirc": "",
"otilde": "",
"oumlaut": "",
"divide": "",
"oslash": "",
"ugrave": "",
"uacute": "",
"ucirc": "",
"uumlaut": "",
"yacute": "",
"thorn": "",
"yumlaut": "",
"fnof": "",
"Alpha": "",
"Beta": "",
"Gamma": "",
"Delta": "",
"Epsilon": "",
"Zeta": "",
"Eta": "",
"Theta": "",
"Iota": "",
"Kappa": "",
"Lambda": "",
"Mu": "",
"Nu": "",
"Xi": "",
"Omicron": "",
"Pi": "",
"Rho": "",
"Sigma": "",
"Tau": "",
"Upsilon": "",
"Phi": "",
"Chi": "",
"Psi": "",
"Omega": "",
"alpha": "",
"beta": "",
"gamma": "",
"delta": "",
"epsilon": "",
"zeta": "",
"eta": "",
"theta": "",
"iota": "",
"kappa": "",
"lambda": "",
"mu": "",
"nu": | |
# Source repository: trisadmeslek/V-Sekai-Blender-tools
# GPLv3 License
#
# Copyright (C) 2020 Ubisoft
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Proxy of a bpy.types.Struct collection, excluding bpy.types.ID collections that are implemented
in datablock_collection_proxy.py
See synchronization.md
"""
from __future__ import annotations
from collections import defaultdict
import logging
from typing import Callable, Dict, List, Optional, Tuple, TYPE_CHECKING, TypeVar, Union
import bpy.types as T # noqa
from mixer.blender_data import specifics
from mixer.blender_data.attributes import apply_attribute, diff_attribute, read_attribute, write_attribute
from mixer.blender_data.json_codec import serialize
from mixer.blender_data.proxy import AddElementFailed, Delta, DeltaAddition, DeltaReplace, DeltaUpdate, Proxy
from mixer.blender_data.struct_proxy import StructProxy
if TYPE_CHECKING:
from mixer.blender_data.datablock_ref_proxy import DatablockRefProxy
from mixer.blender_data.misc_proxies import NonePtrProxy
from mixer.blender_data.proxy import Context
logger = logging.getLogger(__name__)
def _proxy_factory(attr) -> Union[DatablockRefProxy, NonePtrProxy, StructProxy]:
    """Return a proxy instance suited to attr.

    None pointers and standalone datablocks get dedicated proxies; everything
    else is handled by StructProxy.
    """
    # None can never satisfy the isinstance check below, so testing it first
    # is equivalent to the original ordering.
    if attr is None:
        from mixer.blender_data.misc_proxies import NonePtrProxy
        return NonePtrProxy()
    if isinstance(attr, T.ID) and not attr.is_embedded_data:
        from mixer.blender_data.datablock_ref_proxy import DatablockRefProxy
        return DatablockRefProxy()
    return StructProxy.make(attr)
T_ = TypeVar("T_")
class Resolver:
    """Helper to defer item reference resolution after the referenced item creation.

    An array element may reference an item with a larger index. As the arrays are created depth wise,
    the item with the larger index does not exist when the item with the smaller index stored a
    reference. This situation occurs when bone parenting is reversed.

    TODO use this class for DatablockRefCollectionProxy as well
    """

    def __init__(self):
        # Closures to run once the item identified by the key exists.
        self._items: Dict[T_, List[Callable[[], None]]] = defaultdict(list)

    def __bool__(self):
        return bool(self._items)

    def append(self, key: "T_", func: Callable[[], None]):
        """Add func() to be called by resolve() for item at key."""
        self._items[key].append(func)

    def resolve(self, key: "T_"):
        """Resolve the references to item identified by key by calling the closures registered for it."""
        try:
            funcs = self._items.pop(key)
        except KeyError:
            # Fix: dict.pop raises KeyError, not IndexError. Catching IndexError
            # let resolve() crash for keys with no registered closures.
            return
        for f in funcs:
            f()
@serialize
class StructCollectionProxy(Proxy):
"""
Proxy to a bpy_prop_collection of non-datablock Struct.
    It can track an array (int keys) or a dictionary (string keys). Both implementations are
in the same class as it is not possible to know at creation time the type of an empty collection
"""
_serialize = ("_sequence", "_diff_additions", "_diff_deletions", "_diff_updates")
    def __init__(self):
        """Create an empty proxy; populate it via load() or by applying deltas."""
        # Sparse diff state: (index, delta) pairs for in-place updates, a count
        # of trailing deletions, and deltas for appended items.
        self._diff_updates: List[Tuple[int, Delta]] = []
        self._diff_deletions: int = 0
        self._diff_additions: List[DeltaAddition] = []
        # Item proxies, in collection order.
        self._sequence: List[Proxy] = []
        # Lazily created helper resolving forward references between items.
        self._resolver: Optional[Resolver] = None
    @classmethod
    def make(cls, attr_property: T.Property):
        """Return the proxy suited to attr_property: NodeLinks collections get
        a dedicated proxy, everything else a StructCollectionProxy."""
        if attr_property.srna == T.NodeLinks.bl_rna:
            # Local import avoids a module import cycle.
            from mixer.blender_data.node_proxy import NodeLinksProxy
            return NodeLinksProxy()
        return StructCollectionProxy()
    def __len__(self):
        return len(self._sequence)
    def __iter__(self):
        return iter(self._sequence)
    def __getitem__(self, i: int):
        return self._sequence[i]
    @property
    def length(self) -> int:
        # Equivalent to len(self).
        return len(self._sequence)
    def register_unresolved(self, i: int, func: Callable[[], None]):
        """Defer func() until the item at index i is created (see Resolver)."""
        if self._resolver is None:
            self._resolver = Resolver()
        self._resolver.append(i, func)
    def data(self, key: int, resolve_delta=True) -> Optional[Union[Delta, Proxy]]:
        """Return the data at key, which may be a struct member, a dict value or an array value.
        Args:
            key: Integer index into the sequence; indexes at or past the current
                length address pending additions
            resolve_delta: If True, and the data is a Delta, will return the delta value
        """
        # shaky and maybe not useful
        length = self.length
        if key < length:
            # A pending update for this index takes precedence over the stored proxy.
            delta_update = next((delta for i, delta in self._diff_updates if i == key), None)
            if delta_update is None:
                return self._sequence[key]
            if resolve_delta:
                return delta_update.value
            return delta_update
        else:
            # Index beyond the stored items: look up pending additions.
            try:
                delta_addition = self._diff_additions[key - length]
            except IndexError:
                return None
            if resolve_delta:
                return delta_addition.value
            return delta_addition
    def load(
        self,
        bl_collection: T.bpy_prop_collection,
        context: Context,
    ):
        """Snapshot the current state of bl_collection into this proxy.
        Args:
            bl_collection: the Blender collection to read
            context: the proxy and visit state
        Returns:
            self, for chaining.
        """
        self._sequence.clear()
        for i, v in enumerate(bl_collection.values()):
            # Track the path for error reporting.
            context.visit_state.push(v, i)
            try:
                self._sequence.append(_proxy_factory(v).load(v, context))
            except Exception as e:
                # Best effort: log and skip the failing item, keep loading the rest.
                logger.error(f"Exception during load at {context.visit_state.display_path()} ...")
                logger.error(f"... {e!r}")
            finally:
                context.visit_state.pop()
        return self
    def save(self, collection: T.bpy_prop_collection, parent: T.bpy_struct, key: str, context: Context):
        """
        Save this proxy into collection
        Args:
            collection: the collection into which this proxy is saved
            parent: the attribute that contains collection (e.g. a Scene instance)
            key: the name of the collection in parent (e.g "background_images")
            context: the proxy and visit state
        """
        sequence = self._sequence
        # Using clear_from ensures that sequence data is compatible with remaining elements after
        # truncate_collection. This addresses an issue with Nodes, for which the order of default nodes (material
        # output and principled in collection) may not match the order of incoming nodes. Saving node data into a
        # node of the wrong type can lead to a crash.
        clear_from = specifics.clear_from(collection, sequence, context)
        specifics.truncate_collection(collection, clear_from)
        # For collections like `IDMaterials`, the creation API (`.new(datablock_ref)`) also writes the value.
        # For collections like `Nodes`, the creation API (`.new(name)`) does not write the item value.
        # So the value must always be written for all collection types.
        collection_length = len(collection)
        # Overwrite the items that survived truncation ...
        for i, item_proxy in enumerate(sequence[:collection_length]):
            write_attribute(collection, i, item_proxy, context)
        # ... then create and write the missing items.
        for i, item_proxy in enumerate(sequence[collection_length:], collection_length):
            try:
                specifics.add_element(collection, item_proxy, i, context)
                if self._resolver:
                    # Run closures deferred until this item existed.
                    self._resolver.resolve(i)
            except AddElementFailed:
                break
            # Must write at once, otherwise the default item name might conflict with a later item name
            write_attribute(collection, i, item_proxy, context)
    def apply(
        self,
        collection: T.bpy_prop_collection,
        parent: T.bpy_struct,
        key: Union[int, str],
        delta: Delta,
        context: Context,
        to_blender=True,
    ) -> StructCollectionProxy:
        """
        Apply delta to this proxy and optionally to the Blender attribute it manages.
        Args:
            collection: the collection to update (e.g. a_mesh.materials)
            parent: the attribute that contains collection (e.g. a Mesh instance)
            key: the key that identifies collection in parent (e.g "materials")
            delta: the delta to apply
            context: proxy and visit state
            to_blender: update the managed Blender attribute in addition to this Proxy
        """
        # NOTE(review): the annotation allows int keys but only str is accepted here.
        assert isinstance(key, str)
        update = delta.value
        assert type(update) == type(self)
        if isinstance(delta, DeltaReplace):
            # The collection must be replaced as a whole
            self._sequence = update._sequence
            if to_blender:
                specifics.truncate_collection(collection, 0)
                self.save(collection, parent, key, context)
        else:
            # a sparse update
            try:
                sequence = self._sequence
                # Delete before update and process updates in reverse order to avoid spurious renames.
                # Starting with sequence A, B, C, D and delete B causes :
                # - an update for items 1 and 2 to be renamed into C and D
                # - one delete
                # If the update is processed first, Blender renames item 3 into D.001
                # If the deletes are processed first but the updates are processed in order, Blender renames item 1
                # into C.001
                delete_count = update._diff_deletions
                if delete_count > 0:
                    if to_blender:
                        specifics.truncate_collection(collection, len(collection) - delete_count)
                    del sequence[-delete_count:]
                for i, delta_update in reversed(update._diff_updates):
                    sequence[i] = apply_attribute(collection, i, sequence[i], delta_update, context, to_blender)
                for i, delta_addition in enumerate(update._diff_additions, len(sequence)):
                    if to_blender:
                        item_proxy = delta_addition.value
                        try:
                            specifics.add_element(collection, item_proxy, i, context)
                            if self._resolver:
                                self._resolver.resolve(i)
                        except AddElementFailed:
                            break
                        write_attribute(collection, i, item_proxy, context)
                    sequence.append(delta_addition.value)
            except Exception as e:
                # NOTE(review): errors are logged and swallowed so synchronization
                # can continue; a partial apply is possible here.
                logger.warning("apply: Exception while processing attribute ...")
                logger.warning(f"... {context.visit_state.display_path()}.{key}")
                logger.warning(f"... {e!r}")
        return self
def diff(
self, collection: T.bpy_prop_collection, key: Union[int, str], collection_property: T.Property, context: Context
) -> Optional[Union[DeltaUpdate, DeltaReplace]]:
"""
Computes the difference between the state of an item tracked by this proxy and its Blender state.
This proxy tracks a collection of items indexed by string (e.g Scene.render.views) or int.
The result will be a ProxyDiff that contains a Delta item per added, deleted or updated item
Args:
            collection: the collection that must be diffed against this proxy
            key: the name of the collection, to record in the visit path
            collection_property: the property of collection as found in its enclosing object
"""
sequence = self._sequence
if len(sequence) == 0 and len(collection) == 0:
return None
if specifics.diff_must_replace(collection, sequence, collection_property):
# A collection cannot be updated because either:
# - some of its members cannot be updated :
# SplineBezierPoints has no API to remove points, so Curve.splines cannot be update and must be replaced
# - updating the name of members will cause unsolicited renames.
# When swapping layers A and B in a GreasePencilLayers, renaming layer 0 into B cause an | |
# gh_stars: 1-10
import math
import random
from typing import Optional
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from transformers.modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqModelOutput,
)
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.modeling_bart import (
BartPretrainedModel,
BartLearnedPositionalEmbedding,
BartEncoderLayer,
BartDecoderLayer,
_expand_mask,
_make_causal_mask,
shift_tokens_right,
logger
)
class BartEncoder(BartPretrainedModel):
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    :class:`BartEncoderLayer`.
    Args:
        config: BartConfig
        embed_tokens (torch.nn.Embedding): output embedding
    """
    def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = None):
        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.encoder_layerdrop
        embed_dim = config.d_model
        self.padding_idx = config.pad_token_id
        self.max_source_positions = config.max_position_embeddings
        # NOTE(review): embed_scale is computed here but never applied in forward(),
        # which consumes pre-computed inputs_embeds as-is — confirm callers scale.
        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
        # Reuse a provided embedding module if given, otherwise create one.
        if embed_tokens is not None:
            self.embed_tokens = embed_tokens
        else:
            self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
        self.embed_positions = BartLearnedPositionalEmbedding(
            config.max_position_embeddings,
            embed_dim,
            self.padding_idx,
        )
        self.layers = nn.ModuleList([BartEncoderLayer(config) for _ in range(config.encoder_layers)])
        self.layernorm_embedding = nn.LayerNorm(embed_dim)
        self.init_weights()
    def forward(
        self,
        attention_mask=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        Args:
            attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
                Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                `What are attention masks? <../glossary.html#attention-mask>`__
            head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
                Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`,
            `optional`):
                Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
                representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
                into associated vectors than the model's internal embedding lookup matrix.
            output_attentions (:obj:`bool`, `optional`):
                Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
                returned tensors for more detail.
            output_hidden_states (:obj:`bool`, `optional`):
                Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
                for more detail.
            return_dict (:obj:`bool`, `optional`):
                Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # retrieve input_ids and inputs_embeds
        # NOTE(review): unlike stock BART, this encoder accepts only inputs_embeds;
        # there is no input_ids path.
        if inputs_embeds is None:
            raise ValueError("inputs_embeds is None")
        else:
            input_shape = inputs_embeds.size()[:-1]
        embed_pos = self.embed_positions(input_shape)
        hidden_states = inputs_embeds + embed_pos
        hidden_states = self.layernorm_embedding(hidden_states)
        hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
        # expand attention_mask
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            assert head_mask.size()[0] == (
                len(self.layers)
            ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = random.uniform(0, 1)
            if self.training and (dropout_probability < self.layerdrop): # skip the layer
                layer_outputs = (None, None)
            else:
                if getattr(self.config, "gradient_checkpointing", False) and self.training:
                    # Trade compute for memory: recompute the layer during backward.
                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)
                        return custom_forward
                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(encoder_layer),
                        hidden_states,
                        attention_mask,
                        (head_mask[idx] if head_mask is not None else None),
                    )
                else:
                    layer_outputs = encoder_layer(
                        hidden_states,
                        attention_mask,
                        layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                        output_attentions=output_attentions,
                    )
                hidden_states = layer_outputs[0]
            if output_attentions:
                # NOTE(review): a layer skipped by LayerDrop contributes None here.
                all_attentions = all_attentions + (layer_outputs[1],)
        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
class BartDecoder(BartPretrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a :class:`BartDecoderLayer`
Args:
config: BartConfig
embed_tokens (torch.nn.Embedding): output embedding
"""
    def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = None):
        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.decoder_layerdrop
        self.padding_idx = config.pad_token_id
        self.max_target_positions = config.max_position_embeddings
        # Reuse a provided embedding module if given, otherwise create one.
        if embed_tokens is not None:
            self.embed_tokens = embed_tokens
        else:
            self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
        self.embed_positions = BartLearnedPositionalEmbedding(
            config.max_position_embeddings,
            config.d_model,
            self.padding_idx,
        )
        self.layers = nn.ModuleList([BartDecoderLayer(config) for _ in range(config.decoder_layers)])
        self.layernorm_embedding = nn.LayerNorm(config.d_model)
        self.init_weights()
    def get_input_embeddings(self):
        """Return the token embedding module."""
        return self.embed_tokens
    def set_input_embeddings(self, value):
        """Replace the token embedding module."""
        self.embed_tokens = value
    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
        """Combine the causal (look-ahead) mask with the padding mask.
        Returns None when decoding a single step with no padding mask supplied.
        """
        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = None
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(
                input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length
            ).to(self.device)
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
            # Additive combination: both masks use large negative values for
            # masked positions, so summing keeps every restriction.
            combined_attention_mask = (
                expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
            )
        return combined_attention_mask
def forward(
self,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
encoder_head_mask=None,
past_key_values=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length,
hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, encoder_sequence_length)`,
`optional`):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
encoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
on hidden heads. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2
tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1,
embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last
:obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of
shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids`` of shape :obj:`(batch_size,
sequence_length)`.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`,
`optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if inputs_embeds is None:
raise ValueError("inputs_embeds is None")
else:
input_shape = inputs_embeds.size()[:-1]
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
attention_mask = self._prepare_decoder_attention_mask(
attention_mask, input_shape, inputs_embeds, past_key_values_length
)
# expand encoder attention mask
"""
if encoder_hidden_states is not None and encoder_attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
"""
# embed positions
positions = self.embed_positions(input_shape, past_key_values_length)
hidden_states = inputs_embeds + positions
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and | |
variable value matches the expected type (e.g. string, integer, list, Boolean, etc.)
if type(variable["Value"]) != variable["Expected Type"]:
print(f'The value provided for the {variable["Name"]} variable is not in '
'the correct format.\n'
f'Please provide a value in the {variable["Expected Type"]} format '
'and restart the HX Auto Deploy Tool.\n'
f'The {variable["Name"]} variable can be found under '
f'{variable["Configuration Section"]}.\n'
f'An example value would be {variable["Example Value"]}\n')
sys.exit(0)
# Verify the provided variable value contains any required characters
if variable["Required Character"]:
if variable["Required Character"] not in variable["Value"]:
print(f'The value provided for the {variable["Name"]} variable is '
f'missing the required character "{variable["Required Character"]}" '
'in the value.\n'
'Please update the value and restart the HX Auto Deploy Tool.\n'
f'The {variable["Name"]} variable can be found under '
f'{variable["Configuration Section"]}.\n'
f'An example value would be {variable["Example Value"]}\n')
sys.exit(0)
# Verify the provided variable values for single IP address strings are valid IP addresses
if variable["Supplemental Type"] == "ip_string":
try:
ipaddress.ip_address(variable["Value"])
except Exception:
print(f'The value provided for the {variable["Name"]} variable is a not '
'a correctly formatted IPv4 address.\n'
'Please update the value and restart the HX Auto Deploy Tool.\n'
f'The {variable["Name"]} variable can be found under '
f'{variable["Configuration Section"]}.\n'
f'An example value would be {variable["Example Value"]}\n')
sys.exit(0)
# Verify the provided variable values for lists of IP address strings contain valid IP addresses
if variable["Supplemental Type"] == "ip_list":
try:
for variable_ip in variable["Value"]:
ipaddress.ip_address(variable_ip)
except Exception:
print(f'The value provided for the {variable["Name"]} variable contains '
'an entry that is not a correctly formatted IPv4 address.\n'
'Please update the value and restart the HX Auto Deploy Tool.\n'
f'The {variable["Name"]} variable can be found under '
f'{variable["Configuration Section"]}.\n'
f'An example value would be {variable["Example Value"]}\n')
sys.exit(0)
# Verify the provided variable value matches any restricted values
if variable["Restricted Value"]:
if variable["Value"] not in variable["Restricted Value"]:
print(f'The value provided for the {variable["Name"]} variable is not an '
'accepted value.\n'
'Please update the value and restart the HX Auto Deploy Tool.\n'
f'The {variable["Name"]} variable can be found under '
f'{variable["Configuration Section"]}.\n'
f'An example value would be {variable["Example Value"]}\n')
sys.exit(0)
# Verify the provided variable value does not exceed any set maximum length
if variable["Max Length"]:
if len(variable["Value"]) > variable["Max Length"]:
print(f'The value provided for the {variable["Name"]} variable exceeds the '
'maximum accepted length.\n'
'Please update the value and restart the HX Auto Deploy Tool.\n'
f'The {variable["Name"]} variable can be found under '
f'{variable["Configuration Section"]}.\n'
f'An example value would be {variable["Example Value"]}\n')
sys.exit(0)
# Verify the provided variable value for lists are not empty
if variable["Expected Type"] == list:
if not variable["Value"]:
print(f'The value provided for the {variable["Name"]} variable is an '
'empty list.\n'
'Please update the value and restart the HX Auto Deploy Tool.\n'
f'The {variable["Name"]} variable can be found under '
f'{variable["Configuration Section"]}.\n'
f'An example value would be {variable["Example Value"]}\n')
sys.exit(0)
# Verify the provided HyperFlex node attribute list can support the provided HyperFlex cluster size
if len(hx_node_attribute_list) < hx_cluster_size:
    # NOTE(review): this script exits with status 0 even on validation failure —
    # confirm no calling automation relies on a non-zero exit code here.
    print("The provided HyperFlex node attribute list has a total of "
          f"{len(hx_node_attribute_list)} entries. This is less than the provided "
          f"HyperFlex cluster size of {hx_cluster_size}. Please increase the "
          "provided HyperFlex node attributes then restart "
          "the HX Auto Deploy Tool.\n")
    sys.exit(0)
# Establish function to create IP address range lists
def create_ip_list(starting_ip_address, ending_ip_address):
    """Create an inclusive list of IP address strings between two addresses.

    Args:
        starting_ip_address: The first address of the range (e.g. "10.0.0.1").
        ending_ip_address: The last address of the range, inclusive.

    Returns:
        A list of compressed IP address strings covering the range. The list
        is empty when the ending address is lower than the starting address.

    Raises:
        ValueError: If either argument is not a valid IP address string.
    """
    start = int(ipaddress.ip_address(starting_ip_address))
    end = int(ipaddress.ip_address(ending_ip_address))
    # range() is half-open, so add 1 to the *integer* form to include the ending
    # address. Incrementing the integer (rather than the address object, as the
    # original code did) also works when the range ends at 255.255.255.255,
    # where `address + 1` would raise an overflow error.
    return [ipaddress.ip_address(value).compressed for value in range(start, end + 1)]
# Verify the VMware ESXi hypervisor management starting and ending IP addresses do not conflict
if esxi_mgmt_ip_range_start_address == esxi_mgmt_ip_range_end_address:
    print("For the VMware ESXi hypervisor management IP range, the provided "
          f"ending IP address of {esxi_mgmt_ip_range_end_address} is the same as "
          "the provided starting IP address of "
          f"{esxi_mgmt_ip_range_start_address}. Please provide a different ending "
          "IP address and restart the HX Auto Deploy Tool.\n")
    sys.exit(0)
# Verify the VMware ESXi hypervisor management starting IP address and subnet mask can be configured on an IP interface
try:
    esxi_mgmt_ip_range_start_address_ip_interface = ipaddress.ip_interface(f"{esxi_mgmt_ip_range_start_address}/{esxi_mgmt_ip_range_subnet_mask}")
except Exception as exception_message:
    print("There was an issue with testing the IP interface configuration for "
          "the provided VMware ESXi hypervisor management starting IP address of "
          f"{esxi_mgmt_ip_range_start_address} and the associated subnet mask of "
          f"{esxi_mgmt_ip_range_subnet_mask}.\n"
          "Please review the error message below, repair the provided IP address "
          "settings, then re-run the HX Auto Deploy Tool.\n")
    print(exception_message)
    sys.exit(0)
# Determine VMware ESXi hypervisor management IP network from provided starting IP address and subnet mask
esxi_mgmt_ip_range_start_address_network = esxi_mgmt_ip_range_start_address_ip_interface.network
# Verify the VMware ESXi hypervisor management ending IP address is in the same network as the starting IP address
if ipaddress.ip_address(esxi_mgmt_ip_range_end_address) not in esxi_mgmt_ip_range_start_address_network:
    print("For the VMware ESXi hypervisor management IP range, the provided "
          f"ending IP address of {esxi_mgmt_ip_range_end_address} is not in the "
          "same subnet as the provided starting IP address of "
          f"{esxi_mgmt_ip_range_start_address}. Please provide a different ending "
          "IP address and restart the HX Auto Deploy Tool.\n")
    sys.exit(0)
# Verify the VMware ESXi hypervisor management gateway IP address is in the same network as the starting IP address
if ipaddress.ip_address(esxi_mgmt_ip_range_gateway) not in esxi_mgmt_ip_range_start_address_network:
    print("For the VMware ESXi hypervisor management IP range, the provided "
          f"gateway IP address of {esxi_mgmt_ip_range_gateway} is not in the "
          "same subnet as the provided starting IP address of "
          f"{esxi_mgmt_ip_range_start_address}. Please provide a different "
          "gateway IP address and restart the HX Auto Deploy Tool.\n")
    sys.exit(0)
# Create the VMware ESXi hypervisor management IP address range list
esxi_mgmt_ip_range_list = create_ip_list(esxi_mgmt_ip_range_start_address, esxi_mgmt_ip_range_end_address)
# Verify the created VMware ESXi hypervisor management IP address range list can support the provided HyperFlex cluster size
if len(esxi_mgmt_ip_range_list) < hx_cluster_size:
    print("The VMware ESXi hypervisor management IP address range has a total "
          f"of {len(esxi_mgmt_ip_range_list)} usable addresses. This is less than "
          f"the provided HyperFlex cluster size of {hx_cluster_size}. Please "
          "increase the size of the VMware ESXi hypervisor management IP address "
          "range then restart the HX Auto Deploy Tool. The provided value for "
          "the starting or ending IP and subnet mask may need to be adjusted.\n"
          f"Current starting IP address: {esxi_mgmt_ip_range_start_address}\n"
          f"Current ending IP address: {esxi_mgmt_ip_range_end_address}\n"
          f"Current subnet mask: {esxi_mgmt_ip_range_subnet_mask}\n")
    sys.exit(0)
# Verify the created VMware ESXi hypervisor management IP address range list does not conflict with any entries in the HX node attribute list
# NOTE(review): this assumes hx_node_attribute_list entries are plain IP address
# strings comparable with `in` — confirm against where the list is built.
for esxi_mgmt_ip in esxi_mgmt_ip_range_list:
    if esxi_mgmt_ip in hx_node_attribute_list:
        print(f"The ESXi hypervisor management IP address of {esxi_mgmt_ip} "
              "created from the IP address range "
              f"{esxi_mgmt_ip_range_start_address} - {esxi_mgmt_ip_range_end_address}, "
              "conflicts with an IP address entry in the HX node attribute list "
              "variable named hx_node_attribute_list.\n"
              "The hx_node_attribute_list variable contains the following entries: \n"
              f"{hx_node_attribute_list}\n"
              "Please resolve the IP address conflict and restart "
              "the HX Auto Deploy Tool.\n")
        sys.exit(0)
# Verify the HXDP storage controller VM management starting IP address does not conflict with any entries in the VMware ESXi hypervisor management IP address range list
if storage_controller_vm_ip_range_start_address in esxi_mgmt_ip_range_list:
    print("For the HXDP storage controller VM management IP range, the provided "
          f"starting IP address of {storage_controller_vm_ip_range_start_address} "
          "is in the same range of IP addresses allocated for the VMware ESXi "
          "hypervisor management interfaces. Please provide a different starting "
          "IP address and restart the HX Auto Deploy Tool.\n")
    sys.exit(0)
# Verify the HXDP storage controller VM management ending IP address does not conflict with any entries in the VMware ESXi hypervisor management IP address range list
if storage_controller_vm_ip_range_end_address in esxi_mgmt_ip_range_list:
    print("For the HXDP storage controller VM management IP range, the provided "
          f"ending IP address of {storage_controller_vm_ip_range_end_address} is "
          "in the same range of IP addresses allocated for the VMware ESXi "
          "hypervisor management interfaces. Please provide a different ending "
          "IP address and restart the HX Auto Deploy Tool.\n")
    sys.exit(0)
# Verify the HXDP storage controller VM management starting and ending IP addresses do not conflict
if storage_controller_vm_ip_range_start_address == storage_controller_vm_ip_range_end_address:
print("For the HXDP storage controller VM management | |
# Copyright 2021 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional
from unittest import mock
from unittest.mock import PropertyMock
import pytest
from fastapi.testclient import TestClient
from kedro.io import DataCatalog
from kedro.pipeline import Pipeline
from kedro_viz.api import apps
from kedro_viz.api.graphql import schema
from kedro_viz.data_access.managers import DataAccessManager
from kedro_viz.models.graph import TaskNode
from kedro_viz.server import populate_data
@pytest.fixture
def example_api(
    data_access_manager: DataAccessManager,
    example_pipelines: Dict[str, Pipeline],
    example_catalog: DataCatalog,
    example_session_store_location: Optional[Path],
):
    """Yield a test API app whose endpoints read from the populated example data."""
    api = apps.create_api_app_from_project(mock.MagicMock())
    populate_data(
        data_access_manager,
        example_catalog,
        example_pipelines,
        example_session_store_location,
    )
    # Point both API modules at this test's data-access manager while it runs.
    responses_patch = mock.patch(
        "kedro_viz.api.responses.data_access_manager", new=data_access_manager
    )
    router_patch = mock.patch(
        "kedro_viz.api.router.data_access_manager", new=data_access_manager
    )
    with responses_patch, router_patch:
        yield api
@pytest.fixture
def example_api_no_session_store(
    data_access_manager: DataAccessManager,
    example_pipelines: Dict[str, Pipeline],
    example_catalog: DataCatalog,
):
    """Yield a test API app populated without a session store (location=None)."""
    api = apps.create_api_app_from_project(mock.MagicMock())
    populate_data(data_access_manager, example_catalog, example_pipelines, None)
    # Point both API modules at this test's data-access manager while it runs.
    responses_patch = mock.patch(
        "kedro_viz.api.responses.data_access_manager", new=data_access_manager
    )
    router_patch = mock.patch(
        "kedro_viz.api.router.data_access_manager", new=data_access_manager
    )
    with responses_patch, router_patch:
        yield api
@pytest.fixture
def example_transcoded_api(
    data_access_manager: DataAccessManager,
    example_transcoded_pipelines: Dict[str, Pipeline],
    example_transcoded_catalog: DataCatalog,
    example_session_store_location: Optional[Path],
):
    """Yield a test API app populated from the transcoded pipelines/catalog fixtures."""
    api = apps.create_api_app_from_project(mock.MagicMock())
    populate_data(
        data_access_manager,
        example_transcoded_catalog,
        example_transcoded_pipelines,
        example_session_store_location,
    )
    # Point both API modules at this test's data-access manager while it runs.
    responses_patch = mock.patch(
        "kedro_viz.api.responses.data_access_manager", new=data_access_manager
    )
    router_patch = mock.patch(
        "kedro_viz.api.router.data_access_manager", new=data_access_manager
    )
    with responses_patch, router_patch:
        yield api
@pytest.fixture
def client(example_api):
    """Yield an HTTP test client bound to the example API app."""
    test_client = TestClient(example_api)
    yield test_client
def test_graphql_run_query():
    """Querying a single run by id returns the stubbed run payload with metadata/details."""
    query = """
    query TestQuery($runId: ID!) {
        run(runId: $runId) {
            id
            bookmark
            timestamp
            title
            metadata {
                author
                gitBranch
            }
            details {
                name
                details
            }
        }
    }
    """
    result = schema.execute_sync(
        query,
        variable_values={"runId": "123"},
    )
    # The query must validate against the schema before the data is checked.
    assert result.errors is None
    assert result.data["run"] == {
        "id": "123",
        "bookmark": True,
        "timestamp": "2021-09-08T10:55:36.810Z",
        "title": "Sprint 5",
        "metadata": {"author": "author", "gitBranch": "my-branch"},
        "details": {"details": "{json:details}", "name": "name"},
    }
def test_graphql_runs_query():
    """Querying all runs returns the single stubbed run as a list."""
    query = """
    query TestQuery{
        runs {
            id
            bookmark
            timestamp
            title
        }
    }
    """
    result = schema.execute_sync(
        query,
    )
    # The query must validate against the schema before the data is checked.
    assert result.errors is None
    assert result.data["runs"] == [
        {
            "id": "123",
            "bookmark": True,
            "timestamp": "2021-09-08T10:55:36.810Z",
            "title": "Sprint 5",
        }
    ]
def assert_nodes_equal(response_nodes, expected_nodes):
node_sort_keys = operator.itemgetter("id")
for response_node, expected_node in zip(
sorted(response_nodes, key=node_sort_keys),
sorted(expected_nodes, key=node_sort_keys),
):
# since tags and pipelines are Sets, which are unordered,
# to assert them, we have to sort first
response_node_tags = response_node.pop("tags")
expected_node_tags = expected_node.pop("tags")
assert sorted(response_node_tags) == sorted(expected_node_tags)
response_node_pipelines = response_node.pop("pipelines")
expected_node_pipelines = expected_node.pop("pipelines")
assert sorted(response_node_pipelines) == sorted(expected_node_pipelines)
assert response_node == expected_node
def _is_dict_list(collection: Any) -> bool:
    """Return True if ``collection`` is a list of dicts (an empty list counts)."""
    if not isinstance(collection, list):
        return False
    # Only the first element is inspected; an empty list is vacuously True.
    return not collection or isinstance(collection[0], dict)
def assert_dict_list_equal(
    response: List[Dict], expected: List[Dict], sort_keys: Iterable[str]
):
    """Assert two lists of dictionaries are equal regardless of order.

    Both lists are sorted on ``sort_keys`` before comparison. An empty
    ``response`` is only accepted when ``expected`` is empty too.
    """
    if not response:
        assert not expected
        return
    key_getter = operator.itemgetter(*sort_keys)
    assert sorted(response, key=key_getter) == sorted(expected, key=key_getter)
def assert_modular_pipelines_tree_equal(response: Dict, expected: Dict):
    """Recursively assert that two modular-pipelines trees are equal."""
    # both trees must expose exactly the same set of keys
    assert sorted(response) == sorted(expected)
    for key, actual_value in response.items():
        expected_value = expected[key]
        if isinstance(actual_value, dict):
            # nested subtree: compare recursively
            assert_modular_pipelines_tree_equal(actual_value, expected_value)
        elif _is_dict_list(actual_value):
            # list of node dicts: order-insensitive comparison keyed on "id"
            assert_dict_list_equal(actual_value, expected_value, sort_keys=("id",))
        elif isinstance(actual_value, list):
            # plain list: compare as sorted sequences
            assert sorted(actual_value) == sorted(expected_value)
        else:
            assert actual_value == expected_value
def assert_example_data(response_data):
    """Assert graph response for the `example_pipelines` and `example_catalog` fixtures.

    Checks, in order: edges, nodes, the modular-pipelines tree, and finally the
    remaining top-level keys (tags, layers, pipelines, selected_pipeline).
    Mutates ``response_data`` by popping each section as it is compared.
    """
    # compare edges (order-insensitive, keyed on source/target)
    expected_edges = [
        {"source": "7b140b3f", "target": "d5a8b994"},
        {"source": "56118ad8", "target": "0ecea0de"},
        {"source": "13399a82", "target": "56118ad8"},
        {"source": "f1f1425b", "target": "7b140b3f"},
        {"source": "0ecea0de", "target": "7b140b3f"},
        {"source": "c506f374", "target": "56118ad8"},
        {"source": "13399a82", "target": "uk.data_processing"},
        {"source": "uk.data_processing", "target": "0ecea0de"},
        {"source": "c506f374", "target": "uk.data_processing"},
        {"source": "f1f1425b", "target": "uk"},
        {"source": "13399a82", "target": "uk"},
        {"source": "f1f1425b", "target": "uk.data_science"},
        {"source": "c506f374", "target": "uk"},
        {"source": "uk.data_science", "target": "d5a8b994"},
        {"source": "0ecea0de", "target": "uk.data_science"},
        {"source": "uk", "target": "d5a8b994"},
    ]
    assert_dict_list_equal(
        response_data.pop("edges"), expected_edges, sort_keys=("source", "target")
    )
    # compare nodes
    expected_nodes = [
        {
            "id": "56118ad8",
            "name": "Process Data",
            "full_name": "process_data",
            "tags": ["split"],
            "pipelines": ["__default__", "data_processing"],
            "modular_pipelines": ["uk", "uk.data_processing"],
            "type": "task",
            "parameters": {"train_test_split": 0.1},
        },
        {
            "id": "13399a82",
            "name": "Raw Data",
            "full_name": "uk.data_processing.raw_data",
            "tags": ["split"],
            "pipelines": ["__default__", "data_processing"],
            "modular_pipelines": ["uk", "uk.data_processing"],
            "type": "data",
            "layer": "raw",
            "dataset_type": "kedro.extras.datasets.pandas.csv_dataset.CSVDataSet",
        },
        {
            "id": "c506f374",
            "name": "Params: Train Test Split",
            "full_name": "params:train_test_split",
            "tags": ["split"],
            "pipelines": ["__default__", "data_processing"],
            "modular_pipelines": [],
            "type": "parameters",
            "layer": None,
            "dataset_type": None,
        },
        {
            "id": "0ecea0de",
            "name": "Model Inputs",
            "full_name": "model_inputs",
            "tags": ["train", "split"],
            "pipelines": ["__default__", "data_science", "data_processing"],
            "modular_pipelines": [],
            "type": "data",
            "layer": "model_inputs",
            "dataset_type": "kedro.extras.datasets.pandas.csv_dataset.CSVDataSet",
        },
        {
            "id": "7b140b3f",
            "name": "Train Model",
            "full_name": "train_model",
            "tags": ["train"],
            "pipelines": ["__default__", "data_science"],
            "modular_pipelines": ["uk", "uk.data_science"],
            "type": "task",
            "parameters": {"train_test_split": 0.1, "num_epochs": 1000},
        },
        {
            "id": "f1f1425b",
            "name": "Parameters",
            "full_name": "parameters",
            "tags": ["train"],
            "pipelines": ["__default__", "data_science"],
            "modular_pipelines": [],
            "type": "parameters",
            "layer": None,
            "dataset_type": None,
        },
        {
            "id": "d5a8b994",
            "name": "Model",
            "full_name": "uk.data_science.model",
            "tags": ["train"],
            "pipelines": ["__default__", "data_science"],
            "modular_pipelines": ["uk", "uk.data_science"],
            "type": "data",
            "layer": None,
            "dataset_type": "kedro.io.memory_data_set.MemoryDataSet",
        },
        {
            "id": "uk.data_processing",
            "name": "Data Processing",
            "full_name": "uk.data_processing",
            "tags": [],
            "pipelines": ["__default__", "data_processing"],
            "type": "modularPipeline",
            "modular_pipelines": None,
            "layer": None,
            "dataset_type": None,
        },
        {
            "id": "uk.data_science",
            "name": "Data Science",
            "full_name": "uk.data_science",
            "tags": [],
            "pipelines": ["__default__", "data_science"],
            "type": "modularPipeline",
            "modular_pipelines": None,
            "layer": None,
            "dataset_type": None,
        },
        {
            "id": "uk",
            "name": "Uk",
            "full_name": "uk",
            "tags": [],
            "pipelines": ["__default__", "data_processing", "data_science"],
            "type": "modularPipeline",
            "modular_pipelines": None,
            "layer": None,
            "dataset_type": None,
        },
    ]
    assert_nodes_equal(response_data.pop("nodes"), expected_nodes)
    # compare modular pipelines
    expected_modular_pipelines = {
        "__root__": {
            "children": [
                {"id": "0ecea0de", "type": "data"},
                {"id": "f1f1425b", "type": "parameters"},
                {"id": "c506f374", "type": "parameters"},
                {"id": "uk", "type": "modularPipeline"},
            ],
            "id": "__root__",
            "inputs": [],
            "name": "Root",
            "outputs": [],
        },
        "uk": {
            "children": [
                {"id": "uk.data_science", "type": "modularPipeline"},
                {"id": "uk.data_processing", "type": "modularPipeline"},
            ],
            "id": "uk",
            "inputs": ["c506f374", "13399a82", "f1f1425b"],
            "name": "Uk",
            "outputs": ["d5a8b994"],
        },
        "uk.data_processing": {
            "children": [
                {"id": "13399a82", "type": "data"},
                {"id": "56118ad8", "type": "task"},
            ],
            "id": "uk.data_processing",
            "inputs": ["c506f374", "13399a82"],
            "name": "Data Processing",
            "outputs": ["0ecea0de"],
        },
        "uk.data_science": {
            "children": [
                {"id": "7b140b3f", "type": "task"},
                {"id": "d5a8b994", "type": "data"},
            ],
            "id": "uk.data_science",
            "inputs": ["0ecea0de", "f1f1425b"],
            "name": "Data Science",
            "outputs": ["d5a8b994"],
        },
    }
    assert_modular_pipelines_tree_equal(
        response_data.pop("modular_pipelines"), expected_modular_pipelines
    )
    # compare the rest
    assert response_data == {
        "tags": [{"id": "split", "name": "Split"}, {"id": "train", "name": "Train"}],
        "layers": ["raw", "model_inputs"],
        "pipelines": [
            {"id": "__default__", "name": "Default"},
            {"id": "data_science", "name": "Data Science"},
            {"id": "data_processing", "name": "Data Processing"},
        ],
        "selected_pipeline": "__default__",
    }
def assert_example_transcoded_data(response_data):
"""Assert graph response for the `example_transcoded_pipelines`
and `example_transcoded_catalog` fixtures."""
expected_edges = [
{"source": "f1f1425b", "target": "2302ea78"},
{"source": "dbad7c24", "target": "0ecea0de"},
{"source": "c506f374", "target": "dbad7c24"},
{"source": "7c58d8e6", "target": "dbad7c24"},
{"source": "2302ea78", "target": "1d06a0d7"},
{"source": "0ecea0de", "target": "2302ea78"},
]
assert_dict_list_equal(
response_data.pop("edges"), expected_edges, sort_keys=("source", "target")
)
# compare nodes
expected_nodes = [
{
"id": "dbad7c24",
"name": "Process Data",
"full_name": "process_data",
"tags": ["split"],
"pipelines": ["__default__", "data_processing"],
"modular_pipelines": [],
"type": "task",
"parameters": {"train_test_split": 0.1},
},
{
"id": "7c58d8e6",
"name": "Raw Data",
"full_name": "raw_data",
"tags": ["split"],
"pipelines": ["__default__", "data_processing"],
"modular_pipelines": [],
"type": "data",
"layer": None,
"dataset_type": None,
},
{
"id": "c506f374",
"name": "Params: Train Test Split",
"full_name": "params:train_test_split",
"tags": ["split"],
"pipelines": ["__default__", "data_processing"],
"modular_pipelines": [],
"type": "parameters",
"layer": None,
"dataset_type": None,
},
{
"id": "0ecea0de",
"name": "Model Inputs",
"full_name": "model_inputs",
"tags": ["train", "split"],
"pipelines": ["__default__", "data_processing"],
"modular_pipelines": [],
"type": "data",
"layer": None,
"dataset_type": None,
},
{
"id": "2302ea78",
"name": "Train Model",
"full_name": "train_model",
"tags": ["train"],
"pipelines": ["__default__", "data_processing"],
"modular_pipelines": [],
"type": "task",
"parameters": {"train_test_split": 0.1, "num_epochs": 1000},
},
{
"id": "f1f1425b",
"name": "Parameters",
"full_name": "parameters",
"tags": ["train"],
"pipelines": ["__default__", "data_processing"],
"modular_pipelines": [],
"type": "parameters",
"layer": None,
"dataset_type": None,
},
{
"id": "1d06a0d7",
"name": "Model",
| |
self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    # Defined in terms of == so inequality always stays consistent with __eq__.
    return not (self == other)
class StormTopology:
"""
Attributes:
- spouts
- bolts
- state_spouts
- worker_hooks
- storm_version
- jdk_version
"""
thrift_spec = (
None, # 0
(1, TType.MAP, 'spouts', (TType.STRING,None,TType.STRUCT,(SpoutSpec, SpoutSpec.thrift_spec)), None, ), # 1
(2, TType.MAP, 'bolts', (TType.STRING,None,TType.STRUCT,(Bolt, Bolt.thrift_spec)), None, ), # 2
(3, TType.MAP, 'state_spouts', (TType.STRING,None,TType.STRUCT,(StateSpoutSpec, StateSpoutSpec.thrift_spec)), None, ), # 3
(4, TType.LIST, 'worker_hooks', (TType.STRING,None), None, ), # 4
None, # 5
None, # 6
(7, TType.STRING, 'storm_version', None, None, ), # 7
(8, TType.STRING, 'jdk_version', None, None, ), # 8
)
  def __init__(self, spouts=None, bolts=None, state_spouts=None, worker_hooks=None, storm_version=None, jdk_version=None,):
    # Thrift-generated constructor: stores each field verbatim. Required-field
    # checks (spouts, bolts) are deferred to validate(), not performed here.
    self.spouts = spouts
    self.bolts = bolts
    self.state_spouts = state_spouts
    self.worker_hooks = worker_hooks
    self.storm_version = storm_version
    self.jdk_version = jdk_version
  def read(self, iprot):
    # Deserialize this struct from a Thrift protocol. Uses the C-accelerated
    # fastbinary path when the protocol/transport support it, otherwise falls
    # back to field-by-field decoding. Unknown field ids are skipped so newer
    # writers remain readable.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        # field 1: spouts — map<string, SpoutSpec>
        if ftype == TType.MAP:
          self.spouts = {}
          (_ktype40, _vtype41, _size39 ) = iprot.readMapBegin()
          for _i43 in xrange(_size39):
            _key44 = iprot.readString().decode('utf-8')
            _val45 = SpoutSpec()
            _val45.read(iprot)
            self.spouts[_key44] = _val45
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        # field 2: bolts — map<string, Bolt>
        if ftype == TType.MAP:
          self.bolts = {}
          (_ktype47, _vtype48, _size46 ) = iprot.readMapBegin()
          for _i50 in xrange(_size46):
            _key51 = iprot.readString().decode('utf-8')
            _val52 = Bolt()
            _val52.read(iprot)
            self.bolts[_key51] = _val52
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        # field 3: state_spouts — map<string, StateSpoutSpec>
        if ftype == TType.MAP:
          self.state_spouts = {}
          (_ktype54, _vtype55, _size53 ) = iprot.readMapBegin()
          for _i57 in xrange(_size53):
            _key58 = iprot.readString().decode('utf-8')
            _val59 = StateSpoutSpec()
            _val59.read(iprot)
            self.state_spouts[_key58] = _val59
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        # field 4: worker_hooks — list<binary>; note: entries are NOT utf-8 decoded
        if ftype == TType.LIST:
          self.worker_hooks = []
          (_etype63, _size60) = iprot.readListBegin()
          for _i64 in xrange(_size60):
            _elem65 = iprot.readString()
            self.worker_hooks.append(_elem65)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 7:
        # field 7: storm_version — optional string
        if ftype == TType.STRING:
          self.storm_version = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 8:
        # field 8: jdk_version — optional string
        if ftype == TType.STRING:
          self.jdk_version = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this StormTopology to *oprot* (a Thrift output protocol).

    Uses the C-accelerated ``fastbinary`` encoder when available; otherwise
    emits each non-None field with its field id and wire type.
    """
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('StormTopology')
    if self.spouts is not None:
      # Field 1 -- spouts: map<string, SpoutSpec>
      oprot.writeFieldBegin('spouts', TType.MAP, 1)
      oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.spouts))
      for kiter66,viter67 in self.spouts.items():
        oprot.writeString(kiter66.encode('utf-8'))
        viter67.write(oprot)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    if self.bolts is not None:
      # Field 2 -- bolts: map<string, Bolt>
      oprot.writeFieldBegin('bolts', TType.MAP, 2)
      oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.bolts))
      for kiter68,viter69 in self.bolts.items():
        oprot.writeString(kiter68.encode('utf-8'))
        viter69.write(oprot)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    if self.state_spouts is not None:
      # Field 3 -- state_spouts: map<string, StateSpoutSpec>
      oprot.writeFieldBegin('state_spouts', TType.MAP, 3)
      oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.state_spouts))
      for kiter70,viter71 in self.state_spouts.items():
        oprot.writeString(kiter70.encode('utf-8'))
        viter71.write(oprot)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    if self.worker_hooks is not None:
      # Field 4 -- worker_hooks: written raw, mirroring read() (no encode).
      oprot.writeFieldBegin('worker_hooks', TType.LIST, 4)
      oprot.writeListBegin(TType.STRING, len(self.worker_hooks))
      for iter72 in self.worker_hooks:
        oprot.writeString(iter72)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.storm_version is not None:
      oprot.writeFieldBegin('storm_version', TType.STRING, 7)
      oprot.writeString(self.storm_version.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.jdk_version is not None:
      oprot.writeFieldBegin('jdk_version', TType.STRING, 8)
      oprot.writeString(self.jdk_version.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
def validate(self):
if self.spouts is None:
raise TProtocol.TProtocolException(message='Required field spouts is unset!')
if self.bolts is None:
raise TProtocol.TProtocolException(message='Required field bolts is unset!')
if self.state_spouts is None:
raise TProtocol.TProtocolException(message='Required field state_spouts is unset!')
return
  def __hash__(self):
    """Order-sensitive 17/31 field hash (standard Thrift-generated pattern).

    NOTE(review): spouts/bolts/state_spouts are dicts and worker_hooks is a
    list (see read()), which are unhashable -- hashing a populated instance
    would raise TypeError. Left as generated; confirm before relying on it.
    """
    value = 17
    value = (value * 31) ^ hash(self.spouts)
    value = (value * 31) ^ hash(self.bolts)
    value = (value * 31) ^ hash(self.state_spouts)
    value = (value * 31) ^ hash(self.worker_hooks)
    value = (value * 31) ^ hash(self.storm_version)
    value = (value * 31) ^ hash(self.jdk_version)
    return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    # Python 2 does not derive __ne__ from __eq__, so it is spelled out.
    return not (self == other)
class AlreadyAliveException(TException):
  """Thrift application exception carrying a single required message string.

  Attributes:
   - msg
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'msg', None, None, ), # 1
  )

  def __init__(self, msg=None,):
    self.msg = msg

  def read(self, iprot):
    """Deserialize from *iprot*; uses the fastbinary fast path when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.msg = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize to *oprot*; uses the fastbinary fast path when available."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('AlreadyAliveException')
    if self.msg is not None:
      oprot.writeFieldBegin('msg', TType.STRING, 1)
      oprot.writeString(self.msg.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if the required ``msg`` field is unset."""
    if self.msg is None:
      raise TProtocol.TProtocolException(message='Required field msg is unset!')
    return

  def __str__(self):
    return repr(self)

  def __hash__(self):
    # Order-sensitive 17/31 field hash over the single field.
    value = 17
    value = (value * 31) ^ hash(self.msg)
    return value

  def __repr__(self):
    # Fix: dict.iteritems() is Python 2 only; items() works on both 2 and 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class NotAliveException(TException):
  """Thrift application exception carrying a single required message string.

  Attributes:
   - msg
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'msg', None, None, ), # 1
  )

  def __init__(self, msg=None,):
    self.msg = msg

  def read(self, iprot):
    """Deserialize from *iprot*; uses the fastbinary fast path when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.msg = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize to *oprot*; uses the fastbinary fast path when available."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('NotAliveException')
    if self.msg is not None:
      oprot.writeFieldBegin('msg', TType.STRING, 1)
      oprot.writeString(self.msg.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if the required ``msg`` field is unset."""
    if self.msg is None:
      raise TProtocol.TProtocolException(message='Required field msg is unset!')
    return

  def __str__(self):
    return repr(self)

  def __hash__(self):
    # Order-sensitive 17/31 field hash over the single field.
    value = 17
    value = (value * 31) ^ hash(self.msg)
    return value

  def __repr__(self):
    # Fix: dict.iteritems() is Python 2 only; items() works on both 2 and 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class AuthorizationException(TException):
  """Thrift application exception carrying a single required message string.

  Attributes:
   - msg
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'msg', None, None, ), # 1
  )

  def __init__(self, msg=None,):
    self.msg = msg

  def read(self, iprot):
    """Deserialize from *iprot*; uses the fastbinary fast path when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.msg = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize to *oprot*; uses the fastbinary fast path when available."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('AuthorizationException')
    if self.msg is not None:
      oprot.writeFieldBegin('msg', TType.STRING, 1)
      oprot.writeString(self.msg.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if the required ``msg`` field is unset."""
    if self.msg is None:
      raise TProtocol.TProtocolException(message='Required field msg is unset!')
    return

  def __str__(self):
    return repr(self)

  def __hash__(self):
    # Order-sensitive 17/31 field hash over the single field.
    value = 17
    value = (value * 31) ^ hash(self.msg)
    return value

  def __repr__(self):
    # Fix: dict.iteritems() is Python 2 only; items() works on both 2 and 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class InvalidTopologyException(TException):
"""
Attributes:
- msg
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'msg', None, None, ), # 1
)
def __init__(self, msg=None,):
self.msg = msg
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.msg = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('InvalidTopologyException')
if self.msg is not None:
oprot.writeFieldBegin('msg', TType.STRING, 1)
oprot.writeString(self.msg.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.msg is None:
raise TProtocol.TProtocolException(message='Required field msg is unset!')
return
def __str__(self):
return repr(self)
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.msg)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
| |
<reponame>schardong/visual-scenario-reduction
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Application main window plus tests.
"""
import os
import sys
from PyQt5.QtCore import QDir, Qt, QSize
from PyQt5.QtGui import QIntValidator, QPixmap
from PyQt5.QtWidgets import (QApplication, QCheckBox, QComboBox,
QDesktopWidget, QFileDialog, QFormLayout,
QGridLayout, QGroupBox, QHBoxLayout, QLabel,
QLineEdit, QMainWindow, QMenu, QMessageBox,
QPushButton, QSpinBox, QStyleFactory, QVBoxLayout,
QWidget, QProxyStyle, QStyle)
from fieldensemble import FieldEnsemble
from plotwidget import PlotWidget
class SpinBoxCustomStyle(QProxyStyle):
    """
    Workaround to avoid double event triggering by the QSpinBox UI items.
    """

    # All three auto-repeat style hints are answered with an absurdly large
    # delay/rate, which effectively disables spin-box auto-repeat.
    _NO_AUTOREPEAT_HINTS = (
        QStyle.SH_SpinBox_KeyPressAutoRepeatRate,
        QStyle.SH_SpinBox_ClickAutoRepeatRate,
        QStyle.SH_SpinBox_ClickAutoRepeatThreshold,
    )

    def styleHint(self, hint, option=None, widget=None, returnData=None):
        if hint in self._NO_AUTOREPEAT_HINTS:
            return 10**10
        return super().styleHint(hint, option, widget, returnData)
class MainWindow(QMainWindow):
    """
    The main window of our application.
    """

    # Supported well type codes.
    WELL_TYPES = ['P', 'I']
    # UI label -> colormap identifier used for the fancharts.
    FANCHART_COLOR_OPTIONS_MAPPING = {
        'Grayscale': 'gray_r',
        'Shades of Blue': 'Blues',
        'Shades of Red': 'Reds',
        'Heat': 'hot_r'
    }
    # Fixed presentation order of the fanchart palette labels.
    FANCHART_OPTIONS_ORDERING = [
        'Grayscale',
        'Shades of Blue',
        'Shades of Red',
        'Heat',
    ]
    # UI label -> colormap identifier used for the data curves.
    DATA_COLOR_OPTIONS_MAPPING = {
        'Winter': 'winter',
        'Summer': 'summer',
        'Topological': 'gist_earth',
        'Ocean': 'ocean',
        'Gist Stern': 'gist_stern',
        'Terrain': 'terrain',
        'Blue to Magenta': 'cool',
    }
    # Fixed presentation order of the data colormap labels.
    DATA_OPTIONS_ORDERING = [
        'Topological',
        'Ocean',
        'Terrain',
        'Blue to Magenta',
        'Gist Stern',
        'Winter',
        'Summer',
    ]
    def __init__(self, width, height):
        """Create the main window and initialize data and UI attributes.

        Parameters
        ----------
        width: int
            Initial window width in pixels.
        height: int
            Initial window height in pixels.
        """
        super(MainWindow, self).__init__()
        self.setAttribute(Qt.WA_DeleteOnClose)
        self._plt_widget = None
        self._left, self._top = 0, 0
        self._width, self._height = width, height
        self._title = 'Time-series Scenario Selection'
        # Data setup
        self._ensemble = None
        self._properties = []
        self._base_data_path = ''
        self._full_data_path = ''
        self._curr_property = None
        self._curr_baseline = 'P50'
        self._well_type = self.WELL_TYPES[0]
        # UI elements
        self._file_menu = None
        self._help_menu = None
        self._combo_data_colormap = None
        self._combo_property = None
        self._combo_baseline = None
        self._combo_fan_color_pallete = None
        self._chk_show_p10 = None
        self._chk_show_p50 = None
        self._chk_show_p90 = None
        self._chk_show_p10_lamp = None
        self._chk_show_p50_lamp = None
        self._chk_show_p90_lamp = None
        self._spin_start_ts = None
        self._spin_end_ts = None
        self._main_widget = None
        self._panel_widget = None
        self._build_ui()
    def __del__(self):
        # Drop the references to the plot widget and the loaded ensemble so
        # they can be collected promptly.
        del self._plt_widget
        del self._ensemble
    @property
    def properties(self):
        """
        Returns the time series properties available.

        Returns
        -------
        out: list
            A list with the property names (empty before data is loaded).
        """
        return self._properties
    @property
    def current_property(self):
        """
        Returns the current property of the curves being shown.

        Returns
        -------
        out: str
            The current property. ``None`` before any data is loaded.
        """
        return self._curr_property
    @property
    def current_baseline(self):
        """
        Returns the ID of the current baseline curve.

        Returns
        -------
        out: str
            The ID of the current baseline ('P10', 'P50' or 'P90').
        """
        return self._curr_baseline
    def resizeEvent(self, event):
        """
        Processes a Qt resize event.

        Parameters
        ----------
        event: QResizeEvent
            The resize event carrying the window's new size.
        """
        new_size = event.size()
        # Cap the side panel's width at 400 px while letting it grow with
        # the window's full height.
        self._panel_widget.setMaximumSize(
            400, new_size.height())
def set_current_property(self, new_prop):
"""
Sets the current property used by the plots. Raises a ValueError
exception if the new property is not in the list of known properties.
Parameters
----------
new_prop: str
The new property to be used by the plots.
"""
if new_prop not in self.properties:
raise ValueError('New property (%s) is unknown.' % new_prop)
self._curr_property = new_prop
def set_current_baseline(self, new_baseline):
"""
Sets the current baseline curve for the rank and distance plots. Raises
a ValueError exception if the baseline is unknown.
Parameters
----------
new_baseline: str
The ID of the new baseline. Possible values are: 'p10', 'p50' and
'p90'.
"""
if new_baseline not in ['P10', 'P50', 'P90']:
raise ValueError('New baseline (%s) is unknown.' % new_baseline)
self._curr_baseline = new_baseline
    def fileLoadData(self):
        """
        Opens a popup file dialog and asks for the location of the data files,
        then loads them and enables the plotting UI.
        TO-DO: Load initial data if the user chooses to.
        """
        data_path = QFileDialog.getExistingDirectory(
            self, 'Open data directory', QDir.homePath())
        if not data_path:
            # User cancelled the dialog.
            return
        QApplication.setOverrideCursor(Qt.WaitCursor)
        self._base_data_path = data_path
        # Well data lives under the 'ajustado' subdirectory; its entries are
        # taken as the available property names.
        self._full_data_path = os.path.join(self._base_data_path, 'ajustado')
        self._properties = os.listdir(self._full_data_path)
        cprop = self.current_property
        if not cprop or cprop not in self.properties:
            self.set_current_property(self.properties[0])
        self._ensemble = FieldEnsemble(well_data_path=self._full_data_path,
                                       prop_list=self.properties,
                                       well_type_path=self._base_data_path)
        QApplication.restoreOverrideCursor()
        msg = QMessageBox(QMessageBox.Information, 'Load data',
                          'Data loaded successfully.', QMessageBox.Ok, self)
        msg.exec()
        # Adding the loaded curves to the plot widget
        self.update_data(data_changed=True)
        # Adding the properties to the UI.
        self._combo_property.clear()
        for prop in self._properties:
            self._combo_property.addItem(prop)
        # Enabling the UI (disabled until data is available)
        self._alg_box.setEnabled(True)
        self._global_graphical_box.setEnabled(True)
        self._timestep_box.setEnabled(True)
        self._graphics_box.setEnabled(True)
        self._tlchart_box.setEnabled(True)
        self._fanchart_box.setEnabled(True)
        self._rankchart_box.setEnabled(True)
        self._distchart_box.setEnabled(True)
    def fileQuit(self):
        """
        Method called when the application is about to quit. Any cleanups are
        performed here.
        """
        # Delegates to QWidget.close(); teardown is driven by Qt from there.
        self.close()
    def helpAbout(self):
        """
        Method called whenever the 'About' option is selected. Opens a modal
        dialog with a short description of this software.
        """
        QMessageBox.about(self, 'About this software', 'This software is a prototype time-series visualization tool that implements a series of charts in order to help on the task of selecting representative time-series. To accomplish this task, the software uses the brushing and linking technique to enable the selection of a series (or group of series) in one chart and this seleciton is reflected on the others. This tool also implements two new charts proposed by our research work, the Time-lapsed Multidimensional Projection chart and the Rank chart.\n\nThis is a prototype, meaning that there are bugs lying around and the user interface is definetly not polished. We provide no warranty whatsoever, so use this software at your own risk.')
    def closeEvent(self, _):
        """
        Method called when the window is about to be closed. The event object
        itself is ignored; quitting is delegated to fileQuit().
        """
        self.fileQuit()
def baseline_changed(self):
"""
Slot method called when the baseline option is changed in the UI.
Repasses the new value to the plots.
"""
self._curr_baseline = self._combo_baseline.currentText()
self.update_data(baseline_changed=True)
def property_changed(self):
"""
Slot method called when the property is changed in the UI. Repasses
this new value to the plots.
"""
self._curr_property = self._combo_property.currentText()
self.update_data(data_changed=True)
    def clear_selected_data(self):
        """
        Slot method called when the clear data button is pressed in the UI.
        Clears the highlighted curves from all plots (delegated to the
        plot widget).
        """
        self._plt_widget.clear_selected_data()
def set_plot_points_tlchart(self, state):
"""
Slot method to set the plot points option in the time lapse projection
plot.
"""
checked = (state == Qt.Checked)
self._plt_widget.set_plot_points_tlchart(checked)
if not checked and not self._chk_plot_lines.isChecked():
self._chk_plot_lines.setChecked(True)
def set_plot_lines_tlchart(self, state):
"""
Slot method to set the plot lines option in the time lapse projection
plot.
"""
checked = (state == Qt.Checked)
self._plt_widget.set_plot_lines_tlchart(checked)
if not checked and not self._chk_plot_points.isChecked():
self._chk_plot_points.setChecked(True)
    def set_ts_highlight_tlchart(self, state):
        """
        Sets whether the timestep highlight is enabled for the projection chart.

        NOTE(review): currently a no-op -- the implementation below is
        commented out.
        """
        pass
        # checked = (state == Qt.Checked)
        # self._plt_widget.set_timestep_highlight_enabled(checked)
def set_log_scale_distchart(self, state):
"""
Slot method to set the log-scale option in the scenario/distance plot.
"""
checked = (state == Qt.Checked)
self._plt_widget.set_log_scale_distchart(checked)
def fan_color_pallete_changed(self):
"""
Sets the color pallete of the fanchart.
"""
opt = self._combo_fan_color_pallete.currentText()
pallete = self.FANCHART_COLOR_OPTIONS_MAPPING[opt]
self._plt_widget.set_fan_color_pallete(pallete)
def data_colormap_changed(self):
"""
Sets the color pallete of the data in all plots.
"""
opt = self._combo_data_colormap.currentText()
pallete = self.DATA_COLOR_OPTIONS_MAPPING[opt]
self._plt_widget.set_data_color_pallete(pallete)
    def save_plots(self):
        """
        Developer only method to save the current plots to PDF images
        (delegated to the plot widget).
        """
        self._plt_widget.save_plots()
def set_group_selection_distchart(self, state):
"""
Sets wheter the group selection mode for the distance chart is
activated or not.
"""
checked = state == Qt.Checked
self._plt_widget.set_group_selection_distchart(checked)
def set_group_selection_rankchart(self, state):
"""
Sets wheter the group selection mode for the bump chart is
activated or not.
"""
checked = state == Qt.Checked
self._plt_widget.set_group_selection_rankchart(checked)
    def set_start_timestep(self, value):
        """
        Slot called when the start timestep is changed. The start must stay
        at least 5 steps below the end timestep; when the new value gets too
        close, the end timestep is pushed to value + 5 (when possible).
        """
        start_min, end_max = self._plt_widget.max_timerange
        _, end_ts = self._plt_widget.timerange
        if value >= (end_ts - 5):
            if end_ts == end_max:
                # End timestep cannot move any further: emit a terminal bell
                # and keep the current range.
                sys.stdout.write('\a')
                sys.stdout.flush()
                return
            # Temporarily disconnect the sibling spinbox so programmatically
            # changing its value does not re-trigger set_end_timestep.
            self._spin_end_ts.valueChanged.disconnect()
            if value < end_max:
                end_ts = value + 5
                self._spin_end_ts.setValue(end_ts)
            self._spin_end_ts.valueChanged.connect(self.set_end_timestep)
        self._plt_widget.set_timestep_range(value, end_ts)
    def set_end_timestep(self, value):
        """
        Slot called when the end timestep is changed. The end must stay at
        least 5 steps above the start timestep; when the new value gets too
        close, the start timestep is pulled to value - 5 (when possible).
        """
        start_min, end_max = self._plt_widget.max_timerange
        start_ts, _ = self._plt_widget.timerange
        if value <= (start_ts + 5):
            if start_ts == start_min:
                # Start timestep cannot move any further: emit a terminal
                # bell and keep the current range.
                sys.stdout.write('\a')
                sys.stdout.flush()
                return
            # Temporarily disconnect the sibling spinbox so programmatically
            # changing its value does not re-trigger set_start_timestep.
            self._spin_start_ts.valueChanged.disconnect()
            if value > start_min:
                start_ts = value - 5
                self._spin_start_ts.setValue(start_ts)
            self._spin_start_ts.valueChanged.connect(self.set_start_timestep)
        self._plt_widget.set_timestep_range(start_ts, value)
def update_data(self, **kwargs):
"""
Gets the newly loaded data from the Time series ensemble and passes
them to the plot widget.
"""
if 'data_changed' in kwargs:
# Since changing the data is a lengthy operation, we change the
# mouse cursor to indicate this to | |
int timeout_headers: Defines the timeout (seconds) while waiting for http headers.
Default: `socket_timeout`.
:param int timeout_backend: Defines the timeout (seconds) when connecting to backend instances.
Default: `socket_timeout`.
"""
super().set_connections_params(
**filter_locals(locals(), ['timeout_headers', 'timeout_backend']))
self._set_aliased('headers-timeout', timeout_headers)
self._set_aliased('connect-timeout', timeout_backend)
return self
def set_manage_params(
self, chunked_input=None, chunked_output=None, gzip=None, websockets=None, source_method=None,
rtsp=None, proxy_protocol=None):
"""Allows enabling various automatic management mechanics.
* http://uwsgi.readthedocs.io/en/latest/Changelog-1.9.html#http-router-keepalive-auto-chunking-auto-gzip-and-transparent-websockets
:param bool chunked_input: Automatically detect chunked input requests and put the session in raw mode.
:param bool chunked_output: Automatically transform output to chunked encoding
during HTTP 1.1 keepalive (if needed).
:param bool gzip: Automatically gzip content if uWSGI-Encoding header is set to gzip,
but content size (Content-Length/Transfer-Encoding) and Content-Encoding are not specified.
:param bool websockets: Automatically detect websockets connections and put the session in raw mode.
:param bool source_method: Automatically put the session in raw mode for `SOURCE` HTTP method.
* http://uwsgi.readthedocs.io/en/latest/Changelog-2.0.5.html#icecast2-protocol-helpers
:param bool rtsp: Allow the HTTP router to detect RTSP and chunked requests automatically.
:param bool proxy_protocol: Allows the HTTP router to manage PROXY1 protocol requests,
such as those made by Haproxy or Amazon Elastic Load Balancer (ELB).
"""
self._set_aliased('chunked-input', chunked_input, cast=bool)
self._set_aliased('auto-chunked', chunked_output, cast=bool)
self._set_aliased('auto-gzip', gzip, cast=bool)
self._set_aliased('websockets', websockets, cast=bool)
self._set_aliased('manage-source', source_method, cast=bool)
self._set_aliased('manage-rtsp', rtsp, cast=bool)
self._set_aliased('enable-proxy-protocol', proxy_protocol, cast=bool)
return self
def set_owner_params(self, uid=None, gid=None):
"""Drop http router privileges to specified user and group.
:param str|int uid: Set uid to the specified username or uid.
:param str|int gid: Set gid to the specified groupname or gid.
"""
self._set_aliased('uid', uid)
self._set_aliased('gid', gid)
return self
class RouterHttps(RouterHttp):
    """uWSGI includes an HTTPS router/proxy/load-balancer that can forward requests to uWSGI workers.

    The server can be used in two ways:

        * embedded - automatically spawn workers and setup the communication socket
        * standalone - you have to specify the address of a uwsgi socket to connect to

            See `subscribe_to` argument to `.set_basic_params()`

    .. note:: If you want to go massive (virtualhosting and zero-conf scaling) combine the HTTP router
        with the uWSGI Subscription Server.

    """
    alias = 'http'  # Shares options with http.
    plugin = alias
    on_command = 'https2'

    def __init__(
            self, on, cert, key, ciphers=None, client_ca=None, session_context=None, use_spdy=None,
            export_cert_var=None):
        """Binds https router to run on the given address.

        :param SocketShared|str on: Activates the router on the given address.

        :param str cert: Certificate file.

        :param str key: Private key file.

        :param str ciphers: Ciphers [alias] string.

            Example:
                * DEFAULT
                * HIGH
                * DHE, EDH

            * https://www.openssl.org/docs/man1.1.0/apps/ciphers.html

        :param str client_ca: Client CA file for client-based auth.

            .. note: You can prepend ! (exclamation mark) to make client certificate
                authentication mandatory.

        :param str session_context: Session context identifying string. Can be set to static shared value
            to avoid session rejection.

            Default: a value built from the HTTP server address.

            * http://uwsgi.readthedocs.io/en/latest/SSLScaling.html#setup-2-synchronize-caches-of-different-https-routers

        :param bool use_spdy: Use SPDY.

        :param bool export_cert_var: Export uwsgi variable `HTTPS_CC` containing the raw client certificate.

        """
        # NB: locals() is captured before any other local name is defined, so
        # only the constructor arguments end up in the key=value address;
        # 'session_context' is excluded and set through its own option below.
        on = KeyValue(
            filter_locals(locals(), drop=['session_context']),
            aliases={'on': 'addr', 'use_spdy': 'spdy'},
            bool_keys=['use_spdy'],
        )

        super().__init__(on)

        self._set_aliased('session-context', session_context)
class RouterSsl(_RouterWithForwarders):
    """Works in the same way as the RouterRaw, but will terminate ssl connections.

    Supports SNI for implementing virtual hosting.

    """
    alias = 'sslrouter'
    plugin = alias
    on_command = 'sslrouter2'

    def __init__(self, on, cert, key, forward_to=None, ciphers=None, client_ca=None, session_context=None, use_sni=None):
        """Activates the router on the given address.

        :param SocketShared|str on: Activates the router on the given address.

        :param str cert: Certificate file.

        :param str key: Private key file.

        :param Forwarder|str|list[str] forward_to: Where to forward requests.
            Expects a forwarder instance or one or more node names.

        :param str ciphers: Ciphers [alias] string.

            Example:
                * DEFAULT
                * HIGH
                * DHE, EDH

            * https://www.openssl.org/docs/man1.1.0/apps/ciphers.html

        :param str client_ca: Client CA file for client-based auth.

        :param str session_context: Session context identifying string. Can be set to static shared value
            to avoid session rejection.

            Default: a value built from the HTTP server address.

            * http://uwsgi.readthedocs.io/en/latest/SSLScaling.html#setup-2-synchronize-caches-of-different-https-routers

        :param bool use_sni: Use SNI to route requests.

        """
        # NB: locals() is captured before any other local name is defined, so
        # only the constructor arguments end up in the key=value address.
        on = KeyValue(
            filter_locals(locals(), drop=['session_context', 'use_sni']),
            aliases={'on': 'addr'},
        )

        # Fix: initialize the base router *before* touching options --
        # _set_aliased() relies on state set up by the base constructor.
        # This also mirrors the ordering used by RouterHttps and RouterFast.
        super().__init__(on, forward_to)

        self._set_aliased('session-context', session_context)
        self._set_aliased('sni', use_sni, cast=bool)

    def set_connections_params(self, harakiri=None, timeout_socket=None, retry_delay=None, retry_max=None):
        """Sets connection-related parameters.

        :param int harakiri: Set gateway harakiri timeout (seconds).

        :param int timeout_socket: Node socket timeout (seconds). Default: 60.

        :param int retry_delay: Retry connections to dead static nodes after the specified
            amount of seconds. Default: 30.

        :param int retry_max: Maximum number of retries/fallbacks to other nodes. Default: 3.

        """
        super().set_connections_params(**filter_locals(locals(), ['retry_max']))

        self._set_aliased('max-retries', retry_max)

        return self
class RouterFast(_RouterWithForwarders):
    """A proxy/load-balancer/router speaking the uwsgi protocol.

    You can put it between your webserver and real uWSGI instances to have more control
    over the routing of HTTP requests to your application servers.

    """
    alias = 'fastrouter'
    plugin = alias

    def set_basic_params(
            self, workers=None, zerg_server=None, fallback_node=None, concurrent_events=None,
            cheap_mode=None, stats_server=None, quiet=None, buffer_size=None,
            fallback_nokey=None, subscription_key=None, emperor_command_socket=None):
        """
        :param int workers: Number of worker processes to spawn.

        :param str zerg_server: Attach the router to a zerg server.

        :param str fallback_node: Fallback to the specified node in case of error.

        :param int concurrent_events: Set the maximum number of concurrent events router can manage.

            Default: system dependent.

        :param bool cheap_mode: Enables cheap mode. When the router is in cheap mode,
            it will not respond to requests until a node is available.
            This means that when there are no nodes subscribed, only your local app (if any) will respond.
            When all of the nodes go down, the router will return in cheap mode.

        :param str stats_server: Router stats server address to run at.

        :param bool quiet: Do not report failed connections to instances.

        :param int buffer_size: Set internal buffer size in bytes. Default: page size.

        :param bool fallback_nokey: Move to fallback node even if a subscription key is not found.

        :param str subscription_key: Skip uwsgi parsing and directly set a key.

        :param str emperor_command_socket: Set the emperor command socket that will receive spawn commands.

            See `.empire.set_emperor_command_params()`.

        """
        # The last three arguments are fastrouter-specific, so they are kept
        # out of the generic base-class handling and set explicitly below.
        super().set_basic_params(**filter_locals(locals(), [
            'fallback_nokey',
            'subscription_key',
            'emperor_command_socket',
        ]))

        self._set_aliased('fallback-on-no-key', fallback_nokey, cast=bool)
        self._set_aliased('force-key', subscription_key)
        self._set_aliased('emperor-socket', emperor_command_socket)

        return self

    def set_resubscription_params(self, addresses=None, bind_to=None):
        """You can specify a dgram address (udp or unix) on which all of the subscriptions
        request will be forwarded to (obviously changing the node address to the router one).

        The system could be useful to build 'federated' setup.

        * http://uwsgi.readthedocs.io/en/latest/Changelog-2.0.1.html#resubscriptions

        :param str|list[str] addresses: Forward subscriptions to the specified subscription server.

        :param str|list[str] bind_to: Bind to the specified address when re-subscribing.

        """
        self._set_aliased('resubscribe', addresses, multi=True)
        self._set_aliased('resubscribe-bind', bind_to)

        return self

    def set_connections_params(self, harakiri=None, timeout_socket=None, retry_delay=None, retry_max=None, defer=None):
        """Sets connection-related parameters.

        :param int harakiri: Set gateway harakiri timeout (seconds).

        :param int timeout_socket: Node socket timeout (seconds). Default: 60.

        :param int retry_delay: Retry connections to dead static nodes after the specified
            amount of seconds. Default: 30.

        :param int retry_max: Maximum number of retries/fallbacks to other nodes. Default: 3

        :param int defer: Defer connection delay, seconds. Default: 5.

        """
        # 'retry_max' and 'defer' are fastrouter-specific options.
        super().set_connections_params(**filter_locals(locals(), ['retry_max', 'defer']))

        self._set_aliased('max-retries', retry_max)
        self._set_aliased('defer-connect-timeout', defer)

        return self

    def set_postbuffering_params(self, size=None, store_dir=None):
        """Sets buffering params.

        Web-proxies like nginx are "buffered", so they wait til the whole request (and its body)
        has been read, and then it sends it to the backends.

        :param int size: The size (in bytes) of the request body after which the body will
            be stored to disk (as a temporary file) instead of memory.

        :param str store_dir: Put buffered files to the specified directory. Default: TMPDIR, /tmp/

        """
        self._set_aliased('post-buffering', size)
        self._set_aliased('post-buffering-dir', store_dir)

        return self

    def set_owner_params(self, uid=None, gid=None):
        """Drop http router privileges to specified user and group.

        :param str|int uid: Set uid to the specified username or uid.

        :param str|int gid: Set gid to the specified groupname or gid.

        """
        self._set_aliased('uid', uid)
        self._set_aliased('gid', gid)

        return self
class RouterRaw(_RouterWithForwarders):
"""A pure-TCP load balancer.
Can be used to load balance between the various HTTPS routers.
"""
alias = 'rawrouter'
plugin = alias
def set_connections_params(
self, harakiri=None, timeout_socket=None, retry_delay=None, retry_max=None, use_xclient=None):
"""Sets connection-related parameters.
:param int harakiri: Set gateway harakiri timeout (seconds).
:param int timeout_socket: Node socket timeout (seconds). Default: 60.
:param int retry_delay: Retry connections to dead static nodes after the specified
amount of seconds. Default: 30.
| |
from __future__ import unicode_literals
import frappe
from frappe import scrub
from textwrap import wrap
from frappe.utils import add_to_date, nowdate, nowtime
from datetime import timedelta
from frappe.model.mapper import get_mapped_doc
from frappe.utils import flt
from frappe import _
# from erpnext.accounts.utils import get_fiscal_year, now
from erpnext.setup.doctype.item_group.item_group import get_child_item_groups
@frappe.whitelist()
def create_lead(
    business_name,
    first_name,
    last_name,
    address,
    city,
    state,
    zipcode,
    website,
    email_address,
    telephone_number,
    territory,
    source=None,
    organization_lead=None,
    notes=None,
):
    """Create or update a Lead (plus its billing Address) from web-form data.

    An existing Lead is looked up by email first, then by phone; otherwise a
    new Lead is created. The Address named "<lead_name>-Billing" is upserted
    and linked to the Lead. The transaction is committed after each write.

    :param territory: used only if it exists as a Territory doc, else
        "United States" is substituted.
    :param source: lead source; defaults to "Wholesale Inquiry form".
    :param organization_lead: defaults to 1 when not supplied.
    :param notes: free-text notes; defaults to "".
    """
    # Sanitize optional inputs (`is None` keeps deliberate falsy values like 0).
    if not (territory and frappe.db.exists("Territory", territory)):
        territory = "United States"
    if organization_lead is None:
        organization_lead = 1
    if notes is None:
        notes = ""
    if source is None:
        source = "Wholesale Inquiry form"

    # Hard coded values for this intake channel.
    lead_owner = "<EMAIL>"
    request_type = "Product Enquiry"
    country = "United States"
    status = "Lead"
    company = frappe.db.get_single_value("Global Defaults", "default_company")
    address_type = "Billing"
    contact_by = "<EMAIL>"
    last_followup_date = add_to_date(nowdate(), months=0, days=7)
    first_followup_date = add_to_date(nowdate(), months=0, days=1)

    lead_map = {
        "company_name": business_name,
        "website": website,
        "organization_lead": organization_lead,
        "status": status,
        "request_type": request_type,
        "company": company,
        "lead_owner": lead_owner,
        "territory": territory,
        "source": source,
        "contact_by": contact_by,
        "contact_date": first_followup_date,
        "ends_on": last_followup_date,
        "notes": notes,
    }

    # Check if existing lead: first by email, then by phone.
    lead_name = None
    if email_address:
        lead_name = frappe.db.get_value("Lead", {"email_id": email_address})
        if not lead_name:
            lead_map.update({"email_id": email_address})
    if not lead_name and telephone_number:
        lead_name = frappe.db.get_value("Lead", {"phone": telephone_number})
        if not lead_name:
            lead_map.update({"phone": telephone_number})

    if not lead_name:
        # New lead: scrubbed "<first> <last>" becomes the lead name.
        lead_name = frappe.scrub(first_name) + " " + frappe.scrub(last_name)
        lead_map.update({"lead_name": lead_name})
        lead = frappe.new_doc("Lead")
        lead.update(lead_map)
        lead.insert(ignore_permissions=True)
        print(lead.name, "new lead")
    else:
        # Existing lead: refresh its fields with the submitted data.
        lead = frappe.get_doc("Lead", lead_name)
        lead.update(lead_map)
        lead.save(ignore_permissions=True)
        print(lead.name, "existing lead")
    frappe.db.commit()

    # Split the street address across the two 40-char address lines.
    # BUGFIX: an empty address used to raise IndexError on address_list[0].
    address_list = wrap(address or "", 40)
    print(len(address_list), "len(address_list)")
    if len(address_list) > 1:
        address_line1 = address_list[0] + ","
        address_line2 = address_list[1]
    else:
        address_line1 = address_list[0] if address_list else ""
        address_line2 = None

    address_map = {
        "address_line1": address_line1,
        "address_line2": address_line2,
        "address_type": address_type,
        "city": city,
        "state": state,
        "pincode": zipcode,
        "email_id": email_address,
        "phone": telephone_number,
        "country": country,
    }

    # Upsert the billing address (Frappe names addresses "<lead_name>-<type>").
    address_name = lead_name + "-" + address_type
    if frappe.db.exists("Address", address_name):
        address = frappe.get_doc("Address", address_name)
        address.update(address_map)
        address.save(ignore_permissions=True)
        print(address.name, "address existing")
    else:
        address = frappe.new_doc("Address")
        address.update(address_map)
        address.append(
            "links",
            {
                "link_doctype": "Lead",
                "link_name": lead.name,
                "link_title": lead.name,
            },
        )
        address.insert(ignore_permissions=True)
        print(address.name, "address new")
    frappe.db.commit()
def copy_shipping_details_from_item_to_SI(self, method):
    """Copy shipping fields from the first item's Delivery Note onto this doc.

    Reads shipping_method_za, shipment_tracking_no_za and no_of_boxes_za from
    the linked Delivery Note and mirrors them on the Sales Invoice.
    """
    rows = self.items
    # Nothing to copy when there are no items or no linked Delivery Note.
    if not rows or not rows[0] or not rows[0].delivery_note:
        return
    dn_name = rows[0].delivery_note
    shipping_method, tracking_no, box_count = frappe.db.get_value(
        "Delivery Note",
        dn_name,
        ["shipping_method_za", "shipment_tracking_no_za", "no_of_boxes_za"],
    )
    self.shipping_method_za = shipping_method
    self.shipment_tracking_no_za = tracking_no
    self.no_of_boxes_za = box_count
def calculate_total_tobacco_weight(self, method):
    """Accumulate total_weight of tobacco items into total_tobacco_weight_za.

    An item counts as tobacco when its item_group is "TOBACCO" itself or a
    descendant of the "TOBACCO" Item Group (nested-set lft/rgt containment).
    For a Sales Invoice return the accumulated weight is negated.

    :param method: hook method name (unused).
    """
    self.total_tobacco_weight_za = 0
    for item in self.items:
        if item.item_group == "TOBACCO":
            self.total_tobacco_weight_za += item.total_weight
        else:
            item_group = frappe.get_doc("Item Group", item.item_group)
            # Nested-set ancestor check: yields 'TOBACCO' iff it encloses the
            # item's group (TOBACCO.lft <= group.lft and TOBACCO.rgt >= group.rgt).
            parent_groups = frappe.db.sql(
                """select name from `tabItem Group`
                where lft <= %s and rgt >= %s
                and name = 'TOBACCO'
                order by lft asc""",
                (item_group.lft, item_group.rgt),
                as_list=True,
            )
            if parent_groups:
                parent_tobacco_group = parent_groups[0][0]
                if parent_tobacco_group:
                    self.total_tobacco_weight_za += item.total_weight
    if (
        self.doctype == "Sales Invoice"
        and self.is_return == 1
        and self.total_tobacco_weight_za > 0
    ):
        # Credit notes (returns) carry negative quantities, so flip the sign.
        self.total_tobacco_weight_za = self.total_tobacco_weight_za * -1
def update_delivery_note_workflow_state(self, method):
    """Keep workflow_state in sync with status for Completed/Closed documents.

    Re-opens the workflow state to the current status when a previously
    Closed document moves back to "To Bill".
    """
    status, wf_state = self.status, self.workflow_state
    needs_sync = (
        (status == "Completed" and wf_state != "Completed")
        or (status == "Closed" and wf_state != "Closed")
        or (status == "To Bill" and wf_state == "Closed")
    )
    if needs_sync:
        # Write directly to the DB; the doc may already be submitted.
        self.db_set("workflow_state", status, update_modified=True)
# def delete_connected_stock_entry(self,method):
# if frappe.db.exists("Stock Entry", self.stock_entry_cf):
# frappe.delete_doc("Stock Entry", self.stock_entry_cf)
# frappe.msgprint(_("Stock Entry {0} connected with this Delivery Note is deleted.").format(self.stock_entry_cf))
def delink_connected_stock_entry(self, method):
    """Clear the stock_entry_cf link on this Delivery Note, if one is set."""
    if not self.stock_entry_cf:
        return
    frappe.db.set_value("Delivery Note", self.name, "stock_entry_cf", "")
@frappe.whitelist()
def create_stock_entry(source_name, target_doc=None):
    """Create a Repack Stock Entry breaking master-case items into units.

    For every Delivery Note row whose ordered qty exceeds stock on hand and
    whose Item has a master_case_item_cf configured, one master case is
    consumed (source row) and the equivalent unit quantity is produced
    (target row) in the same warehouse.

    :param source_name: name of the source Delivery Note.
    :param target_doc: unused; kept for the standard mapper signature.
    :return: the saved Stock Entry, or an explanatory message when no row
        was eligible.
    """
    found_item = False
    delivery_note = frappe.get_doc("Delivery Note", source_name)
    stock_entry = frappe.new_doc("Stock Entry")
    stock_entry.stock_entry_type = "Repack"
    stock_entry.posting_date = nowdate()
    stock_entry.posting_time = nowtime()
    for source_item in delivery_note.get("items"):
        master_case_item_cf = frappe.db.get_value(
            "Item", source_item.item_code, "master_case_item_cf"
        )
        # Only repack when demand exceeds stock and a master case exists.
        if source_item.qty <= source_item.actual_qty or not master_case_item_cf:
            continue
        # If the master-case item itself appears on the Delivery Note, make
        # sure breaking one case still leaves enough to fulfil that row.
        master_in_table = False
        master_qty_headroom = 0
        for check_source_item in delivery_note.get("items"):
            if check_source_item.item_code == master_case_item_cf:
                master_in_table = True
                master_qty_headroom = flt(
                    check_source_item.actual_qty - check_source_item.qty
                )
                break
        if master_in_table and master_qty_headroom <= 1:
            continue
        # Consume one master case ...
        master_row = stock_entry.append("items", {})
        master_row.item_code = master_case_item_cf
        master_row.s_warehouse = source_item.warehouse
        master_row.qty = flt(1)
        # ... and produce the equivalent unit quantity.
        unit_row = stock_entry.append("items", {})
        unit_row.item_code = source_item.item_code
        unit_row.t_warehouse = source_item.warehouse
        # BUGFIX: str.find() returns -1 (truthy) when absent and 0 (falsy)
        # when the substring starts the name; use membership tests instead.
        if source_item.uom == "BOX" and "250GM" in source_item.item_name:
            unit_row.qty = flt(24)
        elif source_item.uom == "CARTON" and "50GM" in source_item.item_name:
            unit_row.qty = flt(12)
        # NOTE(review): when neither combination matches, qty stays unset on
        # the produced row - confirm this is intended.
        found_item = True
    if found_item:
        stock_entry.run_method("set_missing_values")
        stock_entry.run_method("calculate_rate_and_amount")
        stock_entry.save()
        # Remember the repack entry on the Delivery Note for later delinking.
        delivery_note.stock_entry_cf = stock_entry.name
        delivery_note.save()
        return stock_entry
    return "There are no eligible items for making stock entry."
@frappe.whitelist()
def make_stock_entry(source_name, target_doc=None):
    """Map a submitted Material Request to a Stock Entry.

    Supports the material_request_type values Material Transfer, Material
    Issue, Customer Provided and the custom Withdrawal Request type. Only
    item rows with ordered_qty < stock_qty are copied.

    :param source_name: name of the Material Request to map from.
    :param target_doc: optional existing target document to merge into.
    :return: the mapped (unsaved) Stock Entry document.
    """
    def update_item(obj, target, source_parent):
        # Remaining (not yet ordered) quantity expressed in the row's UOM;
        # 0 when the row is already fully ordered.
        qty = (
            flt(flt(obj.stock_qty) - flt(obj.ordered_qty)) / target.conversion_factor
            if flt(obj.stock_qty) > flt(obj.ordered_qty)
            else 0
        )
        target.qty = qty
        target.transfer_qty = qty * obj.conversion_factor
        target.conversion_factor = obj.conversion_factor
        # Pick source/target warehouse depending on the request type.
        if (
            source_parent.material_request_type == "Material Transfer"
            or source_parent.material_request_type == "Customer Provided"
        ):
            target.t_warehouse = obj.warehouse
        elif source_parent.material_request_type == "Withdrawal Request":
            # Withdrawals always land in the company-wide default warehouse.
            target.t_warehouse = frappe.db.get_single_value(
                "Stock Settings", "default_warehouse"
            )
        else:
            target.s_warehouse = obj.warehouse
        if source_parent.material_request_type == "Customer Provided":
            # Customer-provided stock carries no valuation of its own.
            target.allow_zero_valuation_rate = 1
    def set_missing_values(source, target):
        # Derive the Stock Entry purpose from the request type; the order of
        # these overrides matters (job card first, then the special types).
        target.purpose = source.material_request_type
        if source.job_card:
            target.purpose = "Material Transfer for Manufacture"
        if source.material_request_type == "Customer Provided":
            target.purpose = "Material Receipt"
        if source.material_request_type == "Withdrawal Request":
            target.purpose = "Material Transfer"
        target.run_method("calculate_rate_and_amount")
        target.set_stock_entry_type()
        target.set_job_card_data()
    doclist = get_mapped_doc(
        "Material Request",
        source_name,
        {
            "Material Request": {
                "doctype": "Stock Entry",
                # Only submitted requests of the supported types may be mapped.
                "validation": {
                    "docstatus": ["=", 1],
                    "material_request_type": [
                        "in",
                        [
                            "Material Transfer",
                            "Material Issue",
                            "Customer Provided",
                            "Withdrawal Request",
                        ],
                    ],
                },
            },
            "Material Request Item": {
                "doctype": "Stock Entry Detail",
                "field_map": {
                    "name": "material_request_item",
                    "parent": "material_request",
                    "uom": "stock_uom",
                },
                "postprocess": update_item,
                # Skip rows that are already fully ordered.
                "condition": lambda doc: doc.ordered_qty < doc.stock_qty,
            },
        },
        target_doc,
        set_missing_values,
    )
    return doclist
# def set_title_for_material_request(self,method):
# if self.material_request_type == 'Withdrawal Request':
# self.title = _('Material Withdrawal INV {0}').format(self.sales_invoice_cf)[:100]
# if self.material_request_type == 'Withdrawal Request':
# self.title = _('Material Withdrawal INV {0}').format(self.sales_invoice_cf)[:100]
@frappe.whitelist()
def stock_entry_calculate_total_tobacoo_weight(doc):
    """Recompute total_tobacco_weight_cf for a Stock Entry and save it.

    Only Material Transfer / Material Receipt / Material Issue entries are
    weighed; for every other purpose the total is reset to 0.

    :param doc: JSON-serialized Stock Entry (only `name` is used).
    :return: 1 on success (whitelisted-endpoint convention).
    """
    doc = frappe._dict(frappe.parse_json(doc))
    tobacco_groups = get_child_item_groups("TOBACCO")
    stock_entry = frappe.get_doc("Stock Entry", doc.name)
    total_weight = 0
    if stock_entry.purpose in ("Material Transfer", "Material Receipt", "Material Issue"):
        for row in stock_entry.get("items"):
            if row.item_group not in tobacco_groups:
                continue
            unit_weight = frappe.db.get_value(
                "Item", row.item_code, "weight_per_unit"
            )
            total_weight += flt(row.qty * unit_weight)
    stock_entry.total_tobacco_weight_cf = total_weight
    stock_entry.save()
    return 1
@frappe.whitelist()
def zomo_sales_invoice_validate(self, method):
    """Sales Invoice validate hook: compute tobacco weight, then copy the
    shipping details from the linked Delivery Note (order matters: the
    weight total is used by downstream tax logic)."""
    calculate_total_tobacco_weight(self, method)
    copy_shipping_details_from_item_to_SI(self, method)
@frappe.whitelist()
def update_repack_rates(doc):
    """Price repacked unit rows from their master-case row's basic_amount.

    For a Repack Stock Entry, each row whose Item declares a
    master_case_item_cf gets basic_rate = master row amount / row qty, so the
    produced units carry the consumed case's value.

    :param doc: JSON-serialized Stock Entry (only `name` is used).
    :return: 1 on success (whitelisted-endpoint convention).
    """
    doc = frappe._dict(frappe.parse_json(doc))
    stock_entry = frappe.get_doc("Stock Entry", doc.name)
    se_items = stock_entry.get("items")
    if stock_entry.purpose == "Repack":
        for item in se_items:
            master_case_item_cf = frappe.db.get_value(
                "Item", item.item_code, "master_case_item_cf"
            )
            if not master_case_item_cf:
                continue
            # BUGFIX: master_rate was referenced unbound (NameError) when the
            # master-case row was absent; resolve it explicitly instead.
            master_rate = None
            for master in se_items:
                if master.item_code == master_case_item_cf:
                    master_rate = master.basic_amount
                    break
            if master_rate is None or not item.qty:
                # No master row on this entry (or zero qty): skip the row
                # rather than crashing with NameError/ZeroDivisionError.
                continue
            item.set_basic_rate_manually = 1
            item.basic_rate = master_rate / item.qty
    stock_entry.run_method("calculate_rate_and_amount")
    stock_entry.save()
    return 1
def set_tobacco_tax(doc, method=None):
    """
    Set tobacco_taxable_total_cf and tobacco_tax_cf
    on Save of Sales Order, Sales Invoice and Delivery Note

    The calculation mode comes from doc.tobacco_tax_collected_cf:
    - "NO": only the taxable total is accumulated, tax stays 0.
    - "YES@SALES": tax is taken from the "Tobacco.Tax*" row of doc.taxes.
    - anything else (e.g. "YES@WEIGHT"/"YES@COST"): values are derived from
      the per-state "US State wise Tobacco Excise" record and the shipped
      tobacco weight (total_tobacco_weight_za, presumably kg - confirm).

    When method == "UPADTE SUBMITTED" (callers must pass this exact,
    misspelled string) the values are also persisted via db_set.
    """
    from frappe.utils.nestedset import get_descendants_of
    # Item groups below "TOBACCO" define which rows count as tobacco items.
    tobacco_items = get_descendants_of("Item Group", "TOBACCO", ignore_permissions=True)
    tobacco_tax_collected_cf = doc.tobacco_tax_collected_cf
    tobacco_taxable_total_cf, tobacco_tax_cf, tobacco_tax_to_be_printed_cf = 0, 0, 0
    # NO
    if tobacco_tax_collected_cf == "NO":
        tobacco_taxable_total_cf = sum(
            d.base_net_amount for d in doc.items if d.item_group in tobacco_items
        )
    # YES@SALES
    elif doc.tobacco_tax_collected_cf == "YES@SALES":
        # Tobacco Taxable Total = sum(base_net_amount(company. currency)) for tobacco items
        tobacco_taxable_total_cf = sum(
            d.base_net_amount for d in doc.items if d.item_group in tobacco_items
        )
        # Tax amount comes from the matching "Tobacco.Tax*" charges row.
        for d in doc.taxes:
            if d.account_head.startswith("Tobacco.Tax"):
                tobacco_tax_cf = d.get("base_tax_amount", 0)
    else:
        try:
            statewise_excise = frappe.get_doc(
                "US State wise Tobacco Excise", doc.tobacco_tax_reporting_state_cf
            )
            #tobacco_tax_to_be_printed_cf = statewise_excise.tobacco_tax_to_be_printed
            # YES@WEIGHT
            if doc.tobacco_tax_collected_cf == "YES@WEIGHT":
                # Tobacco Taxable Total = sum(base_net_amount(company. currency)) for tobacco items
                tobacco_taxable_total_cf = sum(
                    d.base_net_amount for d in doc.items if d.item_group in tobacco_items
                )
                # weight * 1000 / 50 converts to 50g units - rate is per 50g
                tobacco_tax_cf = (
                    doc.total_tobacco_weight_za * 1000 / 50
                ) * statewise_excise.get("tax_per_50gm_weight")
            # YES@COST
            elif doc.tobacco_tax_collected_cf == "YES@COST":
                tobacco_taxable_total_cf = (
                    doc.total_tobacco_weight_za * 1000 / 50
                ) * statewise_excise.get("cost_for_50gm")
            # Set Tobacco tax as per Sales Taxes and Charges Template in US State wise Tobacco Excise
            # NOTE(review): when a template rate is found it overwrites the
            # weight-based tobacco_tax_cf computed above - confirm intended.
            tobacco_tax_rate = frappe.db.sql(
                """
                select
                    stc.rate
                from
                    `tabSales Taxes and Charges` stc
                where
                    stc.parenttype = 'Sales Taxes and Charges Template'
                    and stc.parent = %s
                    and stc.account_head like 'Tobacco.Tax%%'
                order by
                    stc.idx limit 1
                """,
                (statewise_excise.sales_taxes_and_charges_template),
            )
            if tobacco_tax_rate:
                tobacco_tax_cf = (
                    tobacco_taxable_total_cf * flt(tobacco_tax_rate[0][0]) * 0.01
                )
        except frappe.exceptions.DoesNotExistError:
            # Best effort: no excise record for this state, leave values at 0.
            print(
                "No 'US State wise Tobacco Excise'. Skipping %s"
                % doc.tobacco_tax_reporting_state_cf
            )
    doc.tobacco_taxable_total_cf = tobacco_taxable_total_cf
    doc.tobacco_tax_cf = tobacco_tax_cf
    #doc.tobacco_tax_to_be_printed_cf = tobacco_tax_to_be_printed_cf
    if method == "UPADTE SUBMITTED":
        # Persist directly since the document is already submitted.
        doc.db_set("tobacco_taxable_total_cf", tobacco_taxable_total_cf)
        doc.db_set("tobacco_tax_cf", tobacco_tax_cf)
        #doc.db_set("tobacco_tax_to_be_printed_cf", tobacco_tax_to_be_printed_cf)
    print(
        "Setting tax for %s" % doc.name,
        # "tobacco_taxable_total_cf",
        # doc.tobacco_taxable_total_cf,
        "tobacco_tax_cf",
        doc.tobacco_tax_cf,
        # "tobacco_tax_to_be_printed_cf",
        # doc.tobacco_tax_to_be_printed_cf,
    )
def update_sales_invoice_tobacco_tax():
"""method to set taxes in submitted invoices 2021-01-01 to 2021-03-31"""
frappe.db.sql(
"""
update `tabSales Invoice` si
inner join | |
<reponame>MJJojo97/openslides-backend<gh_stars>0
import builtins
import re
from collections import defaultdict
from copy import deepcopy
from typing import Any, Dict, List, Literal, Optional, Tuple
from datastore.shared.postgresql_backend import SqlQueryHelper
from datastore.shared.util import DeletedModelsBehaviour
from ...shared.exceptions import DatastoreException
from ...shared.filters import And, Filter, FilterOperator
from ...shared.interfaces.logging import LoggingModule
from ...shared.patterns import Collection, FullQualifiedId
from ...shared.typing import DeletedModel, ModelMap
from .adapter import DatastoreAdapter
from .commands import GetManyRequest
from .handle_datastore_errors import raise_datastore_error
from .interface import Engine, LockResult, PartialModel
MODEL_FIELD_SQL = "data->>%s"
COMPARISON_VALUE_SQL = "%s::text"
MappedFieldsPerFqid = Dict[FullQualifiedId, List[str]]
class ExtendedDatastoreAdapter(DatastoreAdapter):
"""
Subclass of the datastore adapter to extend the functions with the usage of the changed_models.
Restrictions:
- get_deleted_models only works one way with the changed_models: if the model was not deleted
in the datastore, but is deleted in the changed_models. The other way around does not work
since a deleted model in the changed_models is marked via DeletedModel() and does not store
any data.
- all filter-based requests may take two calls to the datastore to succeed. The first call is
always necessary, since the changed_models are never complete. If, however, a model in the
changed_models matches the filter which it did not in the database AND some fields are
missing in the changed_models which are needed through the mapped_fields, a second request
is needed to fetch the missing fields. This can be circumvented by always storing (more or
less) "full" models in the changed_data, meaning all relevant fields which are requested in
future calls are present. This is the case for most applications in the backend.
- filters are only evaluated separately on the changed_models and the datastore. If, for
example, a model in the datastore does not fit the filter, but through a change in the
changed_models would fit it, BUT does not fit the filter from the changed_models alone, it
is not found. Example:
datastore content: {"f": 1, "g": 1}
changed_models: {"f": 2}
filter: f = 2 and g = 1
This also applies in the reverse direction: If the datastore content of a model matches the
filter, but it is invalidated through a change in the changed_models, it is still found and
returned with the new fields from the changed_models. This may lead to unexpected results by
including a model in the results which does not fit the given filter. This could be
circumvented by applying the filter again after building the result and removing all models
which do not fit it anymore.
For performance as well as practical reasons, this is not implemented. In practice, filters
are only applied to "static" fields which do not changed during a request, e.g.
`meeting_id`, `list_of_speakers_id` etc. So this should not be a problem.
"""
changed_models: ModelMap
def __init__(self, engine: Engine, logging: LoggingModule) -> None:
    """Initialize the adapter with an empty changed_models cache."""
    super().__init__(engine, logging)
    # defaultdict(dict) so apply_changed_model can update() unseen fqids directly
    self.changed_models = defaultdict(dict)
def apply_changed_model(
    self, fqid: FullQualifiedId, instance: PartialModel, replace: bool = False
) -> None:
    """
    Adds or replaces the model identified by fqid in the changed_models.
    Automatically adds missing id field.
    """
    # A DeletedModel marker always replaces whatever was cached before.
    if replace or isinstance(instance, DeletedModel):
        self.changed_models[fqid] = instance
    else:
        cached = self.changed_models[fqid]
        cached.update(instance)
        cached.setdefault("id", fqid.id)
def get(
    self,
    fqid: FullQualifiedId,
    mapped_fields: List[str],
    position: int = None,
    get_deleted_models: DeletedModelsBehaviour = DeletedModelsBehaviour.NO_DELETED,
    lock_result: LockResult = True,
    use_changed_models: bool = True,
    raise_exception: bool = True,
) -> PartialModel:
    """
    Get the given model.
    changed_models serves as a kind of cache layer of all recently done
    changes - all updates to any model during the action are saved in there.
    The parameter use_changed_models defines whether they are searched or not.
    Position-based fetching is incompatible with use_changed_models.
    """
    if use_changed_models:
        if position:
            raise DatastoreException(
                "Position-based fetching is not possible with changed_models"
            )
        mapped_fields_per_fqid = {fqid: mapped_fields}
        # fetch results from changed models
        results, missing_fields_per_fqid = self._get_many_from_changed_models(
            mapped_fields_per_fqid
        )
        changed_model = results.get(fqid.collection, {}).get(fqid.id, {})
        if not missing_fields_per_fqid:
            # nothing to do, we've got the full model from the changed_models
            return changed_model
        else:
            # overwrite params and fetch only the missing fields from the db
            mapped_fields = missing_fields_per_fqid[fqid]
            # only raise an exception if the model is not in the changed_models at all
            raise_exception = raise_exception and fqid not in self.changed_models
    try:
        if self.is_new(fqid):
            # if the model is new, we know it does not exist in the datastore and can directly throw
            # an exception or return an empty result
            raise_datastore_error({"error": {"fqid": fqid}})
        else:
            result = super().get(
                fqid,
                mapped_fields,
                position,
                get_deleted_models,
                lock_result,
            )
    except DatastoreException:
        if raise_exception:
            raise
        else:
            # NOTE(review): when the model exists only partially in
            # changed_models and the datastore fetch fails, the partial
            # changed_model is discarded and {} is returned - confirm intended.
            return {}
    if use_changed_models:
        # fields from changed_models take precedence over datastore values
        result.update(changed_model)
    return result
def get_many(
    self,
    get_many_requests: List[GetManyRequest],
    position: int = None,
    get_deleted_models: DeletedModelsBehaviour = DeletedModelsBehaviour.NO_DELETED,
    lock_result: bool = True,
    use_changed_models: bool = True,
) -> Dict[Collection, Dict[int, PartialModel]]:
    """
    Get multiple models, optionally merging in pending changed_models.

    With use_changed_models each requested fqid is first answered from the
    changed_models cache; only fields still missing are then fetched from the
    datastore in a second request. Position-based fetching is not supported
    in that mode.
    """
    if use_changed_models:
        if position:
            raise DatastoreException(
                "Position-based fetching is not possible with changed_models"
            )
        # flatten the requests into one mapped-fields list per fqid
        mapped_fields_per_fqid = defaultdict(list)
        for request in get_many_requests:
            if not request.mapped_fields:
                raise DatastoreException("No mapped fields given")
            for id in request.ids:
                fqid = FullQualifiedId(request.collection, id)
                mapped_fields_per_fqid[fqid].extend(list(request.mapped_fields))
        # fetch results from changed models
        results, missing_fields_per_fqid = self._get_many_from_changed_models(
            mapped_fields_per_fqid
        )
        # fetch missing fields in the changed_models from the db and merge into the results
        if missing_fields_per_fqid:
            missing_results = self._fetch_missing_fields_from_datastore(
                missing_fields_per_fqid, lock_result
            )
            for collection, models in missing_results.items():
                for id, model in models.items():
                    # we can just update the model with the db fields since they must not have been
                    # present previously
                    results.setdefault(collection, {}).setdefault(id, {}).update(
                        model
                    )
    else:
        results = super().get_many(
            get_many_requests, None, get_deleted_models, lock_result
        )
    return results
def filter(
    self,
    collection: Collection,
    filter: Filter,
    mapped_fields: List[str],
    get_deleted_models: DeletedModelsBehaviour = DeletedModelsBehaviour.NO_DELETED,
    lock_result: bool = True,
    use_changed_models: bool = True,
) -> Dict[int, PartialModel]:
    """
    Filter the collection, merging datastore results with changed_models.

    See the class docstring for the restrictions: the filter is evaluated
    separately on the datastore and on the changed_models, which can miss or
    over-include models whose match status depends on both sources.
    """
    results = super().filter(
        collection, filter, mapped_fields, get_deleted_models, lock_result
    )
    if use_changed_models:
        # apply the changes from the changed_models to the db result
        self._apply_changed_model_updates(
            collection, results, mapped_fields, get_deleted_models
        )
        # find results which are only present in the changed_models
        changed_results = self._filter_changed_models(
            collection, filter, mapped_fields
        )
        # apply these results and find fields which are missing in the changed_models
        missing_fields_per_fqid = self._update_results_and_get_missing_fields(
            collection, results, changed_results, mapped_fields
        )
        # fetch missing fields from the db and merge both results
        if missing_fields_per_fqid:
            missing_results = self._fetch_missing_fields_from_datastore(
                missing_fields_per_fqid, lock_result
            )
            # NOTE(review): assumes the datastore answer always contains this
            # collection key; a KeyError would occur otherwise - confirm.
            for id, model in missing_results[collection].items():
                # we can just update the model with the db fields since they must not have been
                # present previously
                results.setdefault(id, {}).update(model)
    return results
def exists(
    self,
    collection: Collection,
    filter: Filter,
    get_deleted_models: DeletedModelsBehaviour = DeletedModelsBehaviour.NO_DELETED,
    lock_result: bool = True,
    use_changed_models: bool = True,
) -> bool:
    """Return whether at least one model of the collection matches the filter."""
    if use_changed_models:
        # Delegate to count(), which merges changed_models into the result.
        return self.count(collection, filter, get_deleted_models, lock_result) > 0
    return super().exists(collection, filter, get_deleted_models, lock_result)
def count(
    self,
    collection: Collection,
    filter: Filter,
    get_deleted_models: DeletedModelsBehaviour = DeletedModelsBehaviour.NO_DELETED,
    lock_result: bool = True,
    use_changed_models: bool = True,
) -> int:
    """Return the number of models matching the filter, honoring changed_models."""
    if use_changed_models:
        # Fetch only ids via the merging filter() and count them.
        matching = self.filter(
            collection, filter, ["id"], get_deleted_models, lock_result
        )
        return len(matching)
    return super().count(collection, filter, get_deleted_models, lock_result)
def min(
    self,
    collection: Collection,
    filter: Filter,
    field: str,
    get_deleted_models: DeletedModelsBehaviour = DeletedModelsBehaviour.NO_DELETED,
    lock_result: bool = True,
    use_changed_models: bool = True,
) -> Optional[int]:
    """Return the minimum value of `field` over all matching models, or None."""
    args = (collection, filter, field, get_deleted_models, lock_result, use_changed_models)
    return self._extended_minmax(*args, "min")
def max(
    self,
    collection: Collection,
    filter: Filter,
    field: str,
    get_deleted_models: DeletedModelsBehaviour = DeletedModelsBehaviour.NO_DELETED,
    lock_result: bool = True,
    use_changed_models: bool = True,
) -> Optional[int]:
    """Return the maximum value of `field` over all matching models, or None."""
    args = (collection, filter, field, get_deleted_models, lock_result, use_changed_models)
    return self._extended_minmax(*args, "max")
def _extended_minmax(
    self,
    collection: Collection,
    filter: Filter,
    field: str,
    get_deleted_models: DeletedModelsBehaviour,
    lock_result: bool,
    use_changed_models: bool,
    mode: Literal["min", "max"],
) -> Optional[int]:
    """
    Shared implementation of min() and max().

    Without changed_models the aggregate is delegated to the datastore.
    Otherwise all matching models with a non-None field value are fetched
    via the merging filter() and the builtin min/max is applied locally.
    Returns None when no comparable value exists.
    """
    if not use_changed_models:
        # getattr on super() dispatches to the datastore's min or max
        return getattr(super(), mode)(
            collection, filter, field, get_deleted_models, lock_result
        )
    else:
        # restrict to models which actually have the field set
        full_filter = And(filter, FilterOperator(field, "!=", None))
        models = self.filter(
            collection, full_filter, [field], get_deleted_models, lock_result
        )
        # _comparable weeds out values which cannot be compared to an int
        comparable_results = [
            model[field]
            for model in models.values()
            if self._comparable(model.get(field), 0)
        ]
        if comparable_results:
            # builtins.min / builtins.max over the collected values
            return getattr(builtins, mode)(comparable_results)
        else:
            return None
def is_deleted(self, fqid: FullQualifiedId) -> bool:
    """Return True if the model was marked deleted in the changed_models."""
    return isinstance(self.changed_models.get(fqid), DeletedModel)
def is_new(self, fqid: FullQualifiedId) -> bool:
    """Return True if the model was newly created during this request
    (marked by a truthy "meta_new" flag in the changed_models)."""
    return self.changed_models.get(fqid, {}).get("meta_new") is True
def reset(self) -> None:
    """Reset the adapter and discard all accumulated changed_models."""
    super().reset()
    self.changed_models.clear()
def _filter_changed_models(
self,
collection: Collection,
filter: Filter,
mapped_fields: List[str],
) -> Dict[int, Dict[str, Any]]:
"""
Uses the datastore's SqlQueryHelper to build an SQL query for the given filter, transforms it into valid python
code and then executes it.
"""
# Build sql query for this filter. The arguments array contains the replacements | |
imu0_data.vel[ts_i]
ba_i = np.array([0.0, 0.0, 0.0])
bg_i = np.array([0.0, 0.0, 0.0])
sb_i = speed_biases_setup(ts_i, vel_i, ba_i, bg_i)
sb_i_id = graph.add_param(sb_i)
sb_est.append(sb_i_id)
for ts_idx in range(start_idx + window_size, end_idx, window_size):
# -- Pose j
ts_j = imu0_data.timestamps[ts_idx]
T_WS_j = imu0_data.poses[ts_j]
# ---- Pertrub pose j
trans_rand = np.random.rand(3)
rvec_rand = np.random.rand(3) * 0.01
T_WS_j = tf_update(T_WS_j, np.block([*trans_rand, *rvec_rand]))
# ---- Add to factor graph
pose_j = pose_setup(ts_j, T_WS_j)
pose_j_id = graph.add_param(pose_j)
# -- Speed and biases j
vel_j = imu0_data.vel[ts_j]
ba_j = np.array([0.0, 0.0, 0.0])
bg_j = np.array([0.0, 0.0, 0.0])
sb_j = speed_biases_setup(ts_j, vel_j, ba_j, bg_j)
sb_j_id = graph.add_param(sb_j)
# ---- Keep track of initial and estimate pose
poses_init.append(T_WS_j)
poses_est.append(pose_j_id)
sb_est.append(sb_j_id)
# -- Imu Factor
param_ids = [pose_i_id, sb_i_id, pose_j_id, sb_j_id]
imu_buf = imu0_data.form_imu_buffer(ts_idx - window_size, ts_idx)
factor = ImuFactor(param_ids, imu_params, imu_buf, sb_i)
graph.add_factor(factor)
# -- Update
pose_i_id = pose_j_id
pose_i = pose_j
sb_i_id = sb_j_id
sb_i = sb_j
# Solve
debug = False
# debug = True
# prof = profile_start()
graph.solve(debug)
# profile_stop(prof)
if debug:
pos_init = np.array([tf_trans(T) for T in poses_init])
pos_est = []
for pose_pid in poses_est:
pose = graph.params[pose_pid]
pos_est.append(tf_trans(pose2tf(pose.param)))
pos_est = np.array(pos_est)
sb_est = [graph.params[pid] for pid in sb_est]
sb_ts0 = sb_est[0].ts
sb_time = np.array([ts2sec(sb.ts - sb_ts0) for sb in sb_est])
vel_est = np.array([sb.param[0:3] for sb in sb_est])
ba_est = np.array([sb.param[3:6] for sb in sb_est])
bg_est = np.array([sb.param[6:9] for sb in sb_est])
plt.figure()
plt.subplot(411)
plt.plot(pos_init[:, 0], pos_init[:, 1], 'r-')
plt.plot(pos_est[:, 0], pos_est[:, 1], 'b-')
plt.xlabel("Displacement [m]")
plt.ylabel("Displacement [m]")
plt.subplot(412)
plt.plot(sb_time, vel_est[:, 0], 'r-')
plt.plot(sb_time, vel_est[:, 1], 'g-')
plt.plot(sb_time, vel_est[:, 2], 'b-')
plt.subplot(413)
plt.plot(sb_time, ba_est[:, 0], 'r-')
plt.plot(sb_time, ba_est[:, 1], 'g-')
plt.plot(sb_time, ba_est[:, 2], 'b-')
plt.subplot(414)
plt.plot(sb_time, bg_est[:, 0], 'r-')
plt.plot(sb_time, bg_est[:, 1], 'g-')
plt.plot(sb_time, bg_est[:, 2], 'b-')
plt.show()
@unittest.skip("")
def test_factor_graph_solve_vio(self):
    """ Test solving a visual inertial odometry problem """
    # Imu params
    noise_acc = 0.08  # accelerometer measurement noise stddev.
    noise_gyr = 0.004  # gyroscope measurement noise stddev.
    noise_ba = 0.00004  # accelerometer bias random work noise stddev.
    noise_bg = 2.0e-6  # gyroscope bias random work noise stddev.
    imu_params = ImuParams(noise_acc, noise_gyr, noise_ba, noise_bg)
    # Setup factor graph
    feature_tracker = SimFeatureTracker()
    tracker = Tracker(feature_tracker)
    # -- Set initial pose from the first simulated IMU pose
    ts0 = self.sim_data.imu0_data.timestamps[0]
    T_WB = self.sim_data.imu0_data.poses[ts0]
    tracker.set_initial_pose(T_WB)
    # -- Add imu
    tracker.add_imu(imu_params)
    # -- Add cam0
    cam0_idx = 0
    cam0_data = self.sim_data.mcam_data[cam0_idx]
    cam0_params = cam0_data.camera
    cam0_exts = extrinsics_setup(self.sim_data.T_BC0)
    tracker.add_camera(cam0_idx, cam0_params, cam0_exts)
    # -- Add cam1
    cam1_idx = 1
    cam1_data = self.sim_data.mcam_data[cam1_idx]
    cam1_params = cam1_data.camera
    cam1_exts = extrinsics_setup(self.sim_data.T_BC1)
    tracker.add_camera(cam1_idx, cam1_params, cam1_exts)
    # -- Add camera overlap
    tracker.add_overlap(cam0_idx, cam1_idx)
    # -- Loop through simulation data, replaying IMU and camera events
    mcam_buf = MultiCameraBuffer(2)
    for ts in self.sim_data.timeline.get_timestamps():
        for event in self.sim_data.timeline.get_events(ts):
            if isinstance(event, ImuEvent):
                tracker.inertial_callback(event.ts, event.acc, event.gyr)
            elif isinstance(event, CameraEvent):
                # buffer frames until both views of the stereo pair arrived
                mcam_buf.add(ts, event.cam_idx, event.image)
                if mcam_buf.ready():
                    tracker.vision_callback(ts, mcam_buf.get_data())
                    mcam_buf.reset()
class TestFeatureTracking(unittest.TestCase):
    """Tests for the standalone feature detection and tracking helpers."""

    @classmethod
    def setUpClass(cls):
        # Load the EuRoC dataset once for the whole test class.
        super().setUpClass()
        cls.dataset = EurocDataset(euroc_data_path)

    def setUp(self):
        """Grab a stereo image pair (frame 800) from the dataset."""
        self.dataset = TestFeatureTracking.dataset
        stamp = self.dataset.cam0_data.timestamps[800]
        path0 = self.dataset.cam0_data.image_paths[stamp]
        path1 = self.dataset.cam1_data.image_paths[stamp]
        self.img0 = cv2.imread(path0, cv2.IMREAD_GRAYSCALE)
        self.img1 = cv2.imread(path1, cv2.IMREAD_GRAYSCALE)

    def test_spread_keypoints(self):
        """ Test spread_keypoints() """
        fast = cv2.FastFeatureDetector_create(threshold=50)
        detected = grid_detect(fast, self.img0, optflow_mode=True, debug=False)
        spread = spread_keypoints(self.img0, detected, 20, debug=False)
        self.assertTrue(len(spread))

    def test_feature_grid_cell_index(self):
        """ Test FeatureGrid.grid_cell_index() """
        # Corner keypoints should land in the four corner cells of a 4x4 grid.
        image_shape = (280, 320)
        corners = [[0, 0], [320, 0], [0, 280], [320, 280]]
        grid = FeatureGrid(4, 4, image_shape, corners)
        for cell_idx in (0, 3, 12, 15):
            self.assertEqual(grid.cell[cell_idx], 1)

    def test_feature_grid_count(self):
        """ Test FeatureGrid.count() """
        image_shape = (280, 320)
        corners = [[0, 0], [320, 0], [0, 280], [320, 280]]
        grid = FeatureGrid(4, 4, image_shape, corners)
        for cell_idx in (0, 3, 12, 15):
            self.assertEqual(grid.count(cell_idx), 1)

    def test_grid_detect(self):
        """ Test grid_detect() """
        fast = cv2.FastFeatureDetector_create(threshold=50)
        keypoints = grid_detect(fast, self.img0, optflow_mode=True, debug=False)
        self.assertTrue(len(keypoints) > 0)

    def test_optflow_track(self):
        """ Test optflow_track() """
        # Detect ORB keypoints, then track them from img0 into img1.
        orb = cv2.ORB_create(nfeatures=100)
        keypoints, descriptors = grid_detect(orb, self.img0)
        self.assertTrue(len(keypoints) == len(descriptors))
        pts_i = np.array([kp.pt for kp in keypoints], dtype=np.float32)
        pts_i, pts_j, inliers = optflow_track(
            self.img0, self.img1, pts_i, debug=False
        )
        self.assertTrue(len(pts_i) == len(pts_j))
        self.assertTrue(len(pts_i) == len(inliers))
class TestFeatureTracker(unittest.TestCase):
""" Test FeatureTracker """
@classmethod
def setUpClass(cls):
    # Load the EuRoC dataset once for all tests in this class.
    super(TestFeatureTracker, cls).setUpClass()
    cls.dataset = EurocDataset(euroc_data_path)
def setUp(self):
# Setup test images
self.dataset = TestFeatureTracker.dataset
ts = self.dataset.cam0_data.timestamps[0]
img0_path = self.dataset.cam0_data.image_paths[ts]
img1_path = self.dataset.cam1_data.image_paths[ts]
self.img0 = cv2.imread(img0_path, cv2.IMREAD_GRAYSCALE)
self.img1 = cv2.imread(img1_path, cv2.IMREAD_GRAYSCALE)
# Setup cameras
# -- cam0
res = self.dataset.cam0_data.config.resolution
proj_params = self.dataset.cam0_data.config.intrinsics
dist_params = self.dataset.cam0_data.config.distortion_coefficients
proj_model = "pinhole"
dist_model = "radtan4"
params = np.block([*proj_params, *dist_params])
cam0 = camera_params_setup(0, res, proj_model, dist_model, params)
# -- cam1
res = self.dataset.cam1_data.config.resolution
proj_params = self.dataset.cam1_data.config.intrinsics
dist_params = self.dataset.cam1_data.config.distortion_coefficients
proj_model = "pinhole"
dist_model = "radtan4"
params = np.block([*proj_params, *dist_params])
cam1 = camera_params_setup(1, res, proj_model, dist_model, params)
# Setup camera extrinsics
# -- cam0
T_BC0 = self.dataset.cam0_data.config.T_BS
cam0_exts = extrinsics_setup(T_BC0)
# -- cam1
T_BC1 = self.dataset.cam1_data.config.T_BS
cam1_exts = extrinsics_setup(T_BC1)
# Setup feature tracker
self.feature_tracker = FeatureTracker()
self.feature_tracker.add_camera(0, cam0, cam0_exts)
self.feature_tracker.add_camera(1, cam1, cam1_exts)
self.feature_tracker.add_overlap(0, 1)
def test_detect(self):
""" Test FeatureTracker._detect() """
# Load and detect features from single image
kps = self.feature_tracker._detect(self.img0)
self.assertTrue(len(kps) > 0)
def test_detect_overlaps(self):
""" Test FeatureTracker._detect_overlaps() """
debug = False
# debug = True
# Feed camera images to feature tracker
mcam_imgs = {0: self.img0, 1: self.img1}
self.feature_tracker._detect_overlaps(mcam_imgs)
# Assert
data_i = self.feature_tracker.cam_data[0]
data_j = self.feature_tracker.cam_data[1]
kps_i = data_i.keypoints
kps_j = data_j.keypoints
overlapping_ids = self.feature_tracker.feature_overlaps
self.assertTrue(len(kps_i) == len(kps_j))
self.assertTrue(len(kps_i) == len(overlapping_ids))
# Visualize
for cam_i, overlaps in self.feature_tracker.cam_overlaps.items():
cam_j = overlaps[0]
img_i = mcam_imgs[cam_i]
img_j = mcam_imgs[cam_j]
data_i = self.feature_tracker.cam_data[cam_i]
data_j = self.feature_tracker.cam_data[cam_j]
kps_i = data_i.keypoints
kps_j = data_j.keypoints
# viz = draw_matches(img_i, img_j, kps_i, kps_j)
matches = []
for i in range(len(kps_i)):
matches.append(cv2.DMatch(i, i, 0))
viz = cv2.drawMatches(img_i, kps_i, img_j, kps_j, matches, None)
if debug:
cv2.imshow('viz', viz)
cv2.waitKey(0)
def test_detect_nonoverlaps(self):
""" Test FeatureTracker._detect_nonoverlaps() """
# Feed camera images to feature tracker
mcam_imgs = {0: self.img0, 1: self.img1}
self.feature_tracker._detect_nonoverlaps(mcam_imgs)
# Visualize
for cam_i, overlaps in self.feature_tracker.cam_overlaps.items():
cam_j = overlaps[0]
img_i = mcam_imgs[cam_i]
img_j = mcam_imgs[cam_j]
data_i = self.feature_tracker.cam_data[cam_i]
data_j = self.feature_tracker.cam_data[cam_j]
kps_i = data_i.keypoints
kps_j = data_j.keypoints
viz_i = cv2.drawKeypoints(img_i, kps_i, None)
viz_j = cv2.drawKeypoints(img_j, kps_j, None)
viz = cv2.hconcat([viz_i, viz_j])
debug = False
# debug = True
if debug:
cv2.imshow('viz', viz)
cv2.waitKey(0)
def test_detect_new(self):
""" Test FeatureTracker.detect_new() """
mcam_imgs = {0: self.img0, 1: self.img1}
self.feature_tracker._detect_new(mcam_imgs)
ft_data = self.feature_tracker.cam_data
viz = visualize_tracking(ft_data)
debug = False
# debug = True
if debug:
cv2.imshow('viz', viz)
cv2.waitKey(0)
def test_update(self):
""" Test FeatureTracker.update() """
for ts in self.dataset.cam0_data.timestamps[1000:1200]:
# for ts in self.dataset.cam0_data.timestamps:
# Load images
img0_path = self.dataset.cam0_data.image_paths[ts]
img1_path = self.dataset.cam1_data.image_paths[ts]
img0 = cv2.imread(img0_path, cv2.IMREAD_GRAYSCALE)
img1 = cv2.imread(img1_path, cv2.IMREAD_GRAYSCALE)
# Feed camera images to feature tracker
mcam_imgs = {0: img0, 1: img1}
ft_data = self.feature_tracker.update(ts, mcam_imgs)
# Visualize
debug = False
# debug = True
if debug:
sys.stdout.flush()
viz = visualize_tracking(ft_data)
cv2.imshow('viz', viz)
if cv2.waitKey(1) == ord('q'):
break
cv2.destroyAllWindows()
class TestTracker(unittest.TestCase):
""" Test Tracker """
@classmethod
def setUpClass(cls):
super(TestTracker, cls).setUpClass()
# Load dataset
cls.dataset = EurocDataset(euroc_data_path)
ts0 = cls.dataset.cam0_data.timestamps[0]
cls.img0 = cls.dataset.get_camera_image(0, ts0)
cls.img1 = cls.dataset.get_camera_image(1, ts0)
# Imu params
noise_acc = 0.08 # accelerometer measurement noise stddev.
noise_gyr = 0.004 # gyroscope measurement noise stddev.
noise_ba = 0.00004 # accelerometer bias random work noise stddev.
noise_bg = 2.0e-6 # gyroscope bias random work noise stddev.
cls.imu_params = ImuParams(noise_acc, noise_gyr, noise_ba, noise_bg)
# Setup cameras
# -- cam0
res = cls.dataset.cam0_data.config.resolution
proj_params = cls.dataset.cam0_data.config.intrinsics
dist_params = cls.dataset.cam0_data.config.distortion_coefficients
proj_model = "pinhole"
dist_model = "radtan4"
params = np.block([*proj_params, *dist_params])
cls.cam0 = camera_params_setup(0, res, proj_model, dist_model, params)
cls.cam0.fix = True
# -- cam1
res = cls.dataset.cam1_data.config.resolution
proj_params = cls.dataset.cam1_data.config.intrinsics
dist_params = cls.dataset.cam1_data.config.distortion_coefficients
proj_model = | |
# <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Wed May 8 17:57:09 2019
@author: xuc1
"""
import io
import os
import re
import zlib
from itertools import compress
import numpy as np
import pandas as pd
import yaml
from .utils.misc import XY2D
# Load the Createc global constants (file-format offsets, header keys, etc.)
# from the YAML file shipped alongside this module. `cgc` is used throughout
# this module as a read-only configuration dict.
this_dir = os.path.dirname(__file__)
cgc_file = os.path.join(this_dir, 'Createc_global_const.yaml')
with open(cgc_file, 'rt') as f:
    cgc = yaml.safe_load(f.read())
class GENERIC_FILE:
    """
    Generic file class, common for .dat, .vert files etc.

    Parameters
    ----------
    file_path : str
        Full file path

    Returns
    -------
    generic_file : GENERIC_FILE
    """

    def __init__(self, file_path):
        # Raw text lines of the file; populated by subclasses (e.g. VERT_SPEC)
        self._line_list = None
        self.fp = file_path
        # Raw header key -> value strings parsed from the file
        self.meta = dict()

    def _read_binary(self):
        """
        Open file in raw binary format

        Returns
        -------
        _binary : bin
            a binary stream of the entire file
        """
        with open(self.fp, 'rb') as f:
            _binary = f.read()
        return _binary

    def _bin2meta_dict(self, start=0, end=cgc['g_file_meta_binary_len']):
        """
        Convert meta binary to meta info using ansi encoding, filling out the meta dictionary
        Here ansi means Windows-1252 extended ascii code page CP-1252
        prerequisite: _binary stream

        Parameters
        ----------
        start : int
            Start position
        end : int
            End position (default: header length from the global constants;
            note the default is evaluated once, at class-definition time)

        Returns
        -------
        None : None
        """
        meta_list = self._binary[start:end].decode('cp1252').split('\n')
        for line in meta_list:
            temp = line.split('=')
            if len(temp) == 2:
                # [:-1] drops the trailing '\r' left over from the '\n' split
                self.meta[temp[0]] = temp[1][:-1]

    def _extracted_meta(self):
        """
        Assign meta data to easily readable properties
        One can expand these at will; use meta_keys() (where available) to see
        what keys exist in self.meta

        Returns
        -------
        None : None
            it just populates all the self.properties
        """
        self.file_version = self.meta['file_version']
        # Keep only alphanumerics (strips stray whitespace/control characters)
        self.file_version = ''.join(e for e in self.file_version if e.isalnum())
        self.xPixel = int(self.meta['Num.X / Num.X'])
        self.yPixel = int(self.meta['Num.Y / Num.Y'])
        self.channels = int(self.meta['Channels / Channels'])
        self.ch_zoff = float(self.meta['CHModeZoff / CHModeZoff'])
        self.chmode = int(self.meta['CHMode / CHMode'])
        self.rotation = float(self.meta['Rotation / Rotation'])
        self.ddeltaX = int(self.meta['DX_DIV_DDelta-X / DX/DDeltaX'])
        self.deltaX_dac = int(self.meta['Delta X / Delta X [Dac]'])
        self.channels_code = self.meta['Channelselectval / Channelselectval']

    def _file2meta_dict(self):
        """
        Not in use
        Open .dat file with ascii encoding, read meta data directly from the
        .dat file, fill out the meta dict

        Returns
        -------
        None : None
        """
        with open(self.fp, 'r') as f:
            for i in range(cgc['g_file_meta_total_lines']):
                temp = f.readline().split('=')
                if len(temp) == 2:
                    # [:-1] drops the trailing newline character
                    self.meta[temp[0]] = temp[1][:-1]

    def _line_list2meta_dict(self, start, end):
        """
        Fill the self.meta dict from the line list.
        prerequisite: self._line_list

        Parameters
        ----------
        start : int
            First line index to parse
        end : int
            One past the last line index to parse

        Returns
        -------
        None : None
            It just fills out the self.meta dict.
        """
        # The very first line of the file is the format/version string
        self.meta['file_version'] = self._line_list[0]
        for l in self._line_list[start:end]:
            temp = l.split('\n')[0].split('=')
            if len(temp) == 2:
                self.meta[temp[0]] = temp[1]

    def _spec_meta(self, pos: int, index_header: str, vz_header: str, spec_headers: str):
        """
        Extract the spec meta data from the file: number of spec pts,
        X_position, Y_position and Channel code.

        Parameters
        ----------
        pos : int
            line position number in the file
        index_header : str
            cgc key for the index column header (resolves to e.g. ['idx'])
        vz_header : str
            cgc key for the V/Z column headers (resolves to e.g. ['V', 'Z'])
        spec_headers : str
            cgc key for the per-version data-channel headers,
            see Createc_global_const

        Returns
        -------
        None : None
            It populates: self.spec_total_pt, self.spec_pos_x, self.spec_pos_y,
            self.spec_channel_code, self.spec_headers
        """
        # The spec meta line carries several integers; grab them in order.
        result = re.findall(r'(\d+)', self._line_list[pos])
        self.spec_total_pt = int(result[0])
        self.spec_pos_x = int(result[1])
        self.spec_pos_y = int(result[2])
        self.spec_channel_code = int(result[3])
        try:
            # Some files carry an extra output-channel count field
            self.spec_out_channel_count = 'v' + result[6]
        except IndexError:
            self.spec_out_channel_count = 'v2'  # dummy
        # Decode the channel bitmask LSB-first; rjust pads with spaces which
        # compare as False, so padded positions deselect their headers.
        self._filter = [b == '1' for b in bin(self.spec_channel_code)[2:].rjust(len(cgc[spec_headers]))[::-1]]
        self.spec_headers = cgc[index_header] + \
            cgc[vz_header][self.file_version][self.spec_out_channel_count] + \
            list(compress(cgc[spec_headers][self.file_version], self._filter))
class VERT_SPEC(GENERIC_FILE):
    """
    Read the .vert file and generate useful and managable stuff

    Parameters
    ----------
    file_path : str
        Full file path

    Returns
    -------
    vert_spec : VERT_SPEC
        Parsed file; the spectroscopy table is available as self.spec
        (a pandas DataFrame).
    """

    def __init__(self, file_path):
        super().__init__(file_path)
        with open(self.fp, 'r') as f:
            self._line_list = f.readlines()
        # Header lines -> self.meta, then derive the typed properties
        super()._line_list2meta_dict(start=0, end=cgc['g_file_meta_total_lines'])
        super()._extracted_meta()
        # Spec block meta: number of points, position and channel bitmask
        super()._spec_meta(pos=cgc['g_file_spec_meta_line'][self.file_version],
                           index_header='g_file_spec_index_header',
                           vz_header='g_file_spec_vz_header',
                           spec_headers='g_file_spec_headers')
        # Parse the remaining lines as the spectroscopy data table, using the
        # column names derived from the channel bitmask above
        f_obj = io.StringIO('\n'.join(self._line_list[cgc['g_file_spec_skip_rows'][self.file_version]:]))
        self.spec = pd.read_csv(filepath_or_buffer=f_obj, sep=cgc['g_file_spec_delimiter'],
                                header=None,
                                names=self.spec_headers,
                                index_col=cgc['g_file_spec_index_header'],
                                engine='python',
                                usecols=range(len(self.spec_headers)))
class DAT_IMG_v2:
    """Placeholder for a future rewrite of the .dat reader (not implemented)."""
    pass
class DAT_IMG:
"""
Read .dat file and generate meta data and images as numpy arrays.
There are two options for input:
option 1: one arg, i.e. the full path to the .dat file
option 2: two named args
a. the binary content of the file together
b. the file_name as a string
Parameters
----------
file_path : str
the full path to the .dat file
file_binary : bin
the binary content of the file together
file_name : str
the file_name as a string
Returns
-------
dat_img : DAT_IMG
dat_file_object with meta data and image numpy arrays.
Meta data is a dict, one can expand the dict at will.
Images are a list of numpy arrays.
"""
def __init__(self, file_path=None, file_binary=None, file_name=None):
    """
    Read a .dat file either from disk (file_path) or from an in-memory
    binary blob (file_binary + file_name).

    Parameters
    ----------
    file_path : str, optional
        Full path to the .dat file (option 1).
    file_binary : bytes, optional
        Raw file content (option 2, used when file_path is None).
    file_name : str, optional
        File name to record when reading from file_binary.
    """
    self.meta = dict()
    self.img_array_list = []
    if file_path is not None:
        self.fp = file_path
        _, self.fn = os.path.split(self.fp)
        self._meta_binary, self._data_binary = self._read_binary()
    else:
        self.fn = file_name
        # Split the blob at the fixed header/data boundary defined in the
        # global constants file.
        self._meta_binary = file_binary[:int(cgc['g_file_meta_binary_len'])]
        self._data_binary = file_binary[int(cgc['g_file_data_bin_offset']):]
    # Parse header, derive typed properties, then decompress the images
    self._bin2meta_dict()
    self._extracted_meta()
    self._read_img()
    # imgs are numpy arrays, with rows with only zeros cropped off
    self.imgs = [self._crop_img(arr) for arr in self.img_array_list]
    # assert(len(set(img.shape for img in self.imgs)) <= 1)
    # Pixels = namedtuple('Pixels', ['y', 'x'])
    self.img_pixels = XY2D(y=self.imgs[0].shape[0],
                           x=self.imgs[0].shape[1])  # size in (y, x)
def _extracted_meta(self):
    """
    Assign meta data to easily readable properties.
    One can expand these at will; use meta_keys() to see what keys are
    available.

    Returns
    -------
    None : None
        It just populates all the self.properties
    """
    self.file_version = self.meta['file_version']
    # Keep only alphanumerics (strips stray whitespace/control characters)
    self.file_version = ''.join(e for e in self.file_version if e.isalnum())
    self.xPixel = int(self.meta['Num.X / Num.X'])
    self.yPixel = int(self.meta['Num.Y / Num.Y'])
    self.channels = int(self.meta['Channels / Channels'])
    self.ch_zoff = float(self.meta['CHModeZoff / CHModeZoff'])
    self.ch_bias = float(self.meta['CHModeBias[mV] / CHModeBias[mV]'])
    self.chmode = int(self.meta['CHMode / CHMode'])
    self.rotation = float(self.meta['Rotation / Rotation'])
    self.ddeltaX = int(self.meta['DX_DIV_DDelta-X / DX/DDeltaX'])
    self.deltaX_dac = int(self.meta['Delta X / Delta X [Dac]'])
    self.channels_code = self.meta['Channelselectval / Channelselectval']
    self.scan_ymode = int(self.meta['ScanYMode / ScanYMode'])
    # The inconsistent key capitalization below mirrors the Createc software
    self.xPiezoConst = float(self.meta['Xpiezoconst'])  # Createc software error
    self.yPiezoConst = float(self.meta['YPiezoconst'])
    self.zPiezoConst = float(self.meta['ZPiezoconst'])
def _read_binary(self):
    """
    Open the .dat file in raw binary mode and split it into the fixed-size
    meta header and the (compressed) image data section.

    Returns
    -------
    _meta_binary : bin
        meta data in binary
    _data_binary : bin
        data in binary
    """
    meta_len = int(cgc['g_file_meta_binary_len'])
    data_offset = int(cgc['g_file_data_bin_offset'])
    with open(self.fp, 'rb') as fh:
        meta_blob = fh.read(meta_len)
        # Image data starts at a fixed offset, not right after the header
        fh.seek(data_offset)
        data_blob = fh.read()
    return meta_blob, data_blob
def _bin2meta_dict(self):
"""
Convert meta binary to meta info using ansi encoding, filling out the meta dictionary
Here ansi means Windows-1252 extended ascii code page CP-1252
Returns
-------
None : None
"""
meta_list = self._meta_binary.decode('cp1252', errors='ignore').split('\n')
self.meta['file_version'] = meta_list[0]
for line in meta_list:
temp = line.split('=')
if len(temp) == 2:
self.meta[temp[0]] = temp[1][:-1]
def _read_img(self):
    """
    Convert img binary to numpy arrays, filling out the img_array_list.
    The image was compressed using zlib, so it is decompressed here.
    prerequisite: self.xPixel, self.yPixel, self.channels

    Returns
    -------
    None : None
    """
    decompressed_data = zlib.decompress(self._data_binary)
    img_array = np.frombuffer(decompressed_data, np.dtype(cgc['g_file_dat_img_pixel_data_npdtype']))
    # The first element is skipped — presumably a marker/padding value
    # (TODO confirm against the Createc format spec); the remainder is
    # reshaped into `channels` vertically stacked images.
    img_array = np.reshape(img_array[1: self.xPixel * self.yPixel * self.channels + 1],
                           (self.channels * self.yPixel, self.xPixel))
    for i in range(self.channels):
        # Slice out one yPixel-tall image per channel
        self.img_array_list.append(img_array[self.yPixel * i:self.yPixel * (i + 1)])
def meta_keys(self):
"""
Print all available keys in meta
Returns
-------
None : None
"""
return [k for k in self.meta]
def _file2meta_dict(self):
    """
    Not in use
    Open .dat file with ascii encoding, read meta data directly from the
    .dat file, fill out the meta dict.

    Returns
    -------
    None : None
    """
    with open(self.fp, 'r') as f:
        for i in range(cgc['g_file_meta_total_lines']):
            temp = f.readline().split('=')
            if len(temp) == 2:
                # [:-1] drops the trailing newline character
                self.meta[temp[0]] = temp[1][:-1]
def _file2img_arrays(self):
    """
    Not in use
    Open .dat file in raw binary format, starting from the global constant
    g_file_data_bin_offset = 16384, and fill img_array_list with the images
    as numpy arrays.
    prerequisite: self.xPixel, self.yPixel, self.channels

    Returns
    -------
    None : None
    """
    with open(self.fp, 'rb') as f:
        f.seek(cgc['g_file_data_bin_offset'])
        decompressed_data = zlib.decompress(f.read())
    # np.fromstring is deprecated (and rejects binary input in modern
    # NumPy); np.frombuffer is the supported equivalent and matches the
    # behaviour already used in _read_img().
    img_array = np.frombuffer(decompressed_data, np.dtype(cgc['g_file_dat_img_pixel_data_npdtype']))
    # Skip the first element (see _read_img) and reshape into stacked channels
    img_array = np.reshape(img_array[1: self.xPixel * self.yPixel * self.channels + 1],
                           (self.channels * self.yPixel, self.xPixel))
    for i in range(self.channels):
        self.img_array_list.append(img_array[self.yPixel * i:self.yPixel * (i + 1)])
@staticmethod
def _crop_img(arr):
"""
Crop an image, by removing all rows which contain only zeros.
Parameters
----------
arr : numpy array
Individual image
Returns
-------
arr : numpy array
Cropped image
"""
return arr[~np.all(arr == 0, axis=1)]
@property
def offset(self):
"""
Return offset relatvie to the whole range in angstrom in the format of namedtuple (x, y)
Returns
-------
offset : XY2D
"""
x_offset = np.float(self.meta['Scanrotoffx / OffsetX'])
y_offset = np.float(self.meta['Scanrotoffy / OffsetY'])
# x_piezo_const = np.float(self.meta['Xpiezoconst'])
# y_piezo_const = np.float(self.meta['YPiezoconst'])
x_offset = -x_offset * cgc['g_XY_volt'] * self.xPiezoConst / 2 ** cgc['g_XY_bits']
y_offset = -y_offset * cgc['g_XY_volt'] * self.yPiezoConst / | |
# <reponame>uclahedp/dataView
import sys
import os
from pathlib import Path as pathlibPath
import traceback
#Used for sci notation spinbox
import re
import h5py
import numpy as np
from astropy import units
from scipy import ndimage
from PyQt5 import QtWidgets, QtGui, QtCore
import matplotlib
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib.figure
import matplotlib.cm
import time
class ApplicationWindow(QtWidgets.QMainWindow):
def __init__(self):
    """Create the main window and construct all GUI elements."""
    super().__init__()
    # Set True for verbose console output from GUI callbacks
    self.debug = False
    self.buildGUI()
def buildGUI(self):
    """
    Construct the entire main window: menu bar, plot canvas, data-range
    controls, filter row, movie panel and the axis-selection dropdowns.

    Widgets whose signals must be silenced during programmatic updates are
    collected in self.connectedList (see freezeGUI/unfreezeGUI).
    """
    self._main = QtWidgets.QWidget()
    self.setCentralWidget(self._main)
    #This little code block makes the window open on top of other windows,
    #without requiring it to stay there.
    self.show()
    self.activateWindow()
    self.raise_()
    #Playing around with a custom style sheet...
    #self.setStyleSheet(open('stylesheet.css').read())
    self.setWindowTitle("HEDP dataView")
    #hdf5 filepath
    self.filepath = ''
    #plot save filepath
    self.plotsave_dir = ''
    #Directory for movie frames to be saved
    self.movie_dir = ''
    #Array of axes dictionaries
    self.axes = []
    #Currently selected axes, defaults to 0 for both
    #Second element is only used for 2D plots
    self.cur_axes = [0,0]
    #Used to smoothly transfer axes info when new files loaded
    self.last_cur_axes = [0,0]
    self.last_axes = []
    #Data range (y values for 1D or z values for 2D)
    self.datarange = [None, None]
    #Semi-arbitrarily chosen subset of the mathplotlib colorsmaps to include
    self.colormap_dict = {'Autumn':'autumn', "Winter":"winter", "Cool":"cool",
                          "Ocean":"ocean", "Rainbow":"gist_rainbow",
                          "Seismic":"seismic", "RedGrey":"RdGy",
                          "Coolwarm":"coolwarm"}
    #Many GUI elements are connected to function calls
    #By default, Pyqt5 will trigger these functions sometimes when we don't
    #want it to. Elements added to this list will be temporarily
    #disconnected at such times.
    self.connectedList = []
    #Unit of data in file (never changes)
    self.data_native_unit = ''
    #Factor for correcting data for units
    self.data_unit_factor = 1.0
    #Storage of unit factor currently applied to data range
    self.data_cur_unit = ''#Unit displayed on data range, etc.
    #Plotting variables
    self.data = 0
    # Horizontal/vertical axis state used while drawing plots
    self.hax = {'ax': 0, 'name': '', 'slice': 0, 'unit': '', 'unit_factor': 0}
    self.vax = {'ax': 0, 'name': '', 'slice': 0, 'unit': '', 'unit_factor': 0}
    #DEFINE fonts
    self.text_font = QtGui.QFont()
    self.text_font.setPointSize(12)
    self.subtitle_font = QtGui.QFont()
    self.subtitle_font.setPointSize(16)
    self.subtitle_font.setBold(True)
    self.title_font = QtGui.QFont()
    self.title_font.setPointSize(22)
    self.title_font.setBold(True)
    #This is the primary layout
    self.layout = QtWidgets.QHBoxLayout(self._main)
    #
    #Define Actions
    #
    quitAct = QtWidgets.QAction(" &Quit", self)
    quitAct.triggered.connect(self.close)
    loadAct = QtWidgets.QAction(" &Load", self)
    loadAct.triggered.connect(self.fileDialog)
    savePlotAct = QtWidgets.QAction(" &Save Plot", self)
    savePlotAct.triggered.connect(self.savePlot)
    #Setup options menue + actions within
    optionsMenu = QtWidgets.QMenu('Options', self)
    self.showFilter = QtWidgets.QAction(" &Filter", self, checkable=True)
    self.showFilter.setChecked(False)
    self.showFilter.triggered.connect(self.showFilterAction)
    self.showMovieBox = QtWidgets.QAction(" &Movie", self, checkable=True)
    self.showMovieBox.setChecked(False)
    self.showMovieBox.triggered.connect(self.showMovieBoxAction)
    #SETUP MENU
    menubar = self.menuBar()
    #Necessary for OSX, which trys to put menu bar on the top of the screen
    menubar.setNativeMenuBar(False)
    #Add menu actions
    menubar.addAction(quitAct)
    menubar.addAction(loadAct)
    menubar.addAction(savePlotAct)
    #Add options menu and associated submenu options
    menubar.addMenu(optionsMenu)
    optionsMenu.addAction(self.showFilter)
    optionsMenu.addAction(self.showMovieBox)
    #Main layout: center column (plot + controls), right column (axes/movie)
    self.centerbox = QtWidgets.QVBoxLayout()
    self.layout.addLayout(self.centerbox)
    self.rightbox = QtWidgets.QVBoxLayout()
    self.layout.addLayout(self.rightbox)
    self.select_ax_box = QtWidgets.QVBoxLayout()
    self.rightbox.addLayout(self.select_ax_box)
    #Make divider line
    divFrame = QtWidgets.QFrame()
    divFrame.setFrameShape(QtWidgets.QFrame.HLine)
    divFrame.setLineWidth(3)
    self.rightbox.addWidget(divFrame)
    self.axesbox = QtWidgets.QVBoxLayout()
    self.rightbox.addLayout(self.axesbox)
    #Create the plot-type dropdown box
    self.plottype_box = QtWidgets.QHBoxLayout()
    self.centerbox.addLayout(self.plottype_box)
    self.plottype_label = QtWidgets.QLabel("Plot Type: ")
    self.plottype_box.addWidget(self.plottype_label)
    self.plottype_field = QtWidgets.QComboBox()
    self.plottype_field.addItem('1D')
    self.plottype_field.addItem('2D')
    self.plottype_field.show()
    self.plottype_box.addWidget(self.plottype_field)
    self.plottype_field.currentIndexChanged.connect(self.updatePlotTypeAction)
    self.connectedList.append(self.plottype_field)
    self.plot_title_checkbox = QtWidgets.QCheckBox("Auto plot title?")
    self.plot_title_checkbox.setChecked(True)
    self.plottype_box.addWidget(self.plot_title_checkbox)
    self.plot_title_checkbox.stateChanged.connect(self.makePlot)
    self.connectedList.append(self.plot_title_checkbox)
    self.plot_title = QtWidgets.QLineEdit("Custom title text")
    self.plottype_box.addWidget(self.plot_title)
    self.plot_title.editingFinished.connect(self.makePlot)
    self.connectedList.append(self.plot_title)
    #2D plot parameter row (image/contour choice)
    self.fig_2d_props_box = QtWidgets.QHBoxLayout()
    self.centerbox.addLayout(self.fig_2d_props_box)
    self.fig_2d_label = QtWidgets.QLabel("2D Plot Parameters: ")
    self.fig_2d_props_box.addWidget(self.fig_2d_label)
    self.plotImageBtn = QtWidgets.QRadioButton("ImagePlot")
    self.plotImageBtn.setChecked(True)
    self.fig_2d_props_box.addWidget(self.plotImageBtn)
    self.plotImageBtn.toggled.connect(self.makePlot)
    self.plotContourBtn = QtWidgets.QRadioButton("ContourPlot")
    self.fig_2d_props_box.addWidget(self.plotContourBtn)
    self.plot_opts_box = QtWidgets.QHBoxLayout()
    self.centerbox.addLayout(self.plot_opts_box)
    self.aspect_ratio_check = QtWidgets.QCheckBox("Fix Aspect Ratio?")
    self.aspect_ratio_check.setChecked(False)
    self.aspect_ratio_check.toggled.connect(self.makePlot)
    self.plot_opts_box.addWidget(self.aspect_ratio_check)
    #Make colormap selection bar
    self.colormap_lbl = QtWidgets.QLabel("Colormap: ")
    self.fig_2d_props_box.addWidget(self.colormap_lbl)
    self.colormap_field = QtWidgets.QComboBox()
    self.fig_2d_props_box.addWidget(self.colormap_field)
    for k in self.colormap_dict.keys():
        self.colormap_field.addItem(k)
    self.colormap_field.currentIndexChanged.connect(self.makePlot)
    self.connectedList.append(self.colormap_field)
    #This label shows warnings to explain why plots weren't made
    self.warninglabel = QtWidgets.QLabel('')
    self.centerbox.addWidget(self.warninglabel)
    #Create the figure that plots will be made into
    self.figure = matplotlib.figure.Figure(figsize=(5, 3))
    self.canvas = FigureCanvas(self.figure)
    self.canvas.setMinimumSize(500, 500)
    self.centerbox.addWidget(self.canvas)
    #Create the datarange box
    self.datarange_box = QtWidgets.QHBoxLayout()
    self.centerbox.addLayout(self.datarange_box)
    self.datarange_auto = QtWidgets.QCheckBox("Autorange?")
    self.datarange_auto.setChecked(True)
    self.datarange_box.addWidget(self.datarange_auto)
    self.datarange_auto.stateChanged.connect(self.updateDataRange)
    self.connectedList.append(self.datarange_auto)
    self.datarange_center = QtWidgets.QCheckBox("Center zero?")
    self.datarange_center.setChecked(False)
    self.datarange_box.addWidget(self.datarange_center)
    self.datarange_center.stateChanged.connect(self.updateDataRange)
    self.connectedList.append(self.datarange_center)
    self.datarange_lbl = QtWidgets.QLabel("Data Range: ")
    self.datarange_lbl.setFixedWidth(80)
    self.datarange_box.addWidget(self.datarange_lbl)
    #Min/max spinboxes for the manual data range
    self.datarange_a = ScientificDoubleSpinBox()
    self.datarange_a.setRange(-1e100, 1e100)
    self.datarange_box.addWidget(self.datarange_a )
    self.datarange_a.editingFinished.connect(self.updateDataRange)
    self.connectedList.append(self.datarange_a)
    self.datarange_b = ScientificDoubleSpinBox()
    self.datarange_b.setRange(-1e100, 1e100)
    self.datarange_box.addWidget(self.datarange_b )
    self.datarange_b.editingFinished.connect(self.updateDataRange)
    self.connectedList.append(self.datarange_b)
    self.datarange_unitlbl = QtWidgets.QLabel("Data Unit: ")
    self.datarange_unitlbl.setFixedWidth(60)
    self.datarange_box.addWidget(self.datarange_unitlbl)
    self.data_unit_field = QtWidgets.QLineEdit('')
    self.data_unit_field.setFixedWidth(40)
    self.datarange_box.addWidget(self.data_unit_field)
    self.data_unit_field.editingFinished.connect(self.updateDataUnits)
    self.connectedList.append(self.data_unit_field)
    #CREATE AND FILL THE FILTER OPTIONS BOX
    self.filterbox = QtWidgets.QHBoxLayout()
    self.centerbox.addLayout(self.filterbox)
    self.filterbox_widgets = []
    self.nofilter_checkbox = QtWidgets.QRadioButton("No Filter")
    self.nofilter_checkbox.setChecked(False)
    self.filterbox.addWidget(self.nofilter_checkbox)
    self.filterbox_widgets.append(self.nofilter_checkbox)
    self.nofilter_checkbox.toggled.connect(self.makePlot)
    self.lowpass_checkbox = QtWidgets.QRadioButton("Lowpass")
    self.lowpass_checkbox.setChecked(False)
    self.filterbox.addWidget(self.lowpass_checkbox)
    self.filterbox_widgets.append(self.lowpass_checkbox)
    self.lowpass_checkbox.toggled.connect(self.makePlot)
    self.highpass_checkbox = QtWidgets.QRadioButton("Highpass")
    self.highpass_checkbox.setChecked(False)
    self.filterbox.addWidget(self.highpass_checkbox)
    self.filterbox_widgets.append(self.highpass_checkbox)
    self.highpass_checkbox.toggled.connect(self.makePlot)
    self.filter_sigma_lbl = QtWidgets.QLabel("Filter Sigma: ")
    self.filter_sigma_lbl.setFixedWidth(100)
    self.filterbox_widgets.append(self.filter_sigma_lbl)
    self.filterbox.addWidget(self.filter_sigma_lbl)
    self.filter_sigma = ScientificDoubleSpinBox()
    self.filter_sigma.setRange(0.01, 10000)
    self.filter_sigma.setSingleStep(.01)
    self.filter_sigma.setFixedWidth(80)
    self.filter_sigma.setValue(1)
    self.filter_sigma.setWrapping(False)
    self.filterbox.addWidget(self.filter_sigma)
    self.filterbox_widgets.append(self.filter_sigma)
    self.filter_sigma.editingFinished.connect(self.makePlot)
    #Filter row starts hidden; toggled from the options menu
    for x in self.filterbox_widgets:
        x.hide()
    #CREATE AND FILL THE MOVIE OPTIONS BOX
    self.moviebox = QtWidgets.QVBoxLayout()
    self.rightbox.addLayout(self.moviebox)
    self.moviebox_widgets = []
    width = 150
    #Make divider line
    self.movie_div = QtWidgets.QFrame()
    self.movie_div.setFrameShape(QtWidgets.QFrame.HLine)
    self.movie_div.setLineWidth(3)
    self.moviebox.addWidget(self.movie_div)
    self.moviebox_widgets.append(self.movie_div)
    self.movie_box_lbl = QtWidgets.QLabel("Movie Settings")
    self.movie_box_lbl.setFont(self.title_font)
    self.movie_box_lbl.setAlignment(QtCore.Qt.AlignCenter)
    self.moviebox.addWidget(self.movie_box_lbl)
    self.moviebox_widgets.append(self.movie_box_lbl)
    #Save dir
    self.movie_dir_box = QtWidgets.QHBoxLayout()
    self.moviebox.addLayout(self.movie_dir_box)
    self.movie_dir_button = QtWidgets.QPushButton("Set Save Dir:")
    self.movie_dir_button.clicked.connect(self.setMovieDir)
    self.movie_dir_box.addWidget(self.movie_dir_button)
    self.moviebox_widgets.append(self.movie_dir_button)
    self.movie_dir_line = QtWidgets.QLineEdit()
    self.movie_dir_line.setFixedWidth(3*width)
    self.movie_dir_line.editingFinished.connect(self.modifyMovieDir)
    self.movie_dir_box.addWidget(self.movie_dir_line)
    self.moviebox_widgets.append(self.movie_dir_line)
    #Select movie axis
    self.movie_ax_box = QtWidgets.QHBoxLayout()
    self.moviebox.addLayout(self.movie_ax_box)
    self.movie_axis_lbl = QtWidgets.QLabel("Movie Axis:")
    self.movie_ax_box.addWidget(self.movie_axis_lbl)
    self.moviebox_widgets.append(self.movie_axis_lbl)
    self.movie_ax = QtWidgets.QComboBox()
    self.movie_ax_box.addWidget(self.movie_ax)
    self.moviebox_widgets.append(self.movie_ax)
    #Movie range box
    self.movie_range_box = QtWidgets.QHBoxLayout()
    self.moviebox.addLayout(self.movie_range_box)
    self.movie_range_lbl = QtWidgets.QLabel("Start, Stop, NFrames:")
    self.movie_range_box.addWidget(self.movie_range_lbl)
    self.moviebox_widgets.append(self.movie_range_lbl)
    self.movie_start = ScientificDoubleSpinBox()
    self.movie_start.setFixedWidth(width)
    self.movie_start.setWrapping(True)
    self.movie_range_box.addWidget(self.movie_start)
    self.moviebox_widgets.append(self.movie_start)
    self.movie_stop = ScientificDoubleSpinBox()
    self.movie_stop.setFixedWidth(width)
    self.movie_stop.setWrapping(True)
    self.movie_range_box.addWidget(self.movie_stop)
    self.moviebox_widgets.append(self.movie_stop)
    self.movie_num = ScientificDoubleSpinBox()
    self.movie_num.setFixedWidth(width)
    self.movie_num.setWrapping(True)
    self.movie_range_box.addWidget(self.movie_num)
    self.moviebox_widgets.append(self.movie_num)
    #Control Buttons
    self.movie_ctl_box = QtWidgets.QHBoxLayout()
    self.moviebox.addLayout(self.movie_ctl_box)
    self.movie_run_button = QtWidgets.QPushButton("Run")
    self.movie_run_button.setCheckable(True)
    self.movie_run_button.clicked.connect(self.runMovie)
    self.movie_ctl_box.addWidget(self.movie_run_button)
    self.moviebox_widgets.append(self.movie_run_button)
    #editingFinished.connect(self.updateAxesFieldsAction)
    #Movie panel starts hidden; toggled from the options menu
    for x in self.moviebox_widgets:
        x.hide()
    self.axis_box_label = QtWidgets.QLabel("Chose Axis/Axes")
    self.axis_box_label.setFont(self.title_font)
    self.axis_box_label.setAlignment(QtCore.Qt.AlignCenter)
    self.select_ax_box.addWidget(self.axis_box_label)
    #Create the first axis dropdown menu
    self.dropdown1_box = QtWidgets.QHBoxLayout()
    self.select_ax_box.addLayout(self.dropdown1_box)
    self.dropdown1_label = QtWidgets.QLabel("Axis 1: ")
    self.dropdown1_box.addWidget(self.dropdown1_label)
    self.dropdown1 = QtWidgets.QComboBox()
    self.dropdown1_box.addWidget(self.dropdown1)
    self.dropdown1.currentIndexChanged.connect(self.updateAxesFieldsAction)
    self.connectedList.append(self.dropdown1)
    #Create the second axis dropdown menu
    self.dropdown2_box = QtWidgets.QHBoxLayout()
    self.select_ax_box.addLayout(self.dropdown2_box)
    self.dropdown2_label = QtWidgets.QLabel("Axis 2: ")
    self.dropdown2_box.addWidget(self.dropdown2_label)
    self.dropdown2 = QtWidgets.QComboBox()
    self.dropdown2_box.addWidget(self.dropdown2)
    self.dropdown2.currentIndexChanged.connect(self.updateAxesFieldsAction)
    self.connectedList.append(self.dropdown2)
def freezeGUI(self):
    """Block signals on every registered widget so programmatic updates
    don't trigger their connected callbacks."""
    if self.debug:
        print("Freezing GUI elements")
    for widget in self.connectedList:
        widget.blockSignals(True)
def unfreezeGUI(self):
    """Re-enable signals on every registered widget (undo freezeGUI)."""
    if self.debug:
        print("Unfreezing GUI elements")
    for widget in self.connectedList:
        widget.blockSignals(False)
def fileDialog(self):
    """
    Ask the user for an HDF5 file, then load its axis and unit metadata.

    On a valid selection this stashes the previous axis state in
    self.last_axes / self.last_cur_axes, reads the axis arrays and units
    from the file, rebuilds the axis widgets and redraws the plot.
    """
    if self.debug:
        print("Beginning file dialog")
    # Start browsing from the directory of the previously loaded file
    filedir = os.path.dirname(self.filepath)
    opendialog = QtWidgets.QFileDialog()
    # NOTE(review): this filter is superseded by the filter string passed to
    # getOpenFileName below ("*.hdf5") — confirm which extensions are intended.
    opendialog.setNameFilter("HDF5 Files (*.hf5, *.h5)")
    userinput = pathlibPath( opendialog.getOpenFileName(self,
                                         "Select file to open",
                                         filedir,
                                         "hdf5 Files (*.hdf5)")[0] )
    if not userinput.is_file():
        # Covers both "cancel" (empty path) and a non-existent selection
        print("Invalid input (ignoring): " + str(userinput) )
    else:
        print("Loading file: " + str(userinput) )
        self.filepath = userinput
        #Saving old settings and resetting arrays to default
        self.last_axes = self.axes #Copy over any axes to memory
        self.axes = []
        self.last_cur_axes = self.cur_axes
        self.cur_axes = [0,0]
        with h5py.File(self.filepath, 'r') as f:
            # 'dimensions' holds the (byte-string) names of the axis datasets
            temp_axes = ( f['data'].attrs['dimensions'] )
            self.data_unit_field.setText( f['data'].attrs['unit'])
            self.data_native_unit = self.data_unit_field.text()
            self.data_cur_unit = self.data_native_unit
            for ind, axis in enumerate(temp_axes):
                ax = {}
                name = axis.decode("utf-8")
                ax['name'] = name
                ax['ax'] = f[name][:]
                ax['axind'] = ind
                ax['native_unit'] = f[name].attrs['unit']
                ax['indminmax'] = ( 0 , len(f[name]) -1 )
                ax['valminmax'] = ( f[name][0] , f[name][-1] )
                try:
                    # Mean spacing of the axis; used as the spinbox step size
                    ax['step'] = np.mean(np.gradient(ax['ax']))
                except ValueError:
                    # Single-point axes have no gradient
                    ax['step'] = 1
                self.axes.append(ax)
        # Signals are blocked while the widgets are rebuilt so callbacks
        # don't fire on half-initialized state
        self.freezeGUI()
        self.initAxesBoxes()
        self.unfreezeGUI()
        self.makePlot()
def initAxesBoxes(self):
if self.debug:
print("Initializing Axes Boxes")
#Remove old widgets
self.clearLayout(self.axesbox)
#Create Controls title
self.axis_settings_label = QtWidgets.QLabel("Axis Settings")
self.axis_settings_label.setFont(self.title_font)
self.axis_settings_label.setAlignment(QtCore.Qt.AlignCenter)
self.axesbox.addWidget(self.axis_settings_label)
#Remove old items from dropdown menus
self.dropdown1.clear()
self.dropdown2.clear()
for i, ax in enumerate(self.axes):
#Take the ax out of the axes array
ax = self.axes[i]
#Add the axes names to the dropdown menus
self.dropdown1.addItem(ax['name'])
self.dropdown2.addItem(ax['name'])
self.movie_ax.addItem(ax['name'])
#Create the top level box for this axis
ax['box'] = QtWidgets.QVBoxLayout()
self.axesbox.addLayout(ax['box'])
#Create a list of all objects in this axis that involve real values
#When units get changed, we can iteratively modify all of these
#fields at once
ax['value_list'] = []
#CREATE AND FILL THE MAIN BOX
#This box always shows
ax['mainbox'] = QtWidgets.QHBoxLayout()
ax['box'].addLayout(ax['mainbox'])
ax['namelabel'] = QtWidgets.QLabel('')
ax['namelabel'].setFixedWidth(60)
ax['namelabel'].setFont(self.subtitle_font)
ax['namelabel'].setText(ax['name'] + | |
"""
Module to allow Plotly graphs to interact with IPython widgets.
"""
import uuid
from collections import deque
from pkg_resources import resource_string
from requests.compat import json as _json
# TODO: protected imports?
from IPython.html import widgets
from IPython.utils.traitlets import Unicode
from IPython.display import Javascript, display
import plotly.plotly.plotly as py
from plotly import utils, tools
from plotly.graph_objs import Figure
# Load the JS widget code bundled with the plotly package and inject it
# into the notebook front-end at import time. There is no officially
# recommended alternative mechanism for shipping widget JS:
# http://mail.scipy.org/pipermail/ipython-dev/2014-April/013835.html
js_widget_code = resource_string('plotly',
                                 'package_data/graphWidget.js').decode('utf-8')
display(Javascript(js_widget_code))
# NOTE(review): __all__ is conventionally a list of names; None presumably
# signals "no public API" here -- confirm before changing.
__all__ = None
class GraphWidget(widgets.DOMWidget):
    """An interactive Plotly graph widget for use in IPython
    Notebooks.
    """
    # Traitlets with sync=True are mirrored to the front-end widget model;
    # assigning to them pushes the value to the browser side.
    _view_name = Unicode('GraphView', sync=True)
    _message = Unicode(sync=True)    # outgoing JSON payload for the JS client
    _graph_url = Unicode(sync=True)  # url of the plotly graph shown in the iframe
    _new_url = Unicode(sync=True)    # url of the last copy saved via py.plot
    _filename = ''
    # NOTE(review): class-level mutable dict -- mutated (not reassigned) in
    # _handle_msg, so the flag is shared across all instances; confirm that
    # is intended.
    _flags = {
        'save_pending': False
    }
    # TODO: URL for offline enterprise
    def __init__(self, graph_url='https://plot.ly/~playground/7', **kwargs):
        """Initialize a plotly graph widget
        Args:
            graph_url: The url of a Plotly graph
        Example:
            ```
            GraphWidget('https://plot.ly/~chris/3375')
            ```
        """
        super(GraphWidget, self).__init__(**kwargs)
        # TODO: Validate graph_url
        self._graph_url = graph_url
        # Event types the front-end has been told to emit (see
        # _handle_registration).
        self._listener_set = set()
        # One dispatcher per supported front-end event type.
        self._event_handlers = {
            'click': widgets.CallbackDispatcher(),
            'hover': widgets.CallbackDispatcher(),
            'zoom': widgets.CallbackDispatcher()
        }
        # Filled in by the 'pong' handshake message from the client.
        self._graphId = ''
        self.on_msg(self._handle_msg)
        # messages to the iframe client need to wait for the
        # iframe to communicate that it is ready
        # unfortunately, this two-way blocking communication
        # isn't possible
        # (https://github.com/ipython/ipython/wiki/IPEP-21:-Widget-Messages#caveats)
        # so we'll just cue up messages until they're ready to be sent
        self._clientMessages = deque()
@property
def url(self):
return self._new_url or ''
    def _handle_msg(self, message):
        """Handle a msg from the front-end.
        Args:
            content (dict): Content of the msg.
        """
        content = message['content']['data']['content']
        # 'pong' completes the handshake: remember the client's graph id
        # and flush any messages that were queued before it was known.
        if content.get('event', '') == 'pong':
            self._graphId = content['graphId']
            # ready to receive - pop out all of the items in the deque
            while self._clientMessages:
                _message = self._clientMessages.popleft()
                _message['graphId'] = self._graphId
                _message = _json.dumps(_message)
                self._message = _message
        if content.get('event', '') in ['click', 'hover', 'zoom']:
            # De-nest the message before dispatching to user callbacks.
            if content['event'] == 'click' or content['event'] == 'hover':
                message = content['message']['points']
            elif content['event'] == 'zoom':
                message = content['message']['ranges']
            self._event_handlers[content['event']](self, message)
        if content.get('event', '') == 'getAttributes':
            self._attributes = content.get('response', {})
            # there might be a save pending, use the plotly module to save
            if self._flags['save_pending']:
                self._flags['save_pending'] = False
                url = py.plot(self._attributes, auto_open=False,
                              filename=self._filename, validate=False)
                self._new_url = url
                self._fade_to('slow', 1)
def _handle_registration(self, event_type, callback, remove):
self._event_handlers[event_type].register_callback(callback,
remove=remove)
event_callbacks = self._event_handlers[event_type].callbacks
if (len(event_callbacks) and event_type not in self._listener_set):
self._listener_set.add(event_type)
message = {'task': 'listen', 'events': list(self._listener_set)}
self._handle_outgoing_message(message)
def _handle_outgoing_message(self, message):
if self._graphId == '':
self._clientMessages.append(message)
else:
message['graphId'] = self._graphId
message['uid'] = str(uuid.uuid4())
self._message = _json.dumps(message, cls=utils.PlotlyJSONEncoder)
    def on_click(self, callback, remove=False):
        """ Assign a callback to click events propagated
        by clicking on point(s) in the Plotly graph.
        Args:
            callback (function): Callback function this is called
            on click events with the signature:
            callback(widget, click_obj) -> None
                Args:
                widget (GraphWidget): The current instance
                of the graph widget that this callback is assigned to.
                click_obj (dict): a nested dict that describes
                which point(s) were clicked on.
                click_obj example:
                [
                    {
                        'curveNumber': 1,
                        'pointNumber': 2,
                        'x': 4,
                        'y': 14
                    }
                ]
            remove (bool, optional): If False, attach the callback.
            If True, remove the callback. Defaults to False.
        Returns:
            None
        Example:
        ```
        from IPython.display import display
        def message_handler(widget, msg):
            display(widget._graph_url)
            display(msg)
        g = GraphWidget('https://plot.ly/~chris/3375')
        display(g)
        g.on_click(message_handler)
        ```
        """
        self._handle_registration('click', callback, remove)
def on_hover(self, callback, remove=False):
""" Assign a callback to hover events propagated
by hovering over points in the Plotly graph.
Args:
callback (function): Callback function this is called
on hover events with the signature:
callback(widget, hover_obj) -> None
Args:
widget (GraphWidget): The current instance
of the graph widget that this callback is assigned to.
hover_obj (dict): a nested dict that describes
which point(s) was hovered over.
hover_obj example:
[
{
'curveNumber': 1,
'pointNumber': 2,
'x': 4,
'y': 14
}
]
remove (bool, optional): If False, attach the callback.
If True, remove the callback. Defaults to False.
Returns:
None
Example:
```
from IPython.display import display
def message_handler(widget, hover_msg):
display(widget._graph_url)
display(hover_msg)
g = GraphWidget('https://plot.ly/~chris/3375')
display(g)
g.on_hover(message_handler)
```
"""
self._handle_registration('hover', callback, remove)
def on_zoom(self, callback, remove=False):
""" Assign a callback to zoom events propagated
by zooming in regions in the Plotly graph.
Args:
callback (function): Callback function this is called
on zoom events with the signature:
callback(widget, ranges) -> None
Args:
widget (GraphWidget): The current instance
of the graph widget that this callback is assigned to.
ranges (dict): A description of the
region that was zoomed into.
ranges example:
{
'x': [1.8399058038561549, 2.16443359662],
'y': [4.640902872777017, 7.855677154582]
}
remove (bool, optional): If False, attach the callback.
If True, remove the callback. Defaults to False.
Returns:
None
Example:
```
from IPython.display import display
def message_handler(widget, ranges):
display(widget._graph_url)
display(ranges)
g = GraphWidget('https://plot.ly/~chris/3375')
display(g)
g.on_zoom(message_handler)
```
"""
self._handle_registration('zoom', callback, remove)
    def plot(self, figure_or_data, validate=True):
        """Plot figure_or_data in the Plotly graph widget.
        Args:
            figure_or_data (dict, list, or plotly.graph_obj object):
                The standard Plotly graph object that describes Plotly
                graphs as used in `plotly.plotly.plot`. See examples
                of the figure_or_data in https://plot.ly/python/
            validate (bool): Whether to validate figure_or_data against
                the plotly schema. Forced to False for the empty-figure
                "clear" case below.
        Returns: None
        Example 1 - Graph a scatter plot:
        ```
        from plotly.graph_objs import Scatter
        g = GraphWidget()
        g.plot([Scatter(x=[1, 2, 3], y=[10, 15, 13])])
        ```
        Example 2 - Graph a scatter plot with a title:
        ```
        from plotly.graph_objs import Scatter, Figure, Data
        fig = Figure(
            data = Data([
                Scatter(x=[1, 2, 3], y=[20, 15, 13])
            ]),
            layout = Layout(title='Experimental Data')
        )
        g = GraphWidget()
        g.plot(fig)
        ```
        Example 3 - Clear a graph widget
        ```
        from plotly.graph_objs import Scatter, Figure
        g = GraphWidget()
        g.plot([Scatter(x=[1, 2, 3], y=[10, 15, 13])])
        # Now clear it
        g.plot({}) # alternatively, g.plot(Figure())
        ```
        """
        # An empty dict/Figure means "clear the graph"; skip validation
        # since an empty figure would not pass the schema check.
        if figure_or_data == {} or figure_or_data == Figure():
            validate = False
        figure = tools.return_figure_from_figure_or_data(figure_or_data,
                                                         validate)
        message = {
            'task': 'newPlot',
            'data': figure.get('data', []),
            'layout': figure.get('layout', {}),
            'graphId': self._graphId
        }
        self._handle_outgoing_message(message)
def restyle(self, update, indices=None):
"""Update the style of existing traces in the Plotly graph.
Args:
update (dict):
dict where keys are the graph attribute strings
and values are the value of the graph attribute.
To update graph objects that are nested, like
a marker's color, combine the keys with a period,
e.g. `marker.color`. To replace an entire nested object,
like `marker`, set the value to the object.
See Example 2 below.
To update an attribute of multiple traces, set the
value to an list of values. If the list is shorter
than the number of traces, the values will wrap around.
Note: this means that for values that are naturally an array,
like `x` or `colorscale`, you need to wrap the value
in an extra array,
i.e. {'colorscale': [[[0, 'red'], [1, 'green']]]}
You can also supply values to different traces with the
indices argument.
See all of the graph attributes in our reference documentation
here: https://plot.ly/python/reference or by calling `help` on
graph objects in `plotly.graph_objs`.
indices (list, optional):
Specify which traces to apply the update dict to.
Negative indices are supported.
If indices are not given, the update will apply to
*all* traces.
Examples:
Initialization - Start each example below with this setup:
```
from plotly.widgets import GraphWidget
from IPython.display import display
graph = GraphWidget()
display(graph)
```
Example 1 - Set `marker.color` to red in every trace in the graph
```
graph.restyle({'marker.color': 'red'})
```
Example 2 - Replace `marker` with {'color': 'red'}
```
graph.restyle({'marker': {'color': red'}})
```
Example 3 - Set `marker.color` to red
in the first trace of the graph
```
graph.restyle({'marker.color': 'red'}, indices=[0])
```
Example 4 - Set `marker.color` of all of the traces to
alternating sequences of red and green
```
graph.restyle({'marker.color': ['red', 'green']})
```
Example 5 - Set just `marker.color` of the first two traces
to red and green
```
graph.restyle({'marker.color': ['red', 'green']}, indices=[0, 1])
```
Example 6 - Set multiple attributes of all of the traces
```
graph.restyle({
'marker.color': 'red',
'line.color': 'green'
})
```
Example 7 - Update the data of the first trace
```
graph.restyle({
'x': [[1, 2, 3]],
'y': [[10, 20, 30]],
}, indices=[0])
```
Example 8 - Update the data of the first two traces
```
graph.restyle({
'x': [[1, 2, 3],
[1, 2, 4]],
'y': [[10, 20, 30],
[5, 8, 14]],
}, indices=[0, 1])
```
"""
# TODO: Add flat | |
# filename: instructor/real_data/catgan_instructor.py
# -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : catgan_instructor.py
# @Time : Created at 2019-07-23
# @Blog : http://zhiweil.ml/
# @Description : CatGAN for category text generation
# Copyrights (C) 2018. All Rights Reserved.
import copy
import numpy as np
import random
import torch
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
import config as cfg
from instructor.real_data.instructor import BasicInstructor
from metrics.nll import NLL
from models.CatGAN_D import CatGAN_D, CatGAN_C
from models.CatGAN_G import CatGAN_G
from utils.cat_data_loader import CatGenDataIter
from utils.data_loader import GenDataIter
from utils.gan_loss import GANLoss
from utils.helpers import get_fixed_temperature
from utils.text_process import tensor_to_tokens, write_tokens
class CatGANInstructor(BasicInstructor):
    def __init__(self, opt):
        """Build the generator population, discriminator, classifier,
        optimizers, losses, data iterators and metrics for CatGAN."""
        super(CatGANInstructor, self).__init__(opt)
        # generator, discriminator
        self.gen = CatGAN_G(cfg.k_label, cfg.mem_slots, cfg.num_heads, cfg.head_size, cfg.gen_embed_dim,
                            cfg.gen_hidden_dim, cfg.vocab_size, cfg.max_seq_len, cfg.padding_idx, gpu=cfg.CUDA)
        # Evolutionary population: parents are stored as state_dicts (not
        # live modules) and loaded into self.gen one at a time (load_gen).
        self.parents = [CatGAN_G(cfg.k_label, cfg.mem_slots, cfg.num_heads, cfg.head_size, cfg.gen_embed_dim,
                                 cfg.gen_hidden_dim, cfg.vocab_size, cfg.max_seq_len, cfg.padding_idx,
                                 gpu=cfg.CUDA).state_dict()
                        for _ in range(cfg.n_parent)]  # list of Generator state_dict
        self.dis = CatGAN_D(cfg.dis_embed_dim, cfg.max_seq_len, cfg.num_rep, cfg.vocab_size,
                            cfg.padding_idx, gpu=cfg.CUDA)
        self.clas = CatGAN_C(cfg.k_label, cfg.dis_embed_dim, cfg.max_seq_len, cfg.num_rep, cfg.extend_vocab_size,
                             cfg.padding_idx, gpu=cfg.CUDA)
        self.init_model()
        # Optimizer
        self.gen_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_lr)
        self.gen_adv_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_adv_lr)
        self.dis_opt = optim.Adam(self.dis.parameters(), lr=cfg.dis_lr)
        self.clas_opt = optim.Adam(self.clas.parameters(), lr=cfg.clas_lr)
        # Per-parent optimizer states, kept in sync with self.parents.
        self.parent_mle_opts = [copy.deepcopy(self.gen_opt.state_dict())
                                for _ in range(cfg.n_parent)]
        self.parent_adv_opts = [copy.deepcopy(self.gen_adv_opt.state_dict())
                                for _ in range(cfg.n_parent)]  # list of optimizer state dict
        # Criterion
        # One generator loss per mutation type (used for evolutionary
        # variation); a single loss for the discriminator.
        self.G_criterion = [GANLoss(loss_mode, 'G', cfg.d_type, CUDA=cfg.CUDA) for loss_mode in cfg.mu_type.split()]
        self.D_criterion = GANLoss(cfg.loss_type, 'D', cfg.d_type, CUDA=cfg.CUDA)
        # DataLoader
        self.all_train_data = CatGenDataIter(self.train_samples_list)
        # Metrics
        self.all_metrics.append(self.clas_acc)
def init_model(self):
if cfg.gen_pretrain:
for i in range(cfg.n_parent):
self.log.info('Load MLE pretrained generator gen: {}'.format(cfg.pretrained_gen_path + '%d' % i))
self.parents[i] = torch.load(cfg.pretrained_gen_path + '%d' % 0, map_location='cpu')
if cfg.CUDA:
self.gen = self.gen.cuda()
if cfg.multi_gpu:
self.dis = torch.nn.parallel.DataParallel(self.dis, device_ids=cfg.devices)
self.dis = self.dis.cuda()
self.clas = self.clas.cuda()
def load_gen(self, parent, parent_opt, mle=False):
self.gen.load_state_dict(copy.deepcopy(parent))
if mle:
self.gen_opt.load_state_dict(copy.deepcopy(parent_opt))
self.gen_opt.zero_grad()
else:
self.gen_adv_opt.load_state_dict(copy.deepcopy(parent_opt))
self.gen_adv_opt.zero_grad()
    def _run(self):
        """Full CatGAN pipeline: optional classifier pre-training, MLE
        pre-training of each parent generator, then evolutionary
        adversarial training with periodic logging/checkpointing."""
        # ===Pre-train Classifier with real data===
        if cfg.use_clas_acc:
            self.log.info('Start training Classifier...')
            self.train_classifier(cfg.PRE_clas_epoch)
        # ===Pre-train Generator===
        if not cfg.gen_pretrain:
            for i, (parent, parent_opt) in enumerate(zip(self.parents, self.parent_mle_opts)):
                self.log.info('Starting Generator-{} MLE Training...'.format(i))
                self.load_gen(parent, parent_opt, mle=True)  # load state dict
                self.pretrain_generator(cfg.MLE_train_epoch)
                self.parents[i] = copy.deepcopy(self.gen.state_dict())  # save state dict
                if cfg.if_save and not cfg.if_test:
                    torch.save(self.gen.state_dict(), cfg.pretrained_gen_path + '%d' % i)
                    self.log.info('Save pre-trained generator: {}'.format(cfg.pretrained_gen_path + '%d' % i))
        # ===Adv-train===
        progress = tqdm(range(cfg.ADV_train_epoch))
        for adv_epoch in progress:
            # Evolve the generator population (with or without temperature
            # mutation), then the shared discriminator.
            if cfg.temperature == 1:
                score, fit_score, select_mu = self.evolve_generator(cfg.ADV_g_step)
            else:  # evolve with temperature
                score, fit_score, select_mu = self.evolve_generator_with_temp(adv_epoch, cfg.ADV_g_step)
            d_loss = self.evolve_discriminator(cfg.ADV_d_step)
            best_id = int(np.argmax(score))
            progress.set_description('mu: %s, d_loss = %.4f, temp = %.4f' % (
                ' '.join(select_mu), d_loss, self.parents[best_id]['temperature'].item()))
            # ===Test===
            if adv_epoch % cfg.adv_log_step == 0 or adv_epoch == cfg.ADV_train_epoch - 1:
                # Load the best surviving parent for evaluation/saving.
                best_id = int(np.argmax(score))
                self.load_gen(self.parents[best_id], self.parent_adv_opts[best_id])
                self.log.info('[ADV] epoch %d: temp = %.4f, d_loss: %.4f, %s' % (
                    adv_epoch, self.gen.temperature.item(), d_loss, self.comb_metrics(fmt_str=True)))
                if cfg.if_save and not cfg.if_test:
                    for label_i in range(cfg.k_label):
                        self._save('ADV', adv_epoch, label_i)
def _test(self):
self.log.debug('>>> Begin test...')
self._run()
pass
    def pretrain_generator(self, epochs):
        """
        Max Likelihood Pre-training for the generator
        """
        for epoch in range(epochs):
            # ===Train===
            pre_loss = self.train_gen_epoch(self.gen, self.all_train_data.loader, self.mle_criterion, self.gen_opt)
            # ===Test===
            # Log (and optionally checkpoint per label) every
            # cfg.pre_log_step epochs and on the final epoch.
            if epoch % cfg.pre_log_step == 0 or epoch == epochs - 1:
                self.log.info(
                    '[MLE-GEN] epoch %d : pre_loss = %.4f, %s' % (
                        epoch, pre_loss, self.comb_metrics(fmt_str=True)))
                if not cfg.if_test and cfg.if_save:
                    for label_i in range(cfg.k_label):
                        self._save('MLE', epoch, label_i)
    def evolve_generator(self, evo_g_step):
        """One evolutionary step over the generator population.

        For every (parent, mutation loss) pair: load the parent, train it
        for evo_g_step steps with that loss (variation), score the child
        (evaluation), and keep the cfg.n_parent best children (selection).

        Returns:
            (best_score, best_fit, selected_mutation): scores of the
            surviving children, their [Fq, Fd, score] fitness entries,
            and the loss-mode name that produced each survivor.
        """
        # evaluation real data
        self.prepare_eval_real_data()
        best_score = np.zeros(cfg.n_parent)
        best_fit = []
        best_child = []
        best_child_opt = []
        best_fake_samples = []
        selected_mutation = []
        count = 0
        # all child share the same real data output from Discriminator
        with torch.no_grad():
            real_samples = [F.one_hot(self.train_data_list[i].random_batch()['target'], cfg.vocab_size).float()
                            for i in range(cfg.k_label)]
            if cfg.CUDA:
                real_samples = [real_samples[i].cuda() for i in range(cfg.k_label)]
            self.d_out_real = [self.dis(real_samples[i]) for i in range(cfg.k_label)]  # d_out_real for each label
        for i, (parent, parent_opt) in enumerate(zip(self.parents, self.parent_adv_opts)):
            for j, criterionG in enumerate(self.G_criterion):
                # Variation
                self.load_gen(parent, parent_opt)  # load state dict to self.gen
                self.variation(evo_g_step, criterionG)
                # Evaluation
                self.prepare_eval_fake_data()  # evaluation fake data
                Fq, Fd, score = self.evaluation(cfg.eval_type)
                # Selection
                if count < cfg.n_parent:
                    # Population not full yet: accept unconditionally.
                    best_score[count] = score
                    best_fit.append([Fq, Fd, score])
                    best_child.append(copy.deepcopy(self.gen.state_dict()))
                    best_child_opt.append(copy.deepcopy(self.gen_adv_opt.state_dict()))
                    best_fake_samples.append(self.eval_fake_samples)
                    selected_mutation.append(criterionG.loss_mode)
                else:  # larger than previous child, replace it
                    # Replace the incumbent this child improves on most.
                    fit_com = score - best_score
                    if max(fit_com) > 0:
                        id_replace = np.where(fit_com == max(fit_com))[0][0]
                        best_score[id_replace] = score
                        best_fit[id_replace] = [Fq, Fd, score]
                        best_child[id_replace] = copy.deepcopy(self.gen.state_dict())
                        best_child_opt[id_replace] = copy.deepcopy(self.gen_adv_opt.state_dict())
                        best_fake_samples[id_replace] = self.eval_fake_samples
                        selected_mutation[id_replace] = criterionG.loss_mode
                count += 1
        # The survivors become the next generation's parents.
        self.parents = copy.deepcopy(best_child)
        self.parent_adv_opts = copy.deepcopy(best_child_opt)
        self.best_fake_samples = best_fake_samples
        return best_score, np.array(best_fit), selected_mutation
    def evolve_generator_with_temp(self, cur_adv_step, evo_g_step):
        """Evolutionary generator step with an extra inner loop over
        candidate temperatures (get_evo_temp) for each mutation.

        Returns the same (best_score, best_fit, selected_mutation) triple
        as evolve_generator.
        """
        # evaluation real data
        self.prepare_eval_real_data()
        best_score = np.zeros(cfg.n_parent)
        best_fit = []
        best_child = []
        best_child_opt = []
        best_fake_samples = []
        selected_mutation = []
        count = 0
        # all children share the same real data output from Discriminator
        with torch.no_grad():
            real_samples = [F.one_hot(self.train_data_list[i].random_batch()['target'], cfg.vocab_size).float()
                            for i in range(cfg.k_label)]
            if cfg.CUDA:
                real_samples = [real_samples[i].cuda() for i in range(cfg.k_label)]
            self.d_out_real = [self.dis(real_samples[i]) for i in range(cfg.k_label)]  # d_out_real for each label
        for i, (parent, parent_opt) in enumerate(zip(self.parents, self.parent_adv_opts)):
            for j, criterionG in enumerate(self.G_criterion):
                all_temp = self.get_evo_temp(cur_adv_step)
                temp_score = float('-inf')
                temp_fit = None
                temp_child = None
                temp_child_opt = None
                temp_fake_samples = None
                # Selection based on temperature, use eval_type=nll
                for temp in all_temp:
                    # Variation
                    self.load_gen(parent, parent_opt)  # load state dict to self.gen
                    self.gen.temperature.data = temp
                    self.variation(evo_g_step, criterionG)
                    # Evaluation
                    self.prepare_eval_fake_data()  # evaluation fake data
                    _, _, t_score = self.evaluation('Ra')  # for temp evolutionary
                    loss_Fq, loss_Fd, loss_score = self.evaluation(cfg.eval_type)  # for loss evolutionary
                    # NOTE(review): the comparison uses t_score ('Ra') but
                    # temp_score is assigned loss_score (cfg.eval_type), so
                    # two different scales are being compared across
                    # iterations -- confirm against the reference
                    # implementation before changing.
                    if t_score > temp_score:
                        temp_score = loss_score
                        temp_fit = [loss_Fq, loss_Fd, loss_score]
                        temp_child = copy.deepcopy(self.gen.state_dict())
                        temp_child_opt = copy.deepcopy(self.gen_adv_opt.state_dict())
                        temp_fake_samples = copy.deepcopy(self.eval_fake_samples)
                # Selection based on mu_type, use eval_type=cfg.eval_type
                if count < cfg.n_parent:
                    best_score[count] = temp_score
                    best_fit.append(temp_fit)
                    best_child.append(temp_child)
                    best_child_opt.append(temp_child_opt)
                    best_fake_samples.append(temp_fake_samples)
                    selected_mutation.append(criterionG.loss_mode)
                else:  # larger than previous child, replace it
                    fit_com = temp_score - best_score
                    if max(fit_com) > 0:
                        id_replace = np.where(fit_com == max(fit_com))[0][0]
                        best_score[id_replace] = temp_score
                        best_fit[id_replace] = temp_fit
                        best_child[id_replace] = temp_child
                        best_child_opt[id_replace] = temp_child_opt
                        best_fake_samples[id_replace] = temp_fake_samples
                        selected_mutation[id_replace] = criterionG.loss_mode
                count += 1
        self.parents = copy.deepcopy(best_child)
        self.parent_adv_opts = copy.deepcopy(best_child_opt)
        self.best_fake_samples = best_fake_samples
        return best_score, np.array(best_fit), selected_mutation
    def evolve_discriminator(self, evo_d_step):
        """Train the discriminator for evo_d_step steps against the best
        fake samples kept from the generator evolution.

        Returns the mean discriminator loss, or 0 when evo_d_step == 0.
        """
        # NOTE(review): dc_loss and dd_loss are declared global but never
        # used here; d_loss also becomes readable at module level as a
        # side effect -- confirm nothing relies on that before cleaning up.
        global dc_loss, dd_loss, d_loss
        total_loss = []
        all_gen_samples_list = list(map(self.merge, *self.best_fake_samples))  # merge each label of data
        self.all_gen_samples_list = self.shuffle_eval_samples(all_gen_samples_list)  # shuffle data
        for step in range(evo_d_step):
            dis_real_samples, dis_gen_samples = self.prepare_train_data('D', step)
            d_loss = 0
            all_d_out_real = []
            all_d_out_fake = []
            # Sum the per-label real-vs-fake losses.
            for (real_samples, fake_samples) in zip(dis_real_samples, dis_gen_samples):  # for each label samples
                d_out_real = self.dis(real_samples)
                d_out_fake = self.dis(fake_samples)
                d_loss += self.D_criterion(d_out_real, d_out_fake)
                all_d_out_real.append(d_out_real.view(cfg.batch_size, -1))
                all_d_out_fake.append(d_out_fake.view(cfg.batch_size, -1))
            if cfg.use_all_real_fake:
                # Additionally score shuffled pools of all labels together.
                all_d_out_real = torch.cat(all_d_out_real, dim=0)
                all_d_out_fake = torch.cat(all_d_out_fake, dim=0)
                all_d_out_real = all_d_out_real[torch.randperm(all_d_out_real.size(0))]
                all_d_out_fake = all_d_out_fake[torch.randperm(all_d_out_fake.size(0))]
                d_loss += self.D_criterion(all_d_out_real, all_d_out_fake)
            self.optimize(self.dis_opt, d_loss, self.dis)
            total_loss.append(d_loss.item())
        if evo_d_step == 0:
            return 0
        return np.mean(total_loss)
    def variation(self, g_step, criterionG):
        """Optimize one child (Generator) for g_step adversarial steps
        using the mutation loss criterionG; returns the mean generator
        loss, or 0 when g_step == 0."""
        total_loss = []
        for step in range(g_step):
            dis_real_samples, dis_gen_samples = self.prepare_train_data('G')
            # ===Train===
            g_loss = 0
            all_d_out_real = []
            all_d_out_fake = []
            # for i, (real_samples, fake_samples) in enumerate(zip(dis_real_samples, dis_gen_samples)):
            # Real-side discriminator outputs are precomputed once in
            # evolve_generator* and shared by all children (self.d_out_real).
            for i, (d_out_real, fake_samples) in enumerate(zip(self.d_out_real, dis_gen_samples)):  # share real
                # d_out_real = self.dis(real_samples)
                d_out_fake = self.dis(fake_samples)
                g_loss += criterionG(d_out_real, d_out_fake)
                all_d_out_real.append(d_out_real.view(cfg.batch_size, -1))
                all_d_out_fake.append(d_out_fake.view(cfg.batch_size, -1))
            if cfg.use_all_real_fake:
                # Additionally score shuffled pools of all labels together.
                all_d_out_real = torch.cat(all_d_out_real, dim=0)
                all_d_out_fake = torch.cat(all_d_out_fake, dim=0)
                all_d_out_real = all_d_out_real[torch.randperm(all_d_out_real.size(0))]
                all_d_out_fake = all_d_out_fake[torch.randperm(all_d_out_fake.size(0))]
                g_loss += criterionG(all_d_out_real, all_d_out_fake)
            self.optimize(self.gen_adv_opt, g_loss, self.gen)
            total_loss.append(g_loss.item())
        if g_step == 0:
            return 0
        return np.mean(total_loss)
    def evaluation(self, eval_type):
        """Evaluation all children, update child score. Note that the eval data should be the same"""
        # Sample per-label fake data from the current generator.
        eval_samples = [self.gen.sample(cfg.eval_b_num * cfg.batch_size, cfg.max_bn * cfg.batch_size, label_i=i) for i
                        in range(cfg.k_label)]
        # Fd: diversity term, summed per-label NLL of the generator on its
        # own samples (skipped when lambda_fd == 0).
        if cfg.lambda_fd != 0:
            nll_div = []
            for label_i in range(cfg.k_label):
                gen_data = GenDataIter(eval_samples[label_i])
                nll_div.append(NLL.cal_nll_with_label(self.gen, gen_data.loader, label_i, self.mle_criterion))
            Fd = sum(nll_div)
        else:
            Fd = 0
        # Fq: quality term, either BLEU or the relativistic ('Ra') score.
        # NOTE(review): the 'Ra' branch reads self.eval_d_out_fake /
        # self.eval_d_out_real, which presumably are set by
        # prepare_eval_fake_data()/prepare_eval_real_data() -- confirm the
        # callers always run those first.
        if 'bleu' in eval_type:
            bleu_score = []
            for i in range(cfg.k_label):
                bleu_score.append(self.bleu[i].get_score(given_gram=int(eval_type[-1])))
            Fq = sum(bleu_score)
        elif 'Ra' in eval_type:
            g_loss = 0
            for i in range(cfg.k_label):
                g_loss += torch.sigmoid(self.eval_d_out_fake[i] - torch.mean(self.eval_d_out_real[i])).sum()
            Fq = g_loss.item()
        else:
            raise NotImplementedError("Evaluation '%s' is not implemented" % eval_type)
        # Overall fitness is a weighted sum of quality and diversity.
        score = cfg.lambda_fq * Fq + cfg.lambda_fd * Fd
        return Fq, Fd, score
def train_gen_epoch(self, model, data_loader, criterion, optimizer):
total_loss = 0
for i, data in enumerate(data_loader):
inp, target, label = data['input'], data['target'], data['label']
if cfg.CUDA:
inp, target, label = inp.cuda(), target.cuda(), label.cuda()
hidden = model.init_hidden(data_loader.batch_size)
pred = model.forward(inp, hidden, label)
loss = criterion(pred, target.view(-1))
self.optimize(optimizer, loss, model)
total_loss | |
# filename: atropos/commands/trim/modifiers.py
# coding: utf-8
"""This module implements all the read modifications that atropos supports.
A modifier must be callable. It is implemented as a function if no parameters
need to be stored, and as a class with a __call__ method if there are parameters
(or statistics).
"""
from collections import OrderedDict
import copy
import re
from atropos import AtroposError
from atropos.align import (
Aligner, InsertAligner, SEMIGLOBAL, START_WITHIN_SEQ1, STOP_WITHIN_SEQ2)
from atropos.util import (
BASE_COMPLEMENTS, reverse_complement, mean, quals2ints)
from .qualtrim import quality_trim_index, nextseq_trim_index
# Base classes
class Modifier(object):
    """Base class for all read modifiers."""

    @property
    def name(self):
        """The modifier's name (its class name)."""
        return type(self).__name__

    @property
    def description(self):
        """Human-readable description; the ``display_str`` attribute if
        set, otherwise :attr:`name`."""
        return getattr(self, 'display_str', self.name)

    def summarize(self):
        """Return a dict summarizing this modifier's activity."""
        return {}
class ReadPairModifier(Modifier):
    """Abstract base for modifiers that edit both reads of a pair at
    once. Subclasses must implement :meth:`__call__`.
    """

    def __call__(self, read1, read2):
        """Modify ``read1`` and ``read2`` together; must be overridden."""
        raise NotImplementedError()
class Trimmer(Modifier):
    """Base class of modifiers that remove bases from reads, keeping a
    running count of how many bases were trimmed.
    """

    def __init__(self):
        # Total number of bases removed by this trimmer.
        self.trimmed_bases = 0

    def __call__(self, read):
        raise NotImplementedError()

    def subseq(self, read, begin=0, end=None):
        """Return the subsequence read[begin:end], counting trimmed bases.

        Args:
            read: The read to trim.
            begin: Index of the first base to keep.
            end: One past the last base to keep, or None for len(read).
        """
        if not begin and end is None:
            # Nothing to trim; return the read untouched.
            return read
        front_bases, back_bases, new_read = read.subseq(begin, end)
        self.trimmed_bases += front_bases + back_bases
        return new_read

    def clip(self, read, front=0, back=0):
        """Return a read with bases clipped off either end, counting
        trimmed bases.

        Args:
            read: The read to trim.
            front: Number of bases to remove from the front.
            back: The (negative) number of bases to remove from the back.
        """
        if not (front or back) or len(read) == 0:
            # Nothing to clip; return the read untouched.
            return read
        front_bases, back_bases, new_read = read.clip(front, back)
        self.trimmed_bases += front_bases + back_bases
        return new_read

    def summarize(self):
        """Return a summary dict with the trimmed-base count."""
        return dict(bp_trimmed=self.trimmed_bases)
# Modifiers
class AdapterCutter(Modifier):
    """Repeatedly find one of multiple adapters in reads. The number of times
    the search is repeated is specified by the times parameter.
    Args:
        adapters: List of Adapter objects.
        times: Number of times to trim.
        action: What to do with a found adapter: None, 'trim', or 'mask'
    """
    def __init__(self, adapters=None, times=1, action='trim'):
        super(AdapterCutter, self).__init__()
        self.adapters = adapters or []
        self.times = times
        self.action = action
        # Count of reads in which at least one adapter was found.
        self.with_adapters = 0
    def _best_match(self, read):
        """Find the best matching adapter in the given read.
        Returns:
            Either a Match instance or None if there are no matches.
        """
        best = None
        for adapter in self.adapters:
            match = adapter.match_to(read)
            if match is None:
                continue
            # the no. of matches determines which adapter fits best
            if best is None or match.matches > best.matches:
                best = match
        return best
    def __call__(self, read):
        """Determine the adapter that best matches the given read.
        Since the best adapter is searched repeatedly, a list
        of Match instances is returned, which
        need to be applied consecutively to the read.
        The list is empty if there are no adapter matches.
        The read is converted to uppercase before it is compared to the adapter
        sequences.
        Cut found adapters from a single read. Return modified read.
        """
        if len(read) == 0:
            return read
        matches = []
        # try at most self.times times to remove an adapter
        trimmed_read = read
        for _ in range(self.times):
            match = self._best_match(trimmed_read)
            if match is None:
                # nothing found
                break
            matches.append(match)
            trimmed_read = match.adapter.trimmed(match)
        if not matches:
            trimmed_read.match = None
            trimmed_read.match_info = None
            return trimmed_read
        if __debug__:
            assert len(trimmed_read) < len(read), \
                "Trimmed read isn't shorter than original"
        if self.action == 'trim':
            # read is already trimmed, nothing to do
            pass
        elif self.action == 'mask':
            # add N from last modification
            # Rebuild the full-length sequence, replacing each trimmed
            # region with Ns (processed back-to-front by match position).
            masked_sequence = trimmed_read.sequence
            for match in sorted(matches, reverse=True, key=lambda m: m.astart):
                nstr = 'N' * (
                    len(match.read.sequence) -
                    len(match.adapter.trimmed(match).sequence))
                # add N depending on match position
                if match.front:
                    masked_sequence = nstr + masked_sequence
                else:
                    masked_sequence += nstr
            # set masked sequence as sequence with original quality
            trimmed_read.sequence = masked_sequence
            trimmed_read.qualities = matches[0].read.qualities
            assert len(trimmed_read.sequence) == len(read)
        elif self.action is None:
            # Record the matches but return the read untrimmed.
            trimmed_read = read
        trimmed_read.match = matches[-1]
        trimmed_read.match_info = [match.get_info_record() for match in matches]
        self.with_adapters += 1
        return trimmed_read
    def summarize(self):
        """Return per-adapter summaries plus the matched-read count."""
        adapters_summary = OrderedDict()
        for adapter in self.adapters:
            adapters_summary[adapter.name] = adapter.summarize()
        return dict(
            records_with_adapters=self.with_adapters,
            adapters=adapters_summary)
# Other error correction approaches:
# https://www.ncbi.nlm.nih.gov/pubmed/25161220
# https://github.com/Malfoy/BCOOL
class ErrorCorrectorMixin(object):
    """Provides a method for error correction.
    Args:
        mismatch_action: The action to take when a mismatch between the
            overlapping portions of read1 and read2 is encountered. Valid
            values are 'liberal', 'conservative', 'N'.
        min_qual_difference: When mismatch_action=='conservative', the minimum
            difference in base quality between read1 and read2 required to
            perform the correction.
    """
    def __init__(self, mismatch_action=None, min_qual_difference=1):
        # 'liberal', 'conservative', 'N', or None (no correction).
        self.mismatch_action = mismatch_action
        # Quality-difference thresholds: positive for overwriting read2
        # from read1, negative for the reverse direction.
        self.r1r2_min_qual_difference = min_qual_difference
        self.r2r1_min_qual_difference = -1 * min_qual_difference
        # Statistics: number of corrected pairs, and corrected bases
        # counted separately for [read1, read2].
        self.corrected_pairs = 0
        self.corrected_bp = [0, 0]
    def correct_errors(self, read1, read2, insert_match, truncate_seqs=False):
        """Correct errors in overlapping reads.

        read2's reverse-complement serves as the reference against which
        read1 is compared; disagreements in the overlap are resolved
        according to ``self.mismatch_action`` ('N', 'liberal' or
        'conservative'). Corrected reads are modified in place and the
        running counters (corrected_pairs, corrected_bp) are updated.

        Args:
            read1: The read1 to correct.
            read2: The read2 to correct.
            insert_match: The match info telling where the reads overlap.
            truncate_seqs: Whether to truncate the sequences to equal size
                before correcting. This is necessary when the insert match is
                based on truncated sequences (e.g. when it was generated by
                InsertAligner).
        """
        # Do not attempt to correct an already corrected read
        if read1.corrected > 0 or read2.corrected > 0:
            return
        # read2 reverse-complement is the reference, read1 is the query
        r1_seq = list(read1.sequence)
        r2_seq = list(read2.sequence)
        len1 = len(r1_seq)
        len2 = len(r2_seq)
        # Quality-based actions require qualities on both reads.
        has_quals = read1.qualities and read2.qualities
        if has_quals:
            r1_qual = list(read1.qualities)
            r2_qual = list(read2.qualities)
        elif self.mismatch_action in ('liberal', 'conservative'):
            raise ValueError(
                "Cannot perform quality-based error correction on reads "
                "lacking quality information")
        if truncate_seqs:
            # Trim the longer read down to the shorter one's length so the
            # coordinates in insert_match line up; the cut-off tail is
            # re-attached by update_read() below.
            if len1 > len2:
                r1_seq = r1_seq[:len2]
                if has_quals:
                    r1_qual = r1_qual[:len2]
            elif len2 > len1:
                r2_seq = r2_seq[:len1]
                if has_quals:
                    r2_qual = r2_qual[:len1]
                len2 = len1
        # NOTE(review): insert_match[2:4] appear to be read1's overlap
        # start/end, and insert_match[0:2] read2's coordinates measured
        # from the read end -- confirm against the aligner producing it.
        r1_start = insert_match[2]
        r1_end = insert_match[3]
        r1_changed = 0
        r2_start = len2 - insert_match[1]
        r2_end = len2 - insert_match[0]
        r2_changed = 0
        quals_equal = []
        # Walk read1 forward and read2 backward so position j in read2 is
        # the mate of position i in read1.
        for i, j in zip(
                range(r1_start, r1_end), range(r2_end - 1, r2_start - 1, -1)):
            base1 = r1_seq[i]
            base2 = BASE_COMPLEMENTS[r2_seq[j]]
            if base1 == base2:
                continue
            if self.mismatch_action == 'N':
                # Mask every disagreement in both reads.
                r1_seq[i] = 'N'
                r2_seq[j] = 'N'
                r1_changed += 1
                r2_changed += 1
            elif base1 == 'N':
                # read1 has no call; copy read2's base (and quality) over.
                r1_seq[i] = base2
                if has_quals:
                    r1_qual[i] = r2_qual[j]
                r1_changed += 1
            elif base2 == 'N':
                # read2 has no call; copy read1's base (and quality) over.
                r2_seq[j] = BASE_COMPLEMENTS[base1]
                if has_quals:
                    r2_qual[j] = r1_qual[i]
                r2_changed += 1
            elif has_quals:
                # Real disagreement: let the higher-quality base win when
                # the difference reaches the configured threshold.
                diff = ord(r1_qual[i]) - ord(r2_qual[j])
                if diff >= self.r1r2_min_qual_difference:
                    r2_seq[j] = BASE_COMPLEMENTS[base1]
                    r2_qual[j] = r1_qual[i]
                    r2_changed += 1
                elif diff <= self.r2r1_min_qual_difference:
                    r1_seq[i] = base2
                    r1_qual[i] = r2_qual[j]
                    r1_changed += 1
                elif self.mismatch_action == 'liberal':
                    # Qualities too close to call; defer the decision until
                    # the reads' overall overlap quality can be compared.
                    quals_equal.append((i, j, base1, base2))
        if quals_equal:
            mean_qual1 = mean([ord(b) for b in r1_qual[r1_start:r1_end]])
            mean_qual2 = mean([ord(b) for b in r2_qual[r2_start:r2_end]])
            # Only make the corrections if one read is significantly better
            # than the other.
            # TODO: this method of determining whether one read is better
            # than the other is crude - come up with something better.
            diff = mean_qual1 - mean_qual2
            if diff > 1:
                # read1 is better than read2
                for i, j, base1, base2 in quals_equal:
                    r2_seq[j] = BASE_COMPLEMENTS[base1]
                    r2_qual[j] = r1_qual[i]
                    r2_changed += 1
            elif diff < -1:
                # read2 is better than read1
                for i, j, base1, base2 in quals_equal:
                    r1_seq[i] = base2
                    r1_qual[i] = r2_qual[j]
                    r1_changed += 1
        if r1_changed or r2_changed:
            self.corrected_pairs += 1
        def update_read(read, seq, qual, seq_len, read_num, num_changed):
            # Write the corrected sequence/qualities back onto the read,
            # re-appending any tail sliced off when truncate_seqs was set.
            self.corrected_bp[read_num] += num_changed
            read.corrected = num_changed
            new_seq = ''.join(seq)
            partial = truncate_seqs and len(read.sequence) > seq_len
            if partial:
                read.sequence = new_seq + read.sequence[seq_len:]
            else:
                read.sequence = new_seq
            if has_quals:
                new_qual = ''.join(qual)
                if partial:
                    read.qualities = new_qual + read.qualities[seq_len:]
                else:
                    read.qualities = new_qual
        if r1_changed:
            update_read(
                read1, r1_seq, r1_qual if has_quals else None, len1, 0,
                r1_changed)
        if r2_changed:
            update_read(
                read2, r2_seq, r2_qual if has_quals else None, len2, 1,
                r2_changed)
def summarize(self):
"""Returns a summary dict.
"""
| |
"""
CCT 建模优化代码
GPU CUDA 加速 cctpy 束流跟踪
注意测试代码中的 ga32 和 ga64 定义为
ga32 = GPU_ACCELERATOR(float_number_type=GPU_ACCELERATOR.FLOAT32)
ga64 = GPU_ACCELERATOR(float_number_type=GPU_ACCELERATOR.FLOAT64,block_dim_x=512)
2021年6月17日 增加 CPU 模式
作者:赵润晓
日期:2021年5月4日
"""
# 是否采用 CPU 模式运行
from packages.beamline import Beamline
from packages.cct import CCT
from packages.magnets import *
from packages.particles import *
from packages.trajectory import Trajectory
from packages.line2s import *
from packages.local_coordinate_system import LocalCoordinateSystem
from packages.base_utils import BaseUtils
from packages.constants import *
from packages.point import *
import warnings # since v0.1.1 提醒方法过时
from scipy.integrate import solve_ivp # since v0.1.1 ODE45
import numpy
import os # since v0.1.1 查看CPU核心数
import sys
import random # since v0.1.1 随机数
import math
import matplotlib.pyplot as plt
from typing import Callable, Dict, Generic, Iterable, List, NoReturn, Optional, Tuple, TypeVar, Union
import time # since v0.1.1 统计计算时长
import multiprocessing # since v0.1.1 多线程计算
# Global CPU-mode flag: flipped to True when pycuda cannot be imported,
# in which case "GPU" computations fall back to a CPU implementation.
__CPU_MODE__: bool = False
try:
    # pycuda.autoinit creates a CUDA context on import; drv and
    # SourceModule are used for memory transfers and kernel compilation.
    import pycuda.autoinit
    import pycuda.driver as drv
    from pycuda.compiler import SourceModule
except ModuleNotFoundError as e:
    # pycuda not installed -- run everything in CPU mode instead.
    __CPU_MODE__ = True
class GPU_ACCELERATOR:
# CUDA 浮点数类型,可以选择 32 位和 64 位浮点数,前者计算速度快,精度较低
# 使用 32 位浮点数计算时,典型误差为 0.05 mm 和 0.01 mrad
FLOAT32: str = "FLOAT32"
FLOAT64: str = "FLOAT64"
QS_DATA_LENGTH = 16
def __init__(self,
float_number_type: str = FLOAT32,
block_dim_x: int = 1024,
# 电流元数目最多 2000*120,如果 120 段 1匝,匝最多2000匝
max_current_element_number: int = 2000*120,
max_qs_datas_length: int = 10000, # 最多 10000 个 qs
cpu_mode: bool = False
) -> None:
"""
启动一个 GPU 加速器,用于加速 cctpy 束线的粒子跟踪
还有一些其他功能,效率不高,仅用作测试
float_number_type 浮点数类型,取值为 FLOAT32 或 FLOAT64,即 32 位运行或 64 位,默认 32 位。
64 位浮点数精度更高,但是计算的速度可能比 32 位慢 2-10 倍
block_dim_x 块线程数目,默认 1024 个,必须是 2 的幂次。如果采用 64 位浮点数,取 1024 可能会报错,应取 512 或更低
不同大小的 block_dim_x,可能对计算效率有影响
在抽象上,GPU 分为若干线程块,每个块内有若干线程
块内线程,可以使用 __shared__ 使用共享内存(访问速度快),同时具有同步机制,因此可以方便的分工合作
块之间,没有同步机制,所以线程通讯无从谈起
max_current_element_number 最大电流元数目,在 GPU 加速中,CCT 数据以电流元的形式传入显存。
默认值 2000*120 (可以看作一共 2000 匝,每匝分 120 段)
max_qs_datas_length 最大 qs 磁铁数目,默认为 10000,取这么大考虑到切片
cpu_mode 采用 CPU 模式运行
"""
self.float_number_type = float_number_type
self.max_current_element_number = int(max_current_element_number)
self.max_qs_datas_length = int(max_qs_datas_length)
self.cpu_mode:bool = __CPU_MODE__ or cpu_mode # 只要两者中有一个为 True 则采用 cpu 模式
if __CPU_MODE__:
print("未安装 pycuda,GPU 加速功能将以 CPU 模式运行")
elif self.cpu_mode:
print("GPU 加速功能将以 CPU 模式运行")
# 检查 block_dim_x 合法性
if block_dim_x > 1024 or block_dim_x < 0:
raise ValueError(
f"block_dim_x 应 >=1 and <=1024 内取,不能是{block_dim_x}")
if block_dim_x & (block_dim_x-1) != 0:
raise ValueError(f"block_dim_x 应该取 2 的幂次,不能为{block_dim_x}")
self.block_dim_x: int = int(block_dim_x)
# 头文件导入
cuda_code_00_include = """
// 只导入 stdio,用于标准输出 printf() 函数
// CUDA 中的一些内置函数和方法,无需导入
#include <stdio.h>
"""
# 定义浮点类型
cuda_code_01_float_type_define: str = None
if float_number_type == GPU_ACCELERATOR.FLOAT32:
cuda_code_01_float_type_define = """
// 定义为 32 位浮点数模式
#define FLOAT32
"""
self.numpy_dtype = numpy.float32
elif float_number_type == GPU_ACCELERATOR.FLOAT64:
cuda_code_01_float_type_define = """
// 定义为 64 位浮点数模式
#define FLOAT64
"""
self.numpy_dtype = numpy.float64
if self.block_dim_x > 512:
print(f"当前 GPU 设置为 64 位模式,块线程数({self.block_dim_x})可能过多,内核可能无法启动\n" +
"典型异常为 pycuda._driver.LaunchError: cuLaunchKernel failed: too many resources requested for launch\n" +
"遇到此情况,可酌情调小块线程数")
else:
raise ValueError(
"float_number_type 必须是 GPU_ACCELERATOR.FLOAT32 或 GPU_ACCELERATOR.FLOAT64")
# 宏定义
# CUDA 代码和 C 语言几乎一模一样。只要有 C/C++ 基础,就能看懂 CUDA 代码
cuda_code_02_define = """
// 根据定义的浮点数模式,将 FLOAT 宏替换为 float 或 double
#ifdef FLOAT32
#define FLOAT float
#else
#define FLOAT double
#endif
// 维度 三维
#define DIM (3)
// 维度索引 0 1 2 表示 X Y Z,这样对一个数组取值,看起来清晰一些
#define X (0)
#define Y (1)
#define Z (2)
// 粒子参数索引 (px0, py1, pz2, vx3, vy4, vz5, rm6 相对质量, e7 电荷量, speed8 速率, distance9 运动距离)
#define PARTICLE_DIM (10)
#define PX (0)
#define PY (1)
#define PZ (2)
#define VX (3)
#define VY (4)
#define VZ (5)
#define RM (6)
#define E (7)
#define SPEED (8)
#define DISTANCE (9)
// 块线程数目
#define BLOCK_DIM_X ({block_dim_x})
#define QS_DATA_LENGTH (16)
#define MAX_CURRENT_ELEMENT_NUMBER ({max_current_element_number})
#define MAX_QS_DATAS_LENGTH ({max_qs_datas_length})
""".format(
block_dim_x=self.block_dim_x,
max_current_element_number=self.max_current_element_number,
max_qs_datas_length=self.max_qs_datas_length
)
# 向量运算内联函数
cuda_code_03_vct_functions = """
// 向量叉乘
// 传入 a b ret 三个数组,将 a × b 的结果传入 ret 中
// 仔细阅读具体实现,发现 ret 不能是 a 或者 b,这样会导致结果出错
__device__ __forceinline__ void vct_cross(FLOAT *a, FLOAT *b, FLOAT *ret) {
ret[X] = a[Y] * b[Z] - a[Z] * b[Y];
ret[Y] = -a[X] * b[Z] + a[Z] * b[X];
ret[Z] = a[X] * b[Y] - a[Y] * b[X];
}
// 向量原地加法
// 传入两个数组 a_local 和 b,将 a_local + b 的结果放入 a_local 中
__device__ __forceinline__ void vct_add_local(FLOAT *a_local, FLOAT *b) {
a_local[X] += b[X];
a_local[Y] += b[Y];
a_local[Z] += b[Z];
}
// 向量原地加法
// 函数意义同上,但是完成的是 6 维加法
__device__ __forceinline__ void vct6_add_local(FLOAT *a_local, FLOAT *b) {
a_local[X] += b[X];
a_local[Y] += b[Y];
a_local[Z] += b[Z];
a_local[X+DIM] += b[X+DIM];
a_local[Y+DIM] += b[Y+DIM];
a_local[Z+DIM] += b[Z+DIM];
}
// 向量加法
// 传入 a b ret 三个数组,将 a + b 的结果传入 ret 中
__device__ __forceinline__ void vct_add(FLOAT *a, FLOAT *b, FLOAT *ret) {
ret[X] = a[X] + b[X];
ret[Y] = a[Y] + b[Y];
ret[Z] = a[Z] + b[Z];
}
// 向量加法
// 函数意义同上,但是完成的是 6 维加法
__device__ __forceinline__ void vct6_add(FLOAT *a, FLOAT *b, FLOAT *ret) {
ret[X] = a[X] + b[X];
ret[Y] = a[Y] + b[Y];
ret[Z] = a[Z] + b[Z];
ret[X+DIM] = a[X+DIM] + b[X+DIM];
ret[Y+DIM] = a[Y+DIM] + b[Y+DIM];
ret[Z+DIM] = a[Z+DIM] + b[Z+DIM];
}
// 向量*常数,原地操作
__device__ __forceinline__ void vct_dot_a_v(FLOAT a, FLOAT *v) {
v[X] *= a;
v[Y] *= a;
v[Z] *= a;
}
// 向量*常数,原地操作。六维
__device__ __forceinline__ void vct6_dot_a_v(FLOAT a, FLOAT *v) {
v[X] *= a;
v[Y] *= a;
v[Z] *= a;
v[X+DIM] *= a;
v[Y+DIM] *= a;
v[Z+DIM] *= a;
}
// 向量*常数。结果写入 ret 中
__device__ __forceinline__ void vct_dot_a_v_ret(FLOAT a, FLOAT *v, FLOAT *ret) {
ret[X] = v[X] * a;
ret[Y] = v[Y] * a;
ret[Z] = v[Z] * a;
}
// 向量*常数。六维。结果写入 ret 中
__device__ __forceinline__ void vct6_dot_a_v_ret(FLOAT a, FLOAT *v, FLOAT *ret) {
ret[X] = v[X] * a;
ret[Y] = v[Y] * a;
ret[Z] = v[Z] * a;
ret[X+DIM] = v[X+DIM] * a;
ret[Y+DIM] = v[Y+DIM] * a;
ret[Z+DIM] = v[Z+DIM] * a;
}
// 向量内积,直接返回标量值
__device__ __forceinline__ FLOAT vct_dot_v_v(FLOAT *v,FLOAT *w){
return v[X] * w[X] + v[Y] * w[Y] + v[Z] * w[Z];
}
// 向量拷贝赋值,源 src,宿 des
__device__ __forceinline__ void vct_copy(FLOAT *src, FLOAT *des) {
des[X] = src[X];
des[Y] = src[Y];
des[Z] = src[Z];
}
// 向量拷贝赋值,六维,源 src,宿 des
__device__ __forceinline__ void vct6_copy(FLOAT *src, FLOAT *des) {
des[X] = src[X];
des[Y] = src[Y];
des[Z] = src[Z];
des[X+DIM] = src[X+DIM];
des[Y+DIM] = src[Y+DIM];
des[Z+DIM] = src[Z+DIM];
}
// 求向量长度,直接返回计算结果
__device__ __forceinline__ FLOAT vct_len(FLOAT *v) {
// 根据 32 位还是 64 位有不同的实现
#ifdef FLOAT32
return sqrtf(v[X] * v[X] + v[Y] * v[Y] + v[Z] * v[Z]);
#else
return sqrt(v[X] * v[X] + v[Y] * v[Y] + v[Z] * v[Z]);
#endif
}
// 将矢量 v 置为 0
__device__ __forceinline__ void vct_zero(FLOAT *v) {
v[X] = 0.0;
v[Y] = 0.0;
v[Z] = 0.0;
}
// 打印矢量,一般用于 debug
__device__ __forceinline__ void vct_print(FLOAT *v) {
#ifdef FLOAT32
printf("%.15f, %.15f, %.15f\\n", v[X], v[Y], v[Z]);
#else
printf("%.15lf, %.15lf, %.15lf\\n", v[X], v[Y], v[Z]);
#endif
}
// 打印六维矢量,一般用于 debug
__device__ __forceinline__ void vct6_print(FLOAT *v) {
#ifdef FLOAT32
printf("%.15f, %.15f, %.15f, %.15f, %.15f, %.15f\\n", v[X], v[Y], v[Z], v[X+DIM], v[Y+DIM], v[Z+DIM]);
#else
printf("%.15lf, %.15lf, %.15lf, %.15lf, %.15lf, %.15lf\\n", v[X], v[Y], v[Z] ,v[X+DIM], v[Y+DIM], v[Z+DIM]);
#endif
}
// 矢量减法,结果放在 ret 中
__device__ __forceinline__ void vct_sub(FLOAT *a, FLOAT *b, FLOAT *ret) {
ret[X] = a[X] - b[X];
ret[Y] = a[Y] - b[Y];
ret[Z] = a[Z] - b[Z];
}
"""
cuda_code_04_dB = """
// 计算电流元在 p 点产生的磁场
// 其中 p0 表示电流元的位置
// kl 含义见下
// 返回值放在 ret 中
//
// 原本电流元的计算公式如下:
// dB = (miu0/4pi) * Idl × r / (r^3)
// 其中 r = p - p0,p0 是电流元的位置
//
// 如果考虑极小一段电流(起点s0,终点s1)则产生的磁场为
// ΔB = (miu0/4pi) * I * (s1-s2)*r / (r^3)
// 同样的,r = p - p0,p0 = (s1+s2)/2
//
// 因为 (miu0/4pi) * I * (s1-s2) 整体已知,所以提前计算为 kl
// p0 提前已知,即 (s1+s2)/2,也提前给出
// 这样可以减少无意义的重复计算
//
// 补充:坐标均是全局坐标
__device__ __forceinline__ void dB(FLOAT *kl, FLOAT *p0, FLOAT *p, FLOAT *ret){
FLOAT r[DIM];
FLOAT rr;
vct_sub(p, p0, r); // r = p - p0
rr = vct_len(r); // rr = abs(r)
rr = rr*rr*rr; // rr = rr^3
vct_cross(kl, r, ret); // ret = kl × r
vct_dot_a_v(1.0/rr, ret); // ret = (kl × r)/(rr^3)
}
// 计算所有的电流元在 p 点产生的磁场
// number 表示电流元数目
// kls 每 DIM = 3 组表示一个 kl
// p0s 每 DIM = 3 组表示一个 p0
// shared_ret 是一个 shared 量,保存返回值
// 调用该方法后,应进行同步处理 __syncthreads();
__device__ void current_element_B(FLOAT *kls, FLOAT *p0s, int number, FLOAT *p, FLOAT *shared_ret){
int tid | |
<filename>heat/engine/resources/wait_condition.py
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import uuid
from heat.common import exception
from heat.common import identifier
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import scheduler
from heat.engine import signal_responder
from heat.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class BaseWaitConditionHandle(signal_responder.SignalResponder):
    '''
    Base WaitConditionHandle resource.
    The main point of this class is to :
    - have no dependencies (so the instance can reference it)
    - create credentials to allow for signalling from the instance.
    - handle signals from the instance, validate and store result
    '''
    properties_schema = {}

    # Valid values for the status field of an incoming signal.
    WAIT_STATUSES = (
        STATUS_FAILURE,
        STATUS_SUCCESS,
    ) = (
        'FAILURE',
        'SUCCESS',
    )

    def handle_create(self):
        super(BaseWaitConditionHandle, self).handle_create()
        self.resource_id_set(self._get_user_id())

    def _status_ok(self, status):
        # True if the signalled status is one of the recognised values.
        return status in self.WAIT_STATUSES

    def _metadata_format_ok(self, metadata):
        # Validate an incoming signal payload against the subclass's
        # METADATA_KEYS/STATUS definitions.
        # Guard against None (handle_signal's default) or any non-dict
        # payload, so a malformed signal raises the intended ValueError in
        # handle_signal instead of an AttributeError here.
        if not isinstance(metadata, dict):
            return False
        if sorted(metadata.keys()) == sorted(self.METADATA_KEYS):
            return self._status_ok(metadata[self.STATUS])
        return False

    def handle_signal(self, metadata=None):
        '''Validate the signal payload and merge it into the metadata.

        Returns a short 'status:... reason:...' string for logging, or
        raises ValueError if the payload fails validation.
        '''
        if not self._metadata_format_ok(metadata):
            LOG.error(_("Metadata failed validation for %s") % self.name)
            raise ValueError(_("Metadata format invalid"))
        rsrc_metadata = self.metadata_get(refresh=True)
        if metadata[self.UNIQUE_ID] in rsrc_metadata:
            LOG.warning(_("Overwriting Metadata item for id %s!")
                        % metadata[self.UNIQUE_ID])
        # Store everything except the unique id, which becomes the key.
        safe_metadata = dict((k, metadata[k])
                             for k in self.METADATA_KEYS
                             if k != self.UNIQUE_ID)
        rsrc_metadata.update({metadata[self.UNIQUE_ID]: safe_metadata})
        self.metadata_set(rsrc_metadata)
        return ('status:%s reason:%s' %
                (safe_metadata[self.STATUS],
                 safe_metadata[self.REASON]))
class HeatWaitConditionHandle(BaseWaitConditionHandle):
    # Heat-native wait condition handle: signalled through a
    # token-authenticated call to the Heat API (see the 'endpoint' and
    # 'curl_cli' attributes) rather than via a pre-signed URL.
    METADATA_KEYS = (
        DATA, REASON, STATUS, UNIQUE_ID
    ) = (
        'data', 'reason', 'status', 'id'
    )
    ATTRIBUTES = (
        TOKEN,
        ENDPOINT,
        CURL_CLI,
    ) = (
        'token',
        'endpoint',
        'curl_cli',
    )
    attributes_schema = {
        TOKEN: attributes.Schema(
            _('Token for stack-user which can be used for signalling handle'),
            cache_mode=attributes.Schema.CACHE_NONE
        ),
        ENDPOINT: attributes.Schema(
            _('Endpoint/url which can be used for signalling handle'),
            cache_mode=attributes.Schema.CACHE_NONE
        ),
        CURL_CLI: attributes.Schema(
            _('Convenience attribute, provides curl CLI command '
              'prefix, which can be used for signalling handle completion or '
              'failure. You can signal success by adding '
              '--data-binary \'{"status": "SUCCESS"}\' '
              ', or signal failure by adding '
              '--data-binary \'{"status": "FAILURE"}\''),
            cache_mode=attributes.Schema.CACHE_NONE
        ),
    }
    def handle_create(self):
        # Create a stack user and persist the password/token so the
        # signalling side can authenticate against the Heat API.
        password = uuid.uuid4().hex
        self.data_set('password', password, True)
        self._create_user()
        self.resource_id_set(self._get_user_id())
        # FIXME(shardy): The assumption here is that token expiry > timeout
        # but we probably need a check here to fail fast if that's not true
        # Also need to implement an update property, such that the handle
        # can be replaced on update which will replace the token
        token = self._user_token()
        self.data_set('token', token, True)
        self.data_set('endpoint', '%s/signal' % self._get_resource_endpoint())
    def _get_resource_endpoint(self):
        # Get the endpoint from stack.clients then replace the context
        # project_id with the path to the resource (which includes the
        # context project_id), then replace the context project with
        # the one needed for signalling from the stack_user_project
        heat_client_plugin = self.stack.clients.client_plugin('heat')
        endpoint = heat_client_plugin.get_heat_url()
        rsrc_ep = endpoint.replace(self.context.tenant_id,
                                   self.identifier().url_path())
        return rsrc_ep.replace(self.context.tenant_id,
                               self.stack.stack_user_project_id)
    def handle_delete(self):
        # Remove the stack user created in handle_create.
        self._delete_user()
    @property
    def password(self):
        return self.data().get('password')
    def _resolve_attribute(self, key):
        # Attributes only resolve once the stack user has been created
        # (i.e. resource_id is set); otherwise None is returned.
        if self.resource_id:
            if key == self.TOKEN:
                return self.data().get('token')
            elif key == self.ENDPOINT:
                return self.data().get('endpoint')
            elif key == self.CURL_CLI:
                # Construct curl command for template-author convenience
                return ('curl -i -X POST '
                        '-H \'X-Auth-Token: %(token)s\' '
                        '-H \'Content-Type: application/json\' '
                        '-H \'Accept: application/json\' '
                        '%(endpoint)s' %
                        dict(token=self.data().get('token'),
                             endpoint=self.data().get('endpoint')))
    def handle_signal(self, details=None):
        '''
        Validate and update the resource metadata.
        metadata is not mandatory, but if passed it must use the following
        format:
        {
            "status" : "Status (must be SUCCESS or FAILURE)",
            "data" : "Arbitrary data",
            "reason" : "Reason string"
        }
        Optionally "id" may also be specified, but if missing the index
        of the signal received will be used.
        '''
        rsrc_metadata = self.metadata_get(refresh=True)
        signal_num = len(rsrc_metadata) + 1
        reason = 'Signal %s received' % signal_num
        # Tolerate missing values, default to success
        metadata = details or {}
        metadata.setdefault(self.REASON, reason)
        metadata.setdefault(self.DATA, None)
        metadata.setdefault(self.UNIQUE_ID, signal_num)
        metadata.setdefault(self.STATUS, self.STATUS_SUCCESS)
        return super(HeatWaitConditionHandle, self).handle_signal(metadata)
class WaitConditionHandle(BaseWaitConditionHandle):
    '''
    AWS-compatible wait condition handle.

    This resource has no dependencies, so an instance can reference it.
    It generates a unique signed URL (returned as the reference) which
    cfn-signal posts to; the WaitCondition resource then polls the stored
    metadata to see whether anything has been written.
    '''
    METADATA_KEYS = (
        DATA, REASON, STATUS, UNIQUE_ID
    ) = (
        'Data', 'Reason', 'Status', 'UniqueId'
    )

    def handle_create(self):
        super(WaitConditionHandle, self).handle_create()
        self.resource_id_set(self._get_user_id())

    def FnGetRefId(self):
        '''Return the signed signalling URL instead of the resource id.'''
        if not self.resource_id:
            return unicode(self.name)
        signed = self._get_signed_url(
            signal_type=signal_responder.WAITCONDITION)
        return unicode(signed)

    def metadata_update(self, new_metadata=None):
        """DEPRECATED. Should use handle_signal instead."""
        self.handle_signal(details=new_metadata)

    def handle_signal(self, details=None):
        '''
        Validate and store the signal metadata, which must be a dict of
        the form:
        {
            "Status" : "Status (must be SUCCESS or FAILURE)",
            "UniqueId" : "Some ID, should be unique for Count>1",
            "Data" : "Arbitrary Data",
            "Reason" : "Reason String"
        }
        A missing payload is silently ignored.
        '''
        if details is None:
            return
        return super(WaitConditionHandle, self).handle_signal(details)
class UpdateWaitConditionHandle(WaitConditionHandle):
    '''
    A wait condition handle that is replaced on every update.

    Behaves exactly like a regular WaitConditionHandle, except that
    updating it clears all received signals and yields a brand-new
    handle. Signal senders must therefore be set up to re-send whenever
    the handle changes, which guarantees a new configuration has truly
    been rolled out once UPDATE COMPLETE is reached.
    '''
    def update(self, after, before=None, prev_resource=None):
        # Always force replacement rather than an in-place update.
        raise resource.UpdateReplace(self.name)
class WaitConditionFailure(exception.Error):
    '''Raised when the handle has received at least one FAILURE signal.'''
    def __init__(self, wait_condition, handle):
        # Aggregate every FAILURE reason reported to the handle.
        failure_reasons = handle.get_status_reason(handle.STATUS_FAILURE)
        super(WaitConditionFailure, self).__init__(
            ';'.join(failure_reasons))
class WaitConditionTimeout(exception.Error):
    '''Raised when too few success signals arrived before the timeout.'''
    def __init__(self, wait_condition, handle):
        success_reasons = handle.get_status_reason(handle.STATUS_SUCCESS)
        vals = {'len': len(success_reasons),
                'count': wait_condition.properties[wait_condition.COUNT]}
        if success_reasons:
            # Include the collected reasons when there are any.
            vals['reasons'] = ';'.join(success_reasons)
            message = (_('%(len)d of %(count)d received - %(reasons)s') % vals)
        else:
            message = (_('%(len)d of %(count)d received') % vals)
        super(WaitConditionTimeout, self).__init__(message)
class HeatWaitCondition(resource.Resource):
    # Heat-native wait condition: blocks stack progress until the
    # associated handle has received COUNT success signals, a failure
    # signal arrives, or the timeout expires.
    PROPERTIES = (
        HANDLE, TIMEOUT, COUNT,
    ) = (
        'handle', 'timeout', 'count',
    )
    ATTRIBUTES = (
        DATA,
    ) = (
        'data',
    )
    properties_schema = {
        HANDLE: properties.Schema(
            properties.Schema.STRING,
            _('A reference to the wait condition handle used to signal this '
              'wait condition.'),
            required=True
        ),
        TIMEOUT: properties.Schema(
            properties.Schema.NUMBER,
            _('The number of seconds to wait for the correct number of '
              'signals to arrive.'),
            required=True,
            constraints=[
                constraints.Range(1, 43200),
            ]
        ),
        COUNT: properties.Schema(
            properties.Schema.NUMBER,
            _('The number of success signals that must be received before '
              'the stack creation process continues.'),
            constraints=[
                constraints.Range(min=1),
            ],
            default=1,
            update_allowed=True
        ),
    }
    attributes_schema = {
        DATA: attributes.Schema(
            _('JSON serialized dict containing data associated with wait '
              'condition signals sent to the handle.'),
            cache_mode=attributes.Schema.CACHE_NONE
        ),
    }
    def __init__(self, name, definition, stack):
        super(HeatWaitCondition, self).__init__(name, definition, stack)
    def _get_handle_resource(self):
        # Resolve the handle property (a reference id) to the actual
        # handle resource object in this stack.
        return self.stack.resource_by_refid(self.properties[self.HANDLE])
    def _wait(self, handle):
        # Generator driven by scheduler.TaskRunner: yields until enough
        # success signals arrive; the runner throws scheduler.Timeout into
        # the yield when the configured timeout expires.
        while True:
            try:
                yield
            except scheduler.Timeout:
                timeout = WaitConditionTimeout(self, handle)
                LOG.info(_('%(name)s Timed out (%(timeout)s)')
                         % {'name': str(self), 'timeout': str(timeout)})
                raise timeout
            handle_status = handle.get_status()
            # Any non-SUCCESS status means a FAILURE signal was received.
            if any(s != handle.STATUS_SUCCESS for s in handle_status):
                failure = WaitConditionFailure(self, handle)
                LOG.info(_('%(name)s Failed (%(failure)s)')
                         % {'name': str(self), 'failure': str(failure)})
                raise failure
            if len(handle_status) >= self.properties[self.COUNT]:
                LOG.info(_("%s Succeeded") % str(self))
                return
    def handle_create(self):
        handle = self._get_handle_resource()
        runner = scheduler.TaskRunner(self._wait, handle)
        runner.start(timeout=float(self.properties[self.TIMEOUT]))
        return runner
    def check_create_complete(self, runner):
        return runner.step()
    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        # COUNT is update_allowed; re-resolve properties and wait again.
        if prop_diff:
            self.properties = json_snippet.properties(self.properties_schema,
                                                      self.context)
        handle = self._get_handle_resource()
        runner = scheduler.TaskRunner(self._wait, handle)
        runner.start(timeout=float(self.properties[self.TIMEOUT]))
        return runner
    def check_update_complete(self, runner):
        return runner.step()
    def handle_delete(self):
        # Clear any stored signals from the handle.
        handle = self._get_handle_resource()
        if handle:
            handle.metadata_set({})
    def _resolve_attribute(self, key):
        res = {}
        handle = self._get_handle_resource()
        if key == self.DATA:
            meta = handle.metadata_get(refresh=True)
            # Note, can't use a dict generator on python 2.6, hence:
            res = dict([(k, meta[k][handle.DATA]) for k in meta])
        LOG.debug('%(name)s.GetAtt(%(key)s) == %(res)s'
                  % {'name': self.name,
                     'key': key,
                     'res': res})
        return unicode(json.dumps(res))
class WaitCondition(HeatWaitCondition):
PROPERTIES = (
HANDLE, TIMEOUT, COUNT,
) = (
'Handle', 'Timeout', 'Count',
)
ATTRIBUTES = (
DATA,
) = (
'Data',
)
properties_schema = {
HANDLE: properties.Schema(
properties.Schema.STRING,
| |
== 3:
self.previewTile[i][j].Visible = False
j = 3
for i in range(0, 3):
self.previewTile[i] += [RichTile(0.75 * self.normalTileDimension)]
y = i * 0.75 * self.normalTileDimension
self.previewTile[i][j].Location = Point(x, y)
self.previewTile[i][j].BackColor = Color.Black
self.panelCreate.Controls.Add(self.previewTile[i][j])
if self.previewState[2] == 3:
self.previewTile[i][j].Visible = False
if self.previewState[2] == 4:
self.radio4x4.Checked = True
self.PopulatePreviewTiles()
# attach event after create gui
self.scrollZoom.ValueChanged += self.scrollZoomChanged
### populate Load panel
self.panelGames = LoadGamePanel()
self.panelGames.Location = Point(10, 10)
self.panelGames.Size = Size(270, self.panelLoad.Size.Height - 23 - 20)
self.panelGames.Anchor = AnchorStyles.Left | AnchorStyles.Top | AnchorStyles.Bottom
self.panelLoad.Controls.Add(self.panelGames)
for game in self.savedPuzzles:
b = SavedGameButton(game[0], game[2], game[3], game[1], game[4], game[5])
self.panelGames.AddButton(b)
self.buttonLoad = Button()
self.buttonLoad.BackColor = Color.White
self.buttonLoad.Text = "Load Puzzle"
self.buttonLoad.Font = Font("Verdana", 10)
self.buttonLoad.Size = Size(100, 23)
x = self.panelGames.Location.X
y = self.panelLoad.Size.Height - self.buttonLoad.Size.Height - 10
self.buttonLoad.Location = Point(x, y)
self.buttonLoad.Anchor = AnchorStyles.Left | AnchorStyles.Bottom
self.buttonLoad.Click += self.LoadGame
self.panelLoad.Controls.Add(self.buttonLoad)
### populate Options panel
self.labelTileSize = Label()
self.labelTileSize.AutoSize = True
self.labelTileSize.Font = Font("Verdana", 10, FontStyle.Bold)
self.labelTileSize.Text = "Tile Size"
self.labelTileSize.Location = Point(25, 15)
self.panelOptions.Controls.Add(self.labelTileSize)
self.radio50 = RadioButton()
self.radio50.Font = Font("Verdana", 10)
self.radio50.Text = "50%"
self.radio50.Size = Size(55, 20)
x = self.labelTileSize.Location.X + self.labelTileSize.Size.Width + 15
y = self.labelTileSize.Location.Y
self.radio50.Location = Point(x, y)
self.radio50.Checked = True
self.radio50.CheckedChanged += self.tileSizeChanged
self.panelOptions.Controls.Add(self.radio50)
self.radio75 = RadioButton()
self.radio75.Font = Font("Verdana", 10)
self.radio75.Text = "75%"
self.radio75.Size = Size(55, 20)
x = self.radio50.Location.X + self.radio50.Size.Width + 2
self.radio75.Location = Point(x, y)
self.radio75.CheckedChanged += self.tileSizeChanged
self.panelOptions.Controls.Add(self.radio75)
self.radio100 = RadioButton()
self.radio100.Font = Font("Verdana", 10)
self.radio100.Text = "100%"
self.radio100.Size = Size(65, 20)
x = self.radio75.Location.X + self.radio75.Size.Width + 2
self.radio100.Location = Point(x, y)
self.radio100.CheckedChanged += self.tileSizeChanged
self.panelOptions.Controls.Add(self.radio100)
self.checkCache = CheckBox()
self.checkCache.AutoSize = True
self.checkCache.Font = Font("Verdana", 10, FontStyle.Bold)
self.checkCache.Text = "Allow caching"
x = self.labelTileSize.Location.X
y = self.labelTileSize.Location.Y + self.labelTileSize.Size.Height + 20
self.checkCache.Location = Point(x, y)
if self.savedSettings[2] == True:
self.checkCache.Checked = True
self.checkCache.CheckedChanged += self.toggleCache
self.panelOptions.Controls.Add(self.checkCache)
self.buttonClearCache = Button()
self.buttonClearCache.BackColor = Color.White
self.buttonClearCache.AutoSize = True
self.buttonClearCache.Font = Font("Verdana", 10)
self.buttonClearCache.Text = "Clear Cache"
x = self.checkCache.Location.X + self.checkCache.Size.Width + 10
y -= 4
self.buttonClearCache.Location = Point(x, y)
self.buttonClearCache.Click += self.buttonClearCacheClicked
self.panelOptions.Controls.Add(self.buttonClearCache)
### populate About panel
text = Label()
text.Text = "<NAME>"
text.Text += "\<EMAIL>"
text.Text += "\n"
text.Text += "\nwritten in IronPython on .NET 2.0"
text.Text += "\ntiles served by Virtual Earth"
text.Text += "\n"
text.Text += "\nVirtual Earth terms of use:"
text.Text += "\nwww.microsoft.com/virtualearth/control/terms.mspx"
text.Text += "\n"
text.Text += "\nimage providers:"
text.Text += "\nlocal.live.com/Help/en-us/Credits.htm"
text.TextAlign = ContentAlignment.MiddleCenter
text.Size = self.panelAbout.Size
text.Font = Font("Verdana", 10)
text.Anchor = AnchorStyles.Top | AnchorStyles.Left | AnchorStyles.Bottom | AnchorStyles.Right
self.panelAbout.Controls.Add(text)
def ResizeEverything(self, tileDim):
self.tileDimension = self.normalTileDimension = tileDim
if self.gridDimension == 4:
self.tileDimension = 0.75 * self.tileDimension
w = self.gridDimension * self.tileDimension + 2 * self.SIDE_BORDER + self.menu.Size.Width
h = self.gridDimension * self.tileDimension + self.TOP_BORDER + self.BOTTOM_BORDER
self.ClientSize = Size(w, h)
self.wllPicture.Location = Point(self.ClientSize.Width - self.wllPicture.Size.Width, self.ClientSize.Height - self.wllPicture.Size.Height)
for panel in self.panels:
w = self.gridDimension * self.tileDimension
panel.Size = Size(w, w)
for i in range(0, self.gridDimension):
for j in range(0, self.gridDimension):
if not self.grid[i][j] is None:
self.grid[i][j].Size = Size(self.tileDimension, self.tileDimension)
self.grid[i][j].Location = self.GetLocation(i, j)
### resize preview tiles
if self.previewState[2] == 3:
tileDim = self.normalTileDimension
else:
tileDim = 0.75 * self.normalTileDimension
for i in range(0, 3):
for j in range(0, 3):
x = j * tileDim
y = i * tileDim
self.previewTile[i][j].Location = Point(x, y)
self.previewTile[i][j].Size = Size(tileDim, tileDim)
i = 3
y = 3 * 0.75 * self.normalTileDimension
for j in range(0, 4):
x = j * 0.75 * self.normalTileDimension
self.previewTile[i][j].Location = Point(x, y)
self.previewTile[i][j].Size = Size(tileDim, tileDim)
j = 3
for i in range(0, 3):
self.previewTile[i] += [RichTile(0.75 * self.normalTileDimension)]
y = i * 0.75 * self.normalTileDimension
self.previewTile[i][j].Location = Point(x, y)
self.previewTile[i][j].Size = Size(tileDim, tileDim)
self.linkShuffle.Size = Size(self.tileDimension, self.tileDimension)
self.linkShuffle.Location = self.GetLocation(self.gridDimension - 1, self.gridDimension - 1)
self.RelocateCaptionPlay(None, None)
self.RelocateCaptionCreate(None, None)
def RelocateCaptionPlay(self, sender, e):
self.captionPlay.Location = Point(self.panelBoard.Size.Width - self.captionPlay.Size.Width, 0)
def RelocateCaptionCreate(self, sender, e):
self.captionCreate.Location = Point(self.panelCreate.Size.Width - self.captionCreate.Size.Width, 0)
### EVENT HANDLERS ###
def formClosing(self, sender, e):
if self.exitFromMenu is True:
return
if self.gameInProgress is True:
dr = MessageBox.Show("Quit current game?", "", MessageBoxButtons.YesNo)
if dr == DialogResult.No:
e.Cancel = True
return
if self.clearCacheOnExit is True:
ClearCache()
self.SaveSettings()
def menuItemClicked(self, sender, e):
if sender is self.menuExit:
self.ExitGame()
return
for i in range(0, self.menu.Items.Count):
self.menu.Items[i].BackColor = Color.White
self.menu.Items[i].ForeColor = Color.Black
sender.BackColor = Color.CornflowerBlue
sender.ForeColor = Color.White
for i in range(0, len(self.panels)):
self.panels[i].Visible = False
if sender is self.menuPlay: self.panels[0].Visible = True
elif sender is self.menuCreate: self.panels[1].Visible = True
elif sender is self.menuLoad: self.panels[2].Visible = True
elif sender is self.menuOptions: self.panels[3].Visible = True
elif sender is self.menuAbout: self.panels[4].Visible = True
def mouseDown(self, sender, e):
    """Begin dragging a tile if it is adjacent to the empty grid cell.

    Records which direction the tile may slide (toward the hole) and the
    initial click position used by mouseMove/mouseUp.
    """
    if not self.gameInProgress:
        return
    x = sender.currentRow
    y = sender.currentCol
    # A tile can only move toward the single empty neighbouring cell.
    if x > 0 and self.grid[x - 1][y] is None:
        sender.direction = "up"
    elif x < self.gridDimension - 1 and self.grid[x + 1][y] is None:
        sender.direction = "down"
    elif y > 0 and self.grid[x][y - 1] is None:
        sender.direction = "left"
    elif y < self.gridDimension - 1 and self.grid[x][y + 1] is None:
        sender.direction = "right"
    if sender.direction is not None:
        sender.beingDragged = True
        sender.initialClick = e.Location
def mouseUp(self, sender, e):
    """Finish a tile drag: commit the slide or snap the tile back.

    A click with no movement slides the tile immediately; otherwise the
    slide commits only if the tile was dragged past the halfway point.

    BUG FIX: the original compared pixel coordinates and direction
    strings with ``is`` — identity comparison of ints/strings is
    implementation-dependent and fails for values outside the interned
    caches; ``==`` is used instead.
    """
    clicked_in_place = (
        sender.initialClick is not None
        and e.X == sender.initialClick.X
        and e.Y == sender.initialClick.Y
    )
    if clicked_in_place:
        # Pure click: perform the full slide at once.
        if sender.direction == "right":
            self.slideRight(sender)
        elif sender.direction == "left":
            self.slideLeft(sender)
        elif sender.direction == "up":
            self.slideUp(sender)
        else:
            self.slideDown(sender)
    elif sender.direction == "right":
        if sender.Location.X < (sender.currentCol + 0.5) * self.tileDimension:
            self.snapBack(sender)
        else:
            self.slideRight(sender)
    elif sender.direction == "left":
        if sender.Location.X > (sender.currentCol - 0.5) * self.tileDimension:
            # Consistency: use snapBack (Y is already the home row during
            # a horizontal drag, so this matches the old inline version).
            self.snapBack(sender)
        else:
            self.slideLeft(sender)
    elif sender.direction == "up":
        if sender.Location.Y > (sender.currentRow - 0.5) * self.tileDimension:
            self.snapBack(sender)
        else:
            self.slideUp(sender)
    elif sender.direction == "down":
        if sender.Location.Y < (sender.currentRow + 0.5) * self.tileDimension:
            self.snapBack(sender)
        else:
            self.slideDown(sender)
    sender.direction = None
    sender.beingDragged = False
    sender.initialClick = None
def mouseMove(self, sender, e):
    """Drag a tile, clamped to one tile-width along its allowed direction.

    BUG FIX: direction strings were compared with ``is``; ``==`` is the
    reliable equality test for strings.
    """
    x = sender.Location.X
    y = sender.Location.Y
    if sender.beingDragged:
        if sender.direction == "right":
            dX = e.X - sender.initialClick.X
            # Clamp between the home cell and the cell to the right.
            if sender.currentCol * self.tileDimension <= x + dX <= (sender.currentCol + 1) * self.tileDimension:
                x += dX
        elif sender.direction == "left":
            dX = e.X - sender.initialClick.X
            if (sender.currentCol - 1) * self.tileDimension <= x + dX <= sender.currentCol * self.tileDimension:
                x += dX
        elif sender.direction == "up":
            dY = e.Y - sender.initialClick.Y
            if (sender.currentRow - 1) * self.tileDimension <= y + dY <= sender.currentRow * self.tileDimension:
                # Consistency: same `y += dY` form as the "down" branch
                # (the old spelled-out expression was equivalent).
                y += dY
        elif sender.direction == "down":
            dY = e.Y - sender.initialClick.Y
            if sender.currentRow * self.tileDimension <= y + dY <= (sender.currentRow + 1) * self.tileDimension:
                y += dY
        sender.Location = Point(x, y)
def snapBack(self, sender):
    """Return the tile to the pixel position of its current grid cell."""
    home_x = sender.currentCol * self.tileDimension
    home_y = sender.currentRow * self.tileDimension
    sender.Location = Point(home_x, home_y)
def slideLeft(self, sender):
    """Move the tile one cell left in the grid and re-check the win state."""
    row, col = sender.currentRow, sender.currentCol
    self.grid[row][col] = None
    col -= 1
    sender.currentCol = col
    self.grid[row][col] = sender
    sender.Location = Point(col * self.tileDimension, row * self.tileDimension)
    self.CheckGrid()
def slideRight(self, sender):
    """Move the tile one cell right in the grid and re-check the win state."""
    row, col = sender.currentRow, sender.currentCol
    self.grid[row][col] = None
    col += 1
    sender.currentCol = col
    self.grid[row][col] = sender
    sender.Location = Point(col * self.tileDimension, row * self.tileDimension)
    self.CheckGrid()
def slideUp(self, sender):
    """Move the tile one cell up in the grid and re-check the win state."""
    row, col = sender.currentRow, sender.currentCol
    self.grid[row][col] = None
    row -= 1
    sender.currentRow = row
    self.grid[row][col] = sender
    sender.Location = Point(col * self.tileDimension, row * self.tileDimension)
    self.CheckGrid()
def slideDown(self, sender):
    """Move the tile one cell down in the grid and re-check the win state."""
    row, col = sender.currentRow, sender.currentCol
    self.grid[row][col] = None
    row += 1
    sender.currentRow = row
    self.grid[row][col] = sender
    sender.Location = Point(col * self.tileDimension, row * self.tileDimension)
    self.CheckGrid()
def keyDown(self, sender, e):
    """Route key presses to whichever panel is active (game or creator)."""
    if self.panels[0].Visible:
        self.keyDownForGame(sender, e)
    elif self.panels[1].Visible:
        self.keyDownForCreate(sender, e)
def keyDownForGame(self, sender, e):
    """Slide the tile adjacent to the empty cell in the arrow-key direction.

    E.g. pressing Left moves the tile to the RIGHT of the hole into it.
    The four near-identical scan loops of the original are collapsed into
    one hole lookup plus a guarded dispatch; a key does nothing when the
    hole sits on the corresponding edge (same as before).
    """
    if not self.gameInProgress:
        return
    hole = self._findEmptyCell()
    if hole is None:
        return
    i, j = hole
    last = self.gridDimension - 1
    if e.KeyCode == Keys.Left and j < last:
        self.slideLeft(self.grid[i][j + 1])
    elif e.KeyCode == Keys.Right and j > 0:
        self.slideRight(self.grid[i][j - 1])
    elif e.KeyCode == Keys.Up and i < last:
        self.slideUp(self.grid[i + 1][j])
    elif e.KeyCode == Keys.Down and i > 0:
        self.slideDown(self.grid[i - 1][j])

def _findEmptyCell(self):
    """Return (row, col) of the empty grid cell, or None if the grid is full."""
    for i in range(self.gridDimension):
        for j in range(self.gridDimension):
            if self.grid[i][j] is None:
                return (i, j)
    return None
def keyDownForCreate(self, sender, e):
    """Pan the creator preview with the arrow keys; swallow the key event."""
    e.SuppressKeyPress = True
    e.Handled = True
    pan_for_key = {
        Keys.Left: self.previewMoveLeft,
        Keys.Right: self.previewMoveRight,
        Keys.Up: self.previewMoveUp,
        Keys.Down: self.previewMoveDown,
    }
    pan = pan_for_key.get(e.KeyCode)
    if pan is not None:
        pan()
def arrowButtonMouseDown(self, sender, e):
    """Give an arrow button a pressed (darker) look while held down."""
    sender.BackColor = Color.SteelBlue
def arrowButtonMouseUp(self, sender, e):
    """Restore an arrow button's normal colour when released."""
    sender.BackColor = Color.White
def tileSizeChanged(self, sender, e):
    """Resize the whole UI when the tile-size radio selection changes.

    Two CheckedChanged events fire when the radio selection moves (one
    for the button losing the check, one for the button gaining it);
    only the newly-checked button's event is acted on.
    """
    if not sender.Checked:
        return
    # 256 px is the full-size tile; the radios select a scale factor.
    if self.radio50.Checked:
        self.ResizeEverything(0.50 * 256)
    elif self.radio75.Checked:
        self.ResizeEverything(0.75 * 256)
    elif self.radio100.Checked:
        self.ResizeEverything(256)
def previewTypeChanged(self, sender, e):
    """Reload every preview tile image for the newly selected map type."""
    # Renamed from `type`, which shadowed the builtin.
    map_type = self.comboType.SelectedItem[0].lower()
    for i in range(4):
        for j in range(4):
            self.previewTile[i][j].Image = GetImage(map_type, self.previewTile[i][j].quadkey)
    self.PopulateCaptionCreate()
def previewGridSizeChanged(self, sender, e):
    """Re-lay-out the preview tiles when the 3x3/4x4 radio selection changes."""
    # BUG FIX: the old guard was `if sender is False`, which can never be
    # true for a control. The intent — mirroring tileSizeChanged — is to
    # ignore the CheckedChanged event fired for the button that became
    # unchecked.
    if not sender.Checked:
        return
    is_3x3 = sender is self.radio3x3
    if is_3x3:
        tileDim = self.normalTileDimension
    else:
        tileDim = 0.75 * self.normalTileDimension
    # BUG FIX: the fourth row/column used to be made visible
    # unconditionally; it should only show in 4x4 mode.
    # NOTE(review): confirm nothing else relies on these staying visible
    # in 3x3 mode.
    for j in range(4):
        self.previewTile[3][j].Visible = not is_3x3
    for i in range(3):
        self.previewTile[i][3].Visible = not is_3x3
    # Reposition and resize the core 3x3 block at the new tile size.
    for i in range(3):
        for j in range(3):
            self.previewTile[i][j].Size = Size(tileDim, tileDim)
            self.previewTile[i][j].Location = Point(j * tileDim, i * tileDim)
    # previewState[2] records the active grid dimension.
    self.previewState[2] = 3 if is_3x3 else 4
def scrollZoomChanged(self, sender, e):
    # Handle a zoom-slider change: rescale the preview's stored tile
    # coordinates (previewState[0]/[1] — presumably row/col indices at
    # the current zoom level; previewState[3] is the level itself) and
    # refresh the preview tiles.
    oldLevel = self.previewState[3]
    newLevel = sender.Value
    deltaLevel = newLevel - oldLevel
    if deltaLevel > 0:
        #self.previewState[0] = self.previewState[0] * 2 + int(0.5 * (MaxTiles(newLevel) - MaxTiles(oldLevel)))
        #self.previewState[1] = self.previewState[1] * 2 + int(0.5 * (MaxTiles(newLevel) - MaxTiles(oldLevel)))
        # Zooming in: each map tile splits per level, so the coordinate
        # roughly doubles.
        # NOTE(review): `(x + 1) * 2 * deltaLevel - 1` only matches a
        # doubling for deltaLevel == 1; a multi-level jump should
        # presumably scale by 2**deltaLevel — confirm against the
        # slider's step size (it may only ever move one level at a time).
        self.previewState[0] = (self.previewState[0] + 1) * 2 * deltaLevel - 1
        self.previewState[1] = (self.previewState[1] + 1) * 2 * deltaLevel - 1
    elif deltaLevel < 0:
        # Zooming out: halve the coordinates (same multi-level caveat as
        # above).
        self.previewState[0] = (self.previewState[0]) / (-2 * deltaLevel)
        self.previewState[1] = (self.previewState[1]) / (-2 * deltaLevel)
    # NOTE(review): dividing by MaxTiles(newLevel) collapses the
    # coordinate toward 0 — this looks like it was meant to be a
    # wrap-around (`%`) to keep the index within the tile grid; verify
    # intent before changing.
    self.previewState[0] = self.previewState[0] / MaxTiles(newLevel)
    self.previewState[1] = self.previewState[1] / MaxTiles(newLevel)
    self.previewState[3] = newLevel
    self.PopulatePreviewTiles()
def previewMoveRight(self, sender=None, e=None):
    """Pan the preview one tile to the right.

    Shifts the images/metadata of columns 1-3 one column left, then
    fetches a fresh column of tiles for the right edge.
    """
    for i in range(4):
        for j in range(3):
            self.previewTile[i][j].Image = self.previewTile[i][j + 1].Image
            self.previewTile[i][j].quadkey = self.previewTile[i][j + 1].quadkey
            self.previewTile[i][j].tile = self.previewTile[i][j + 1].tile
    # Renamed from `type`, which shadowed the builtin.
    map_type = self.comboType.SelectedItem
    for i in range(4):
        # After the shift, column 2 holds what was the rightmost column,
        # so the new edge is one tile beyond it.
        tile = (self.previewTile[i][2].tile[0] + 1, self.previewTile[i][2].tile[1])
        quadkey = TileToQuadkey(tile, self.previewState[3])
        self.previewTile[i][3].Image = GetImage(map_type[0].lower(), quadkey)
        self.previewTile[i][3].quadkey = quadkey
        self.previewTile[i][3].tile = tile
def previewMoveLeft(self, sender=None, e=None):
    """Pan the preview one tile to the left.

    Shifts the images/metadata of columns 0-2 one column right, then
    fetches a fresh column of tiles for the left edge.
    """
    for i in range(4):
        for j in range(3, 0, -1):
            self.previewTile[i][j].Image = self.previewTile[i][j - 1].Image
            self.previewTile[i][j].quadkey = self.previewTile[i][j - 1].quadkey
            self.previewTile[i][j].tile = self.previewTile[i][j - 1].tile
    # Renamed from `type`, which shadowed the builtin.
    map_type = self.comboType.SelectedItem
    for i in range(4):
        # After the shift, column 1 holds what was the leftmost column,
        # so the new edge is one tile before it.
        tile = (self.previewTile[i][1].tile[0] - 1, self.previewTile[i][1].tile[1])
        quadkey = TileToQuadkey(tile, self.previewState[3])
        self.previewTile[i][0].Image = GetImage(map_type[0].lower(), quadkey)
        self.previewTile[i][0].quadkey = quadkey
        self.previewTile[i][0].tile = tile
def previewMoveDown(self, sender = None, e = None):
for i in range(0, 3):
for j in range(0, 4):
self.previewTile[i][j].Image = self.previewTile[i+1][j].Image
self.previewTile[i][j].quadkey = self.previewTile[i+1][j].quadkey
self.previewTile[i][j].tile = self.previewTile[i+1][j].tile
type = self.comboType.SelectedItem
for j in range(0, 4):
tile = (self.previewTile[3][j].tile[0], |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.